content stringlengths 5 1.05M |
|---|
#import this  # delete the first hashtag to see the truth
# String methods
# isalpha: True if the string is non-empty and contains only letters
print('hello'.isalpha())
print('hello1'.isalpha())
# isalnum: True if the string consists only of letters and digits
print('hello12'.isalnum())
print('hello'.isalnum())
# isdecimal: True if the string is non-empty and contains only decimal digits
print('123'.isdecimal())
print('hello'.isdecimal())
# isspace: True if the string is non-empty and contains only whitespace (spaces, tabs, newlines)
print(' '.isspace())
print('hello '.isspace())
# istitle: True if every word starts with an uppercase letter followed by lowercase
print('Hello World'.istitle())
print('Hello WORLD'.istitle())
# startswith / endswith check the beginning / end of a string
print('hello mama'.startswith('hello'))
print('hello mama'.endswith('mama'))
# join glues the list items together with a separator; split breaks a string into a list
print(', '.join(['cats', 'dogs', 'lions']))  # BUG FIX: original line was missing its closing parenthesis
print('my name is human'.split())
# split on a custom separator
print('myABCnameABCisABChuman'.split('ABC'))
# center (and rjust / ljust) pad a string — handy for building a simple text UI
print('hello'.center(20, '='))  # BUG FIX: original line was missing its closing parenthesis
# Deleting and inserting different stuff in a list
baba = ["mama", "papa", "babu"]
# The f prefix lets us embed variables inside {} braces in the string
message = f"my first kiss was with\n\t{baba[1].title()}"
print(message)
baba.insert(1, "jui")
print(baba)
del baba[1]
print(baba)
# Popping
poped_vary = baba.pop()  # removed the last element of the list
poped_vary1 = baba.pop(1)  # removed the element at index 1 (second position)
print(baba)
print(poped_vary)
print(poped_vary1)
print(f"I really like my {poped_vary1.title()}\n\t so much")
# remove — deletes the first matching value
kaag = ["mama", "papa", "babu", "kaitu"]
kaag.remove("papa")  # removed the first occurrence of "papa"
print(kaag)
kaag = ["mama", "papa", "babu", "kaitu"]
print(kaag)
x = "papa"
kaag.remove(x)
print(f"Well, sometimes some things happen, this is why i had to delete {x} , thats why i have now\n\t{kaag}")
# Some additional tasks
guests = ["robert", "Lusi", "Karl", "Mike", "Piggy"]
for i in guests:
    print(i)
print(f"\n{guests[3]} cant come, cause he is ill\n")
guests[3] = "Josh"
guests.append("Bill")
for i in guests:
    print(i)
print("\nNew Guests\n")
guests.insert(0, "Clare")
guests.insert(4, "Michael")
for i in guests:
    print(i)
print("\nremoving guests\n")
while (len(guests)) > 2:
    guests.pop()
    print(guests)
for i in guests:
    print(f"{i}, is still in list")
print("\nDeleting each other\n")
while len(guests) != 0:
    del guests[0]
    print(guests)
# sort
gadgets = ["telephone", "Tesla", "headphones", "fridge", "pip-boy"]
print(f"{gadgets} before sorting")
gadgets.sort()  # sorts in place, alphabetically
print(f"{gadgets} after sorting")
gadgets.sort(reverse=True)  # sorts in place, reversed order
print(f"{gadgets} after reverse sorting\n")
# sorted — returns a sorted copy without touching the original
gadgets = ["telephone", "Tesla", "headphones", "fridge", "pip-boy"]
print(f"{gadgets} this is an original list\n")
print(f"{sorted(gadgets)} aint an original list\n")  # sorted, but not saved anywhere
# reverse (rotates the list 180 degrees)
gadgets = ["telephone", "Tesla", "headphones", "fridge", "pip-boy"]
print(gadgets)
gadgets.reverse()  # reversed the list in place
print(f"{gadgets} this a reversed list\n")
# Some additional tasks
countries = ["UK", "New-Zeland", "America", "Russia", "Italy"]
print(countries)
print(sorted(countries))
print(countries)
print(sorted(countries, reverse=True))
print(countries)
countries.reverse()
print(countries)
countries.reverse()
print(countries)
countries.sort()
print(countries)
countries.sort(reverse=True)
print(countries)
count = ["1", "2", "3", "4"]
count.append("5")  # added an element at the end
count.insert(3, "10")
del count[1]
count.reverse()
count.sort()
count.pop()
count.remove("3")
print(count)
# for
magic = ["Love", "Hate", "Guns", "Drugs"]
for mag in magic:
    print(mag)
for mag in magic:
    print(f"{mag.title()} nice ")  # mag takes each list value in order
    print(f"mb we can have some fun, what u think, {mag.title()}?")
print("\nThats all for today folks !")
# range
for value in range(1, 5):
    print(value)  # prints 1..4 — the stop value 5 is excluded
print("\nAnother example of usage 'range'")
for value in range(6):
    print(value)  # prints 0..5
# Making a list from a range
numbers = list(range(1, 6))
print(numbers)
# Making a list with a step
numbers = list(range(2, 11, 2))  # starts at 2, steps by 2, stops before reaching 11
print(numbers)
# List of squared numbers from 1 to 10
squares = []
for value in range(1, 11):
    square = value ** 2  # ** is exponentiation: 2**3 == 8
    squares.append(square)
    # OR, in one step: squares.append(value**2)
print(squares)
print("\n min, max, sum")
# min, max, sum
numbers = list(range(-2, 15, 2))
x = min(numbers)  # smallest value in the list
print(x)
x = max(numbers)  # largest value
print(x)
x = sum(numbers)  # sum of a numeric list
print(x)
print("\n list generator ")
# list comprehension
squares = [value ** 2 for value in range(1, 11)]
print(squares)
# Some tasks from the book
#1
for i in range(1, 21):
    print(i)
#2
million = list(range(1, 100_000_1))
#3
print(max(million))
print(min(million))
print(sum(million))
#4
spisok = list(range(1, 21, 2))
for i in spisok:
    print(i)
#5
spisok = list(range(3, 31, 3))
for i in spisok:
    print(i)
#6
spisok = [value ** 3 for value in range(1, 11)]
print(spisok)
# Working with lists (slices)
players = ["Michael", "Lusiel", "Mary", "Comstock", "Booker"]
print(players[0:3])  # elements from index 0 up to (but not including) 3
print(players[1:4])
print(players[:4])  # from the start up to index 4 (exclusive)
print(players[2:])  # from index 2 to the end
print(players[-3:])  # the last three elements
# Looping over a slice
print("Here will be first 3 players\n")
for i in players[:3]:
    print(i)
# Copying lists and some experiments with them
play = players[:]  # [:] makes an independent copy of the list
print("\n")
print(play)
uno = ["case", "club", "money"]
des = uno[:]
uno.insert(2, "love")
des.append("Haram")
print(uno, des)
fir = ["uno", "des"]
sec = fir  # plain assignment ALIASES the same list — changes show through both names
fir.append("5")
sec.insert(0, "MAMA")
print("First list", fir, "\nSecond list", sec)
# Some additional tasks
lis = play[:]
print("First three elements are", lis[:3])
lis.append("sup")
lis.append("dub")
lis.append("har")
lis.append("bar")
print(lis)
print("Three elements in the middle are ", lis[3:6])
print("Three elements in the end are", lis[6:])
pizza = ["Margarita", "Pepperoni", "Diablo"]
fr_pizza = pizza[:]
pizza.append("Pineapple")
fr_pizza.append("lasagnia")
print("\nMy pizza:")
for i in pizza:
    print(i)
print("\nFriend's pizza:")
for i in fr_pizza:
    print(i)
# Tuples
dimension = (100, 500)
print(dimension[0], dimension[1])
dimens = (102000,)  # a one-element tuple needs the trailing comma
print(dimens[0])
for i in dimension:  # iteration works on tuples too
    print(i)
print("non-modified tuple", dimension)
dimension = (500, 1000)  # tuples are immutable, but the name can be rebound
print("Modified tuple", dimension, "\n")
dimens = (100, 200, 300, 400, 500)
for i in dimens:
    print(i)
# if
lis = ["junk", "dank", "horny"]
for i in lis:
    if i == "dank":  # when i equals "dank", take the first branch; otherwise the second
        print(i.upper())
    else:
        print(i.title())
x = "DANk"
if x.lower() == "dank":
    print("okay", x.lower())
# if with !=
x = "Mom"
if x != "mom":
    print("Nice")
# Check two conditions at once (and)
age1 = 20
age2 = 25
age = 15
if (age1 > age) and (age2 > age):
    print("These both guys are olded than age")
# Using or
age1 = 10
age2 = 15
age = 13
if age1 > age or age2 > age:
    print("well-well")
# Checking whether an item is in a list
lis = ["hello", "brolo", "hi", "bro"]
k = "hi"
if k in lis:
    print("He is in the list")
# Checking the absence of an item
dis = lis[:]
k = "bor"
if k not in dis:
    print("He is not in list")
# Some tasks
li = [1, 2, 3, 4]
bi = 4
if bi in li:
    print(101)
if 5 not in li:
    print("drake")
k = "GORGe"
lis = ["gorge", "floyd"]
if k.lower() in lis:
    print("Hell yeah")
# if-else
age = 17
if age == 15:  # check whether the variable equals 15 (it doesn't)
    print("Nice")
else:  # when the if condition fails, Python falls through to else
    print("change your value")
# if-elif-else
price = 25
price1 = 40
age = int(input())
if age <= 4:
    print("free entrance")
elif age > 4 and age < 18:
    print("pay", price)
else:
    print("pay", price1)
# Combos with elif — only the first true branch runs
age = 12
if age > 20:
    print("1")
elif age > 15:
    print("2")
elif age > 10:
    print("3")
# Extra tasks
color = "yellow"  # change this value to change the outcome
if color == "green":
    print("hey pal, you got 5 points\n")
else:
    print("Yo, you've just got 10 points!\n")
if color == "green":
    print("strike with the 5 points!")
elif color.title() == "Yellow":
    print("MMM, 10 points")
elif color == "red":
    print("you've got now 15 points, hell yeah")
else:
    print("thats loss")
age = int(input())
if age < 2:
    print("baby")
elif age >= 2 and age < 4:
    print("Kinda child")
elif age >= 4 and age < 13:
    print("Teenager")
elif age >= 13 and age < 20:
    print("MMMM, AMATEUR")
elif age >= 20 and age < 65:
    print("an adult")
else:
    print("You live in russia, you should be dead right now")
# Using if with lists
requested_top = ["mushrooms", "green pepper", "Chili peppers", "onions"]
for request in requested_top:
    print(request, "Some adds")
print("Your pizza is done")
for request in requested_top:
    if request != "green pepper":
        print("Making your pizza...")
    else:
        print("Oh shit i am sorry, your pizza will have a lack of a pack with green peppers")
requested_topp = []  # an empty list is falsy, so the if branch below is skipped
if requested_topp:
    for request in requested_topp:
        print("mmm, nice ingredient, m'am", request)
    print("Well, your pizza is ready to be consumed!\n")
else:
    print("Strange, but your pizza is plain. You sure you want that one ?\n")
# Multiple lists
available_topping = ["Mushrooms", "Ketchup", "Onions", "lasagna"]
requested_topp = ["Bikmakbetov", "mushrooms", "onions"]
for request in requested_topp:
    if request.title() in available_topping:
        print(f"Adding some {request}")
    else:
        print("sorry, we dont have", request)
# Additional tasks
##1
users = ["Jake", "Robert", "Falcon", "admin"]
if users:
    for user in users:
        if user.title() == "Admin":
            print("welcome home, king")
        else:
            print(f"Hey, {user}")
##2
users = []
if users:
    print("hello world")
else:
    print("No users :(")
##3
us1 = ["Jake", "Harold", "Mick", "Angie", "Barnie"]
us2 = ["holt", "Kolt", "mick", "jake"]
for user in us2:
    if user.title() in us1 or user.lower() in us1 or user.upper() in us1:
        print(f"Sorry ,but {user} is already taken by the other person")
    else:
        print("Proceeding...")
##4
ls1 = [1, 2, 3]
ls2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for i in ls2:
    if i in ls1:
        if i == ls1[0]:
            print(f"{i}st")
        elif i == ls1[1]:
            print(f"{i}nd")
        else:
            print(f"{i}rd")
    else:
        print(f"{i}th")
# Dictionaries
# A dict maps keys to values: the key is left of ":", the value is right of it.
alien_0 = {"colour": "green", "points": 5}
print(alien_0["colour"])  # look a value up by its key
print(alien_0["points"])
new_points = alien_0["points"]
print("You've just earned", new_points, "points")
# Adding new key-value pairs
alien_0["x_position"] = 0
alien_0["y_position"] = 25
print(alien_0)
# Starting from an empty dict
alien_0 = {}
alien_0["Colour"] = "Blue"
alien_0["points"] = 10
print(alien_0)
# Changing a value in the dict
alien_0 = {"colour": "green"}
print(alien_0["colour"])
alien_0["colour"] = "Yellow"  # changed the value under "colour" from green to Yellow
print(alien_0["colour"])
alien_0 = {"x_position": 0, "y_position": 25, "speed": "medium"}
print(f"Position of the alien is {alien_0['x_position']} ")
print("An alien is moving rightwards")
print("Counting the size of the move to get that piece of shit")
if alien_0["speed"] == "slow":
    x_increment = 1
elif alien_0["speed"] == "medium":
    x_increment = 2
else:
    print("He is so fast, my god")
    x_increment = 3
alien_0["x_position"] = alien_0["x_position"] + x_increment
print(alien_0["x_position"])
# Deleting a key-value pair
alien_0 = {"colour": "green", "points": 20}
print(alien_0)
del alien_0["points"]
print(alien_0)
# A dict where several keys share the same value
favourite_language = {
    "jen": "python",
    "sarah": "c",
    "edward": "ruby",
    "phill": "python"
}
print(favourite_language)
language = favourite_language["sarah"].title()
print(favourite_language)
print(language)
# get — returns a default instead of raising when the key is missing
alien_0 = {'colour': "green", "speed": "slow"}
point = alien_0.get("points", "No point assigned")
print(point)
print(alien_0.get("Point"))
# More tasks to do!
##1
person = {"First name": "David", "Second name": "Bowie", "City": "Lodnon", "Year": "1962"}
for i in person:
    print(person[i])
##2
person = {"Andrew": "1", "Hopki": "5", "Jorge": "21", "Lara": 69, "Lony": 999, "Donki": "baba", "Karla": 999}
for i in person:
    print(i, person[i])
# Looping over key-value pairs
for key, value in person.items():  # items() yields (key, value) pairs
    print(key)
    print(value, "\n")
# Looping over the keys
for i in person.keys():  # keys() yields each key in turn
    print(i)
friends = ["Donki", "Andrew"]
for i in friends:
    if i in person:
        print(i, "ain't here and his favourite", person[i].title())
    else:
        print(i, "Is here ")
print("\nJust making some space...\n")
# Keys in sorted order
for name in sorted(person.keys()):
    print(name)
# Looping over the values
for i in person.values():  # values() works like keys() but yields the values
    print(i)
print("\nJust making some space...\n")
for i in set(person.values()):  # set() drops duplicate values
    print(i)
words = {"Love": "Chemistry", "Health": "Lack of something", "Will": "Something why we still live"}
words["Bank"] = "Robery"
for i, k in sorted(words.items()):
    print(f"I've got {i.title()} and {k.upper()}")
# Lists of dictionaries
alien_0 = {"colour": "green", "points": 5}
alien_1 = {"colour": "yellow", "points": 10}
alien_2 = {"colour": "red", "points": 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
    print(alien)
print("\nJust making some space...\n")
aliens = []
for alien in range(30):
    alien = {"colour": "green", "points": 5}
    aliens.append(alien)
print(aliens[5:10])
print(len(aliens))
print("\nJust making some space...\n")
for alien in aliens[2:5:2]:  # every second alien of indices 2..4 (i.e. 2 and 4)
    if alien["colour"] == "green":
        alien["colour"] = "Yellow"
        alien["speed"] = "Medium"
        alien["points"] = 10
for alien in aliens[0:10]:
    if alien["colour"] == "green":
        alien["colour"] = "Yellow"
        alien["speed"] = "Medium"
        alien["points"] = 10
    elif alien["colour"] == "Yellow":
        alien["colour"] = "Red"
        alien["speed"] = "High"
        alien["points"] = 15
print(aliens[0:10])
# Lists inside dictionaries
pizza = {"Wheat": "Natural", "Sauce": "mayonnaise", "toppings": ["onions", "garlic", "potato"]}
for i, k in pizza.items():
    if type(k) == str:
        print(f"Adding {i} ")
    else:
        for j in k:
            print(f"And your favourite toppings {j} are coming")
# Dictionaries inside dictionaries
users = {
    "aeinstein": {
        "first": "Albert",
        "last": "einstein",
        "city": "princetion"
    },
    "marie": {
        "first": "Mary",
        "last": "Poppins",
        "city": "London"
    },
}
for user, name in users.items():
    print(user.title())
    print(f"\tfull name {name['first']} {name['last']}")
    print(f"\tcity {name['city'].upper()}")
# Additional tasks
#1
person_0 = {"Name": "David", "Surname": "Bowie", "City": "London"}
person_1 = {"Name": "Norman", "Surname": "Reedus", "City": "USA"}
person_2 = {"Name": "Evgeni", "Surname": "Grinko", "City": "Moscow"}
person = [person_0, person_1, person_2]
for per in person:
    for i, k in per.items():
        print(i.upper(), k)
    print("\n")
#2
places = {"Evgeni": ["Moscow", "Austria", "Germany"], "Dan": ["Denmark", "Italy"]}
for i, k in places.items():
    print(i.title(), "wants to visit")
    for j in k:
        print("\t", j)
#3
cities = {
    "Moscow": {
        "population": "1000000",
        "jobs": "Available",
        "nature": "Dead"
    },
    "Afganistan": {
        "population": "-1",
        "jobs": "soldiers",
        "nature": "Alive AF"
    },
    "USA": {
        "population": "1000000000000",
        "jobs": "OILY JOBS ONLY",
        "nature": "Kinda alive"
    }
}
for i, k in sorted(cities.items()):  # i is the country name, k is its info dict
    print(i.upper())
    for j in k:  # j is a key inside the inner dict (population, jobs, nature)
        print(f"Here is stat about this country {j.title()} - {k[j]} ")
# WHILE
# input
message = input("Tell me something sweet: ")  # the text inside input() is shown to the user as the prompt
print("nice", message, "\n")
promp = "hey"
promp += "\tnice coding skills\n"  # two messages joined into one
message = input(promp)  # a variable can serve as the prompt text
print("AE", message)
# Using int with input
age = input("How old are you? ")  # input() always returns a str, even when digits are typed
print(age)
print(type(age))
print("\nMaking some space\n")
age = input("How old are you?\nVersion N2 ")
age = int(age)  # convert the str to an int
print(age > 18)
# Calculating the remainder with the % operator
print(4 % 3)  # 4 - 3 = 1 (remainder 1)
print(5 % 3)  # remainder 2
print(6 / 3)  # plain division
x = int(input("Введите число, которое вы хотите проверить на четность\n"))  # int() wrapped around input() saves a line
if x % 2 == 0:  # divisible by 2 without remainder?
    print("Четное")
else:
    print("Нечет")
# Extra tasks
#1
x = input("Your favourite car\n")
print(f"I like your {x}")
#2
x = int(input("Введите сколько столов вам нужно\n"))
if x > 8:
    print("Надо будет подождать")
else:
    print("Ждите, скоро все будет готово")
#3
x = int(input("Введите число, которое вы хотите проверить на кратность 10\n"))
if x % 10 == 0:
    print("Оно кратно 10")
else:
    print("Некратно")
# Cycles with while
number = 1
while number <= 5:
    print(number)
    number += 1
# The user chooses when to stop the program
words = "\tTell me anything you want and i will repeat it to u"
words += "\t\notherwise ,if you type 'quit' i will stop myself "
message = ""
while message != "quit":
    message = str(input("Enter your words\t"))
    if message != "quit":
        print(message)
# Flags
flag = True
while flag:  # while runs as long as the condition is truthy; flag starts as True
    x = str(input("Введите сообщение и я вам его напишу\nЕсли захотите завершить программу, напишите 'quit' "))
    if x != "quit":  # anything except quit keeps the program going
        print(x)
    else:
        flag = False  # flag becomes False, so the while loop ends
# break in while
message = "Введите слово и я его повторю"
message += "\n\tВ противном случае, введите 'quit' и программа остановится "
while True:
    mes = input(message)
    if mes == "quit":
        break  # break leaves the loop immediately, even an infinite one
    else:
        print(mes.title())
# continue in while
number = 0
while number <= 10:
    if number % 2 == 0:
        number += 1
        continue  # continue skips the rest of this iteration
    else:
        print(number)
        number += 1
# Avoiding infinite cycles
x = 1
# BUG FIX: the loop below never terminated (x is never incremented), freezing
# the whole script.  It is kept only as a commented-out demonstration; to stop
# such a loop you would press Ctrl+C (Win) or Cmd+C (macOS).
# while x <= 5:
#     print(x)
# Extra tasks
#1
message = "Вводите топпинги, которые вы хотите добавить в пиццу"
message += "\nЕсли захотите завершить заказ топпингов - напишите 'quit'\n"
flag = True
while flag:
    x = input(message)
    if x != 'quit':
        print(f"добавляем {x} в вашу пиццу\n")
    else:
        flag = False
#2
while True:
    x = int(input('Введите ваш возраст\n'))
    if x < 3:
        print("Поздравляем, ваш билет бесплатный\n")
    elif x >= 3 and x < 12:
        print("Стоимость билета 10 руб\n")
    else:
        print("Стоимость билета 15 руб\n")
    break
#3
flag = True
while True and flag:
    x = input("Введите топпинг для пиццы. Если захотите завершить программа - напишите quit ")
    if x != 'quit':
        print(f"добавляем {x}")
    else:
        flag = False
        break
# Using while with lists and dictionaries
# Moving elements between lists
uncomfirmed_users = ["lara", "colin", "dave", "Gorge", "kate"]
confirmed_users = []
while uncomfirmed_users:
    user = uncomfirmed_users.pop()
    print(f"varification is coming for {user.upper()} ")
    confirmed_users.append(user)
print(confirmed_users)
print("an old list", uncomfirmed_users)
# Delete every occurrence of a value from a list
pets = ["cats", "dogs", "penguin", "parrot", "elephant", "Cats"]
while "cats" in pets or "Cats" in pets:  # keep looping while either spelling remains
    if "cats" in pets:
        pets.remove("cats")
    else:
        pets.remove("Cats")
print(pets)
# Filling a dictionary with user data (using while)
responses = {}
active = True
while active:
    name = str(input("Please enter your name\n"))
    respons = input("Which mountain have ever wished to climb on\n")
    responses[name] = respons
    repeat = input("Would you like to continue the process ? (type 'no', to stop)\n")
    if repeat == "no":
        active = False
for i, k in responses.items():
    print(f"\n{i} really would like to climb {k}\n")
# Extra tasks
#1 and 2
sandwich_orders = ["pastrami", "ham and cheese", "pastrami", "pastrami", "Tuna", "pastrami", "beaver sandwhich", "pastrami", "Kadgit's sandwhcih"]
finished_sandwiches = []
while sandwich_orders:
    if "pastrami" in sandwich_orders:
        # Pastrami is sold out: drop every pastrami order first.
        while "pastrami" in sandwich_orders:
            sandwich_orders.remove("pastrami")
    finished_sandwiches.append(sandwich_orders.pop())
for i in finished_sandwiches:
    print(f"we've done this sandwhich {i}")
#3
respons = {}
while True:
    x = input("Hello, please enter your name\n")
    y = input(f"thanks {x}, now please enter a place where you want to chill out\n")
    respons[x] = y
    x = input("lets continue. If you want to stop, please enter '-' or 'no'\n")
    if x == "no" or x == "-":
        break
for i, k in respons.items():
    print(f"\n{i} wants to go to {k}")
# FUNCTIONS
# Why functions?  They let you run a named block of code any number of times,
# make programs easier to read and debug, and avoid copy-paste.
def greet():
    # `def` introduces a function definition; the name after it is yours to
    # choose, but avoid dunder names like __init__ until you know their meaning.
    print("Hello there")
    print("Hello , General Kenoby")
greet()
# Here the program solves exactly one task: printing a couple of Star Wars
# lines.  Triple-quoted strings ("""...""") can be used for multi-line notes;
# otherwise plain # comments work fine.
# Passing information to a function
def greet_user(username):  # parameters are declared inside the parentheses
    print("Hello there")
    print(f"Hello ,general {username.upper()}")
greet_user("monkey")  # here we pass the value we want used
greet_user("D.VA")
# Parameters vs arguments:
# `username` in the definition is a PARAMETER; "monkey" / "D.VA" in the call
# are ARGUMENTS.  The argument's value is bound to the parameter, and that
# name exists only inside the function's own scope.
# Extra tasks
#1
def display_message():
    print("Hello guys, our today's theme is functions in python")
#2
def favourite_book(YaEbal):
    print(f"One of my favourite books is {YaEbal}")
favourite_book("Alice in the wonderland")
# Positional arguments
def describe_pet(animal_type, pet_name):  # any number of parameters may be listed
    print(f"Тип животного {animal_type.title()}")
    print(f"Имя сего существа {pet_name}")
describe_pet("hamster", "DOGE")
describe_pet("Barbie", "Doll")
# Named (keyword) arguments
def described_pet(animal_type, pet_name):
    print(f"Тип животного {animal_type.title()}")
    print(f"Имя сего существа {pet_name}")
described_pet(animal_type="Villain", pet_name="Joker")  # naming each argument lets them come in any order
described_pet(pet_name="Harley Davidson", animal_type="Bike")  # but the names must match the parameters exactly
# Default argument values
# A parameter can carry a default; passing a value overrides it.
def my_pet(pet_name, pet_type="Doge"):
    print(f"My pet's name is {pet_name.title()}")
    print(f"{pet_name}'s type is {pet_type}")
my_pet("willie")  # no second value — the default is used
my_pet("Gary", "cat")  # override the default because this one is a cat
my_pet("larry")  # default again
# Extra tasks
#1
def make_shirt(size=0, text="No print"):
    print(f"The size of the T-shirt is {size} and the text on it will be - {text}")
make_shirt(40, "hi")
make_shirt(10, 69)
make_shirt(10)
#2
def describe_city(town="LA", country="the USA"):
    print(f"{town} is in {country} !")
describe_city("Moscow", "Russia")
describe_city()
# Returning a value from a function
def form_name(first_name=None, last_name=None):
    full_name = f"{first_name} {last_name}"
    return full_name.title()  # return hands the value back to the caller
x = form_name("jack", "nickolson")  # the call's result is captured in x
print(x)
# Optional arguments
def formatted_name(first_name=None, last_name=None, middle_name=""):
    # An empty-string default makes middle_name optional — callers may omit it.
    full_name = f"{first_name} {middle_name} {last_name}"
    return full_name.title()
homie = formatted_name(first_name="John", last_name="lee", middle_name="Hooker")
print(homie)
homie = formatted_name("John", "Mordovich")
print(homie)
# Returning a dictionary
def build_person(first_name=None, last_name=None, age=None):
    person = {"first": first_name.title(), "second": last_name.lower()}
    if age:  # age is only stored when provided (truthy)
        person['age'] = age
    return person
guy = build_person("jack", "NICOLSON", age=69)
for i, k in guy.items():
    print(i, k)
# Using functions in while
def frr_name(first_name=None, second_name=None):
    # Greet the user and return the title-cased full name.
    full_name = f"{first_name} {second_name}"
    print(f"Hello {full_name}")
    return full_name.title()
while True:
    print("Please tell me ur name m8\n")
    x = input("Name: ")
    y = input("Aaaand your surname: ")
    name = frr_name(x, y)  # frr_name is defined just above
    print(name)
    x = input("if you want to stop, please type 'no'\n")
    if x == "no":
        break
# Extra tasks
#1
world = {}
def city_country(city, country, world):
    # Record `city` under key `country` in the given dict and return that dict.
    world[country] = city
    return world
for i in range(3):
    x = input("Enter your country: ")
    y = input("Enter your city: ")
    city_country(y, x, world)  # city_country and world are defined just above
print(world)
#2
def make_album(name, producer, album, nub=None):
    # Build a dict describing an album; nub (track count) is optional.
    alb = {}
    alb["name: "] = name
    alb["producer: "] = producer
    alb["Album's name: "] = album
    if nub:
        alb["number of tracks: "] = nub
    return alb
first_album = make_album("David Bowie", "David Bowie", "Blackstar", 9)
second_album = make_album("Corey Taylor", "Stone sour", "death")
print(first_album)
print(second_album)
# Passing a list to a function
def greet_users(names):
    # Print a greeting for every name in the list.
    for i in names:
        msg = f"Hello {i}"
        print(msg)
users = ["hannag", "Keil", "Jake", "Bro"]
greet_users(users)
# Changing a list inside a function
unprinted = ["phone", "Xbox", "Playstation", "PC"]
printed = []
while unprinted:
    current = unprinted.pop()
    print(f"current design {current}")
    printed.append(current)
for i in printed:
    print(i)
# OR WE CAN DO IT THIS WAY
def print_models(unprinted, completed):
    # Move every design from `unprinted` to `completed`, printing progress.
    # NOTE: this mutates both argument lists in place.
    while unprinted:
        current = unprinted.pop()
        print(f"{current} is in progress")
        completed.append(current)
def show_models(completed):
    # Print every finished design.
    for i in completed:
        print(i)
unprinted = ["phone", "Xbox", "Playstation", "PC"]
completed = []
print_models(unprinted, completed)
show_models(completed)
# Preventing a function from changing a list: pass a slice copy instead
def print_models(unprinted, completed):
    # Move every design from `unprinted` to `completed`, printing progress.
    while unprinted:
        current = unprinted.pop()
        print(f"{current} is in progress")
        completed.append(current)
def show_models(completed):
    # Print every finished design.
    for i in completed:
        print(i)
unprinted = ["phone", "Xbox", "Playstation", "PC"]
completed = ['hi']
print_models(unprinted[:], completed)  # the copy is emptied; the original survives
show_models(completed)
print("showing the copied list")
show_models(unprinted)
# Extra tasks
#1
mes = ["hi", "thank you", "you welcome"]
show_models(mes)  # show_models is defined earlier in this file
#2
def send_message(sending, sent):
    # Move every message from `sending` to `sent`, printing each one.
    while sending:
        msg = sending.pop()
        print(msg)
        sent.append(msg)
mes = ["hi", "thank you", "you welcome"]
sent = []
send_message(mes, sent)
print("messages that didnt get to the point")
show_models(mes)
print("Messages which reached the point")
show_models(sent)
send_message(mes[:], sent)  # passing a copy leaves mes untouched
print("messages that didnt get to the point_1")
show_models(mes)
print("Messages which reached the point_1")
show_models(sent)
#transfering custom variables
def make_pizza(*toppings): # * в данном случае создает кортеж в который можно запихнуть скок хош значений
print (toppings)
def made_pizza(*toppings): #теперь наша функция выводит не весь кортеж разом , а все элементы один за другим
for i in toppings:
print (i)
make_pizza('pep')
make_pizza("mushrooms","green peppers","extra cheese")
made_pizza('pep')
made_pizza("mushrooms","green peppers","extra cheese")
#positional arguments with custom sets of aruments (damn that sounds hard)
def mad_pizza(size, *toppings):
    """Print the pizza *size*, then every extra topping as a dashed list item.

    ``*toppings`` packs any number of trailing positional arguments into a
    tuple (such a catch-all parameter is conventionally named ``*args``).
    """
    print(size)
    for topping in toppings:
        print("-", topping)
mad_pizza(69,"mushrooms")
mad_pizza(420,"mushrooms","green peppers","extra pepper","onions")
#using custom number of arguments
def build(first, last, **user_info):
    """Return *user_info* extended with the required name fields.

    ``**user_info`` gathers arbitrary keyword arguments into a dict; the
    dict is mutated and returned.  (Note the historical mixed key style:
    ``'first_name'`` but ``'last name'`` — kept for compatibility.)
    """
    user_info['first_name'] = first
    user_info['last name'] = last
    return user_info
user_profile=build("albert","einstein",location='princerton',field="physics")
print (user_profile)
#extra tasks
#1
def sandwhich(type_of_bread, mains, type_of_cheese, *extras):
    """Describe the ordered sandwich, then list each extra on its own line."""
    print(f"Oki-doki, your type of bread will be {type_of_bread} and ur main ingridients will be {mains}. Type of cheese is {type_of_cheese} and your extras: ")
    for extra in extras:
        print(extra)
    print("\n")
sandwhich("wheat","Cucumbers and meat","Funky one", "onions", "mushrooms")
sandwhich("wheat","Cucumbers and meat","Funky one", "onions")
sandwhich("wheat","Cucumbers and meat","Funky one", "onions","pepper souce", "mushrooms","extra milk")
#2
def build_task(first, last, **user):
    """Return *user* extended with title-cased first/last name entries."""
    user['first_name'] = first.title()
    user['last name'] = last.title()
    return user
uno=build_task("mark","kronbergs",job="programmer",children="None",games="WOW")
des=build_task("Dan","parsovich",meaning="There is no meaning of my life",love="there is no love",addictive_to="gaming",mental_issues="got them")
print (uno)
print (des)
#3
def automobile(maker, name_of_the_model, **extra_info):
    """Return *extra_info* extended with the title-cased maker and model."""
    extra_info["maker"] = maker.title()
    extra_info["Name of the model"] = name_of_the_model.title()
    return extra_info
x=automobile("x5","subaru",colour="blue",amount=5,quality="Good")
y=automobile("is","whore",you="your",mam="mama")
print (x)
print (y)
#storing functions in modules
#importing the whole module
# Why imports? To keep this file tidy, put helper functions in another .py
# file in the same directory and bring them in with `import <filename>`.
#Line 4 (imports file)
import ports
ports.make_pizza(16,"pepperoni") # after the import, call module.function as if it lived here
ports.make_pizza(69,420,"few cucumbers")
#imoprt chosen functions
# If you don't need the WHOLE module, import just the names you want:
from ports import make_pizza # the imported function can now be called directly
make_pizza(69,420,"few cucumbers")
make_pizza(20,"calculate","Karamba")
#making a pseudonym for a function
from ports import make_pizza as mp # alias: the original name inside `ports` is unchanged
mp(6,42,"few cucumbers ?")
mp(20,"calculate 1111","Karamba")
#making a pseudonym for the module
import ports as por
por.make_pizza(70,20,"few cucumbers")
por.make_pizza(21,"calculate","Karamba")
#import all the stuff
from ports import * # star import brings in EVERYTHING; risky — it can silently replace same-named functions already defined here
make_pizza(70000,21212310,"a few cucumbers")
make_pizza(1010201201,"KArak","Karamba")
#Extra tasks
#1
#Line 11 (imports.py)
from ports import *
unprinted=["phone","Xbox","Playstation","PC"]
completed=[]
print_models(unprinted,completed)
show_models(completed)
#2
import ports
from ports import print_models
from ports import print_models as pm
import ports as PR
from ports import *
#CLASSES
# Classes, by analogy with Minecraft villagers: the village is the class and
# each villager role (farmer, trader, plain walker) is an instance stamped
# out from it.  Roles repeat, so the shared behaviour is defined once on the
# class and every instance gets it for free.
class dog():
    """A simple dog model with a name, an age, and a couple of tricks."""

    def __init__(self, name, age):
        # __init__ runs automatically every time an instance is created.
        self.name = name  # bound to self so every method can reach it
        self.age = age

    def sit(self):
        """Announce (in Russian) that the dog sat down."""
        print(f"{self.name} cел ")

    def roll_over(self):
        """Marvel (in Russian) that a dog this old can still roll over."""
        print(f"{self.age} вот это возраст ! Удивительно, как он может вообще перекатываться")
my_dog=dog('willie',6) # creating an instance runs __init__ with these arguments
print(f"My dog's name is {my_dog.name}")
print (f"My dog is {my_dog.age} years old")
#"talking with atributes"
print (my_dog.name) # "dot notation": read an attribute straight off the instance
#using method
my_dog.sit()
my_dog.roll_over()
#making a few specimen
my_dog=dog("Naggets",5)
your_dog=dog("Bangladesh", 11)
print (f"{my_dog.name} can't do a lot as {your_dog.name} ")
print (f"funny, but my dog is {my_dog.age} years old and yours is {your_dog.age} old")
#Extra tasks
#1
class Restaurant():
    """Minimal restaurant record: a name plus a cuisine type."""

    def __init__(self, name, cuisine_type):
        self.name = name
        self.cuisine_type = cuisine_type

    def desctibe(self):
        # (sic) method name kept as-is for existing callers
        print(self.name)
        print(self.cuisine_type)

    def open_restaurant(self):
        print("Restaurant is open now")
restaurant=Restaurant("Uzbechkina","Uzbekistan food")
print (restaurant.name)
print (restaurant.cuisine_type)
restaurant.desctibe()
restaurant.open_restaurant()
#2
restaurant1=Restaurant("Kalmik","Kazakh's famous food")
restaurant2=Restaurant("Arab tut bil","Arabic food")
restaurant1.desctibe()
restaurant1.open_restaurant()
restaurant2.desctibe()
restaurant2.open_restaurant()
#3
class User:
    """Collects a user's name parts and free-form facts, then reports them."""

    def __init__(self):
        print("\nhello , admin\n")

    def first_name(self, name):
        self.name = name

    def last_name(self, l_name):
        self.l_name = l_name

    def info(self, *args):
        # arbitrary facts about the user, packed into a tuple
        self.args = args

    def describe_user(self):
        print(f"Hello {self.name} {self.l_name} i know so much about you, for example: ")
        for fact in self.args:
            print(fact)
user=User()
user.first_name("Arkovich")
user.last_name("Mordovich")
user.info("Loves gaming","fan of night clubs","Cigarettes addictive")
user.describe_user()
#working with classes and speicmen
class Car():
    """A car with make/model/year and a tamper-resistant odometer."""

    def __init__(self, make, model, year):
        self.make = make
        self.model = model
        self.year = year
        self.odometr = 0  # every new car starts with zero mileage

    def desryption(self):
        """Return a title-cased 'year make model' description string."""
        long_name = f"{self.year} {self.make} {self.model}"
        return long_name.title()

    def read_metr(self):
        print(f"This car has {self.odometr} on it")

    def update_odom(self, mile):
        """Set the odometer, refusing any attempt to wind it backwards."""
        if mile > self.odometr:
            self.odometr = mile
        else:
            print("You can't roll back the odometr")

    def increasement(self, miles):
        """Add *miles* on top of the current reading."""
        self.odometr += miles
my_car=Car('audi','a4',2019)
print (my_car.desryption())
my_car.read_metr()
#changing atributes in class
#1 method
my_car.odometr=23 # direct attribute assignment from outside the class
my_car.read_metr()
#2 method
my_car.update_odom(20) # rejected: 20 is below the current reading of 23
my_car.read_metr()
my_car.update_odom(-1)
my_car.read_metr()
#3 method
my_used=Car('subaru','outbreak',2015)
print (my_used.desryption())
my_used.update_odom(23_500)
my_used.read_metr()
my_used.increasement(1000)
my_used.read_metr()
#extra Tasks
#1
class Restaurant_1():
    """Restaurant that also tracks how many customers it has served."""

    def __init__(self, name, cuisine_type):
        self.name = name
        self.cuisine_type = cuisine_type
        self.number_served = 0

    def desctibe(self):
        print(self.name)
        print(self.cuisine_type)
        print(self.number_served, "people served")

    def open_restaurant(self):
        print("Restaurant is open now")

    def update_people(self, num):
        """Add *num* served customers; non-positive increments are rejected."""
        if num > 0:
            self.number_served += num
        else:
            print("It is illegal !")

    def set_peope(self, num):
        # (sic) method name kept as-is for existing callers
        self.number_served = num
rest=Restaurant_1("ching-chong","chinese food")
rest.set_peope(4)
rest.desctibe()
rest.update_people(-1) # rejected: increments must be positive
rest.update_people(10)
rest.desctibe()
#2
class User_1:
    """User with a login-attempt counter that may only ever increase."""

    def __init__(self, num):
        self.num = num  # number of login attempts so far
        print("\nhello , admin\n")

    def first_name(self, name):
        self.name = name

    def last_name(self, l_name):
        self.l_name = l_name

    def info(self, *args):
        self.args = args

    def describe_user(self):
        print(f"Hello {self.name} {self.l_name} i know so much about you, for example: ")
        for fact in self.args:
            print(fact)
        print("Number of your attempts is", self.num)

    def login_increase(self, num):
        """Raise the attempt counter; lowering it is forbidden."""
        if self.num > num:
            print("this is forbidden")
        else:
            self.num = num
us=User_1(0)
us.first_name("Leo")
us.last_name("Caprio")
us.info("Loves football","filming is his passion")
us.describe_user()
us.login_increase(1)
us.describe_user()
#inheritance
#Method __init__ for the class inheritance
class Car():
    """Car with make/model/year and an odometer.

    Duplicate of the earlier Car demo, extended with fill_gas_tank so the
    Electric_car subclass below has a method to override.
    """
    def __init__(self,make,model,year):
        self.make=make
        self.model=model
        self.year=year
        self.odometr= 0 #giving atributes by the default
    def desryption (self):
        # Return a title-cased "year make model" string.
        long_name=f"{self.year} {self.make} {self.model}"
        return long_name.title()
    def read_metr(self):
        print (f"This car has {self.odometr} on it")
    def update_odom(self,mile):
        # Only forward adjustments are allowed.
        if mile >self.odometr:
            self.odometr=mile
        else:
            print ("You can't roll back the odometr")
    def increasement(self,miles):
        self.odometr+=miles
    def fill_gas_tank(self,nub):
        # NOTE(review): adds the fill amount to the odometer rather than a
        # fuel attribute — looks like a demo simplification; confirm.
        self.odometr+=nub
class Electric_car(Car):
    """Child class of ``Car``: inherits everything a car has, but swaps the
    fuel-related behaviour for a battery (inheritance demo)."""

    def __init__(self, make, model, year):
        # super() runs Car.__init__ so the shared attributes are set up first.
        super().__init__(make, model, year)
        self.battery = 75  # kWh; fixed default for this demo

    def describe_bat(self):
        print(f"This car's got {self.battery} KWH left")

    def fill_gas_tank(self, nub):
        # Overrides the parent method: an EV has no gas tank to fill.
        print("This type of car doesn't need it !")
my_tesla=Electric_car('tesla','models s','2019')
print (my_tesla.desryption())
my_tesla.fill_gas_tank(5)
#giving atributes and methods of parent class
my_tesla.describe_bat()
#changing methods of the parent class
# def fill_gas_tank(self): Overriding: define a method in the child with the SAME
#     name as a parent method and it replaces it for child instances — e.g. an
#     electric car must not behave as if it had a gas tank.
# print ("This type of car doesn't need it !")
#example as an atribute
class Car():
    """Same Car as above; repeated so the composition example is self-contained."""
    def __init__(self,make,model,year):
        self.make=make
        self.model=model
        self.year=year
        self.odometr= 0 #giving atributes by the default
    def desryption (self):
        # Return a title-cased "year make model" string.
        long_name=f"{self.year} {self.make} {self.model}"
        return long_name.title()
    def read_metr(self):
        print (f"This car has {self.odometr} on it")
    def update_odom(self,mile):
        # Only forward adjustments are allowed.
        if mile >self.odometr:
            self.odometr=mile
        else:
            print ("You can't roll back the odometr")
    def increasement(self,miles):
        self.odometr+=miles
    def fill_gas_tank(self,nub):
        self.odometr+=nub
class Battery():
    """A car battery with a capacity in kWh and a derived driving range."""

    def __init__(self, battery=75):
        self.battery = battery  # capacity in kWh

    def describe_battery(self):
        print("That's how much juice left - ", self.battery)

    def get_range(self):
        """Print the driving range for the current battery size.

        Bug fix: the original only bound the result for 75/100 kWh batteries
        (and shadowed the builtin ``range``), so any other capacity crashed
        with UnboundLocalError.  Unknown capacities now get a clear message.
        """
        if self.battery == 75:
            driving_range = 260
        elif self.battery == 100:
            driving_range = 315
        else:
            print(f"No range data for a {self.battery} KWH battery")
            return
        print(f"This car can go this far with this fuel {driving_range}")

    def upgrade_Battery(self):
        """Bump the battery to the 100 kWh option if it isn't there already."""
        if self.battery != 100:
            self.battery = 100
class Electric_car(Car):
    """EV variant of ``Car`` whose battery is a separate ``Battery`` object
    (composition: an attribute that is itself a class instance)."""

    def __init__(self, make, model, year):
        super().__init__(make, model, year)
        self.battery = Battery()  # changed: the battery is now its own object

    def fill_gas_tank(self, nub):
        # Override: electric cars have no gas tank.
        print("This type of car doesn't need it !")
my_tesla=Electric_car('tesla','models s','2019')
# Composition walk-through: my_tesla.battery is itself a Battery instance, so
# this line first fetches the attribute, then calls the Battery method on it.
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
#Extra tasks
#1
class Restaurant():
    """Restaurant that doubles as an ice-cream stand with fixed flavours."""

    def __init__(self, name, cuisine_type):
        self.name = name
        self.cuisine_type = cuisine_type
        self.flavours = ["Vanilla", "mango", "raspberry", "chocolate"]

    def ice_cream_stand(self):
        """Print the available ice-cream flavours, one per line."""
        print("\nKinds of ice-cream: ")
        for flavour in self.flavours:
            print(flavour)

    def desctibe(self):
        print(self.name)
        print(self.cuisine_type)

    def open_restaurant(self):
        print("Restaurant is open now")
restaurant=Restaurant("Uzbechkina","Uzbekistan food")
print (restaurant.name)
print (restaurant.cuisine_type)
restaurant.desctibe()
restaurant.ice_cream_stand()
restaurant.open_restaurant()
#2
class User:
    """User whose name setters also return the stored value."""

    def __init__(self):
        print("\nhello , admin\n")

    def first_name(self, name=None):
        self.name = name
        return self.name

    def last_name(self, l_name=None):
        self.l_name = l_name
        return l_name

    def info(self, *args):
        self.args = args

    def describe_user(self):
        print(f"Hello {self.name} {self.l_name} i know so much about you, for example: ")
        for fact in self.args:
            print(fact)
class Privileges():
    """Holds an admin's privilege description string."""

    def __init__(self, privileges="Can add users, can deleate users, can ban userss"):
        self.privileges = privileges

    def show_privileges(self):
        """Print the privilege description."""
        print(self.privileges)
class Admin(User):
    """User subclass whose privileges live in a composed Privileges object.

    NOTE(review): super().__init__() is not called here, so the parent's
    greeting print does not run for admins — confirm this is intentional.
    (An earlier, commented-out draft stored add/delete/ban strings directly
    on the admin and printed them via its own show_privileges.)
    """

    def __init__(self):
        self.admin = Privileges()  # composition instead of inline attributes
user=User()
user.first_name("Arkovich")
user.last_name("Mordovich")
user.info("Loves gaming","fan of night clubs","Cigarettes addictive")
user.describe_user()
# (Earlier Admin variant kept below for reference.)
# admin=Admin()
# admin.first_name("Genry")
# admin.last_name("Kubets")
# admin.privileges()
# admin.show_privileges()
admin=Admin()
admin.admin.show_privileges()
#3
my_tesla=Electric_car('tesla','models s','2019')
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
my_tesla.battery.upgrade_Battery()
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
#Importing one class
#24(imports)
from ports import Car
my_new_car=Car("Audi","A4",2018)
print (my_new_car.desryption())
my_new_car.odometr=23
my_new_car.read_metr()
#storing few classes in one module
from ports import Electric_car
tesla=Electric_car("tesla","a4",2019)
tesla.battery.describe_battery()
tesla.battery.get_range()
tesla.battery.upgrade_Battery()
tesla.battery.describe_battery()
tesla.battery.get_range()
#importing few classes from the module
from ports import Car, Electric_car
beetle=Car("Kaz","froggger",2020)
print (beetle.desryption())
bee=Electric_car("Tesla","a4",2222)
print (bee.desryption())
#importing the whole module
import ports
my_bee=ports.Car("Adui","202",1987)
print (my_bee.desryption())
bib=ports.Electric_car("Tesla","truck",1999)
print (bib.desryption())
#importing the whole module
from ports import * # discouraged: a star import can clash with names already defined in this file
#importing another way
from ports import Car
from portable import Electric_car
car=Car("Audi","A28",2090)
print (car.desryption())
t_car=Electric_car("Dudi",20,"kariz")
print (t_car.desryption())
#using pseudonyms
from ports import Electric_car as EC
my_tesla=EC("Twka","AE4",2020)
print (my_tesla.desryption())
#Extra tasks
#1
#78 (imoprts)
from ports import Restaurant
rest=Restaurant("kurkih","Asian food")
rest.open_restaurant()
#2
#99 (imports)
from ports import *
admin=Admin()
admin.admin.show_privileges()
#Standard
from random import randint
x=randint(1,6) # random integer from 1 to 6, inclusive
print (x)
from random import choice
players=["Alex","Miranda","Josh","Loie","Loice","Andrew"]
fir=choice(players) # one random element of the list
print (fir)
#extra tasks
class Die():
    """An n-sided die (6 sides by default) that prints each roll."""

    def __init__(self):
        self.sides = 6

    def roll_die(self, nub=None):
        """Print a random roll between 1 and *nub* (default: ``self.sides``).

        Fix: the sentinel test now uses ``is None`` (identity) instead of
        ``== None`` (equality), the correct idiom for None checks.
        """
        if nub is None:
            nub = self.sides
        from random import randint
        print(randint(1, nub))
for i in range(10):
    rub=Die()
    rub.roll_die()
#2
# Draw 4 random elements (repeats possible) from a mixed list.
kort=[1,2,3,4,"a",5,"b",6,7,"d","k","r","F",10,100]
jez=[]
from random import choice
for i in range (4):
    x=choice(kort)
    jez.append(x)
for i in jez:
    print (i)
#3
# Keep drawing hands until one equals `win`, counting the attempts.
# NOTE(review): `jez` is never reset between attempts, so after the first
# miss it grows past 4 elements and can never equal `win` — likely a bug.
nub=0
win=[4,6,2,1]
jez=[]
while jez!=win:
    for i in range (4):
        x=choice(kort)
        jez.append(x)
    nub+=1
print (nub-1)
#FILES AND EXCEPTIONS
#reading from the file
with open('text.txt') as file_object: # `with` closes the file automatically afterwards
    contents=file_object.read() # read() slurps the whole file into one string
    print (contents)
    print (contents.rstrip()) # rstrip() drops the trailing blank line(s)
#reading line by line
filename="text.txt"
with open (filename) as file_object:
    for line in file_object:
        print (line.rstrip())
#making a list of lines from the the file
filename="text.txt"
with open (filename) as file_object:
    lines=file_object.readlines() # readlines() returns a LIST of lines (not readline!)
for line in lines:
    print (line.rstrip())
#Working with the context of file
filename="text.txt"
with open (filename) as file_object:
    lines=file_object.readlines() # readlines() returns a LIST of lines (not readline!)
pi_str=" "
for line in lines:
    pi_str+=line.strip()
print (pi_str)
print (len(pi_str))
#Big files: million numbers
filename="text.txt"
with open (filename) as file_object:
    lines=file_object.readlines()
pi_str=" "
for line in lines:
    pi_str+=line.strip()
print (f"{pi_str[:52]}") # only the first 52 characters
print (len(pi_str))
#Checking Birthday
filename="text.txt"
with open (filename) as file_object:
    lines=file_object.readlines()
pi_str=" "
for line in lines:
    pi_str+=line.strip()
birthday=input("Enter your birthday date in the form month day year (without spaces) : ")
if birthday in pi_str:
    print ("Your birthday is in this number")
else:
    print ("your birthday doesn't appear in the first million of pi numbers")
#Extra tasks
#1
with open ("text.txt") as fil:
    for line in fil:
        print (line)
with open ("text.txt") as fil:
    print ("sec method ")
    x=fil.read()
    print (x)
with open ("text.txt") as fil:
    print ("third method")
    lines=fil.readlines()
    print (lines)
#2
message="I like dog"
message=message.replace('dog',"cat")
print (message)
with open ("text.txt") as fil:
    for line in fil:
        if "python" in line:
            line=line.replace('python','C')
        print (line)
#write in the empty file
# File-open modes: 'r' read, 'w' write (truncates), 'a' append, 'r+' read+write.
filen="program.txt"
with open (filen, 'w') as fil:
    fil.write("I love programming")
#many lines writing
filen="program.txt"
with open (filen, 'w') as fil:
    fil.write("I love programming\n")
    fil.write('And here is the new line\n') #ADDED THIS LINE
#adding data to the file
with open (filen,'a') as fil:
    fil.write('I know that sounds strange,but still ')
    fil.write('\nOh, hi mark')
#Extra tasks
#1
x=str(input("Hello there, tell me ur name ! :"))
with open ('text.txt', 'a') as fil:
    fil.write(x)
#2
# Keep appending entered names until the user submits an empty line.
flag=True
while flag:
    x=str(input("Hello, please tell me ur name : "))
    if x!='':
        with open ('program.txt','a') as fil:
            x+="\n"
            fil.write(x)
    else :
        flag=False
#Exception Zero Division error
print (5/0) # deliberately raises ZeroDivisionError to show the traceback
#Using try-except
try:
    print (5/0)
except ZeroDivisionError:
    print ("You cant divide by zero")
#using Exceptions to reduce the amount of the emergency situations
print ("give me to numbers and i will divide them\n")
print ("to finish, press enter without entering any text\n")
while True:
    first=input("Enter your first number: ")
    if first=="":
        print ("bye")
        break
    second=input("Enter your second number: ")
    if second=="":
        print ("bye")
        break
    # NOTE(review): non-numeric input still raises an uncaught ValueError here.
    try:
        answ=int(first)/int(second)
    except ZeroDivisionError:
        print ("You can't divide by zero")
    else:
        print (answ)
#Analysing and excepting FileNotFound
filename="alice.txt"
try:
    with open (filename, encoding='utf-8') as f:
        content=f.read()
except FileNotFoundError:
    # NOTE(review): f-string has no placeholder — "(unknown)" was probably
    # meant to be {filename}.
    print (f"Sorry, but there is no (unknown) in this directory")
#Analysing the whole text
filename="alice.txt"
try:
    with open (filename, encoding='utf-8') as f:
        content=f.read()
except FileNotFoundError:
    print (f"Sorry, but there is no (unknown) in this directory")
else:
    words=content.split()
    num=len(words)
    print (f"The file (unknown) includes {num} number of words")
#Working with a few files
def count(filename):
    """Print the number of words in *filename*, or a message if it is missing.

    Fix: the user-facing messages were f-strings containing a literal
    ``(unknown)`` where the file name clearly belongs; interpolate
    ``filename`` instead.
    """
    try:
        with open(filename, encoding='utf-8') as f:
            content = f.read()
    except FileNotFoundError:
        print(f"Sorry, but there is no {filename} in this directory")
    else:
        words = content.split()
        num = len(words)
        print(f"The file {filename} includes {num} number of words")
filename="alice.txt"
count(filename)
#Errors without notifications to the user
def count_1(filename):
    """Like ``count`` but stays silent when the file does not exist.

    Fix: interpolate the real *filename* into the report — the original
    f-string printed a literal ``(unknown)``.
    """
    try:
        with open(filename, encoding='utf-8') as f:
            content = f.read()
    except FileNotFoundError:
        pass  # deliberate: missing files are ignored without telling the user
    else:
        words = content.split()
        num = len(words)
        print(f"The file {filename} includes {num} number of words")
filename="alice.txt"
filen="Mob.txt"
count_1(filename)
count_1(filen)
#Extra Tasks
#1
fir=input("The first number: ")
sec=input("The second number: ")
try:
    x=(int(fir)) + (int(sec))
except ValueError:
    print ("You have words, but no numbers. Dont do it next time !")
else:
    print (x)
#2
# Same addition loop, repeated until the user presses enter on the prompt.
while True:
    fir=input("The first number: ")
    sec=input("The second number: ")
    try:
        x=(int(fir)) + (int(sec))
    except ValueError:
        print ("You have words, but no numbers. Dont do it next time !")
    else:
        print (x)
    x=input("If you want to stop, just click enter ")
    if x=="":
        break
#3
# Read two files; report if either is missing.
try:
    with open('alice.txt', encoding='utf-8') as g:
        g=g.read()
    with open ('text.txt', encoding='utf-8') as f:
        f=f.read()
except FileNotFoundError:
    print ("some of the files is missing")
else:
    print (f)
    print (g)
#4
# Same as #3, but missing files are silently ignored.
try:
    with open('alice.txt', encoding='utf-8') as g:
        g=g.read()
    with open ('text.txt', encoding='utf-8') as f:
        f=f.read()
except FileNotFoundError:
    pass
else:
    print (f)
    print (g)
#5
line='Row, row, rOw, rrr, ROW'
x=line.lower().count('row') # case-insensitive count of 'row'
print (x)
#Saving data by using json
# json.dump(data, file_object) writes `data` into the open file as JSON.
# json.load(file_object) reads the stored object back into memory.
import json
numbers=[2,3,5,7,11,13]
filename='numbers.json'
with open (filename, 'w') as f:
    json.dump(numbers,f)
with open (filename) as f:
    numbers=json.load(f)
print (numbers)
#saving and reading data made by user
username=input("Please input your name : ")
filename='numbers.json'
with open (filename, 'w') as f:
    json.dump(username, f)
print (f'We will remember u, {username}')
"""Сейчас мы откроем файл и юзанем инфу из него """
filename='numbers.json'
with open (filename) as f:
    username=json.load(f)
print (f'Welcome back, {username} !!!')
# Greet the stored user, or ask for a name and store it for next time.
filename="numbers.json"
try:
    with open (filename) as f:
        username=json.load(f)
except FileNotFoundError:
    username=input("Enter your name: ")
    with open (filename, 'w') as f:
        json.dump(username,f)
    print ('We will remember u')
else:
    print (f"welcome back {username}")
# Refactoring
import json
def get_stored():
    """Return the username stored in numbers.json, or None if there is none."""
    filename = "numbers.json"
    try:
        with open(filename) as f:
            return json.load(f)
    except FileNotFoundError:
        return None
def new_user():
    """Prompt for a name, persist it to numbers.json, and return it."""
    filename = 'numbers.json'
    username = input("Please input your name : ")
    with open(filename, 'w') as f:
        json.dump(username, f)
    return username
def greet_user():
    """Greet a returning user, or register a new one on first run."""
    username = get_stored()
    if not username:
        username = new_user()
        print(f'We will remember u, {username}')
    else:
        print(f'Love and greet , da supa {username}')
greet_user()
#Extra tasks
#1
import json
filename='numbers.json'
x=int(input("Введите ваше любимое число: "))
with open (filename, "w") as f:
    json.dump(x,f)
with open (filename) as f:
    x=json.load(f)
print (f"Я знаю ваше любимое число, это - {x}")
#2
# Reuse the stored favourite number if the file exists; otherwise ask and save.
try:
    with open (filename) as f:
        x=json.load(f)
    print (f"Я знаю ваше любимое число, это - {x}")
except FileNotFoundError:
    filename='numbers.json'
    x=int(input("Введите ваше любимое число: "))
    with open (filename, "w") as f:
        json.dump(x,f)
#Test scenario
import unittest
def get_format(first, last):
    """Return 'First Last' built from the two name parts, title-cased."""
    return f'{first} {last}'.title()
class NamesTest(unittest.TestCase):
    """Unit test: get_format should title-case 'first last'."""
    def test_first(self):
        formated=get_format('janis','griffin')
        self.assertEqual(formated, 'Janis Griffin')
if __name__=='__main__':
    unittest.main()
#Test scenario
import unittest
def get_format(first, middle, last=''):
    """Return a title-cased full name; the third part *last* is optional.

    Note the parameter order: the optional part is appended at the end, so
    the two-argument form yields 'first middle'.
    """
    full = f'{first} {middle} {last}' if last else f'{first} {middle}'
    return full.title()
class NamesTest(unittest.TestCase):
    """Tests for the optional-third-part variant of get_format."""
    def test_first(self):
        formated=get_format('janis','griffin')
        self.assertEqual(formated, 'Janis Griffin')
    def test_sec(self):
        formate=get_format('wolfgang','mozart','amadeus')
        self.assertEqual(formate,'Wolfgang Mozart Amadeus')
if __name__=='__main__':
    unittest.main()
#Extra tasks
#1
import unittest
def country(city, capital):
    """Return 'City Capital' title-cased from the two names."""
    return f"{city} {capital}".title()
class NamesTest(unittest.TestCase):
    """Unit test: country should title-case 'city country'."""
    def test_first(self):
        fr=country('moscow','russia')
        self.assertEqual(fr,"Moscow Russia")
if __name__=='__main__':
    unittest.main()
#2
import unittest
def country(city, capital, population=None):
    """Return a title-cased 'city capital [population]' string.

    The population is appended only when a truthy value is supplied.
    """
    if population:
        text = f'{city} {capital} {population}'
    else:
        text = f"{city} {capital}"
    return text.title()
class NamesTest(unittest.TestCase):
    """Tests for country() with and without the optional population."""
    def test_first(self):
        fr=country('moscow','russia')
        self.assertEqual(fr,"Moscow Russia")
    def test_two(self):
        fr=country('new-York',"usa",'50000000')
        self.assertEqual(fr,'New-York Usa 50000000')
if __name__=='__main__':
    unittest.main()
# Quick reference for unittest assertion helpers:
# assertEqual(a,b) -> a==b
# assertNotEqual (a,b) -> a!=b
# assertTrue(x) -> x==True
# assertFalse(x) -> x==False
# assertIn(element , list) -> checks the element IS in the list
# assertNotIn(element , list) -> checks the element is NOT in the list
#class for testing
import unittest
class Anonym():
    """An anonymous survey: one question, any number of stored responses."""

    def __init__(self, question):
        self.question = question
        self.responses = []

    def show_question(self):
        """Print the survey question."""
        print(self.question)

    def store_resp(self, new_resp):
        """Record a single response."""
        self.responses.append(new_resp)

    def show_res(self):
        """Print every response collected so far, one dashed line each."""
        print("Survey total : ")
        for response in self.responses:
            print(f'- {response}')
question="What is your first language ?"
my_survey=Anonym(question)
my_survey.show_question()
print ("\t\t\t\t\tPress enter in an empty field to stop the program\n")
# Collect responses until the user submits an empty line.
while True:
    x=input("So, what you think ? : ")
    if x=='':
        break
    my_survey.store_resp(x)
my_survey.show_res()
class TestAnon(unittest.TestCase):
    """Checks that Anonym stores single and multiple responses."""
    def test_store_inf(self):
        question="What is your first language ?"
        my_survey=Anonym(question)
        my_survey.show_question()
        my_survey.store_resp("English")
        self.assertIn('English', my_survey.responses)
    def test_store_three(self):
        question="What is your first language ?"
        my_survey=Anonym(question)
        my_survey.show_question()
        reps=["English","French","Spanish"]
        for i in reps:
            my_survey.store_resp(i)
        for res in reps:
            self.assertIn(res, my_survey.responses)
if __name__=='__main__':
    unittest.main()
#class for testing
# NOTE: this whole section repeats the Anonym demo above verbatim.
import unittest
class Anonym():
    """Anonymous survey (duplicate of the class defined earlier)."""
    def __init__(self,question):
        self.question=question
        self.responses=[]
    def show_question(self):
        print (self.question)
    def store_resp(self, new_resp):
        self.responses.append(new_resp)
    def show_res(self):
        print ("Survey total : ")
        for i in self.responses:
            print (f'- {i}')
question="What is your first language ?"
my_survey=Anonym(question)
my_survey.show_question()
print ("\t\t\t\t\tPress enter in an empty field to stop the program\n")
while True:
    x=input("So, what you think ? : ")
    if x=='':
        break
    my_survey.store_resp(x)
my_survey.show_res()
class TestAnon(unittest.TestCase):
    """Duplicate of the TestAnon suite above."""
    def test_store_inf(self):
        question="What is your first language ?"
        my_survey=Anonym(question)
        my_survey.show_question()
        my_survey.store_resp("English")
        self.assertIn('English', my_survey.responses)
    def test_store_three(self):
        question="What is your first language ?"
        my_survey=Anonym(question)
        my_survey.show_question()
        reps=["English","French","Spanish"]
        for i in reps:
            my_survey.store_resp(i)
        for res in reps:
            self.assertIn(res, my_survey.responses)
if __name__=='__main__':
    unittest.main()
#method SetUp()
class TestAnonist(unittest.TestCase):
    """Same checks as TestAnon, but sharing fixtures via setUp()."""
    def setUp(self):
        # Runs before EVERY test method; builds a fresh survey each time.
        question="What is your first language ?"
        self.my_survey=Anonym(question)
        self.responses=['English',"French","Spanish"]
    def test_store_inf(self):
        self.my_survey.store_resp(self.responses[0])
        self.assertIn(self.responses[0], self.my_survey.responses)
    def test_store_three(self):
        for response in self.responses:
            self.my_survey.store_resp(response)
        for response in self.responses:
            self.assertIn(response, self.my_survey.responses)
if __name__=='__main__':
    unittest.main()
#Extra Task
import unittest
class Employee():
    """An employee with a yearly salary that can be raised."""

    def __init__(self, name, lastname, salary):
        self.name = name
        self.lastname = lastname
        self.salary = salary

    def give_raise(self, high=5000):
        """Increase the salary by *high* (default 5000) and announce it."""
        self.salary += high
        print(f"Now {self.name} {self.lastname} gets {self.salary} dollars per year ! ")
class TestEmp(unittest.TestCase):
    """Checks Employee.give_raise with the default and a custom amount."""
    def setUp(self):
        self.emp=Employee("Richard","Gauss",50_000)
        self.REE=55_000 # expected salary after the default 5000 raise
        self.rip=75_000 # expected salary after a custom 25000 raise
    def test_give_raise_default(self):
        self.emp.give_raise()
        self.assertEqual(self.REE, self.emp.salary)
    def test_custom (self):
        self.emp.give_raise(25_000)
        self.assertEqual(self.rip, self.emp.salary)
if __name__=='__main__':
    unittest.main()
THE END OF THE PART 1
|
from __future__ import absolute_import, print_function
import inspect
import os
import shutil
from bs import config
from bs import objectives
class Action(object):
    """Base class for CLI sub-commands: a name, a help string, and two hooks."""

    def __init__(self, name, description):
        self.name = name
        self.description = description

    def add_arguments(self, parser):
        """Hook: subclasses register their argparse options here."""
        pass

    def invoke(self, args):
        """Hook: subclasses must implement the actual command."""
        raise NotImplementedError
class Demo(Action):
    """CLI action: copy the bundled demo tree into the current directory."""
    def __init__(self):
        Action.__init__(self,
                        'demo',
                        'Copy the demos into the current directory')
    def invoke(self, args):
        # The demos ship next to this module in a 'demos' subdirectory.
        demo_root = os.path.join(os.path.dirname(__file__), 'demos')
        # shutil.copytree(demo_root, '.')
        # Copy file-by-file instead of copytree so existing directories
        # in the destination are tolerated.
        for base, dirs, files in os.walk(demo_root):
            for ff in files:
                src = os.path.join(base, ff)
                # Re-root the destination path at the current directory.
                dest = os.path.join(base.replace(demo_root, '.'), ff)
                if not os.path.exists(os.path.dirname(dest)):
                    os.makedirs(os.path.dirname(dest))
                print(' copy {} -> {}'.format(src, dest))
                shutil.copy(src, dest)
class Config(Action):
    """CLI action: edit or initialize the local configuration."""

    def __init__(self):
        Action.__init__(self,
                        'config',
                        # typo fix: was 'Edit or intiialize the local configration'
                        'Edit or initialize the local configuration')

    def add_arguments(self, parser):
        parser.add_argument('--list', '-l',
                            help='list the current configuration and exit',
                            action='store_true')
        config.add_command_line_args(parser)

    def invoke(self, args):
        # With --list, just dump the current settings and stop.
        if args.list:
            config.print_config()
            exit(0)
        # Otherwise persist whatever values the parsed args applied.
        config.save()
class AddObjective(Action):
    """Action that scaffolds a new objective of a chosen type."""

    def __init__(self):
        Action.__init__(self,
                        'add',
                        'add an objective, really just adds some scaffolding; '
                        'you will probably need to update the objectives file')
        # Map lowercased class name -> class for every public class
        # exported by the objectives module.
        members = inspect.getmembers(objectives, inspect.isclass)
        self.objectives = {name.lower(): cls
                           for name, cls in members
                           if not name.startswith('_')}

    def add_arguments(self, parser):
        """Register the objective type, its name and its source files."""
        parser.add_argument('objective_type',
                            help='type of the objective',
                            type=str,
                            choices=list(self.objectives.keys()))
        parser.add_argument('name',
                            help='name of the objective',
                            type=str)
        parser.add_argument('sources',
                            nargs='+',
                            type=str,
                            help='sources for the objective')

    def invoke(self, args):
        """Instantiate the requested objective and persist the objectives file."""
        # NOTE(review): the constructor call is kept for its side effects —
        # presumably it registers the objective; confirm against objectives.save().
        self.objectives[args.objective_type](args.name, args.sources)
        objectives.save()
class Build(Action):
    """Action that builds whatever build-system knows about."""

    def __init__(self):
        Action.__init__(self,
                        'build',
                        'build whatever build-system knows about')

    def add_arguments(self, parser):
        """Register the list/graph/flatten/all/debug flags."""
        parser.add_argument('--flatten', '-F',
                            help='Also list flattened objectives (ignores the absence of --all)',
                            action='store_true')
        parser.add_argument('--list', '-l',
                            help='list known objectives, excluding object files, and exit',
                            action='store_true')
        parser.add_argument('--graph', '-g',
                            help='graph known objectives, excluding object files, and exit',
                            action='store_true')
        parser.add_argument('--all', '-a',
                            help='modifies the behavior of list and graph to include all objectives',
                            action='store_true')
        parser.add_argument('--debug', '-d',
                            help='use the debug compiler (and configuration)',
                            action='store_true')

    def invoke(self, args):
        """Set the requested mode flags and execute the objectives file."""
        # import here to prevent recursive import error
        from bs import compilers_and_linkers
        compilers_and_linkers.LIST = args.list
        compilers_and_linkers.GRAPH = args.graph
        compilers_and_linkers.LIST_ALL = args.all
        compilers_and_linkers.FLATTEN = args.flatten
        # Fix: the original open() leaked the file handle; read within a
        # context manager so the file is closed before exec runs.
        with open(objectives.OBJECTIVES_FILE) as fp:
            source = fp.read()
        exec(compile(source, objectives.OBJECTIVES_FILE, 'exec'))
class Clean(Action):
    """Action that cleans all generated files."""

    def __init__(self):
        Action.__init__(self,
                        'clean',
                        'clean all generated files')

    def add_arguments(self, parser):
        """Clean takes no extra arguments."""
        pass

    def invoke(self, args):
        """Execute the objectives file with the CLEAN flag set."""
        # import here to prevent recursive import error
        from bs import compilers_and_linkers
        compilers_and_linkers.CLEAN = True
        # Fix: the original open() leaked the file handle; read within a
        # context manager so the file is closed before exec runs.
        with open(objectives.OBJECTIVES_FILE) as fp:
            source = fp.read()
        exec(compile(source, objectives.OBJECTIVES_FILE, 'exec'))
|
from .run_command import run_command
def make_volume_dict():
    """Parse ``sudo blkid`` output into {device: {field: value}}.

    Each blkid line looks like ``/dev/sda1: UUID="..." TYPE="..."``;
    the trailing colon is stripped from the device name and every
    ``FIELD="value"`` pair becomes an entry of that device's dict.
    """
    volumes = {}
    blkid_output = run_command("sudo blkid").stdout.strip("\n")
    for raw_line in blkid_output.split(sep="\n"):
        tokens = raw_line.split()
        device = tokens[0][:-1]  # drop the trailing ':' from the device name
        fields = {}
        for pair in tokens[1:]:
            key, value = pair.replace('"', "").split(sep="=")
            fields[key] = value
        volumes[device] = fields
    return volumes
|
import requests
import logging
import json
import dateutil.parser
import datetime
SOLR = "http://127.0.0.1:8983/solr/lgbt/select?"
def get_stats():
    """Return Solr's stats block for the ``date`` field, or None on error."""
    res = requests.get(SOLR + "wt=json&rows=0&q=*&stats=true&stats.field=date")
    if res.status_code != 200:
        logging.error("Bad Response: {0}".format(res.status_code))
        return None
    return res.json()["stats"]["stats_fields"]["date"]
def get_year_count(year):
    """Return the number of documents indexed for ``year``, or None on error."""
    res = requests.get(SOLR + "wt=json&rows=0&q=year:{0}".format(year))
    if res.status_code != 200:
        logging.error("Bad Response: {0}".format(res.status_code))
        return None
    return res.json()["response"]["numFound"]
if __name__ == "__main__":
    # Count documents per year between the oldest and newest indexed dates.
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("requests").setLevel(logging.WARNING)
    stats = get_stats()
    start = dateutil.parser.parse(stats["min"]).year
    end = dateutil.parser.parse(stats["max"]).year
    year_counts = []
    for i in range(start, end + 1):
        year_counts.append([i, get_year_count(i)])
    # Fix: `print json.dumps(...)` was a Python-2 print statement and a
    # SyntaxError on Python 3; use the function form (valid on both).
    print(json.dumps(year_counts))
|
# coding: utf-8
from workspacemanager.utils import *
import sh
def dispFreeze(theProjectDirectory=None):
    """Print ``pip freeze`` output for the project's pew-managed virtualenv.

    :param theProjectDirectory: project directory; resolved by getDirs when None.
    """
    # Resolve the relevant directories for this project.
    (thisLibPackageDirectory,
     theProjectDirectory,
     theProjectPackageDirectory,
     thisLibName) = getDirs(theProjectDirectory=theProjectDirectory)
    # The venv is named after the package directory with a "-venv" suffix.
    venvName = theProjectPackageDirectory.split('/')[-1] + "-venv"
    print("pip freeze for " + venvName)
    # Run pip freeze inside the named virtualenv via pew and show the result.
    print(sh.pew("in", venvName, "pip", "freeze"))
# Allow running this module directly as a small CLI.
if __name__ == '__main__':
    dispFreeze()
|
import imageio
import metaimageio
import numpy as np
import pytest
from test_io import file_with_suffix, SUFFIX
@pytest.mark.parametrize('suffix', SUFFIX)
def test_imageio(suffix):
    """Round-trip a random volume through imageio's MetaImageIO plugin."""
    # NOTE(review): presumably registers the MetaImageIO plugin with imageio —
    # confirm against metaimageio's documentation.
    metaimageio.imageio()
    with file_with_suffix(suffix) as f:
        original = 100 * np.random.random_sample((2, 3, 4))
        imageio.imwrite(f, original, format='MetaImageIO')
        restored = imageio.imread(f, format='MetaImageIO')
        np.testing.assert_almost_equal(restored, original)
|
import numpy as np
import pandas as pd
from pandas import DataFrame
from typing import Tuple
from plotnine import *
import math
def fit_linear_regression(mat_x: np.array, res_vec: np.array) -> Tuple[np.array, np.array]:
    """
    Linear Regression solver.
    :param mat_x: design matrix X, one column per sample (np array)
    :param res_vec: response vector y (np array)
    :return: tuple (coefficient vector, singular values of the design matrix)
    """
    # Prepend a row of ones so the model includes an intercept term.
    with_bias = np.vstack((np.ones(mat_x.shape[1]), mat_x))
    design = with_bias.transpose()  # samples now in rows
    coefficients = np.linalg.pinv(design) @ res_vec
    singular_values = np.linalg.svd(design, compute_uv=False)
    return coefficients, singular_values
def predict(x: np.array, coef_v: np.array) -> np.array:
    """
    Predict responses for a design matrix.
    :param x: design matrix, one column per sample.
    :param coef_v: coefficient vector.
    :return: predicted response vector (one value per sample).
    """
    return np.dot(x.transpose(), coef_v)
def mse(res_vec: np.array, prediction_vec: np.array) -> float:
    """
    Mean squared error between a response vector and predictions.
    :param res_vec: response vector.
    :param prediction_vec: prediction vector.
    :return: the mean squared error.
    """
    squared_norm = np.linalg.norm(prediction_vec - res_vec) ** 2
    return squared_norm / float(res_vec.size)
def load_data(path: str) -> np.array:
    """
    Load the housing csv, drop invalid rows and encode categorical columns.
    :param path: path to the csv file.
    :return: cleaned DataFrame, or None when the file cannot be loaded.
    """
    try:
        data = pd.read_csv(path)
    except FileNotFoundError:
        print("FAILED TO FIND THE DATA LOCATION!")
        return
    except Exception:
        print("AN ERROR OCCURRED WHILE LOADING THE DATA!")
        return
    # Keep only rows with physically sensible values and required fields.
    keep = ((data['price'] > 0) & (data['sqft_lot15'] > 0) &
            (data['sqft_living'] > 0) & (data['floors'] > 0))
    keep &= (data['condition'] <= 5) & (data['condition'] >= 1)
    keep &= data['yr_built'] <= 2015
    keep &= data['date'].notnull()
    keep &= data['id'].notnull()
    data = data.loc[keep]
    data = data.drop_duplicates()
    data = categorical_features(data)  # one-hot encode zipcode and date parts
    # Drop the raw categorical columns and the row id.
    return data.drop(['id', 'date', 'zipcode'], axis=1)
def categorical_features(data: np.array) -> np.array:
    """
    One-hot encode the categorical columns (zipcode and date parts).
    :param data: input DataFrame with 'zipcode' and 'date' columns.
    :return: DataFrame with dummy columns appended (originals kept).
    """
    # Zip code: straight one-hot encoding.
    data = pd.concat([data, pd.get_dummies(data['zipcode'])], axis=1)
    # Date: split the 'YYYYMMDD...' string into year/month/day, encode each.
    raw_dates = data['date']
    parts = pd.concat([raw_dates.str.slice(0, 4),
                       raw_dates.str.slice(4, 6),
                       raw_dates.str.slice(6, 8)], axis=1)
    parts.columns = ['year', 'month', 'day']  # rename for easier access
    for part_name in ('year', 'month', 'day'):
        data = pd.concat([data, pd.get_dummies(parts[part_name])], axis=1)
    return data
def plot_singular_values(singular_values: iter):
    """
    Scree plot of the given singular values, largest first.
    :param singular_values: collection of singular values (sorted in place).
    :return: ggplot object.
    """
    ordered = singular_values
    ordered.sort()            # ascending, in place (mutates the argument)
    ordered = ordered[::-1]   # descending order for the scree plot
    positions = [index for index in range(1, len(singular_values) + 1)]
    df = DataFrame({'x': positions, 'y': ordered})
    return ggplot(df, aes(x='x', y='y')) + geom_point(size=1) + geom_line() + \
        ggtitle("Scree plot of the singular values") + \
        labs(y="Singular value", x="Component Number")
def question_15(data):
    """
    Build the design matrix (with bias row) and plot its singular values.
    :param data: cleaned housing DataFrame including the 'price' column.
    :return: scree plot of the singular values.
    """
    features = data.drop(['price'], axis=1).transpose()
    bias_row = np.ones(features.shape[1])           # intercept row of ones
    design = np.vstack((bias_row, features)).transpose()
    singulars = np.linalg.svd(design, compute_uv=False)
    return plot_singular_values(singulars)
def split_data_train_and_test(data):
    """
    Randomly split the data: ~3/4 training set, ~1/4 test set.
    The RNG is seeded so the split is reproducible.
    :param data: full dataset.
    :return: (train, test) tuple.
    """
    np.random.seed(7)  # fixed seed keeps the split deterministic
    mask = np.random.rand(len(data)) < 0.75
    return data[mask], data[~mask]
def question_16(data):
    """Train on growing prefixes (1%..100%) of the training split and record
    the MSE of each fitted model on the fixed test split.

    :param data: cleaned housing DataFrame including the 'price' column.
    :return: list of 100 MSE values, one per training percentage.
    """
    training_data, testing_data = split_data_train_and_test(data)
    real_price_vec = testing_data['price']
    # Prepare the test design matrix once: drop price, transpose so samples
    # are columns, then prepend the intercept row of ones.
    testing_data = testing_data.drop(['price'], axis=1)
    testing_data = testing_data.transpose()
    ones_vec = np.ones(testing_data.shape[1])  # intercept row of ones
    testing_data = np.vstack((ones_vec, testing_data))
    price_vector = training_data['price']
    training_data = training_data.drop(['price'], axis=1)
    mses = []
    for i in range(1, 101):
        train_number = i / 100
        # Number of training rows for this percentage (floor keeps it integral).
        rows = math.floor(train_number*len(training_data))
        mat_x = training_data[:math.floor(train_number*len(training_data))]
        # fit_linear_regression expects samples as columns, hence transpose.
        mat_x = mat_x.transpose()
        w, singulars = fit_linear_regression(mat_x, price_vector[:rows])
        pred = predict(testing_data, w)
        mses.append(mse(real_price_vec, pred))
    return mses
def plot_results(res):
    """
    Plot the MSE over the test set as a function of p%.
    :param res: list of 100 MSE values, one per training percentage.
    :return: ggplot object.
    """
    x = [index for index in range(1, 101)]
    df = DataFrame({'x': x, 'y': res})
    # Fixed typo in the axis label: "precent" -> "percent".
    return ggplot(df, aes(x='x', y='y')) + geom_point(size=1) + geom_line() + \
        ggtitle("MSE over the test set as a function of p%") + \
        labs(y="MSE", x="p% (percent of the data trained)")
def plot_scatter_features_values(vector_1, res_v, name):
    """
    Scatter plot of one non-categorical feature against the price,
    annotated with their Pearson correlation.
    :param vector_1: feature vector.
    :param res_v: price (response) vector.
    :param name: feature name used in the labels.
    :return: ggplot object.
    """
    covariance = np.cov(vector_1, res_v, ddof=1)[1][0]
    std_feature = np.std(vector_1, ddof=1)
    std_price = np.std(res_v, ddof=1)
    pearson_correlation = covariance / (std_feature * std_price)
    df = DataFrame({'x': res_v, 'y': vector_1})
    return ggplot(df, aes(x='x', y='y')) + geom_point(size=1)+ theme_bw() + geom_line() + \
        ggtitle("Non-categorical feature ("+name+") vs the price\n The Pearson correlation is " +
                str(pearson_correlation) + "\n") + \
        labs(y=name+" feature", x="Response vector (=price)")
def feature_evaluation(mat_x: DataFrame, res_v: np.array):
    """
    Plot each of the first 17 features against the price.
    :param mat_x: feature matrix.
    :param res_v: price vector.
    """
    first_features = mat_x.iloc[:, :17]
    for idx in range(17):
        feature = first_features.iloc[:, idx]
        print(plot_scatter_features_values(feature, res_v, feature.name))
if __name__ == "__main__":
    # Load and clean the dataset, then run each exercise in turn.
    PATH_TO_CSV = "kc_house_data.csv"
    data = load_data(PATH_TO_CSV)
    print(question_15(data))  # Question 15: scree plot of singular values
    res = question_16(data)
    print(plot_results(res))  # MSE as a function of training percentage
    # Per-feature scatter plots against the price.
    res_v = data['price']
    mat_x = data.drop(['price'], axis=1)  # drop prices
    feature_evaluation(mat_x, res_v)
|
# Silence all urllib3 warnings globally as a side effect of importing
# this module (e.g. InsecureRequestWarning for unverified HTTPS).
from urllib3 import disable_warnings
disable_warnings()
|
import os
import pickle
from jina.drivers.control import RouteDriver
from jina.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_dump_driver(tmpdir):
    """Pickling a RouteDriver keeps its init args but resets runtime state."""
    driver = RouteDriver(raise_no_dealer=True)
    driver.idle_dealer_ids = ('hello', 'there')
    dump_path = str(tmpdir / 'a.bin')
    with open(dump_path, 'wb') as fp:
        pickle.dump(driver, fp)
    with open(dump_path, 'rb') as fp:
        restored = pickle.load(fp)
    # init args & kwargs values should be saved
    assert restored.raise_no_dealer
    # other stateful values should be reset to init()'s time
    assert not restored.idle_dealer_ids
def test_dump_excutor_with_drivers(tmpdir):
    """Saving and loading an executor copies its drivers and resets their state."""
    executor = BaseExecutor.load_config(f'{cur_dir}/yaml/route.yml')
    executor.touch()
    executor._drivers['ControlRequest'][0].idle_dealer_ids = ('hello', 'there')
    save_path = str(tmpdir / 'a.bin')
    executor.save(save_path)
    print(executor._drivers)
    restored = BaseExecutor.load(save_path)
    print(restored._drivers)
    # The loaded copy must not share driver objects with the original ...
    assert id(restored._drivers['ControlRequest'][0]) != id(executor._drivers['ControlRequest'][0])
    # ... and runtime state must be back to its init-time default.
    assert not restored._drivers['ControlRequest'][0].idle_dealer_ids
|
#!/usr/bin/python3
import logging
import unittest
import astropy.units as u
import simtools.io_handler as io
from simtools.simtel.simtel_events import SimtelEvents
# Root logger at DEBUG so simtel parsing details are visible in test runs.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class TestSimtelEvents(unittest.TestCase):
    """Tests for SimtelEvents file loading, event selection and unit checks."""

    def setUp(self):
        """Collect the two mini simtel test files shipped with the test data."""
        run_names = [
            "run201_proton_za20deg_azm0deg-North-Prod5_test-production-5-mini.simtel.zst",
            "run202_proton_za20deg_azm0deg-North-Prod5_test-production-5-mini.simtel.zst",
        ]
        self.testFiles = [io.getTestDataFile(name) for name in run_names]

    def test_reading_files(self):
        """Files passed to the constructor are registered."""
        simtel_events = SimtelEvents(inputFiles=self.testFiles)
        self.assertEqual(len(simtel_events.inputFiles), 2)

    def test_loading_files(self):
        """Files can also be loaded after construction."""
        simtel_events = SimtelEvents()
        self.assertEqual(len(simtel_events.inputFiles), 0)
        simtel_events.loadInputFiles(self.testFiles)
        self.assertEqual(len(simtel_events.inputFiles), 2)

    def test_loading_header(self):
        """Header and summary loading completes without error."""
        simtel_events = SimtelEvents(inputFiles=self.testFiles)
        simtel_events.loadHeaderAndSummary()

    def test_select_events(self):
        """The two mini files together yield 7 selectable events."""
        simtel_events = SimtelEvents(inputFiles=self.testFiles)
        self.assertEqual(len(simtel_events.selectEvents()), 7)

    def test_units(self):
        """countSimulatedEvents rejects missing or wrong units."""
        simtel_events = SimtelEvents(inputFiles=self.testFiles)
        # coreMax without units
        with self.assertRaises(TypeError):
            simtel_events.countSimulatedEvents(energyRange=[0.3 * u.TeV, 300 * u.TeV], coreMax=1500)
        # energyRange without units
        with self.assertRaises(TypeError):
            simtel_events.countSimulatedEvents(energyRange=[0.3, 300], coreMax=1500 * u.m)
        # energyRange with wrong (length) units
        with self.assertRaises(TypeError):
            simtel_events.countSimulatedEvents(
                energyRange=[0.3 * u.m, 300 * u.m],
                coreMax=1500 * u.m
            )
# Run the SimtelEvents tests when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
from queryset_utils import annotate_mock_class, make_mock_list_from_args, get_keys_from_dict, make_mock_in_bulk_dict, make_mock_aggregate_dict, annotate_return_value
from unittest.mock import MagicMock
import unittest
class TestAnnotateMockClass(unittest.TestCase):
    """annotate_mock_class should attach each kwarg as an attribute."""

    def test_annotate_mock_class(self):
        annotated = annotate_mock_class({'test': 'test'}, MagicMock())
        self.assertTrue(hasattr(annotated, 'test'))
        self.assertEqual(annotated.test, 'test')
class TestMakeMockListFromArgs(unittest.TestCase):
    """make_mock_list_from_args should yield one placeholder per argument."""

    def test_make_mock_list_from_args(self):
        self.assertEqual(make_mock_list_from_args(['test', 'test2']), [1, 1])

    def test_make_mock_list_from_args_empty_args(self):
        # With no args a single placeholder is still produced.
        self.assertEqual(make_mock_list_from_args([]), [1])
class TestGetKeysFromDict(unittest.TestCase):
    """get_keys_from_dict should list the dictionary's keys in order."""

    def test_get_keys_from_dict(self):
        sample = {'key1': 1, 'key2': 2}
        self.assertEqual(get_keys_from_dict(sample), ['key1', 'key2'])
class TestMakeMockInBulkDict(unittest.TestCase):
    """make_mock_in_bulk_dict maps each arg (or '1' when empty) to ' '."""

    def test_make_mock_in_bulk_dict(self):
        self.assertEqual(make_mock_in_bulk_dict(['test']), {'test': ' '})

    def test_make_mock_in_bulk_dict_empty_args(self):
        self.assertEqual(make_mock_in_bulk_dict([]), {'1': ' '})
class TestMakeMockAnnotateDict(unittest.TestCase):
    """make_mock_aggregate_dict should echo the kwargs unchanged."""

    def test_make_mock_aggregate_dict(self):
        self.assertEqual(make_mock_aggregate_dict({'test': 'test'}),
                         {'test': 'test'})
class TestAnnotateReturnValue(unittest.TestCase):
    """annotate_return_value blanks existing values and adds annotated keys."""

    def test_annotate_return_value(self):
        annotated = annotate_return_value({'test': 'test'},
                                          {'original_key': 'original_value'})
        self.assertEqual(annotated, {'original_key': ' ', 'test': ' '})
|
from ir_sim.env import env_base
from math import sqrt, pi
from gym import spaces
from gym_env.envs.rvo_inter import rvo_inter
import numpy as np
class ir_gym(env_base):
    """Gym-style multi-robot navigation environment built on ir_sim's env_base.

    Observations concatenate each robot's proprioceptive state (current
    velocity, desired velocity, heading, radius) with per-neighbour
    reciprocal-velocity-obstacle (RVO) features; rewards mix RVO-based
    shaping with collision/arrival terms.
    """
    def __init__(self, world_name, neighbors_region=5, neighbors_num=10, vxmax = 1.5, vymax = 1.5, env_train=True, acceler = 0.5, **kwargs):
        """Build the world and the RVO helper.

        :param world_name: world configuration forwarded to env_base.
        :param neighbors_region: neighbourhood radius considered for RVO.
        :param neighbors_num: maximum number of neighbours considered.
        :param vxmax: x velocity limit handed to the RVO helper.
        :param vymax: y velocity limit handed to the RVO helper.
        :param env_train: training-mode flag forwarded to rvo_inter.
        :param acceler: acceleration limit.
        """
        super(ir_gym, self).__init__(world_name=world_name, **kwargs)
        # self.obs_mode = kwargs.get('obs_mode', 0) # 0 drl_rvo, 1 drl_nrvo
        # self.reward_mode = kwargs.get('reward_mode', 0)
        self.radius_exp = kwargs.get('radius_exp', 0.2)
        self.env_train = env_train
        self.nr = neighbors_region
        self.nm = neighbors_num
        self.rvo = rvo_inter(neighbors_region, neighbors_num, vxmax, vymax, acceler, env_train, self.radius_exp)
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(5,), dtype=np.float32)
        self.action_space = spaces.Box(low=np.array([-1, -1]), high=np.array([1, 1]), dtype=np.float32)
        # (p1..p8) weights consumed by rvo_reward_cal and mov_reward.
        self.reward_parameter = kwargs.get('reward_parameter', (0.2, 0.1, 0.1, 0.2, 0.2, 1, -20, 20))
        self.acceler = acceler
        self.arrive_flag_cur = False
        # Number of features per velocity-obstacle entry in the external
        # observation; used for zero-padding when no VO is present.
        self.rvo_state_dim = 8
    def cal_des_omni_list(self):
        """Return each robot's desired omnidirectional velocity."""
        des_vel_list = [robot.cal_des_vel_omni() for robot in self.robot_list]
        return des_vel_list
    def rvo_reward_list_cal(self, action_list, **kwargs):
        """Compute the RVO shaping reward for every robot/action pair."""
        ts = self.components['robots'].total_states() # robot_state_list, nei_state_list, obs_circular_list, obs_line_list
        rvo_reward_list = list(map(lambda robot_state, action: self.rvo_reward_cal(robot_state, ts[1], ts[2], ts[3], action, self.reward_parameter, **kwargs), ts[0], action_list))
        return rvo_reward_list
    def rvo_reward_cal(self, robot_state, nei_state_list, obs_cir_list, obs_line_list, action, reward_parameter=(0.2, 0.1, 0.1, 0.2, 0.2, 1, -10, 20), **kwargs):
        """RVO shaping reward for one robot and one candidate action.

        Penalizes deviation from the desired velocity and short expected
        collision times when the action lies inside a velocity obstacle.
        NOTE(review): the default reward_parameter here differs from the
        (-20, 20) default set in __init__ — confirm which is intended.
        """
        vo_flag, min_exp_time, min_dis = self.rvo.config_vo_reward(robot_state, nei_state_list, obs_cir_list, obs_line_list, action, **kwargs)
        # Last two entries of robot_state hold the desired velocity.
        des_vel = np.round(np.squeeze(robot_state[-2:]), 2)
        p1, p2, p3, p4, p5, p6, p7, p8 = reward_parameter
        dis_des = sqrt((action[0] - des_vel[0] )**2 + (action[1] - des_vel[1])**2)
        max_dis_des = 3
        dis_des_reward = - dis_des / max_dis_des # (0-1)
        exp_time_reward = - 0.2/(min_exp_time+0.2) # (0-1)
        # rvo reward: different weighting inside vs outside a velocity obstacle
        if vo_flag:
            rvo_reward = p2 + p3 * dis_des_reward + p4 * exp_time_reward
            if min_exp_time < 0.1:
                # Imminent collision dominates the shaping term.
                rvo_reward = p2 + p1 * p4 * exp_time_reward
        else:
            rvo_reward = p5 + p6 * dis_des_reward
        rvo_reward = np.round(rvo_reward, 2)
        return rvo_reward
    def obs_move_reward_list(self, action_list, **kwargs):
        """Per-robot (observation, reward, done, info) after applying actions."""
        ts = self.components['robots'].total_states() # robot_state_list, nei_state_list, obs_circular_list, obs_line_list
        obs_reward_list = list(map(lambda robot, action: self.observation_reward(robot, ts[1], ts[2], ts[3], action, **kwargs), self.robot_list, action_list))
        obs_list = [l[0] for l in obs_reward_list]
        reward_list = [l[1] for l in obs_reward_list]
        done_list = [l[2] for l in obs_reward_list]
        info_list = [l[3] for l in obs_reward_list]
        return obs_list, reward_list, done_list, info_list
    def observation_reward(self, robot, nei_state_list, obs_circular_list, obs_line_list, action, **kwargs):
        """Build one robot's observation and movement reward.

        Returns [observation, reward, done, info] where done flags a
        collision and info flags arrival at the goal.
        """
        robot_omni_state = robot.omni_state()
        des_vel = np.squeeze(robot.cal_des_vel_omni())
        done = False
        # Reward arrival only once: the first step on which arrive() turns true.
        if robot.arrive() and not robot.arrive_flag:
            robot.arrive_flag = True
            arrive_reward_flag = True
        else:
            arrive_reward_flag = False
        obs_vo_list, vo_flag, min_exp_time, collision_flag = self.rvo.config_vo_inf(robot_omni_state, nei_state_list, obs_circular_list, obs_line_list, action, **kwargs)
        radian = robot.state[2]
        cur_vel = np.squeeze(robot.vel_omni)
        radius = robot.radius_collision* np.ones(1,)
        # Proprioceptive part: velocity, desired velocity, heading, radius.
        propri_obs = np.concatenate([ cur_vel, des_vel, radian, radius])
        if len(obs_vo_list) == 0:
            # No neighbours/obstacles: pad with a zero VO entry.
            exter_obs = np.zeros((self.rvo_state_dim,))
        else:
            exter_obs = np.concatenate(obs_vo_list) # vo list
        observation = np.round(np.concatenate([propri_obs, exter_obs]), 2)
        # dis2goal = sqrt( robot.state[0:2] - robot.goal[0:2])
        mov_reward = self.mov_reward(collision_flag, arrive_reward_flag, self.reward_parameter, min_exp_time)
        reward = mov_reward
        done = True if collision_flag else False
        info = True if robot.arrive_flag else False
        return [observation, reward, done, info]
    def mov_reward(self, collision_flag, arrive_reward_flag, reward_parameter=(0.2, 0.1, 0.1, 0.2, 0.2, 1, -20, 15), min_exp_time=100, dis2goal=100):
        """Sparse movement reward: p7 on collision, p8 on (first) arrival.

        min_exp_time and dis2goal are currently unused; time_reward is a
        placeholder kept at zero.
        """
        p1, p2, p3, p4, p5, p6, p7, p8 = reward_parameter
        collision_reward = p7 if collision_flag else 0
        arrive_reward = p8 if arrive_reward_flag else 0
        time_reward = 0
        mov_reward = collision_reward + arrive_reward + time_reward
        return mov_reward
    def osc_reward(self, state_list):
        # to avoid oscillation: penalize four consecutive sign flips of the
        # heading change with -10, otherwise return 0.
        dif_rad_list = []
        if len(state_list) < 3:
            return 0
        for i in range(len(state_list) - 1):
            dif = ir_gym.wraptopi(state_list[i+1][2, 0] - state_list[i][2, 0])
            dif_rad_list.append(round(dif, 2))
        for j in range(len(dif_rad_list)-3):
            if dif_rad_list[j] * dif_rad_list[j+1] < -0.05 and dif_rad_list[j+1] * dif_rad_list[j+2] < -0.05 and dif_rad_list[j+2] * dif_rad_list[j+3] < -0.05:
                print('osc', dif_rad_list[j], dif_rad_list[j+1], dif_rad_list[j+2], dif_rad_list[j+3])
                return -10
        return 0
    def observation(self, robot, nei_state_list, obs_circular_list, obs_line_list):
        """Build one robot's observation without computing a reward
        (used by env_reset / env_observation)."""
        robot_omni_state = robot.omni_state()
        des_vel = np.squeeze(robot_omni_state[-2:])
        obs_vo_list, _, min_exp_time, _ = self.rvo.config_vo_inf(robot_omni_state, nei_state_list, obs_circular_list, obs_line_list)
        cur_vel = np.squeeze(robot.vel_omni)
        radian = robot.state[2]
        radius = robot.radius_collision* np.ones(1,)
        if len(obs_vo_list) == 0:
            exter_obs = np.zeros((self.rvo_state_dim,))
        else:
            exter_obs = np.concatenate(obs_vo_list) # vo list
        propri_obs = np.concatenate([ cur_vel, des_vel, radian, radius])
        observation = np.round(np.concatenate([propri_obs, exter_obs]), 2)
        return observation
    def env_reset(self, reset_mode=1, **kwargs):
        """Reset all robots and return their fresh observations."""
        self.components['robots'].robots_reset(reset_mode, **kwargs)
        ts = self.components['robots'].total_states()
        obs_list = list(map(lambda robot: self.observation(robot, ts[1], ts[2], ts[3]), self.robot_list))
        return obs_list
    def env_reset_one(self, id):
        """Reset a single robot by id.

        NOTE(review): robot_reset is presumably provided by env_base —
        confirm it exists on the parent class.
        """
        self.robot_reset(id)
    def env_observation(self):
        """Return the current observation of every robot (no reset)."""
        ts = self.components['robots'].total_states()
        obs_list = list(map(lambda robot: self.observation(robot, ts[1], ts[2], ts[3]), self.robot_list))
        return obs_list
    @staticmethod
    def wraptopi(theta):
        """Wrap an angle into (-pi, pi] by at most one 2*pi shift."""
        if theta > pi:
            theta = theta - 2*pi
        if theta < -pi:
            theta = theta + 2*pi
        return theta
|
from functools import partial
from unittest.mock import MagicMock
import pytest
from nanopie.auth import (
Credential,
CredentialValidator,
AuthenticationHandler,
HTTPAPIKeyAuthenticationHandler,
HTTPAPIKeyModes,
HTTPBasicAuthenticationHandler,
HTTPOAuth2BearerJWTAuthenticationHandler,
HTTPOAuth2BearerJWTModes,
)
from nanopie.globals import request
from nanopie.misc.errors import AuthenticationError
from nanopie.services.http.io import HTTPResponse
from .marks import jwt_installed, cryptography_installed
from .constants import ISSUER, RS256_PUBLIC_KEY, RS256_TOKEN
# Shared mock credential pipeline used by the fixtures below: the extractor
# always returns `credential` and the validator always succeeds (None).
credential = Credential()
credential_extractor = MagicMock(name="credential_extractor")
credential_extractor.extract.return_value = credential
credential_validator = MagicMock(name="credential_validator")
credential_validator.validate.return_value = None
@pytest.fixture
def authentication_handler():
    """Generic handler wired to the shared mock extractor and validator."""
    return AuthenticationHandler(
        credential_extractor=credential_extractor,
        credential_validator=credential_validator,
    )
@pytest.fixture
def http_api_key_authentication_handler_header():
    """API-key handler that reads the key from a request header."""
    return HTTPAPIKeyAuthenticationHandler(
        mode=HTTPAPIKeyModes.HEADER,
        key_field_name="api_key",
        credential_validator=credential_validator,
    )
@pytest.fixture
def http_api_key_authentication_handler_query():
    """API-key handler that reads the key from the URI query arguments."""
    return HTTPAPIKeyAuthenticationHandler(
        mode=HTTPAPIKeyModes.URI_QUERY,
        key_field_name="api_key",
        credential_validator=credential_validator,
    )
@pytest.fixture
def http_basic_authentication_handler():
    """HTTP Basic handler using the shared mock validator."""
    return HTTPBasicAuthenticationHandler(
        credential_validator=credential_validator
    )
@pytest.fixture
def http_oauth2_bearer_jwt_authentication_handler_header():
    """OAuth2 bearer-JWT handler reading the token from a header."""
    return HTTPOAuth2BearerJWTAuthenticationHandler(
        key_or_secret=RS256_PUBLIC_KEY,
        algorithm="RS256",
        mode=HTTPOAuth2BearerJWTModes.HEADER,
        verify_iss=True,
        issuer=ISSUER,
    )
@pytest.fixture
def http_oauth2_bearer_jwt_authentication_handler_query():
    """OAuth2 bearer-JWT handler reading the token from the URI query."""
    return HTTPOAuth2BearerJWTAuthenticationHandler(
        key_or_secret=RS256_PUBLIC_KEY,
        algorithm="RS256",
        mode=HTTPOAuth2BearerJWTModes.URI_QUERY,
        verify_iss=True,
        issuer=ISSUER,
    )
def test_authentication_handler(setup_ctx, authentication_handler):
    """Successful authentication returns None and calls extractor + validator."""
    result = authentication_handler()
    assert result == None
    credential_extractor.extract.assert_called_with(request=request)
    credential_validator.validate.assert_called_with(credential=credential)
def test_authentication_handler_before_authentication_failure_not_callable(
    setup_ctx, authentication_handler
):
    """Decorating a non-callable raises ValueError."""
    with pytest.raises(ValueError) as ex:
        authentication_handler.before_authentication(0)
    message = str(ex.value)
    assert "must decorate a callable" in message
def test_authentication_handler_before_authentication_failure_too_little_params(
    setup_ctx, authentication_handler
):
    """A one-argument callback is rejected."""
    expected = (
        "must decorate a callable with two arguments named auth_handler "
        "and credential"
    )
    with pytest.raises(ValueError) as ex:
        @authentication_handler.before_authentication
        def single_param_callback(x):  # pylint: disable=unused-variable
            pass
    assert expected in str(ex.value)
def test_authentication_handler_before_authentication_failure_too_many_params(
    setup_ctx, authentication_handler
):
    """A three-argument callback is rejected."""
    expected = (
        "must decorate a callable with two arguments named auth_handler "
        "and credential"
    )
    with pytest.raises(ValueError) as ex:
        @authentication_handler.before_authentication
        def three_param_callback(x, y, z):  # pylint: disable=unused-variable
            pass
    assert expected in str(ex.value)
def test_authentication_handler_before_authentication_failure_misspelled_param(
    setup_ctx, authentication_handler
):
    """A callback whose two params are not named correctly is rejected."""
    expected = (
        "must decorate a callable with two arguments named auth_handler "
        "and credential"
    )
    with pytest.raises(ValueError) as ex:
        @authentication_handler.before_authentication
        def wrongly_named_callback(x, y):  # pylint: disable=unused-variable
            pass
    assert expected in str(ex.value)
def test_authentication_handler_before_authentication(
    setup_ctx, authentication_handler
):
    """A before-hook may swap in a different credential validator."""
    alt_validator = MagicMock(name="credential_validator_alt")
    alt_validator.validate.return_value = None

    @authentication_handler.before_authentication
    def before_authentication(
        auth_handler, credential
    ):  # pylint: disable=unused-variable
        assert auth_handler == authentication_handler
        assert credential == credential
        return alt_validator

    result = authentication_handler()
    assert result == None
    # The original validator is bypassed in favour of the hook's return value.
    credential_validator.assert_not_called()
    alt_validator.validate.assert_called_with(credential=credential)
def test_authentication_handler_after_authentication_failure_not_callable(
    setup_ctx, authentication_handler
):
    """Decorating a non-callable raises ValueError."""
    with pytest.raises(ValueError) as ex:
        authentication_handler.after_authentication(0)
    message = str(ex.value)
    assert "must decorate a callable" in message
def test_authentication_handler_after_authentication_failure_too_little_params(
    setup_ctx, authentication_handler
):
    """A one-argument callback is rejected."""
    expected = (
        "must decorate a callable with two arguments named auth_handler "
        "and credential"
    )
    with pytest.raises(ValueError) as ex:
        @authentication_handler.after_authentication
        def single_param_callback(x):  # pylint: disable=unused-variable
            pass
    assert expected in str(ex.value)
def test_authentication_handler_after_authentication_failure_too_many_params(
    setup_ctx, authentication_handler
):
    """A three-argument callback is rejected."""
    expected = (
        "must decorate a callable with two arguments named auth_handler "
        "and credential"
    )
    with pytest.raises(ValueError) as ex:
        @authentication_handler.after_authentication
        def three_param_callback(x, y, z):  # pylint: disable=unused-variable
            pass
    assert expected in str(ex.value)
def test_authentication_handler_after_authentication_failure_misspelled_param(
    setup_ctx, authentication_handler
):
    """A callback whose two params are not named correctly is rejected."""
    expected = (
        "must decorate a callable with two arguments named auth_handler "
        "and credential"
    )
    with pytest.raises(ValueError) as ex:
        @authentication_handler.after_authentication
        def wrongly_named_callback(x, y):  # pylint: disable=unused-variable
            pass
    assert expected in str(ex.value)
def test_authentication_handler_after_authentication(setup_ctx, authentication_handler):
    """An after-hook runs once authentication succeeds."""
    flag = MagicMock(return_value=None)

    @authentication_handler.after_authentication
    def after_authentication(
        auth_handler, credential
    ):  # pylint: disable=unused-variable
        assert auth_handler == authentication_handler
        assert credential == credential
        return flag()

    result = authentication_handler()
    assert result == None
    flag.assert_called()
def test_authentication_handler_http_api_key_header(
    setup_ctx, http_api_key_authentication_handler_header
):
    """An API key in the request headers is extracted and validated."""
    request.headers = {"api_key": "api-key"}  # pylint: disable=assigning-non-slot
    assert http_api_key_authentication_handler_header() == None
    extracted = credential_validator.validate.call_args[1]["credential"]
    assert extracted.key == "api-key"
def test_authentication_handler_http_api_key_query(
    setup_ctx, http_api_key_authentication_handler_query
):
    """An API key in the query arguments authenticates successfully."""
    request.query_args = {"api_key": "api-key"}  # pylint: disable=assigning-non-slot
    result = http_api_key_authentication_handler_query()
    assert result == None
def test_authentication_handler_http_api_key_header_failure_not_HTTP_request(
    setup_ctx, http_api_key_authentication_handler_header
):
    """A request without HTTP headers raises AttributeError."""
    with pytest.raises(AttributeError) as ex:
        http_api_key_authentication_handler_header()
    message = str(ex.value)
    assert "not a valid HTTP request" in message
def test_authentication_handler_http_api_key_header_failture_no_header(
    setup_ctx, http_api_key_authentication_handler_header
):
    """A missing API-key header yields a 401 AuthenticationError."""
    request.headers = {}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as ex:
        http_api_key_authentication_handler_header()
    assert "does not have an API key" in str(ex.value)
    response = ex.value.response
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
def test_authentication_handler_http_api_key_query_failure_not_HTTP_request(
    setup_ctx, http_api_key_authentication_handler_query
):
    """A request without query args raises AttributeError."""
    with pytest.raises(AttributeError) as ex:
        http_api_key_authentication_handler_query()
    message = str(ex.value)
    assert "not a valid HTTP request" in message
def test_authentication_handler_http_api_key_query_failture_no_query_arg(
    setup_ctx, http_api_key_authentication_handler_query
):
    """A missing API-key query argument yields a 401 AuthenticationError."""
    request.query_args = {}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as ex:
        http_api_key_authentication_handler_query()
    assert "does not have an API key" in str(ex.value)
    response = ex.value.response
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
def test_authentication_handler_http_basic(
    setup_ctx, http_basic_authentication_handler
):
    """A Basic Authorization header decodes into username and password."""
    request.headers = {  # pylint: disable=assigning-non-slot
        "Authorization": "Basic dGVzdDoxMjPCow=="
    }
    assert http_basic_authentication_handler() == None
    extracted = credential_validator.validate.call_args[1]["credential"]
    assert extracted.username == "test"
    assert extracted.password == "123£"
def test_authentication_handler_http_basic_failure_not_HTTP_request(
    setup_ctx, http_basic_authentication_handler
):
    """Invoking the Basic-auth handler outside an HTTP request raises AttributeError."""
    with pytest.raises(AttributeError) as excinfo:
        http_basic_authentication_handler()
    assert "not a valid HTTP request" in str(excinfo.value)
def test_authentication_handler_http_basic_failure_no_header(
    setup_ctx, http_basic_authentication_handler
):
    """Missing Authorization header yields 401 with a WWW-Authenticate: Basic challenge."""
    request.headers = {}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as excinfo:
        http_basic_authentication_handler()
    error = excinfo.value
    response = error.response
    assert "does not have an HTTP Authorization request header" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {"WWW-Authenticate": "Basic"}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
def test_authentication_handler_http_basic_failure_wrong_type(
    setup_ctx, http_basic_authentication_handler
):
    """A non-Basic Authorization scheme (Bearer) yields 401 with a Basic challenge."""
    request.headers = {"Authorization": "Bearer"}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as excinfo:
        http_basic_authentication_handler()
    error = excinfo.value
    response = error.response
    assert (
        "does not have an HTTP Authorization request header with the Basic type"
        in str(error)
    )
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {"WWW-Authenticate": "Basic"}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
def test_authentication_handler_http_basic_failure_corrupted_credential(
    setup_ctx, http_basic_authentication_handler
):
    """A payload that is not valid base64 yields 403 Forbidden."""
    request.headers = {  # pylint: disable=assigning-non-slot
        "Authorization": "Basic $ab123"
    }
    with pytest.raises(AuthenticationError) as excinfo:
        http_basic_authentication_handler()
    error = excinfo.value
    response = error.response
    assert "Cannot decode the credential data" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 403
    assert response.headers == {"WWW-Authenticate": "Basic"}
    assert response.mime_type == "text/html"
    assert "403 Forbidden" in response.data
def test_authentication_handler_http_basic_failure_malformed_credential(
    setup_ctx, http_basic_authentication_handler
):
    """Valid base64 ("dGVzdA==" = "test") without a user:password separator yields 403."""
    request.headers = {  # pylint: disable=assigning-non-slot
        "Authorization": "Basic dGVzdA=="
    }
    with pytest.raises(AuthenticationError) as excinfo:
        http_basic_authentication_handler()
    error = excinfo.value
    response = error.response
    assert "malformed" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 403
    assert response.headers == {"WWW-Authenticate": "Basic"}
    assert response.mime_type == "text/html"
    assert "403 Forbidden" in response.data
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_header(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_header
):
    """A valid RS256 JWT sent as a Bearer Authorization header authenticates."""
    request.headers = {  # pylint: disable=assigning-non-slot
        "Authorization": "Bearer " + RS256_TOKEN
    }
    # A successful handler returns None; compare with `is` per PEP 8 (was `== None`).
    assert http_oauth2_bearer_jwt_authentication_handler_header() is None
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_header_failure_not_HTTP_request(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_header
):
    """Invoking the bearer-JWT header handler outside an HTTP request raises AttributeError."""
    with pytest.raises(AttributeError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_header()
    assert "not a valid HTTP request" in str(excinfo.value)
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_header_failure_no_header(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_header
):
    """Missing Authorization header yields 401 with a WWW-Authenticate: Bearer challenge."""
    request.headers = {}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_header()
    error = excinfo.value
    response = error.response
    assert "does not have an HTTP Authorization request header" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {"WWW-Authenticate": "Bearer"}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_header_failure_wrong_type(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_header
):
    """A non-Bearer Authorization scheme (Basic) yields 401 with a Bearer challenge."""
    request.headers = {"Authorization": "Basic"}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_header()
    error = excinfo.value
    response = error.response
    assert (
        "does not have an HTTP Authorization request header with the Bearer type"
        in str(error)
    )
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {"WWW-Authenticate": "Bearer"}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_header_failure_invalid_JWT(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_header
):
    """A Bearer token that is not a decodable JWT yields 403 with error=invalid_token."""
    request.headers = {  # pylint: disable=assigning-non-slot
        "Authorization": "Bearer $ab123"
    }
    with pytest.raises(AuthenticationError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_header()
    error = excinfo.value
    response = error.response
    assert "JWT is not valid" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 403
    assert response.headers == {
        "WWW-Authenticate": "Bearer error=invalid_token"
    }
    assert response.mime_type == "text/html"
    assert "403 Forbidden" in response.data
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_query(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_query
):
    """A valid RS256 JWT passed as the access_token query argument authenticates."""
    request.query_args = {  # pylint: disable=assigning-non-slot
        "access_token": RS256_TOKEN
    }
    # A successful handler returns None; compare with `is` per PEP 8 (was `== None`).
    assert http_oauth2_bearer_jwt_authentication_handler_query() is None
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_query_failure_not_HTTP_request(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_query
):
    """Invoking the bearer-JWT query handler outside an HTTP request raises AttributeError."""
    with pytest.raises(AttributeError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_query()
    assert "not a valid HTTP request" in str(excinfo.value)
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_query_failure_no_query_arg(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_query
):
    """Missing access_token query argument yields 401 with a Bearer challenge."""
    request.query_args = {}  # pylint: disable=assigning-non-slot
    with pytest.raises(AuthenticationError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_query()
    error = excinfo.value
    response = error.response
    assert "does not have an access_token argument" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 401
    assert response.headers == {"WWW-Authenticate": "Bearer"}
    assert response.mime_type == "text/html"
    assert "401 Unauthorized" in response.data
@jwt_installed
@cryptography_installed
def test_authentication_handler_http_oauth2_bearer_jwt_query_failure_invalid_JWT(
    setup_ctx, http_oauth2_bearer_jwt_authentication_handler_query
):
    """An access_token that is not a decodable JWT yields 403 with error=invalid_token."""
    request.query_args = {  # pylint: disable=assigning-non-slot
        "access_token": "$ab123"
    }
    with pytest.raises(AuthenticationError) as excinfo:
        http_oauth2_bearer_jwt_authentication_handler_query()
    error = excinfo.value
    response = error.response
    assert "JWT is not valid" in str(error)
    assert isinstance(response, HTTPResponse)
    assert response.status_code == 403
    assert response.headers == {
        "WWW-Authenticate": "Bearer error=invalid_token"
    }
    assert response.mime_type == "text/html"
    assert "403 Forbidden" in response.data
|
# pylint: disable=missing-function-docstring, missing-module-docstring
from pyccel.stdlib.internal.blas import dgemm
from numpy import zeros
if __name__ == '__main__':
    # Problem sizes: A is (m x k), B is (k x n), C is (m x n).
    m = 4
    k = 5
    n = 4
    a = zeros((m,k), 'double')
    b = zeros((k,n), 'double')
    c = zeros((m,n), 'double')
    # Fill A element by element (row i holds 5*i+1 .. 5*i+5).
    # NOTE(review): the element-wise assignments are presumably kept so pyccel can
    # translate this file — confirm before replacing them with an array literal.
    a[0,0] = 1.0
    a[1,0] = 6.0
    a[2,0] = 11.0
    a[3,0] = 16.0
    a[0,1] = 2.0
    a[1,1] = 7.0
    a[2,1] = 12.0
    a[3,1] = 17.0
    a[0,2] = 3.0
    a[1,2] = 8.0
    a[2,2] = 13.0
    a[3,2] = 18.0
    a[0,3] = 4.0
    a[1,3] = 9.0
    a[2,3] = 14.0
    a[3,3] = 19.0
    a[0,4] = 5.0
    a[1,4] = 10.0
    a[2,4] = 15.0
    a[3,4] = 20.0
    # B is a sparse selection matrix; note that row 3 feeds BOTH output
    # columns 3 and 4 (b[3,3] and b[3,4] are set).
    b[0,0] = 1.0
    b[1,2] = 1.0
    b[2,1] = 1.0
    b[3,3] = 1.0
    b[3,4] = 1.0
    # Scaling factors for the BLAS call below.
    alpha = 2.0
    beta = 1.0
    # BLAS dgemm: C := alpha*A*B + beta*C, no transposition ('N','N');
    # the trailing m, k, m arguments are the leading dimensions lda, ldb, ldc.
    dgemm('N', 'N', m, n, k, alpha, a, m, b, k, beta, c, m)
    print(c)
|
from starlette.config import Config
# Hard limit on the number of questions in one feedback form.
# NOTE(review): presumably enforced by the view-building code — confirm against callers.
MAX_QUESTIONS = 95
# Settings come from environment variables, falling back to the `.env` file.
config = Config('.env')
# Required settings (no default).
MODULE_URL = config('MODULE_URL')
METABOT_URL = config('METABOT_URL')
MONGODB_URI = config('MONGODB_URI')
# Optional settings with defaults.
FEEDBACK_COLLECTION = config('FEEDBACK_COLLECTION', default='feedback')
# Heartbeat interval; starlette applies cast=float to the string default too.
HEARTBEAT_DELAY = config('HEARTBEAT_DELAY', cast=float, default='10')
# View and action identifiers used to route UI callbacks.
CREATION_VIEW_ID = config('CREATION_VIEW_ID', default='feedback_creation_view')
ANSWER_VIEW_ID = config('ANSWER_VIEW_ID', default='feedback_answer_view')
TITLE_INPUT_ACTION_ID = config(
    'TITLE_INPUT_ACTION_ID',
    default='feedback_title_input'
)
RECIPIENTS_SELECT_ACTION_ID = config(
    'RECIPIENTS_SELECT_ACTION_ID',
    default='feedback_recipients_select'
)
# The next two are templates: callers fill the per-question index via .format().
QUESTION_INPUT_ACTION_ID = config(
    'QUESTION_INPUT_ACTION_ID',
    default='feedback_question_input_{}'  # use .format()
)
ANSWER_INPUT_ACTION_ID = config(
    'ANSWER_INPUT_ACTION_ID',
    default='feedback_answer_input_{}'  # use .format()
)
NOTIFY_ACTION_ID = config(
    'NOTIFY_ACTION_ID',
    default='feedback_notify_button'
)
ANSWER_ACTION_ID = config(
    'ANSWER_ACTION_ID',
    default='feedback_answer_button'
)
|
import uuid
from datetime import datetime
from nwb_conversion_tools import NWBConverter
from .shenoyblackrockrecordingdatainterface import ShenoyBlackRockRecordingDataInterface
from .shenoymatdatainterface import ShenoyMatDataInterface
class ChurchlandNWBConverter(NWBConverter):
    """Convert .mat and Blackrock .nsx data from the Churchland (2012) monkey
    experiments (Utah multielectrode implants) into NWB."""

    # One Blackrock recording interface per array/file (A1-B4) plus the
    # processed .mat data interface.
    data_interface_classes = dict(
        A1=ShenoyBlackRockRecordingDataInterface,
        B1=ShenoyBlackRockRecordingDataInterface,
        A2=ShenoyBlackRockRecordingDataInterface,
        B2=ShenoyBlackRockRecordingDataInterface,
        A3=ShenoyBlackRockRecordingDataInterface,
        B3=ShenoyBlackRockRecordingDataInterface,
        A4=ShenoyBlackRockRecordingDataInterface,
        B4=ShenoyBlackRockRecordingDataInterface,
        Mat=ShenoyMatDataInterface,
    )

    def __init__(self, source_data):
        """
        Converts mat and .nsx data associated with Churchland(2012) monkey experiments with Utah
        multielectrode implants.

        Parameters
        ----------
        source_data : dict
            May contain "subject_name" (default "Jenkins") and "date"
            (default: datetime.now()). NOTE(review): get_metadata() calls
            datetime.isoformat on "date", so callers must pass a datetime,
            not a string — confirm.
        """
        self.subject_name = source_data.get("subject_name", "Jenkins")
        self.session_date = source_data.get("date", datetime.now())
        super().__init__(source_data)

    @classmethod
    def get_source_schema(cls):
        """Extend the base source schema with the optional subject_name field."""
        base_schema = super().get_source_schema()
        base_schema["additionalProperties"] = True
        base_schema["properties"].update(subject_name=dict(type="string"))
        return base_schema

    def get_metadata(self):
        """Return base metadata augmented with NWBFile and Subject sections."""
        metadata_base = super().get_metadata()
        metadata_base["NWBFile"] = dict(
            session_description="",
            identifier=str(uuid.uuid4()),
            session_start_time=datetime.isoformat(self.session_date),
            experimenter=["Matthew T. Kaufman", "Mark M. Churchland"],
            experiment_description="",
            institution="Stanford University",
            related_publications=" ".join(
                [
                    "10.1038/nature11129",
                    "10.1152/jn.00892.2011",
                    "10.1038/nn.3643",
                    "10.1038/nn.4042",
                    "10.1146/annurev-neuro-062111-150509",
                    # BUG FIX: missing commas here caused implicit string
                    # concatenation, fusing two DOIs into one garbled entry
                    # ("10.7554/eLife.0467710.1523/ENEURO.0085-16.2016").
                    "10.7554/eLife.04677",
                    "10.1523/ENEURO.0085-16.2016",
                    "10.1038/s41592-018-0109-9",
                ]
            ),
        )
        metadata_base["Subject"] = dict(
            sex="M", species="Macaca mulatta", subject_id=self.subject_name
        )
        return metadata_base
|
"""Extract features from static moments of IMU data."""
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import skew
from biopsykit.utils.array_handling import sanitize_input_nd
from biopsykit.utils.time import tz
def compute_features(
    data: pd.DataFrame,
    static_moments: pd.DataFrame,
    start: Optional[Union[str, pd.Timestamp]] = None,
    end: Optional[Union[str, pd.Timestamp]] = None,
    index: Optional[Union[int, str]] = None,
    timezone: Optional[str] = None,
) -> Optional[pd.DataFrame]:
    """Compute frequency/duration features of static moments in the input signal.

    Produced columns (in order): ``sm_max_position`` (start of the longest
    static moment, normalized to ``[0, 1]`` between ``start`` and ``end``),
    then for all moments and again for moments >= 60 s (suffix ``_60``):
    ``sm_number``, ``sm_max``, ``sm_median``, ``sm_mean``, ``sm_std``,
    ``sm_skewness``.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        input data
    static_moments : :class:`~pandas.DataFrame`
        beginning and end indices of static moments (two columns)
    start, end : :class:`~pandas.Timestamp` or str, optional
        bounds for feature extraction; default to the first/last index of ``data``
    index : int or str, optional
        index label of the resulting dataframe (default 0)
    timezone : str, optional
        timezone of the recording (default: library default timezone)

    Returns
    -------
    :class:`~pandas.DataFrame`
        one-row dataframe of features, or ``None`` if ``data`` is empty
    """
    if data.empty:
        return None
    start, end = _get_start_end(data, start, end, timezone)
    total_time = end - start

    static_moments = sanitize_input_nd(static_moments, ncols=2)
    durations = np.array([static_moment_duration(data, window) for window in static_moments])
    # NOTE(review): if no moment reaches 60 s this array is empty and np.max below
    # raises — presumably callers guarantee at least one long moment; confirm.
    long_durations = durations[durations >= 60]

    # Start of the longest static moment, normalized to [0, 1] over [start, end].
    longest_start = data.index[static_moments[np.argmax(durations)][0]]
    features = {"sm_max_position": (longest_start - start) / total_time}

    for dur, suffix in zip((durations, long_durations), ("", "_60")):
        features[f"sm_number{suffix}"] = len(dur)
        features[f"sm_max{suffix}"] = np.max(dur)
        features[f"sm_median{suffix}"] = np.median(dur)
        features[f"sm_mean{suffix}"] = np.mean(dur)
        features[f"sm_std{suffix}"] = np.std(dur, ddof=1)
        features[f"sm_skewness{suffix}"] = skew(dur)

    return pd.DataFrame(features, index=[0 if index is None else index])
def _get_start_end(
    data: pd.DataFrame,
    start: Union[str, pd.Timestamp],
    end: Union[str, pd.Timestamp],
    timezone: str,
) -> Tuple[Union[str, pd.Timestamp], Union[str, pd.Timestamp]]:
    """Fill in missing start/end/timezone defaults and coerce both bounds to timestamps."""
    if timezone is None:
        timezone = tz  # library-wide default timezone
    if start is None:
        start = data.index[0]
    if end is None:
        end = data.index[-1]
    return _to_timestamp(start, timezone), _to_timestamp(end, timezone)
def _to_timestamp(date: Union[str, pd.Timestamp], timezone: str) -> pd.Timestamp:
if isinstance(date, str):
date = pd.Timestamp(date, tz=timezone)
return date
def static_moment_duration(data: pd.DataFrame, start_end: np.array) -> float:
    """Compute duration of one static moment in seconds.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        input data with a datetime-like index
    start_end : array
        pair of (start, end) positional indices of the static moment

    Returns
    -------
    float
        duration in seconds
    """
    begin, finish = start_end[0], start_end[1]
    return (data.index[finish] - data.index[begin]).total_seconds()
def mean_orientation(data: pd.DataFrame, static_moments: pd.DataFrame) -> pd.DataFrame:
    """Compute mean orientation of the acceleration signal within static moment windows.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        input data
    static_moments : :class:`~pandas.DataFrame`
        start and end indices of static moments

    Returns
    -------
    :class:`~pandas.DataFrame`
        mean orientation (x, y, z) per static moment window, indexed by window length
    """
    static_moments = sanitize_input_nd(static_moments, 2)
    # Keyed by window length; NOTE(review): two windows of equal length
    # overwrite each other here — confirm that is acceptable to callers.
    window_means = {}
    for start_end in static_moments:
        window = data.iloc[start_end[0] : start_end[1]]
        window_means[len(window)] = window.mean()
    return pd.DataFrame(window_means).T
|
def calibrateInertialSensorData(data, AM, SM, OV):
    """Apply alignment/sensitivity/offset calibration to one 3-axis sensor sample.

    Computes ``AM^-1 @ SM^-1 @ (data - OV)`` for the first three components
    of *data*; any trailing components are passed through unchanged.

    Parameters
    ----------
    data : sequence of float
        raw sample; the first three entries are the x/y/z channels
    AM : 3x3 array
        alignment matrix
    SM : 3x3 array
        sensitivity matrix
    OV : 3x1 array
        offset vector

    Returns
    -------
    numpy.ndarray
        calibrated copy of *data* as a float array
    """
    import numpy as np
    # `np.float` was removed in NumPy 1.24 — use the builtin float dtype.
    tempdata = np.array(data, dtype=float)
    data2d = np.zeros([3, 1], dtype=float)
    data2d[0, 0] = data[0]
    data2d[1, 0] = data[1]
    data2d[2, 0] = data[2]
    # calibrated = AM^-1 @ SM^-1 @ (raw - offset)
    data2d = np.dot(np.dot(np.linalg.inv(AM), np.linalg.inv(SM)), data2d - OV)
    tempdata[0] = data2d[0, 0]
    tempdata[1] = data2d[1, 0]
    tempdata[2] = data2d[2, 0]
    return tempdata
# Smoke test: calibrate one accelerometer and one gyroscope sample.
import numpy as np

ALIGNMENT_ACCEL = np.array([[0, -1.0, 0], [-1.0, 0, 0], [0, 0, -1.0]])
SENSITIVITY_ACCEL = np.array([[83.0, 0, 0], [0, 83.0, 0], [0, 0, 83.0]])
OFFSET_ACCEL = np.array([[2047.0], [2047.0], [2047.0]])
ALIGNMENT_GYRO = np.array([[0, -1.0, 0], [-1.0, 0, 0], [0, 0, -1.0]])
SENSITIVITY_GYRO = np.array([[65.5, 0, 0], [0, 65.5, 0], [0, 0, 65.5]])
OFFSET_GYRO = np.array([[0.0], [0.0], [0.0]])
# BUG FIX: the Python-2 `print` statements were a SyntaxError on Python 3;
# the call form below works on both Python 2.7 and 3.
print(calibrateInertialSensorData([2033, 2035, 1237], ALIGNMENT_ACCEL, SENSITIVITY_ACCEL, OFFSET_ACCEL))
print(calibrateInertialSensorData([-129, 62, -13], ALIGNMENT_GYRO, SENSITIVITY_GYRO, OFFSET_GYRO))
|
"""Test Base module of python-openflow."""
import unittest
from pyof.foundation import base, basic_types
class TestGenericStruct(unittest.TestCase):
    """Testing GenericStruct class."""
    def setUp(self):
        """Build a small struct/message hierarchy used by the tests.

        AttributeA and AttributeC hold plain UBInt fields, AttributeB nests
        AttributeC, and MyMessage combines a Header with all of them.
        """
        class AttributeA(base.GenericStruct):
            """Example class."""
            a1 = basic_types.UBInt8(1)
            a2 = basic_types.UBInt16(2)
        class AttributeC(base.GenericStruct):
            """Example class."""
            c1 = basic_types.UBInt32(3)
            c2 = basic_types.UBInt64(4)
        class AttributeB(base.GenericStruct):
            """Example class."""
            c = AttributeC()
        class Header(base.GenericStruct):
            """Mock Header class."""
            version = basic_types.UBInt8(1)
            message_type = basic_types.UBInt8(2)
            length = basic_types.UBInt8(8)
            xid = basic_types.UBInt8(4)
        class MyMessage(base.GenericMessage):
            """Example class."""
            header = Header()
            a = AttributeA()
            b = AttributeB()
            i = basic_types.UBInt32(5)
            def __init__(self):
                """Init method of example class."""
                super().__init__(None)
        # Expose the message class so each test can build fresh instances.
        self.MyMessage = MyMessage
    def test_basic_attributes(self):
        """[Foundation/Base/GenericStruct] - Attributes Creation."""
        # Two instances must not share any attribute objects: GenericStruct is
        # expected to deep-copy class-level attributes per instance, including
        # attributes nested several levels down (b.c.c1 etc.).
        message1 = self.MyMessage()
        message2 = self.MyMessage()
        self.assertIsNot(message1, message2)
        self.assertIsNot(message1.i, message2.i)
        self.assertIsNot(message1.a, message2.a)
        self.assertIsNot(message1.b, message2.b)
        self.assertIsNot(message1.a.a1, message2.a.a1)
        self.assertIsNot(message1.a.a2, message2.a.a2)
        self.assertIsNot(message1.b.c, message2.b.c)
        self.assertIsNot(message1.b.c.c1, message2.b.c.c1)
        self.assertIsNot(message1.b.c.c2, message2.b.c.c2)
class TestGenericType(unittest.TestCase):
    """Testing GenericType class."""
    def test_basic_operator(self):
        """[Foundation/Base/GenericType] - Basic Operators.

        Each arithmetic/bitwise operator is exercised with the UBInt on the
        left and on the right of a plain int, so both the regular and the
        reflected (__r*__) dunder implementations are covered.
        """
        a = basic_types.UBInt32(1)
        b = basic_types.UBInt32(2)
        # addition
        self.assertEqual(a + 1, 2)
        self.assertEqual(1 + a, 2)
        self.assertEqual(b + 1, 3)
        self.assertEqual(1 + b, 3)
        # subtraction (note 1 - a and 1 - b assert the same values as a - 1
        # and b - 1, matching the implementation under test)
        self.assertEqual(a - 1, 0)
        self.assertEqual(1 - a, 0)
        self.assertEqual(b - 1, 1)
        self.assertEqual(1 - b, 1)
        # bitwise AND
        self.assertEqual(a & 1, 1)
        self.assertEqual(1 & a, 1)
        self.assertEqual(b & 1, 0)
        self.assertEqual(1 & b, 0)
        # bitwise OR
        self.assertEqual(a | 1, 1)
        self.assertEqual(1 | a, 1)
        self.assertEqual(b | 1, 3)
        self.assertEqual(1 | b, 3)
        # bitwise XOR
        self.assertEqual(a ^ 1, 0)
        self.assertEqual(1 ^ a, 0)
        self.assertEqual(b ^ 1, 3)
        self.assertEqual(1 ^ b, 3)
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from copy import copy
from jx_base.dimensions import Dimension
from jx_base.utils import is_variable_name
from jx_base.query import QueryOp
from jx_base.language import is_op
from jx_python.namespace import Namespace, convert_list
from mo_dots import Data, coalesce, is_data, is_list, listwrap, set_default, unwraplist, wrap, is_many
from mo_future import is_text
from mo_logs import Log
from mo_math import is_number
from mo_times.dates import Date
class Rename(Namespace):
    # Namespace that rewrites variable names in JSON query expressions
    # according to a Dimension mapping.
    def __init__(self, dimensions, source):
        """
        EXPECTING A LIST OF {"name":name, "value":value} OBJECTS TO PERFORM A MAPPING
        """
        dimensions = wrap(dimensions)
        if is_data(dimensions) and dimensions.name == None:
            # CONVERT TO A REAL DIMENSION DEFINITION
            dimensions = {"name": ".", "type": "set", "edges":[{"name": k, "field": v} for k, v in dimensions.items()]}
        self.dimensions = Dimension(dimensions, None, source)
    def convert(self, expr):
        """
        EXPAND INSTANCES OF name TO value
        """
        # Dispatch on the shape of `expr`: literals pass through, variable
        # names are looked up in the dimension mapping, queries/structures
        # recurse.
        if expr is True or expr == None or expr is False:
            return expr
        elif is_number(expr):
            return expr
        elif expr == ".":
            return "."
        elif is_variable_name(expr):
            return coalesce(self.dimensions[expr], expr)
        elif is_text(expr):
            Log.error("{{name|quote}} is not a valid variable name", name=expr)
        elif isinstance(expr, Date):
            return expr
        elif is_op(expr, QueryOp):
            return self._convert_query(expr)
        elif is_data(expr):
            if expr["from"]:
                return self._convert_query(expr)
            elif len(expr) >= 2:
                #ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION
                return wrap({name: self.convert(value) for name, value in expr.leaves()})
            else:
                # ASSUME SINGLE-CLAUSE EXPRESSION
                # NOTE(review): converter_map holds unbound functions and is
                # called as f(self, k, v); the fallback `self._convert_bop` is
                # a BOUND method, so on that path `self` is passed twice —
                # looks like a latent TypeError; confirm against callers.
                k, v = expr.items()[0]
                return converter_map.get(k, self._convert_bop)(self, k, v)
        elif is_many(expr):
            return wrap([self.convert(value) for value in expr])
        else:
            return expr
    def _convert_query(self, query):
        # Rebuild a QueryOp clause by clause, renaming variables in each part.
        output = QueryOp(None)
        output.select = self._convert_clause(query.select)
        output.where = self.convert(query.where)
        output.frum = self._convert_from(query.frum)
        output.edges = convert_list(self._convert_edge, query.edges)
        output.having = convert_list(self._convert_having, query.having)
        output.window = convert_list(self._convert_window, query.window)
        output.sort = self._convert_clause(query.sort)
        output.format = query.format
        return output
    def _convert_bop(self, op, term):
        # Binary-operator clause: rename each variable key.
        # NOTE(review): map() is lazy on Python 3 (this module targets both
        # via mo_future) — consumers must tolerate an iterator here.
        if is_list(term):
            return {op: map(self.convert, term)}
        return {op: {self.convert(var): val for var, val in term.items()}}
    def _convert_many(self, k, v):
        # Variadic clause ("and"/"or"): convert every sub-expression.
        return {k: map(self.convert, v)}
    def _convert_from(self, frum):
        if is_data(frum):
            return Data(name=self.convert(frum.name))
        else:
            return self.convert(frum)
    def _convert_edge(self, edge):
        # Replace an edge that refers to a dimension with the dimension's field(s).
        dim = self.dimensions[edge.value]
        if not dim:
            return edge
        if len(listwrap(dim.fields)) == 1:
            #TODO: CHECK IF EDGE DOMAIN AND DIMENSION DOMAIN CONFLICT
            new_edge = set_default({"value": unwraplist(dim.fields)}, edge)
            return new_edge
        # NOTE(review): `new_edge` is unbound on this path (it is only assigned
        # in the single-field branch above, which returns), so this line raises
        # NameError when reached — likely dead/leftover code; confirm intent.
        new_edge.domain = dim.getDomain()
        edge = copy(edge)
        edge.value = None
        edge.domain = dim.getDomain()
        return edge
    def _convert_clause(self, clause):
        """
        JSON QUERY EXPRESSIONS HAVE MANY CLAUSES WITH SIMILAR COLUMN DELCARATIONS
        """
        clause = wrap(clause)
        if clause == None:
            return None
        elif is_data(clause):
            return set_default({"value": self.convert(clause.value)}, clause)
        else:
            return [set_default({"value": self.convert(c.value)}, c) for c in clause]
# Dispatch table mapping single-clause expression keywords to conversion
# functions; Rename.convert falls back to Rename._convert_bop for unknown
# keywords (see Rename.convert).
converter_map = {
    "and": Rename._convert_many,
    "or": Rename._convert_many,
    "not": Rename.convert,
    "missing": Rename.convert,
    "exists": Rename.convert
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import requests
import random
import json
import os
from os.path import join, dirname
from dotenv import load_dotenv
# Load environment variables from the `.env` file next to this script.
load_dotenv(join(dirname(__file__), '.env'))

url = os.environ.get("VUE_APP_IBM_CLIENT_URL2")
headers = {
    "Accept": "application/json",
    "Content-Type": "application/json",
    "X-IBM-Client-Id": os.environ.get("VUE_APP_IBM_CLIENT_ID2"),
    "X-IBM-Client-Secret": os.environ.get("VUE_APP_IBM_CLIENT_SECRET2"),
}
proxies = {
    "http": os.environ.get("HTTP_PROXY"),
    "https": os.environ.get("HTTPS_PROXY")
}

# POST one shelter record per CSV row; the first (header) row is skipped.
with open('../dummy-data/PoC-dummy-shelters.csv', 'r', encoding='shift_jis') as csv_file:
    rows = csv.reader(csv_file, delimiter=',', quotechar='"')
    next(rows)
    for record in rows:
        print(record)
        payload = {
            "name": record[1],
            "zipcode": record[0],
            "address": record[2],
            "email": "",
            "phone": record[3],
            "latlng": [float(record[4]), float(record[5])]
        }
        response = requests.post(url + "/shelters", json.dumps(payload), headers=headers, proxies=proxies)
        print(response.status_code)
        print(response.json())
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from duckietown_msgs/LEDDetection.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import duckietown_msgs.msg
import genpy
class LEDDetection(genpy.Message):
_md5sum = "d1ac8691d7a30e838dc372a724aee94b"
_type = "duckietown_msgs/LEDDetection"
_has_header = False #flag to mark the presence of a Header object
_full_text = """time timestamp1 # initial timestamp of the camera stream used
time timestamp2 # final timestamp of the camera stream used
Vector2D pixels_normalized
float32 frequency
string color # will be ‘r’, ‘g’ or ‘b’
float32 confidence # some value of confidence for the detection (TBD)
# for debug/visualization
float64[] signal_ts
float32[] signal
float32[] fft_fs
float32[] fft
================================================================================
MSG: duckietown_msgs/Vector2D
float32 x
float32 y
"""
__slots__ = ['timestamp1','timestamp2','pixels_normalized','frequency','color','confidence','signal_ts','signal','fft_fs','fft']
_slot_types = ['time','time','duckietown_msgs/Vector2D','float32','string','float32','float64[]','float32[]','float32[]','float32[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
timestamp1,timestamp2,pixels_normalized,frequency,color,confidence,signal_ts,signal,fft_fs,fft
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(LEDDetection, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.timestamp1 is None:
self.timestamp1 = genpy.Time()
if self.timestamp2 is None:
self.timestamp2 = genpy.Time()
if self.pixels_normalized is None:
self.pixels_normalized = duckietown_msgs.msg.Vector2D()
if self.frequency is None:
self.frequency = 0.
if self.color is None:
self.color = ''
if self.confidence is None:
self.confidence = 0.
if self.signal_ts is None:
self.signal_ts = []
if self.signal is None:
self.signal = []
if self.fft_fs is None:
self.fft_fs = []
if self.fft is None:
self.fft = []
else:
self.timestamp1 = genpy.Time()
self.timestamp2 = genpy.Time()
self.pixels_normalized = duckietown_msgs.msg.Vector2D()
self.frequency = 0.
self.color = ''
self.confidence = 0.
self.signal_ts = []
self.signal = []
self.fft_fs = []
self.fft = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_4I3f.pack(_x.timestamp1.secs, _x.timestamp1.nsecs, _x.timestamp2.secs, _x.timestamp2.nsecs, _x.pixels_normalized.x, _x.pixels_normalized.y, _x.frequency))
_x = self.color
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_f.pack(self.confidence))
length = len(self.signal_ts)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.signal_ts))
length = len(self.signal)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.signal))
length = len(self.fft_fs)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.fft_fs))
length = len(self.fft)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.fft))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.timestamp1 is None:
self.timestamp1 = genpy.Time()
if self.timestamp2 is None:
self.timestamp2 = genpy.Time()
if self.pixels_normalized is None:
self.pixels_normalized = duckietown_msgs.msg.Vector2D()
end = 0
_x = self
start = end
end += 28
(_x.timestamp1.secs, _x.timestamp1.nsecs, _x.timestamp2.secs, _x.timestamp2.nsecs, _x.pixels_normalized.x, _x.pixels_normalized.y, _x.frequency,) = _struct_4I3f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.color = str[start:end].decode('utf-8')
else:
self.color = str[start:end]
start = end
end += 4
(self.confidence,) = _struct_f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.signal_ts = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.signal = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.fft_fs = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.fft = struct.unpack(pattern, str[start:end])
self.timestamp1.canon()
self.timestamp2.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_4I3f.pack(_x.timestamp1.secs, _x.timestamp1.nsecs, _x.timestamp2.secs, _x.timestamp2.nsecs, _x.pixels_normalized.x, _x.pixels_normalized.y, _x.frequency))
_x = self.color
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_f.pack(self.confidence))
length = len(self.signal_ts)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.signal_ts.tostring())
length = len(self.signal)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.signal.tostring())
length = len(self.fft_fs)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.fft_fs.tostring())
length = len(self.fft)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.fft.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      # lazily construct sub-messages that were never assigned
      if self.timestamp1 is None:
        self.timestamp1 = genpy.Time()
      if self.timestamp2 is None:
        self.timestamp2 = genpy.Time()
      if self.pixels_normalized is None:
        self.pixels_normalized = duckietown_msgs.msg.Vector2D()
      end = 0
      _x = self
      start = end
      end += 28  # 4 x uint32 + 3 x float32 = 28 bytes fixed header
      (_x.timestamp1.secs, _x.timestamp1.nsecs, _x.timestamp2.secs, _x.timestamp2.nsecs, _x.pixels_normalized.x, _x.pixels_normalized.y, _x.frequency,) = _struct_4I3f.unpack(str[start:end])
      # string field: uint32 length prefix followed by the raw bytes
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.color = str[start:end].decode('utf-8')
      else:
        self.color = str[start:end]
      start = end
      end += 4
      (self.confidence,) = _struct_f.unpack(str[start:end])
      # each array field: uint32 element count, then the packed elements
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.signal_ts = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sf'%length
      start = end
      end += struct.calcsize(pattern)
      self.signal = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sf'%length
      start = end
      end += struct.calcsize(pattern)
      self.fft_fs = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sf'%length
      start = end
      end += struct.calcsize(pattern)
      self.fft = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
      # normalize secs/nsecs representation of the time stamps
      self.timestamp1.canon()
      self.timestamp2.canon()
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# pre-compiled struct unpackers shared by the (de)serialization methods above
_struct_I = genpy.struct_I  # '<I' - uint32 array-length prefix
_struct_4I3f = struct.Struct("<4I3f")  # fixed header: 4 x uint32 + 3 x float32
_struct_f = struct.Struct("<f")  # single float32 (confidence field)
|
# references
# http://tcl.tk/man/tcl8.6/TkCmd/pack.htm pack
# https://docs.python.org/3/library/tkinter.html#the-packer docs
# import tkinter
# from typing import Text
#tinkter.<class name> to import singly
# --- turtle demo: writes text on a turtle canvas ---
import turtle
tim = turtle.Turtle()
tim.write("shreya", font = ("Times New Roman", 80, "bold")) #arguments with default values
# --- tkinter demo: window with a label, a button and an entry ---
from tkinter import * # to import all the classes of tkinter
window = Tk()
window.title("my first GUI program")
window.minsize(width = 500, height = 300)
#label
my_label = Label(text = "I am label", font = ("Arial", 24, "bold")) #create component (L is capital )
my_label.pack(expand=True) #to display in center #call the method pack to print the above code
# two equivalent ways to change the label text
my_label["text"] = "New text"
my_label.config(text = "New Text")
#buttons
def button_clicked(): #event listener: runs each time the button is pressed
    print("I got clicked") #this message will appear in terminal
    new_text = input.get() #NOTE(review): 'input' shadows the builtin; it is the Entry created below, which exists by the time this runs
    my_label.config(text = new_text) #copy the entry text onto the label
button = Button(text = "click me", command = button_clicked)
button.pack()
#entry
input = Entry(width = 20) #input box widget (shadows the builtin input())
input.pack()
print(input.get()) #to get hold of the input value as a string and print it
button_clicked() #NOTE(review): called once at startup with an empty entry - presumably for demonstration
window.mainloop()
#!/usr/bin/python3
import sys
import random
import argparse
# Lookalike character tables: COMMON_EN[i] is the Latin stand-in for the
# Cyrillic character COMMON_BG[i].
COMMON_BG = "авгеиКкмМнопрсТтух"
COMMON_EN = "aBreuKkMMHonpcTTyx"

def voidscate(text, rev=False):
    """Substitute lookalike characters in text.

    By default, Latin characters from COMMON_EN are replaced with their
    Cyrillic counterparts from COMMON_BG; with rev=True the substitution goes
    the other way. Characters outside the tables pass through unchanged.

    Args:
        text (str): Input string to transliterate.
        rev (bool): Reverse direction (Cyrillic -> Latin) when True.
    Returns:
        str: The transliterated string.
    """
    if rev:
        src, dst = COMMON_BG, COMMON_EN
    else:
        src, dst = COMMON_EN, COMMON_BG
    # Build the translation table once instead of the former O(len(text) *
    # len(src)) nested scan. Where src contains duplicates (e.g. 'M' and 'T'
    # in COMMON_EN) the FIRST occurrence wins, matching the original
    # scan-and-replace behaviour.
    table = {}
    for s, d in zip(src, dst):
        table.setdefault(ord(s), d)
    return text.translate(table)
def get_args(args):
    '''
    Helper function for parsing arguments
    Usage:
        args = get_args(sys.argv)
    '''
    # temporarily swap sys.argv so argparse parses the passed-in list
    saved_argv = sys.argv
    sys.argv = args
    parser = argparse.ArgumentParser(description = 'Welcome to translateVoid.py')
    parser.add_argument('-s', '--string', dest = 'sentence',
                        help = 'Passing an escaped strings')
    parser.add_argument('-c', '--clipboard', dest = 'clipboard', action='store_true',
                        help = 'Grab a string from the clipboard')
    parser.add_argument('-r', '--reverse', dest = 'reversed', action='store_true',
                        help = 'Substitute characters from string based on cyrilic characters')
    parsed = parser.parse_args()
    sys.argv = saved_argv
    # mirror parsed options into module globals (original behaviour)
    globals().update(vars(parsed))
    return parsed
if __name__ == "__main__":
    args = get_args(sys.argv)
    if args.clipboard:
        # clipboard support is not implemented; exit with an error code
        print('Clipboard plugin is not available right now')
        sys.exit(1)
    if args.sentence:
        # transliterate the given string in the requested direction
        result = voidscate(args.sentence, rev=args.reversed)
        print(result)
    pass
import xadmin
from xadmin import views
from auth_server.models import MobileData, KeyStroke, Mouse
class BaseSettings:
    # xadmin base view settings: enable theme switching with bootswatch themes
    enable_themes = True
    use_bootswatch = True
class GlobalSettings:
    # site-wide xadmin chrome: admin title, footer and accordion-style menu
    site_title = '西安交大数据采集系统后台管理'
    site_footer = '西安交通大学'
    menu_style = 'accordion'
class MobileDataAdmin:
    # admin options for MobileData: sensor columns shown, searchable/filterable by user
    list_display = ['user_name', 'acc_with_gravitys', 'rot_rates', 'acc_datas', 'acc_angle', 'touch_data']
    search_fields = ['user_name']
    list_filter = ['user_name']
class KeyStrokeAdmin:
    # admin options for KeyStroke: keydown/keyup timing columns, searchable/filterable by user
    list_display = ['user_name', 'login_times_keydowns', 'login_times_keydowns_and_ups', 'password_times_keydowns',
                    'password_times_keydowns_and_ups']
    search_fields = ['user_name']
    list_filter = ['user_name']
class MouseAdmin:
    # admin options for Mouse: cursor positions and timestamps, searchable/filterable by user
    list_display = ['user_name', 'cor_pos', 'timestamps']
    search_fields = ['user_name']
    list_filter = ['user_name']
# register the model admins and the global xadmin view settings with the site
xadmin.site.register(Mouse, MouseAdmin)
xadmin.site.register(KeyStroke,KeyStrokeAdmin)
xadmin.site.register(MobileData,MobileDataAdmin)
xadmin.site.register(views.BaseAdminView,BaseSettings)
xadmin.site.register(views.CommAdminView,GlobalSettings)
|
# Demonstrate in-place list reversal.
myl1 = [1, 2, 3, 4]
print("Before reversing list elements are: " + str(myl1))
# reverse in place via slice assignment (equivalent to list.reverse())
myl1[:] = myl1[::-1]
print("After reversing list elements are: " + str(myl1))
|
# -*- coding: utf-8 -*-
"""
@summary: Maya cooperative python library
@run: import coop.coopLib as lib (suggested)
@license: MIT
@repository: https://github.com/artineering-io/maya-coop
"""
from __future__ import print_function
from __future__ import unicode_literals
import os, sys, subprocess, shutil, re, math, traceback, platform
from functools import wraps
import maya.mel as mel
import maya.cmds as cmds
from . import logger as clog
from . import list as clist
# python api 2.0
import maya.api.OpenMaya as om
def maya_useNewAPI():
    # The mere presence of this function tells Maya to use Python API 2.0
    pass
# Python 2/3 compatibility shims
try:
    basestring  # Python 2
except NameError:
    basestring = (str,)  # Python 3: emulate py2 basestring for isinstance checks
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3: xrange was renamed to range
# Please follow google style docstrings!
# (the bare string below is a no-op template showing the expected docstring layout)
"""
This is an example of Google style.
Args:
    param1: This is the first param.
    param2: This is a second param.
Returns:
    This is a description of what is returned.
Raises:
    KeyError: Raises an exception.
"""
LOG = clog.logger("coop.lib")  # module-level logger for this library
# _ _
# __| | ___ ___ ___ _ __ __ _| |_ ___ _ __ ___
# / _` |/ _ \/ __/ _ \| '__/ _` | __/ _ \| '__/ __|
# | (_| | __/ (_| (_) | | | (_| | || (_) | | \__ \
# \__,_|\___|\___\___/|_| \__,_|\__\___/|_| |___/
#
def timer(f):
    """
    Decorator to time functions
    Args:
        f: function to be timed
    Returns:
        wrapped function with a timer
    """
    @wraps(f)  # timer = wraps(timer) | helps wrap the docstring of original function
    def wrapper(*args, **kwargs):
        import time
        time_start = time.time()
        try:
            return f(*args, **kwargs)
        except:
            # exception is printed and swallowed; the wrapped call then returns None
            traceback.print_exc()
        finally:
            # finally runs even on return/raise, so the full call is timed
            time_end = time.time()
            LOG.debug("[Time elapsed at {0}: {1:.4f} sec]".format(f.__name__, time_end - time_start))
    return wrapper
def undo(f):
    """
    Puts the wrapped `func` into a single Maya Undo action
    Args:
        f: function to be undone
    Returns:
        wrapped function within an undo chunk
    """
    @wraps(f)
    def undo_wrapper(*args, **kwargs):
        try:
            # start an undo chunk
            cmds.undoInfo(openChunk=True, cn="{0}".format(f))
            return f(*args, **kwargs)
        except:
            # print but swallow the error so the chunk is still closed below
            traceback.print_exc()
        finally:
            # after calling the func, end the undo chunk (must match openChunk)
            cmds.undoInfo(closeChunk=True, cn="{0}".format(f))
    return undo_wrapper
def keep_selection(f):
    """
    Saves and restores the selection after running a function
    Args:
        f: function to be addressed
    Returns:
        wrapped function where the change of selection doesn't matter
    """
    @wraps(f)
    def selection_wrapper(*args, **kwargs):
        # Capture the selection BEFORE the try block: previously it was
        # captured inside, so if cmds.ls() raised, the finally clause hit a
        # NameError on the undefined 'selection' variable.
        selection = cmds.ls(sl=True, l=True)
        try:
            return f(*args, **kwargs)
        except:
            # exception is printed and swallowed (wrapped call returns None)
            traceback.print_exc()
        finally:
            # after calling the func, restore the previous selection
            cmds.select(selection, r=True)
    return selection_wrapper
######################################################################################
# GENERAL UTILITIES
######################################################################################
def get_host():
    """
    Checks for host application of python environment
    Returns:
        (unicode): Host name (e.g., Maya, Blender); None when unrecognized
    """
    executable_name = os.path.basename(sys.executable)
    # match known host executables by substring of the interpreter name
    for token, host_name in (("maya", "Maya"), ("blender", "Blender")):
        if token in executable_name:
            return host_name
def get_py_version(version=0):
    """
    Checks for the version of python currently running
    Args:
        version (float, unicode): desired version to check against (e.g. 3.9 or "3.10")
    Returns:
        Python version (float): The version currently running if no version is supplied
        Version check (bool): True if the current version is at least the given version
    """
    v = platform.python_version()
    if not version:
        # legacy behaviour: "3.9.7" -> 3.9
        # (note: a float cannot distinguish 3.1 from 3.10)
        return float(v[:v.rindex('.')])
    # BUGFIX: compare (major, minor) tuples instead of floats -
    # float("3.10") == 3.1, which made checks such as get_py_version(3.9)
    # incorrectly return False on Python 3.10+.
    current = tuple(int(p) for p in v.split('.')[:2])
    required = tuple(int(p) for p in str(version).split('.')[:2])
    return current >= required
def get_maya_version():
    """
    Returns the current Maya version (E.g. 2017.0, 2018.0, 2019.0, etc)
    Returns:
        (float): version as reported by MEL's getApplicationVersionAsFloat
    """
    return mel.eval("getApplicationVersionAsFloat")
def get_local_os():
    """
    Returns the operating system (OS) of the local machine
    Returns:
        (unicode): Either "win", "mac" or "linux"
    """
    if cmds.about(mac=True):
        return "mac"
    elif cmds.about(linux=True):
        return "linux"
    # neither mac nor linux -> assume windows
    return "win"
def get_os_separator():
    """
    Returns the path separator for the operating system (OS) on the local machine
    Returns:
        (unicode): Either ':' or ';'
    """
    # Windows separates paths in environment variables with ';', unix-likes with ':'
    return ';' if get_local_os() == "win" else ':'
def plugin_ext():
    """
    Returns the plugin extension depending on the local operating system
    Returns:
        (unicode): Either "mll", "bundle" or "so"
    """
    # map local OS key to the Maya plugin file extension for that platform
    return {"win": "mll", "mac": "bundle", "linux": "so"}[get_local_os()]
def get_env_dir():
    """
    Gets the environment directory
    Returns:
        directory (unicode): the directory of the Maya.env file
    """
    env_dir = os.path.abspath(cmds.about(env=True, q=True))  # absolute path of Maya.env itself
    return os.path.dirname(env_dir)
def get_module_path(module):
    """
    Gets the path of a Maya module, if it exists
    Args:
        module (unicode): Name of the module
    Returns:
        (unicode): Path to the module (empty if it doesn't exist)
    """
    try:
        return cmds.moduleInfo(path=True, moduleName=module)
    except RuntimeError:
        # moduleInfo raises RuntimeError for unknown modules
        return ""
def get_lib_dir():
    """
    Gets the coop library directory
    Returns:
        directory (unicode): the directory where the coopLib is found at
    """
    # NOTE(review): 'Path' is not imported in this chunk of the file; judging by
    # the callable .parent() and the .path attribute it is a project path
    # wrapper, not pathlib.Path - confirm its import elsewhere in the module.
    return Path(__file__).parent().path
def open_url(url):
    """
    Opens the url in the default browser
    Args:
        url (unicode): The URL to open
    """
    # new=2 requests a new browser tab; autoraise brings the browser forward
    from webbrowser import open as open_browser
    open_browser(url, new=2, autoraise=True)
def downloader(url, dest):
    """
    Downloads a file at the specified url to the destination
    Args:
        url (unicode): URL of file to download
        dest (unicode): destination of downloaded file
    Returns:
        bool: True if succeeded, False if failed
    """
    # BUGFIX: urllib.URLopener only exists in Python 2; use urlretrieve,
    # which is available (from different modules) in both Python 2 and 3.
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2
    try:
        urlretrieve(url, dest)
    except:
        # keep the original best-effort contract: report failure, don't raise
        traceback.print_exc()
        return False
    return True
def restart_maya(brute=True):
    """
    Restarts maya (CAUTION)
    Args:
        brute (bool): True if the Maya process should stop, False if Maya should be exited normally
    """
    if not brute:
        # spawn an external mayapy process that relaunches Maya, then quit cleanly
        # NOTE(review): 'Path' is not imported in this chunk - presumably a project
        # path wrapper (callable .parent()/.child(), .path attribute); confirm.
        maya_py_dir = Path(sys.executable).parent().child("mayapy.exe")
        script_dir = Path(__file__).parent().child("coopRestart.py")
        subprocess.Popen([maya_py_dir.path, script_dir.path])
        cmds.quit(force=True)
    else:
        # replace the current process image in place; unsaved changes are lost
        os.execl(sys.executable, sys.executable, *sys.argv)
def run_cmd(cmd, cwd):
    """
    Run a command in a separate shell and print its results
    Args:
        cmd (unicode): Command to run in a shell
        cwd (unicode): Current working directory (path where command will be executed from)
    """
    print("> {}".format(cmd))
    # NOTE: shell=True runs through the system shell - only pass trusted commands
    proc = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, error = proc.communicate()  # wait for completion, capture both streams
    if output:
        print("Output: {}".format(output))
    if error:
        print("Error: {}".format(error))
def run_python_as_admin(py_cmd, close=True):
    """
    Runs a python command in a separate interactive python shell with admin rights prompt
    Args:
        py_cmd (unicode): One liner python command
        close (bool): Close python shell if no errors occurred
    """
    import ctypes
    py_cmd = py_cmd.rstrip()
    if close:
        if py_cmd[-1] != ';':
            py_cmd += ';'
        # append a self-kill so the spawned interpreter exits once done
        py_cmd += " import os; os.kill(os.getpid(), 9);"
    # NOTE(review): Windows-only - ShellExecuteW with the "runas" verb triggers
    # the UAC elevation prompt. 'Path' is not imported in this chunk; presumably
    # a project path wrapper - confirm.
    mayapy = Path(sys.executable).parent().child("mayapy.exe").path
    ctypes.windll.shell32.ShellExecuteW(None, "runas", mayapy,
                                        subprocess.list2cmdline([str("-i"), str("-c"), py_cmd]), None, 1)
def dialog_restart(brute=True):
    """
    Opens restart dialog to restart maya
    Args:
        brute (bool): True if the Maya process should stop, False if Maya should be exited normally
    """
    restart = cmds.confirmDialog(title='Restart Maya',
                                 message='Maya needs to be restarted in order to show changes\n'
                                         'Would you like to restart maya now?',
                                 button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No',
                                 ma='center')
    if restart == 'Yes':
        # only restart on explicit confirmation; cancel/dismiss are treated as 'No'
        restart_maya(brute)
def dialog_save(starting_directory="", title="Save as...", file_filter="All Files (*.*)"):
    """
    Simple save dialog in Maya
    Args:
        starting_directory (unicode): Starting directory. (default: project root directory)
        title (unicode): Dialog title. (default: "Save as...")
        file_filter (unicode): File filter. (default: "All Files (*.*)")
    Returns:
        (unicode): Path to save as (empty string if the dialog was cancelled)
    """
    if not starting_directory:
        starting_directory = cmds.workspace(rd=True, q=True)  # project root
    save_path = cmds.fileDialog2(fileFilter=file_filter, fileMode=0,
                                 startingDirectory=starting_directory,
                                 cap=title, dialogStyle=2)
    if not save_path:
        # dialog cancelled/dismissed
        display_error("Filepath not specified", True)
        return ""
    return save_path[0]
def dialog_open(starting_directory="", title="Open file", file_filter="All Files (*.*)"):
    """
    Simple open dialog in Maya
    Args:
        starting_directory (unicode): Starting directory. (default: project root directory)
        title (unicode): Dialog title. (default: "Open file")
        file_filter (unicode): File filter. (default: "All Files (*.*)")
    Returns:
        (unicode): Path to open (empty string if the dialog was cancelled)
    """
    if not starting_directory:
        starting_directory = cmds.workspace(rd=True, q=True)  # project root
    open_path = cmds.fileDialog2(fileFilter=file_filter, fileMode=1,
                                 startingDirectory=starting_directory,
                                 cap=title, dialogStyle=2)
    if not open_path:
        display_error("No path specified", True)
        # BUGFIX: return an empty string instead of falling through to
        # open_path[0] (TypeError on None) when the dialog is cancelled;
        # consistent with dialog_save
        return ""
    return open_path[0]
def dialog_select_dir(starting_directory="", title="Open file"):
    """
    Simple browse and select dir dialog
    Args:
        starting_directory (unicode): Starting directory. (default: project root directory)
        title (unicode): Dialog title. (default: "Open file")
    Returns:
        (unicode): Path to selected directory (empty string if cancelled)
    """
    if not starting_directory:
        starting_directory = cmds.workspace(rd=True, q=True)  # project root
    selected_dir = cmds.fileDialog2(dir=starting_directory,
                                    fileMode=3, cap=title,
                                    dialogStyle=2)
    if not selected_dir:
        display_error("No directory specified", True)
        # BUGFIX: return an empty string instead of falling through to
        # selected_dir[0] (TypeError on None) when the dialog is cancelled;
        # consistent with dialog_save
        return ""
    return selected_dir[0]
def dialog_yes_no(title="Confirmation", message="Are you sure?", icon=""):
    """
    Simple Yes/No confirmation dialog
    Args:
        title (unicode): Title of the dialog (default: Confirmation)
        message (unicode): Dialog message (default: "Are you sure?")
        icon (unicode): "question", "information", "warning" or "critical" (default: "")
    Returns:
        (bool): True if the user confirmed with "Yes", False otherwise
    """
    confirm = cmds.confirmDialog(title=title, message=message,
                                 button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No',
                                 ma='center', icon=icon)
    if confirm == "Yes":
        return True
    return False
######################################################################################
# MAYA UTILITIES
######################################################################################
def create_empty_node(input_name):
    """
    Creates a completely empty node
    Args:
        input_name (unicode): Name of the new empty node
    """
    cmds.select(cl=True)
    cmds.group(em=True, name=input_name)
    node_name = cmds.ls(sl=True)  # the new group is now the only selected node
    keyable_attributes = cmds.listAttr(node_name, k=True)
    for attribute in keyable_attributes:
        # lock and un-key every keyable attribute so the node exposes nothing
        cmds.setAttr('{0}.{1}'.format(node_name[0], attribute), l=True, k=False)
def get_node_data(node_name, settable=True, quiet=False):
    """
    Returns the node data in a dictionary
    Args:
        node_name (unicode): Name of the node to get data from
        settable (bool): Only the data that can be set (default: True)
        quiet (bool): Suppress the error printout for unreadable attributes
    Returns:
        Dictionary containing a dictionary with attribute: value
    """
    data = dict()
    node_attrs = cmds.listAttr(node_name, settable=settable)
    for attr in node_attrs:
        try:
            if cmds.attributeQuery(attr, node=node_name, attributeType=True) != "compound":
                data[attr] = cmds.getAttr("{}.{}".format(node_name, attr))
            else:
                # compound attributes are read through their children instead
                for sub_attr in cmds.attributeQuery(attr, node=node_name, listChildren=True):
                    data[sub_attr] = cmds.getAttr("{}.{}".format(node_name, sub_attr))
        except RuntimeError as err:
            # some attributes cannot be read with getAttr; skip and optionally report
            if not quiet:
                print("get_node_data() -> Couldn't get {}.{} because of: {}".format(node_name, attr, err))
    return data
def set_node_data(node_name, node_data):
    """
    Sets the node data contained in a dictionary
    Args:
        node_name (unicode): Name of the node to set data to
        node_data (dict): Dictionary of node data {attribute: value}
    """
    # delegate each attribute assignment to the type-aware set_attr helper
    for attr, value in node_data.items():
        set_attr(node_name, attr, value)
def purge_missing(objects):
    """
    Deletes non-existing objects within a list of objects
    Args:
        objects []: List of objects to check
    Returns:
        List of existing objects
    """
    existing = []
    for entry in objects:
        if isinstance(entry, (list, tuple)):
            # flatten nested sequences recursively
            existing.extend(purge_missing(entry))
        elif cmds.objExists(entry):
            existing.append(entry)
    return existing
def get_active_model_panel():
    """
    Get the active model editor panel
    Returns:
        modelPanel name (unicode)
    """
    active_panel = cmds.getPanel(wf=True)  # panel that currently has focus
    if cmds.getPanel(typeOf=active_panel) == 'modelPanel':
        return active_panel
    else:
        # focus is on a non-viewport panel; fall back to the active editor
        # that playblast would capture
        return cmds.playblast(ae=True)
def get_viewport_render_api():
    """
    Returns:
        (unicode): Either DirectX11, OpenGL or Legacy depending on the active API
    """
    engine = mel.eval("getPreferredRenderingEngine")
    if engine.startswith("DirectX11"):
        return "DirectX11"
    elif engine == "OpenGL":
        # a plain "OpenGL" engine string denotes the legacy OpenGL backend
        return "Legacy"
    # anything else (e.g. OpenGL Core Profile variants) is reported as "OpenGL"
    return "OpenGL"
def detach_shelf():
    """
    Detaches the current shelf into its own floating popup window by replaying
    the shelf's MEL definition inside a new shelfLayout.
    """
    shelf_top_level = mel.eval('$tempMelVar=$gShelfTopLevel')  # global shelf layout name
    shelf_name = cmds.shelfTabLayout(shelf_top_level, st=True, q=True)  # currently selected tab
    shelf_paths = os.path.abspath(cmds.internalVar(ush=True)).split(';')
    shelf_file = "shelf_{0}.mel".format(shelf_name)
    # find path of shelf
    shelf_file_path = ""
    for shelfPath in shelf_paths:
        files = os.listdir(shelfPath)
        if shelf_file in files:
            shelf_file_path = os.path.join(shelfPath, shelf_file)
    if not shelf_file_path:
        # shelves are only written to disk when Maya closes, hence the hint
        display_error("Can't detach shelf, try closing Maya with the shelf open and try again")
        return
    # read mel shelf file
    with open(shelf_file_path, 'r') as shelf_file:
        text = shelf_file.read()
    # build new mel command
    mel_commands = ""
    buttons = 0
    lines = [line for line in text.splitlines() if line]  # get rid of empty lines
    for line in lines:
        if line.strip() == "shelfButton":
            buttons += 1
        if buttons > 0:
            # everything from the first shelfButton onward is replayed as MEL
            mel_commands += line
    # drop the final two characters (presumably the shelf file's trailing
    # closing tokens - TODO confirm against an actual shelf_*.mel file)
    mel_commands = mel_commands[:-2]
    # check if window doesn't already exist
    window_title = "{} popup shelf".format(shelf_name)
    window_name = "{}_popup_shelf".format(shelf_name)
    if cmds.window(window_name, exists=True):
        cmds.showWindow(window_name)
        return
    # make a window, give it a layout, then make a model editor
    window = cmds.window(window_title, w=800, h=46, rtf=True)
    cmds.shelfLayout(spa=5)
    mel.eval(mel_commands)
    cmds.setParent('..')
    # show window, sized to fit the replayed buttons
    cmds.window(window, w=46 * buttons, e=True)
    cmds.showWindow(window)
def get_shapes(objects, renderable=False, l=False, quiet=False):
    """
    Get shapes of objects/components
    Args:
        objects (list, unicode): List of objects or components
        renderable (bool): If shape needs to be renderable
        l (bool): If full path is desired or not
        quiet (bool): If command should print errors or not
    Returns:
        (list): List of shapes
    """
    passed_objs = clist.flatten_list(objects)
    # convert any passed components and avoid duplicates (keeping the order)
    objs = list()
    for obj in passed_objs:
        if is_string(obj):
            obj = obj.split(".")[0]  # strip the component part ("node.vtx[0]" -> "node")
            if obj not in objs:
                objs.append(obj)
        else:
            print_error("Passing non-string values to get_shapes(): {}".format(objects))
    if not objs and not quiet:
        print_error("No mesh or component to extract the shape from: {}".format(objects))
        return []
    objs = purge_missing(objs)  # make sure all objects exist
    shapes = []
    for obj in objs:
        # if obj is itself a shape, ls() returns it directly
        potential_shape = cmds.ls(obj, shapes=True, l=l)
        if not potential_shape:
            # otherwise look for non-intermediate shape children of the transform
            potential_shape = cmds.listRelatives(obj, shapes=True, noIntermediate=True, path=True, fullPath=l) or []
        # check if renderable
        if renderable and potential_shape:
            if not is_renderable(potential_shape[0]):
                continue
        # add potential shape to list
        shapes.extend(potential_shape)
    if not shapes and not quiet:
        print_warning("No shape nodes found in {0}".format(objects))
    return shapes
def get_view_camera(shape=False):
    """
    Get the view camera transform or shape.
    Maya is quite inconsistent on what cmds.lookThru() returns, ergo this function
    Args:
        shape (bool): If the shape should be returned instead of the transform
    Returns:
        (unicode): Transform or shape of the current view camera
    """
    camera = cmds.lookThru(q=True)
    if shape:
        if not cmds.ls(camera, shapes=True):
            # lookThru returned the transform; resolve its shape
            camera = get_shapes(camera)
    elif cmds.ls(camera, shapes=True):
        # lookThru returned the shape; resolve its transform
        camera = get_transform(camera)
    return u_stringify(camera)
def is_renderable(obj, quiet=True):
    """
    Checks if object is renderable
    Args:
        obj (unicode): Name of object to verify
        quiet (bool): If the function should keep quiet (default=True)
    Returns:
        (bool) if its renderable or not
    """
    # unit test
    # make sure we are not working with components/attributes
    obj = cmds.ls(obj, objectsOnly=True, l=True)
    if isinstance(obj, list) or isinstance(obj, tuple):
        if len(obj) == 1:
            obj = obj[0]
        else:
            # ambiguous (zero or multiple matches) - cannot decide
            LOG.error("isRenderable - {0} cannot be checked".format(obj))
            return False
    if not cmds.objExists(obj):
        if not quiet:
            LOG.error("{0} does not exist, skipping it".format(obj))
        return False
    # doIt
    if cmds.getAttr("{0}.template".format(obj)):
        if not quiet:
            LOG.error("{0} is a template object, skipping it".format(obj))
        return False
    if not cmds.getAttr("{0}.visibility".format(obj)):
        # Let's check if it has any in-connection (its animated)
        if not cmds.listConnections("{0}.visibility".format(obj)):
            if not quiet:
                LOG.error("{0} is not visible, skipping it".format(obj))
            return False
    if not cmds.getAttr("{0}.lodVisibility".format(obj)):
        # Let's check if it has any in-connection (its animated)
        if not cmds.listConnections("{0}.lodVisibility".format(obj)):
            if not quiet:
                LOG.error("{0} has no lodVisibility, skipping it".format(obj))
            return False
    # TODO Display layer override check
    renderable = True
    # check parents recursively - a hidden ancestor hides the whole subtree
    parent = cmds.listRelatives(obj, parent=True, path=True)
    if parent:
        renderable = renderable and is_renderable(parent[0])
    return renderable
def get_transforms(objects, full_path=False):
    """
    Get transform nodes of objects
    Args:
        objects (list): List of objects
        full_path (bool): If full path or not
    Returns:
        List of transform nodes
    """
    # resolve each object through the single-node helper
    return [get_transform(obj, full_path) for obj in objects]
def get_transform(node, full_path=False):
    """
    Get transform node of object
    Args:
        node (unicode): Name of node
        full_path (bool): If full path or not
    Returns:
        Name of transform node ("" if the node has no transform parent)
    """
    if 'transform' != cmds.nodeType(node):
        if is_component(node):
            # resolve components through their shape first
            return get_transform(get_shapes(node, l=True)[0], full_path)
        try:
            return cmds.listRelatives(node, type='transform', fullPath=full_path, parent=True)[0]
        except TypeError:
            # listRelatives returned None -> no transform parent exists
            cmds.warning("{} doesn't have a transform".format(node))
            return ""
    else:
        # already a transform
        return node
def copy_attributes(attributes):
    """
    Batch copy attributes of first selected object to the rest of selected objects:
    e.g. lib.copyAttributes(['jointOrientX', 'jointOrientY', 'jointOrientZ'])
    Args:
        attributes (list): List of attributes (unicode)
    """
    selected = cmds.ls(sl=True)
    if selected:
        source = selected.pop(0)  # first selected object is the source; the rest are targets
        for attribute in attributes:
            source_value = cmds.getAttr("{0}.{1}".format(source, attribute))
            for target in selected:
                set_attr(target, attribute, source_value)
def split_node_attr(node_attr):
    """
    Split a node.attr in their individual elements
    Args:
        node_attr (unicode): A node with its attributes
    Returns:
        (unicode, unicode): The node and the attribute separated
    """
    # BUGFIX: the return value of u_stringify() was previously discarded
    node_attr = u_stringify(node_attr)
    if node_attr and '.' in node_attr:
        # split only on the first '.' so nested attribute paths stay intact
        node, attr = node_attr.split('.', 1)
        return node, attr
    # empty input, or no '.' present (previously produced a garbage split)
    print_error("'{}' could not be split into node and attribute".format(node_attr), True)
def set_attr(obj, attr, value, silent=False):
    """
    Generic setAttr convenience function which changes the Maya command depending on the data type
    Args:
        obj (unicode): node
        attr (unicode): attribute
        value (any): the value to set
        silent (bool): if the function is silent when errors occur
    Returns:
        (bool): True on success, False if the attribute could not be set
    """
    try:
        if isinstance(value, basestring):
            cmds.setAttr("{0}.{1}".format(obj, attr), value, type="string")
        elif isinstance(value, list) or isinstance(value, tuple):
            # sequence values: dispatch on length to the matching setAttr type
            if len(value) == 3:
                cmds.setAttr("{0}.{1}".format(obj, attr), value[0], value[1], value[2], type="double3")
            elif len(value) == 2:
                cmds.setAttr("{0}.{1}".format(obj, attr), value[0], value[1], type="double2")
            elif len(value) == 1:
                # check for list within a list generated by getAttr command
                if isinstance(value[0], list) or isinstance(value[0], tuple):
                    set_attr(obj, attr, value[0])
                    return
                cmds.setAttr("{0}.{1}".format(obj, attr), value[0])
            elif cmds.attributeQuery(attr, node=obj, attributeType=True) == "compound":
                # compound attributes are set child-by-child, in declaration order
                idx = 0
                for sub_attr in cmds.attributeQuery(attr, node=obj, listChildren=True):
                    set_attr(obj, sub_attr, value[idx])
                    idx += 1
            else:
                cmds.setAttr("{0}.{1}".format(obj, attr), tuple(value), type="doubleArray")
        else:
            # scalars (bool, int, float) can be set directly
            cmds.setAttr("{0}.{1}".format(obj, attr), value)
        return True
    except RuntimeError:
        if not silent:
            cmds.warning("{0}.{1} could not be set to {2}.".format(obj, attr, value))
            LOG.debug("Attribute of type: {0}.".format(type(value)))
        return False
def set_attrs(objs, attr_data, specific_attrs=None, silent=False):
    """
    Sets attributes of attr_data to obj
    Args:
        objs (unicode, list): Objects to set attributes onto
        attr_data (dict): Dictionary of attribute data { "attribute_name": value }
        specific_attrs (list): Specific attributes to set attr_data onto (if None, all attr_data)
        silent (bool): If warnings should be shown when attribute data could not be assigned
    """
    # restrict to the requested subset, otherwise apply every attribute in attr_data
    attrs_to_set = specific_attrs if specific_attrs else attr_data
    for obj in u_enlist(objs):
        for attr in attrs_to_set:
            set_attr(obj, attr, attr_data[attr], silent)
def break_connections(objs, attrs, delete_inputs=False):
    """
    Breaks all connections to specific attributes within objs
    Args:
        objs (unicode, list): Object which has connections
        attrs (unicode, list): Attribute to disconnect
        delete_inputs (bool): If the any remaining input nodes should be deleted
    """
    objs = u_enlist(objs)
    attrs = u_enlist(attrs)
    for obj in objs:
        for attr in attrs:
            source = "{}.{}".format(obj, attr)
            plugs = cmds.listConnections(source, p=True) or []
            for plug in plugs:
                if cmds.listConnections(source, s=True, d=False) is None:
                    # no incoming connections: 'source' is the output side of this plug
                    cmds.disconnectAttr(source, plug)
                else:
                    # source is a 'destination' (right side of connection)
                    if delete_inputs:
                        cmds.delete(source, inputConnectionsAndNodes=True)
                    else:
                        cmds.disconnectAttr(plug, source)
def disconnect_attrs(source, source_attr, dest, dest_attr):
    """
    Checks and disconnects source attribute from destination attribute, if possible
    Args:
        source (unicode): Source object
        source_attr (unicode): Source attribute
        dest (unicode): Destination object
        dest_attr (unicode): Destination attribute
    """
    # build each plug string once and reuse for the check and the disconnect
    src_plug = "{}.{}".format(source, source_attr)
    dst_plug = "{}.{}".format(dest, dest_attr)
    if cmds.isConnected(src_plug, dst_plug):
        cmds.disconnectAttr(src_plug, dst_plug)
def get_next_free_multi_index(node, attr, idx=0):
    """
    Find the next unconnected multi index starting at the passed index
    Args:
        node (unicode): node to search in
        attr (unicode): attribute to search in
        idx (int): starting index to search from
    Returns:
        The next free index
    """
    # assume a max of 10 million connections
    for candidate in range(idx, 10000000):
        plug = '{0}.{1}[{2}]'.format(node, attr, candidate)
        if not cmds.listConnections(plug):
            return candidate
    # No connections means the first index is available
    return 0
def get_next_free_multi_index_considering_children(node, attr, idx=0):
    """
    Find the next unconnected multi index, considering children attributes, starting at the passed index
    Args:
        node (unicode): node to search in
        attr (unicode): attribute to search in
        idx (int): starting index to search from
    Returns:
        The next free index
    """
    while idx < 10000000:  # assume a max of 10 million connections
        if len(cmds.listConnections('{0}.{1}[{2}]'.format(node, attr, idx)) or []) == 0:
            # the index itself is unconnected; verify its children are too
            free = True
            child_attrs = cmds.attributeQuery(attr, n=node, listChildren=True) or []
            for childAttr in child_attrs:
                if cmds.attributeQuery(childAttr, n="{0}.{1}".format(node, attr), multi=True):
                    # recurse into multi children; a non-zero result means occupied
                    if get_next_free_multi_index_considering_children("{0}.{1}[{2}]".format(node, attr, idx),
                                                                      childAttr) > 0:
                        free = False
                        break
            if free:
                return idx
        idx += 1
    # No connections means the first index is available
    return 0
def get_common_objs(objs1, objs2):
    """
    Get common objects (intersection) between obj lists
    Note: This does NOT keep the order of the elements in the lists
    Args:
        objs1 (list): List of objects (short or long names)
        objs2 (list): List of objects (short or long names)
    Returns:
        (list): list of common objects (long names) between both lists
    """
    # normalize both lists to long names so equal nodes compare equal
    long_names_1 = cmds.ls(objs1, l=True)
    long_names_2 = cmds.ls(objs2, l=True)
    return list(set(long_names_1).intersection(long_names_2))
def distance_between(obj1, obj2):
    """
    Distance between objects
    Args:
        obj1 (unicode): object 1
        obj2 (unicode): object 2
    Returns:
        Distance between the objects (in world space)
    """
    # world-space pivots; the xform piv query returns a 6-element list
    pivots = [cmds.xform(obj, q=True, worldSpace=True, piv=True) for obj in (obj1, obj2)]
    return distance(pivots[0], pivots[1])
def snap(source='', targets=None, snap_type="translation"):
    """
    Snap targets objects to source object
    If not specified, the first selected object is considered as source, the rest as targets
    Args:
        source (unicode): Source transform name
        targets (list): List of target transform names (unicode)
        snap_type: Either "translation" (default), "rotation" or "position" (translation + rotation)
    Note:
        Targets should not have their transformations frozen
    """
    # check if there are source and targets defined/selected
    if not source:
        selected = cmds.ls(sl=True)
        if selected:
            source = selected.pop(0)  # first selection becomes the source
            if not targets:
                targets = selected
        else:
            cmds.error("No source specified or selected.")
    if not targets:
        targets = cmds.ls(sl=True)
    else:
        if isinstance(targets, basestring):
            targets = [targets]  # accept a single target name
    if not targets:
        cmds.error("No targets to snap defined or selected")
    # using xform brings pr
    # proceed to snap
    if snap_type == "translation":
        world_translate_xform = cmds.xform('{0}'.format(source), q=True, worldSpace=True,
                                           piv=True)  # list with 6 elements
        for target in targets:
            cmds.xform('{0}'.format(target), worldSpace=True,
                       t=(world_translate_xform[0], world_translate_xform[1], world_translate_xform[2]))
        print_info("Translation snapped")
    if snap_type == "rotation":
        source_xform = cmds.xform('{0}'.format(source), q=True, worldSpace=True, ro=True)
        for target in targets:
            cmds.xform('{0}'.format(target), worldSpace=True, ro=(source_xform[0], source_xform[1], source_xform[2]))
        print_info("Rotation snapped")
    if snap_type == "position":
        # position = translation + rotation
        source_pos = cmds.xform('{0}'.format(source), q=True, worldSpace=True, piv=True)  # list with 6 elements
        source_rot = cmds.xform('{0}'.format(source), q=True, worldSpace=True, ro=True)
        for target in targets:
            cmds.xform('{0}'.format(target), worldSpace=True, t=(source_pos[0], source_pos[1], source_pos[2]))
            cmds.xform('{0}'.format(target), worldSpace=True, ro=(source_rot[0], source_rot[1], source_rot[2]))
        print_info("Position snapped")
######################################################################################
# RENDERING UTILITIES
######################################################################################
# Supported image extensions mapped to Maya's defaultRenderGlobals.imageFormat codes
IMGFORMATS = {'.jpg': 8, '.png': 32, '.tif': 3, '.exr': 40, '.iff': 7}
# preferred ordering of the formats above when presented to the user
IMGFORMATS_ORDER = ['.png', '.jpg', '.exr', '.tif', '.iff']
# BUGFIX: declared with {} (a set literal), but a set has no defined order -
# an "_ORDER" sequence must be a list to preserve the intended ordering
QUALITIES_ORDER = ['Standard', 'FXAA', '4x SSAA', 'TAA']
def is_component(obj):
    """
    Check if an object name refers to a component (e.g. "pSphere1.vtx[0]")
    Args:
        obj (unicode): Object name to check if its a component
    Returns:
        (bool): True if it is a component
    """
    # components are addressed with a '.' separator in Maya names
    return "." in obj
def change_texture_path(path):
    """
    Change all texture paths
    Useful when moving around the textures of the scene
    Args:
        path (string): Relative path from project (e.g. "sourceimages/house")
    """
    # repoint every file node at the new directory, keeping the file name
    for file_node in cmds.ls(et='file'):
        current_path = cmds.getAttr("{0}.fileTextureName".format(file_node))
        texture_name = os.path.basename(current_path)
        cmds.setAttr("{0}.fileTextureName".format(file_node),
                     "{0}/{1}".format(path, texture_name), type='string')
def screenshot(file_dir, width, height, img_format=".jpg", override="", ogs=True):
    """
    Renders a screenshot of the current viewport to disk
    Args:
        file_dir (unicode): File path to save the image to
        width (int): Width of the image in pixels
        height (int): Height of the image in pixels
        img_format (unicode): Image extension, one of IMGFORMATS (default ".jpg")
        override (unicode): Render override name to set while rendering (optional)
        ogs (bool): Render with ogsRender (True) or playblast (False)
    Returns:
        (unicode): Path where the image was saved
    """
    # append the extension if the path doesn't already end with it
    # (checking the suffix instead of substring membership avoids false
    # positives when the extension string appears elsewhere in the path)
    if not file_dir.endswith(img_format):
        file_dir += img_format
    # remember current render settings so they can be restored afterwards
    prev_format = cmds.getAttr("defaultRenderGlobals.imageFormat")
    prev_override = cmds.getAttr("hardwareRenderingGlobals.renderOverrideName")
    # set render settings
    cmds.setAttr("defaultRenderGlobals.imageFormat", IMGFORMATS[img_format])
    if override:
        cmds.setAttr("hardwareRenderingGlobals.renderOverrideName", override, type="string")
    try:
        if ogs:
            # render the current viewport frame
            rendered_dir = cmds.ogsRender(cv=True, w=width, h=height)
            shutil.move(os.path.abspath(rendered_dir), os.path.abspath(file_dir))  # move to specified dir
        else:
            frame = cmds.currentTime(q=True)
            cmds.playblast(cf=file_dir, fo=True, fmt='image', w=width, h=height,
                           st=frame, et=frame, v=False, os=True)
    finally:
        # bring everything back to normal, even if rendering failed
        cmds.setAttr("defaultRenderGlobals.imageFormat", prev_format)
        cmds.setAttr("hardwareRenderingGlobals.renderOverrideName", prev_override, type="string")
    print_info("Image saved successfully in {0}".format(file_dir))
    return file_dir
# __ __ _ ____ ___ ____ ___
# | \/ | __ _ _ _ __ _ / \ | _ \_ _| |___ \ / _ \
# | |\/| |/ _` | | | |/ _` | / _ \ | |_) | | __) || | | |
# | | | | (_| | |_| | (_| | / ___ \| __/| | / __/ | |_| |
# |_| |_|\__,_|\__, |\__,_| /_/ \_\_| |___| |_____(_)___/
# |___/
def get_m_object(node, get_type=False):
    """
    Gets mObject of a node (Python API 2.0)
    Args:
        node (unicode): Name of node
        get_type (bool): If the api type should be returned
    Returns:
        (MObject, unicode): The MObject of the node or its API type
    """
    sel = om.MSelectionList()
    sel.add(node)
    m_obj = sel.getDependNode(0)
    if get_type:
        return m_obj.apiTypeStr
    return m_obj
def print_info(info):
    """
    Show an informational message in the command response area
    (to the right of the command line)
    Args:
        info (unicode): Information to be displayed
    """
    om.MGlobal.displayInfo(info)
def display_info(info):
    """
    Displays the information on the viewport
    Prints the information statement in the command response (to the right of the command line)
    Args:
        info (unicode): Information to be displayed
    """
    # in-view messages require an interactive session and Maya 2019+
    show_in_view = get_maya_version() > 2018 and not cmds.about(batch=True)
    if show_in_view:
        markup = '<span style="color:#82C99A;">{}</span>'.format(info)
        cmds.inViewMessage(msg=markup, pos="midCenter", fade=True)
    print_info(info)
def print_warning(warning):
    """
    Show a warning in the command response area
    (to the right of the command line)
    Args:
        warning (unicode): Warning to be displayed
    """
    om.MGlobal.displayWarning(warning)
def display_warning(warning):
    """
    Displays a warning on the viewport
    Prints the warning statement in the command response (to the right of the command line)
    Args:
        warning (unicode): Warning to be displayed
    """
    # in-view messages require an interactive session and Maya 2019+
    show_in_view = get_maya_version() > 2018 and not cmds.about(batch=True)
    if show_in_view:
        markup = '<span style="color:#F4FA58;">Warning: </span><span style="color:#DDD">{}</span>'.format(warning)
        cmds.inViewMessage(msg=markup, pos="midCenter", fade=True)
    print_warning(warning)
def print_error(error, show_traceback=False):
    """
    Prints the error statement in the command response (to the right of the command line)
    Args:
        error (unicode): Error to be displayed
        show_traceback (bool): If the error should stop the execution and show a traceback
    """
    if show_traceback:
        # defer the display so it still reaches the command response after raising
        cmds.evalDeferred(lambda: print_error(error))
        raise RuntimeError(error)
    om.MGlobal.displayError(error)
def display_error(error, show_traceback=False):
    """
    Displays an error on the viewport
    Prints the error statement in the command response (to the right of the command line)
    Args:
        error (unicode): Error to be displayed
        show_traceback (bool): If python should error out and show a traceback
    """
    # in-view messages require an interactive session and Maya 2019+
    show_in_view = get_maya_version() > 2018 and not cmds.about(batch=True)
    if show_in_view:
        markup = '<span style="color:#F05A5A;">Error: </span><span style="color:#DDD">{}</span>'.format(error)
        cmds.inViewMessage(msg=markup, pos="midCenterBot", fade=True)
    print_error(error, show_traceback)
# _ _
# _ __ __ _| |_| |__
# | '_ \ / _` | __| '_ \
# | |_) | (_| | |_| | | |
# | .__/ \__,_|\__|_| |_|
# |_|
class Path(object):
    """
    Chainable wrapper around a file-system path (Python 2/3 compatible)
    """
    def __init__(self, path):
        """
        Args:
            path (unicode, str): Path to wrap
        """
        if isinstance(path, str):
            self.path = u_decode(path)
        elif get_py_version() < 3 and isinstance(path, unicode):
            self.path = path
        else:
            # fail loudly; previously a non-string on Python 3 fell through
            # silently, leaving the instance without a 'path' attribute
            print_error("{} is not a string".format(path), True)

    def parent(self):
        """
        Navigates to the parent of the path
        Returns:
            Path (obj): self, now pointing at the parent directory
        """
        self.path = os.path.abspath(os.path.join(self.path, os.pardir))
        return self

    def child(self, child):
        """
        Joins the child to the path
        Args:
            child: folder to join to the path
        Returns:
            Path (obj): self, now pointing at the child
        """
        self.path = os.path.abspath(os.path.join(self.path, u_stringify(child)))
        return self

    def create_dir(self):
        """
        Creates the directory of the path, if it doesn't exist already
        Returns:
            Path (obj): self
        """
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        return self

    def delete(self):
        """
        Deletes the file or directory tree at the current path
        Returns:
            Path (obj): self (note: previously documented as returning the parent)
        """
        if self.exists():
            import shutil
            try:
                shutil.rmtree(self.path)
            except OSError:
                # not a removable directory; it is most likely a file
                os.remove(self.path)
        return self

    def basename(self):
        """
        Returns the basename of the path
        """
        return os.path.basename(self.path)

    def exists(self):
        """
        Returns if current path exists or not
        Returns:
            bool
        """
        return os.path.exists(self.path)

    def swap_extension(self, new_extension):
        """
        Swaps the current file extension, if available
        Args:
            new_extension (unicode): New extension including the dot (e.g. ".zip")
        Returns:
            Path (obj): modified path obj
        """
        self.path, ext = os.path.splitext(self.path)
        self.path += new_extension
        return self

    def slash_path(self):
        """
        Returns the path with forward slashes
        Returns:
            path (unicode): path with forward slashes
        """
        return self.path.replace(os.sep, '/')

    def list_dir(self):
        """
        List everything in a directory - files and directories.
        Returns:
            (list): list with everything in the directory (empty on failure)
        """
        contents = []
        try:
            contents = os.listdir(self.path)
        except OSError:
            # OSError covers the old WindowsError, which is undefined
            # on non-Windows Python 3 interpreters
            traceback.print_exc()
        return contents

    def find_all(self, filename, relative=True):
        """
        Finds the filename and lists its locations in the Path.
        Args:
            filename (unicode): File name to search for
            relative (bool): Return paths relative to this Path (default True)
        Returns:
            (list): list of found locations
        """
        found = []
        for root, dirs, files in os.walk(self.path):
            if filename in files:
                found.append(os.path.join(root, filename))
        if relative:
            return [os.path.relpath(f, self.path) for f in found]
        else:
            return found

    def file_size(self):
        """
        Returns the file size of the path in MB
        Returns:
            (float): Size of the file in MB (0 if the path doesn't exist)
        """
        if self.exists():
            return os.path.getsize(self.path) / 1024.0 / 1024.0
        return 0
# _ _
# ___| |_ _ __(_)_ __ __ _
# / __| __| '__| | '_ \ / _` |
# \__ \ |_| | | | | | | (_| |
# |___/\__|_| |_|_| |_|\__, |
# |___/
def is_string(v):
    """
    Tells whether a variable holds string data
    Args:
        v (unicode): variable to check if it's a string
    Returns:
        (bool): If variable is a string
    """
    # basestring is aliased to 'str' on Python 3 by the compat shim
    return isinstance(v, basestring)
def to_camel_case(text, split=" "):
    """
    Converts text to camel case, e.g. ("the camel is huge" => "theCamelIsHuge")
    Args:
        text (string): Text to be camel-cased
        split (char): Char to split text into
    Returns:
        (string): camelCased text; the input unchanged when it contains
        no non-empty tokens (previously an empty string or a string
        starting with the split char raised IndexError)
    """
    # drop empty tokens so leading/repeated separators can't crash indexing
    tokens = [t for t in text.split(split) if t]
    if not tokens:
        return text
    camel_case_text = tokens[0][0].lower() + tokens[0][1:]
    for token in tokens[1:]:
        camel_case_text += token.capitalize()
    return camel_case_text
def de_camelize(text):
    """
    Converts camel case to normal case, e.g. ("theCamelIsHuge" => "The Camel Is Huge")
    Args:
        text (string): Text to be decamelized
    """
    # first split word boundaries before Capitalized words, then between
    # a lowercase/digit and an uppercase letter, then title-case everything
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', text)
    spaced = re.sub('([a-z0-9])([A-Z])', r'\1 \2', spaced)
    return spaced.title()
def to_pascal_case(text, split=" "):
    """
    Converts text to pascal case, e.g. ("the camel is huge" => "TheCamelIsHuge")
    Args:
        text (unicode): Text to be pascal-cased
        split (char): Char to split text into
    Returns:
        (unicode) string as PascalCase
    """
    # pascal case is camel case with the first letter upper-cased
    camel = to_camel_case(text, split)
    if not camel:
        return ""
    return camel[0].upper() + camel[1:]
# _ _
# _ __ ___ __ _| |_| |__
# | '_ ` _ \ / _` | __| '_ \
# | | | | | | (_| | |_| | | |
# |_| |_| |_|\__,_|\__|_| |_|
#
def clamp(min_v, max_v, value):
    """
    Clamps a value between a min and max value
    Args:
        min_v: Minimum value
        max_v: Maximum value
        value: Value to be clamped
    Returns:
        Returns the clamped value
    """
    if value < min_v:
        return min_v
    if value > max_v:
        return max_v
    return value
def lerp(value1=0.0, value2=1.0, parameter=0.5):
    """
    Linearly interpolates between value1 and value2 according to a parameter.
    Args:
        value1: First interpolation value
        value2: Second interpolation value
        parameter: Parameter of interpolation
    Returns:
        The linear interpolation between value 1 and value 2
    """
    # value1 when parameter == 0, value2 when parameter == 1
    return value1 + (value2 - value1) * parameter
def saturate(value):
    """
    Saturates the value between 0 and 1
    Args:
        value: Value to be saturated
    Returns:
        The saturated value between 0 and 1
    """
    lower, upper = 0.0, 1.0
    return clamp(lower, upper, value)
def linstep(min_v=0.0, max_v=1.0, value=0.5):
    """
    Linear step function
    Args:
        min_v: minimum value
        max_v: maximum value
        value: value to calculate the step in
    Returns:
        The percentage [between 0 and 1] of the distance between min and max (e.g. linstep(1, 3, 2.5) -> 0.75).
    """
    fraction = (value - min_v) / (max_v - min_v)
    return saturate(fraction)
def distance(v1, v2):
    """
    Distance between vectors v1 and v2
    Args:
        v1 (list): vector 1
        v2 (list): vector 2
    Returns:
        Distance between the vectors
    """
    dx = v2[0] - v1[0]
    dy = v2[1] - v1[1]
    dz = v2[2] - v1[2]
    return math.sqrt(dx * dx + dy * dy + dz * dz)
def remap(value, old_min, old_max, new_min, new_max):
    """
    Remaps the value to a new min and max value
    Args:
        value: value to remap
        old_min: old min of range
        old_max: old max of range
        new_min: new min of range
        new_max: new max of range
    Returns:
        The remapped value in the new range
    """
    # normalize into the old range, then scale and shift into the new one
    normalized = (value - old_min) / (old_max - old_min)
    new_span = new_max - new_min
    return new_min + normalized * new_span
#
# _ __ __ _ _ __ ___ ___ ___ _ __ __ _ ___ ___ ___
# | '_ \ / _` | '_ ` _ \ / _ \/ __| '_ \ / _` |/ __/ _ \/ __|
# | | | | (_| | | | | | | __/\__ \ |_) | (_| | (_| __/\__ \
# |_| |_|\__,_|_| |_| |_|\___||___/ .__/ \__,_|\___\___||___/
# |_|
def get_namespaces(objects=None):
    """
    Get a list of all namespaces within objects or of the entire scene
    Args:
        objects (unicode, list): List of objects to get namespaces from
    Returns:
        List of namespaces
    """
    if objects:
        # a single string argument is treated as a one-element list
        if isinstance(objects, basestring):
            objects = [objects]
        # everything before the last ':' is the namespace of the object
        found = set()
        for obj in objects:
            ns = obj.rpartition(':')[0]
            if ns:
                found.add(ns)
        return list(found)
    # no objects given: query every namespace in the scene
    namespaces = cmds.namespaceInfo(listOnlyNamespaces=True, recurse=True)
    # drop Maya's built-in namespaces when present
    for builtin_ns in ('UI', 'shared'):
        try:
            namespaces.remove(builtin_ns)
        except ValueError:
            pass
    return list(namespaces)
def change_namespace(object_name, change_dict):
    """
    Changes the namespaces of the object
    Args:
        object_name (unicode): Name to change namespace
        change_dict (dict): Dictionary of keys (unicode), values (unicode) to change namespaces to (key->value)
    Returns:
        String with the namespaces changed
    """
    found = get_namespaces(object_name)
    if found:
        ns_key = found[0] + ":"
        replacement = change_dict.get(ns_key)
        if replacement is not None:
            object_name = object_name.replace(ns_key, replacement)
    return object_name
def remove_namespace_from_string(object_name):
    """
    Removes the namespace from string
    Args:
        object_name (unicode): Object name to remove namespace from
    Returns:
        String: New name without namespaces ("" when no string was found)
    """
    # a one-element list collapses to its first item (a one-char string
    # collapses to itself, which is harmless)
    if len(object_name) == 1:
        object_name = object_name[0]
    if isinstance(object_name, basestring):
        # the last ':'-separated part is the plain name; the previous
        # implementation had the same return duplicated in a dead branch
        return object_name.split(':')[-1]
    print_warning("No string found in {0}".format(object_name))
    return ""
# _ _ _ _
# _ _ _ __ (_) |_ | |_ ___ ___| |_ ___
# | | | | '_ \| | __| | __/ _ \/ __| __/ __|
# | |_| | | | | | |_ | || __/\__ \ |_\__ \
# \__,_|_| |_|_|\__| \__\___||___/\__|___/
#
def u_enlist(arg, silent=True):
    """
    Unit test to check if given argument is not a list
    Args:
        arg: argument to put into a list
        silent (bool): If the function should print info when the wrong data was given (default=True)
    Returns:
        List: The argument in a list (None becomes an empty list)
    """
    if arg is None:
        return []
    if isinstance(arg, basestring):
        if not silent:
            LOG.info("{0} is a string, enlisting it".format(arg))
        return [arg]
    if isinstance(arg, int):
        if not silent:
            LOG.info("{0} is an int, enlisting it".format(arg))
        return [arg]
    return arg
def u_stringify(arg, silent=True):
    """
    Unit test to check if given argument is not a string
    Args:
        arg: argument to put into a string
        silent (bool): If the function should print info when the wrong data was given (default=True)
    Returns:
        (unicode): The argument in a string
    """
    if isinstance(arg, (list, tuple)):
        if not silent:
            LOG.info("{0} is a list/tuple, taking first element".format(arg))
        # guard against empty sequences, which previously raised IndexError
        arg = arg[0] if arg else None
    # note: no longer an elif chain, so an int extracted from a
    # list/tuple is also converted instead of being returned raw
    if isinstance(arg, int):
        arg = str(arg)
    elif arg is None:
        arg = ""
    return arg
def u_decode(text):
    """
    Unit test to make sure text is decoded
    Args:
        text (unicode, str): text to decode
    Returns:
        Decoded text
    """
    # only Python 2 'str' objects have .decode(); guarding with hasattr
    # prevents an AttributeError on Python 3 when the file-system
    # encoding is not utf-8
    if sys.getfilesystemencoding() != "utf-8" and hasattr(text, "decode"):
        text = text.decode()
    return text
def u_internet():
    """
    Unit test to check if computer has internet connection
    Returns:
        Bool: True if computer has internet
    """
    # Python 2 ships httplib; Python 3 renamed it to http.client
    try:
        import httplib
    except ImportError:
        import http.client as httplib
    connection = httplib.HTTPConnection("microsoft.com", timeout=5)
    try:
        connection.request("HEAD", "/")
    except:
        # any failure (DNS, timeout, refused) means no usable connection
        return False
    else:
        return True
    finally:
        connection.close()
# _
# ___(_)_ __
# |_ / | '_ \
# / /| | |_) |
# /___|_| .__/
# |_|
def save_zip():
    """
    Compress the saved opened scene file within the same directory
    Returns:
        Path (unicode): Path to saved zip file
    """
    import zipfile
    scene_name = cmds.file(q=True, sn=True, shn=True)
    if not scene_name:
        print_error("Current scene has not been saved before")
        return
    scene_path = Path(cmds.file(q=True, sn=True))
    zip_path = Path(scene_path.path).swap_extension(".zip")
    # the context manager closes the archive even if writing fails
    with zipfile.ZipFile(zip_path.path, 'w', zipfile.ZIP_DEFLATED) as archive:
        archive.write(scene_path.path, scene_name)
    LOG.info("Saved scene compressed as: {0}".format(zip_path.path))
    return zip_path.path
# __ _
# / _(_)_ _____ ___
# | |_| \ \/ / _ \/ __|
# | _| |> < __/\__ \
# |_| |_/_/\_\___||___/
#
def remove_callback(procedure):
    """
    Remove callbacks that do not exist anymore (created for example from a plugin which you don't have)
    E.g., remove_callback("CgAbBlastPanelOptChangeCallback")
    Args:
        procedure (unicode): Name of callback procedure
    """
    for panel in cmds.getPanel(typ="modelPanel"):
        # clear the editorChanged callback when it matches the broken procedure
        if cmds.modelEditor(panel, query=True, editorChanged=True) == procedure:
            cmds.modelEditor(panel, edit=True, editorChanged="")
            print_info("{0} successfully removed".format(procedure))
|
# -*- coding:utf-8 -*-
import os
import time
import sys
import traceback
from selenium import webdriver
# from pyvirtualdisplay import Display
"""
wget https://github.com/mozilla/geckodriver/releases/download/v0.19.0/geckodriver-v0.19.0-linux64.tar.gz
tar -zxvf geckodriver-v0.19.0-linux64.tar.gz
mv geckodriver /usr/bin/geckodriver
export PATH=$PATH:/usr/bin/geckodriver
pip install pyvirtualdisplay
If you see "Error: GDK_BACKEND does not match available displays", install pyvirtualdisplay as shown above.
"""
# project root and script directory, resolved relative to this file
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../edm_web'))
SCRIPT = os.path.join(ROOT, 'script')
# Linux/default geckodriver location
GeckoDriver_Path = '/usr/bin/geckodriver'
# GeckoDriver_Path = os.path.join(SCRIPT, "geckodriver")
# NOTE: this Windows path intentionally overrides the value above;
# a raw string prevents accidental backslash escapes in the path
GeckoDriver_Path = r'D:\geckodriver\geckodriver.exe'
def get_error_addrs(username, passwd, check_addrs=None):
    """
    Log in to QQ mail with Selenium and collect the recipient addresses
    the compose page flags as invalid.
    Args:
        username: QQ mail account name
        passwd: QQ mail account password
        check_addrs: ';'-separated string of addresses to validate
    Returns:
        list of flagged address strings, [] when no addresses were given,
        or None when the check could not be completed
    """
    res = None
    if not check_addrs: return []
    try:
        # display = Display(visible=0, size=(800, 600))
        # display.start()
        driver = webdriver.Firefox(executable_path=GeckoDriver_Path)
        driver.set_page_load_timeout(10) # prevent the page from loading forever
        # QQ mail login page
        driver.get("https://mail.qq.com/")
        # switch into the login iframe
        driver.switch_to.frame("login_frame")
        driver.find_element_by_id("switcher_plogin").click()
        elem_user = driver.find_element_by_name("u")
        elem_user.clear()
        elem_user.send_keys(username)
        elem_pwd = driver.find_element_by_name("p")
        elem_pwd.clear()
        elem_pwd.send_keys(passwd)
        elem_but = driver.find_element_by_id("login_button")
        # elem_pwd.send_keys(Keys.RETURN)
        elem_but.click()
        # jump out of all frames back to the top document
        driver.switch_to_default_content()
        time.sleep(0.1)
        # click the "compose" button
        elem_but_w = driver.find_element_by_id("composebtn")
        elem_but_w.click()
        # switch to the main iframe on the right
        main_Frame1 = driver.find_element_by_id("mainFrame")
        driver.switch_to_frame(main_Frame1)
        # recipient field
        time.sleep(1)
        driver.find_element_by_xpath('''//div[@id="toAreaCtrl"]/div[2]/input''').send_keys(check_addrs)
        time.sleep(1)
        print dir(driver)
        count = 10
        # poll up to 10 times for recipients marked with the error style
        while count:
            print '----------------', count
            _t = driver.find_element_by_xpath('''//div[@id="toAreaCtrl"]''')
            errors = _t.find_elements_by_css_selector("div.addr_base.addr_error")
            res = [e.text for e in errors]
            if res: break
            count -= 1
            time.sleep(1)
        driver.refresh()
    except BaseException as e:
        print >>sys.stderr, e
        print >>sys.stderr, traceback.format_exc()
    finally:
        # driver.quit()
        # display.stop()
        # NOTE(review): 'return' inside 'finally' swallows any in-flight exception
        return res
############################################################
# Safely call an object: swallow and log any exception
def safe_call(fn, *args, **kwargs):
    """Call fn(*args, **kwargs); on any exception, log it to stderr and return None."""
    try :
        return fn(*args, **kwargs)
    except Exception, e:
        # sys.stderr.write('call "%s" failure\n %s' % (fn.__name__, e.message))
        # sys.stderr.write(traceback.format_exc())
        print >>sys.stderr, 'call "%s" failure\n %s' % (fn.__name__, e.message)
        print >>sys.stderr, traceback.format_exc()
        return None
# Wait for a call to succeed (with a bounded number of retries)
def time_call(fn, *args, **kwargs):
    """Retry fn up to 3 times, sleeping 3s between attempts; return its first non-None result."""
    try_count=3
    while try_count > 0 :
        res = safe_call(fn, *args, **kwargs)
        if res is not None:
            return res
        print >>sys.stderr, 'try call "%s" count: %d' % (fn.__name__, try_count)
        # sys.stderr.write('try call "%s" count: %d' % (fn.__name__, try_count))
        try_count -= 1
        time.sleep(3)
    return
if __name__ == "__main__":
    # account name and password
    # WARNING(review): hard-coded credentials committed to source — rotate them and load from config/env instead
    username="2948906420@qq.com"
    passwd="lanlan13266734099"
    # username="1793302800@qq.com"
    # passwd="marxkarlmmx"
    # ';'-separated batch of 100 throwaway addresses to validate
    check_addrs = "fdsg54ge10001@qq.com;fdsg54ge10002@qq.com;fdsg54ge10003@qq.com;fdsg54ge10004@qq.com;fdsg54ge10005@qq.com;fdsg54ge10006@qq.com;fdsg54ge10007@qq.com;fdsg54ge10008@qq.com;fdsg54ge10009@qq.com;fdsg54ge10010@qq.com;fdsg54ge10011@qq.com;fdsg54ge10012@qq.com;fdsg54ge10013@qq.com;fdsg54ge10014@qq.com;fdsg54ge10015@qq.com;fdsg54ge10016@qq.com;fdsg54ge10017@qq.com;fdsg54ge10018@qq.com;fdsg54ge10019@qq.com;fdsg54ge10020@qq.com;fdsg54ge10021@qq.com;fdsg54ge10022@qq.com;fdsg54ge10023@qq.com;fdsg54ge10024@qq.com;fdsg54ge10025@qq.com;fdsg54ge10026@qq.com;fdsg54ge10027@qq.com;fdsg54ge10028@qq.com;fdsg54ge10029@qq.com;fdsg54ge10030@qq.com;fdsg54ge10031@qq.com;fdsg54ge10032@qq.com;fdsg54ge10033@qq.com;fdsg54ge10034@qq.com;fdsg54ge10035@qq.com;fdsg54ge10036@qq.com;fdsg54ge10037@qq.com;fdsg54ge10038@qq.com;fdsg54ge10039@qq.com;fdsg54ge10040@qq.com;fdsg54ge10041@qq.com;fdsg54ge10042@qq.com;fdsg54ge10043@qq.com;fdsg54ge10044@qq.com;fdsg54ge10045@qq.com;fdsg54ge10046@qq.com;fdsg54ge10047@qq.com;fdsg54ge10048@qq.com;fdsg54ge10049@qq.com;fdsg54ge10050@qq.com;fdsg54ge10051@qq.com;fdsg54ge10052@qq.com;fdsg54ge10053@qq.com;fdsg54ge10054@qq.com;fdsg54ge10055@qq.com;fdsg54ge10056@qq.com;fdsg54ge10057@qq.com;fdsg54ge10058@qq.com;fdsg54ge10059@qq.com;fdsg54ge10060@qq.com;fdsg54ge10061@qq.com;fdsg54ge10062@qq.com;fdsg54ge10063@qq.com;fdsg54ge10064@qq.com;fdsg54ge10065@qq.com;fdsg54ge10066@qq.com;fdsg54ge10067@qq.com;fdsg54ge10068@qq.com;fdsg54ge10069@qq.com;fdsg54ge10070@qq.com;fdsg54ge10071@qq.com;fdsg54ge10072@qq.com;fdsg54ge10073@qq.com;fdsg54ge10074@qq.com;fdsg54ge10075@qq.com;fdsg54ge10076@qq.com;fdsg54ge10077@qq.com;fdsg54ge10078@qq.com;fdsg54ge10079@qq.com;fdsg54ge10080@qq.com;fdsg54ge10081@qq.com;fdsg54ge10082@qq.com;fdsg54ge10083@qq.com;fdsg54ge10084@qq.com;fdsg54ge10085@qq.com;fdsg54ge10086@qq.com;fdsg54ge10087@qq.com;fdsg54ge10088@qq.com;fdsg54ge10089@qq.com;fdsg54ge10090@qq.com;fdsg54ge10091@qq.com;fdsg54ge10092@qq.com;fdsg54ge10093@qq.com;fdsg54ge10094@qq.com;fdsg54ge10095@qq.com;fdsg54ge10096@qq.com;fdsg54ge10097@qq.com;fdsg54ge10098@qq.com;fdsg54ge10099@qq.com;fdsg54ge10100@qq.com;"
    # check_addrs = "1248644045@qq.com;1@qq.com;"
    # check_addrs = "fdsg54ge10001@qq.com;fdsg54ge10002@qq.com;fdsg54ge10003@qq.com;fdsg54ge10004@qq.com;fdsg54ge10005@qq.com;fdsg54ge10006@qq.com;fdsg54ge10007@qq.com;fdsg54ge10008@qq.com;"
    t1 = time.time()
    res = time_call(get_error_addrs, username, passwd, check_addrs)
    print res
    # elapsed time of the whole check
    print '-------------------', time.time() - t1
|
# -*- coding: utf-8 -*-
"""
authorization handlers base module.
"""
from abc import abstractmethod
import pyrin.security.session.services as session_services
import pyrin.utils.misc as misc_utils
from pyrin.core.globals import _, _n
from pyrin.core.exceptions import CoreNotImplementedError
from pyrin.security.authorization.interface import AbstractAuthorizerBase
from pyrin.security.exceptions import UserIsNotActiveError
from pyrin.security.authorization.handlers.exceptions import AuthorizerNameIsRequiredError
from pyrin.security.authorization.exceptions import UserNotAuthenticatedError, \
PermissionDeniedError
class AuthorizerBase(AbstractAuthorizerBase):
    """
    authorizer base class.
    all application authorizers must be subclassed from this.
    """
    # each subclass must set an authorizer name in this attribute.
    _name = None

    def __init__(self, *args, **options):
        """
        initializes an instance of AuthorizerBase.
        :raises AuthorizerNameIsRequiredError: authorizer name is required error.
        """
        super().__init__()
        # a blank or whitespace-only name is treated the same as a missing one
        if not self._name or self._name.isspace():
            raise AuthorizerNameIsRequiredError('Authorizer [{instance}] does not '
                                                'have a name.'.format(instance=self))

    def _authorize_access(self, user, **options):
        """
        authorizes the given user for custom accesses.
        this method could be overridden in subclasses if required.
        it must raise an error if authorization failed.
        the default implementation is a no-op (access granted).
        :param user: user identity to be checked if it is authorized.
        :keyword dict user_info: user info to be used to check for user.
        :raises AuthorizationFailedError: authorization failed error.
        """
        pass

    def _is_superuser(self, user, **options):
        """
        gets a value indicating that given user is superuser.
        this method could be overridden in subclasses to provide actual
        implementation for checking that a user is superuser.
        otherwise this method will always return False.
        :param user: user identity to be checked if it is superuser.
        :keyword dict user_info: user info to be used to check for user.
        :rtype: bool
        """
        return False

    def _is_active(self, user, **options):
        """
        gets a value indicating that the given user is active.
        this method could be overridden in subclasses to provide actual
        implementation for checking that a user is active.
        otherwise this method will always return True.
        :param user: user identity to be checked if it is active.
        :keyword dict user_info: user info to be used to check for user.
        :rtype: bool
        """
        return True

    def _authorize_permissions(self, user, permissions, **options):
        """
        authorizes the given user for specified permissions.
        if user does not have each one of the specified
        permissions, an error will be raised.
        :param user: user identity to be checked if it is authorized.
        :param PermissionBase | list[PermissionBase] permissions: permissions to check
                                                                  for user authorization.
        :keyword dict user_info: user info to be used for authorization.
        :raises PermissionDeniedError: permission denied error.
        """
        # normalize a single permission into a tuple for uniform handling
        permissions = misc_utils.make_iterable(permissions, tuple)
        if len(permissions) > 0:
            if self._has_permission(user, permissions, **options) is not True:
                # _n selects the singular/plural message based on the count
                message = _n('You do not have the required permission {permissions}',
                             'You do not have the required permissions {permissions}',
                             len(permissions))
                raise PermissionDeniedError(message.format(permissions=list(permissions)))

    def authorize(self, user, permissions, **options):
        """
        authorizes the given user for specified permissions.
        if user does not have each one of the specified
        permissions, an error will be raised.
        :param user: user identity to authorize permissions for.
        :param PermissionBase | list[PermissionBase] permissions: permissions to check
                                                                  for user authorization.
        :keyword dict user_info: user info to be used for authorization.
        :raises UserNotAuthenticatedError: user not authenticated error.
        :raises UserIsNotActiveError: user is not active error.
        :raises AuthorizationFailedError: authorization failed error.
        :raises PermissionDeniedError: permission denied error.
        """
        if user is None:
            raise UserNotAuthenticatedError(_('User has not been authenticated.'))
        if self._is_active(user, **options) is not True:
            raise UserIsNotActiveError(_('Your user is not active. If you think that this '
                                         'is a mistake, please contact the support team.'))
        # superusers skip both custom access checks and permission checks
        if self._is_superuser(user, **options) is not True:
            self._authorize_access(user, **options)
            if permissions:
                self._authorize_permissions(user, permissions, **options)

    def is_superuser(self):
        """
        gets a value indicating that the current user is superuser.
        if you want to provide the actual implementation for checking that a
        user is superuser, you should implement the `_is_superuser` method.
        otherwise this method will always return False.
        :rtype: bool
        """
        # checks against the user of the current request session
        user = session_services.get_current_user()
        user_info = session_services.get_current_user_info()
        return self._is_superuser(user, user_info=user_info)

    @abstractmethod
    def _has_permission(self, user, permissions, **options):
        """
        gets a value indicating that given user has the requested permissions.
        this method must be overridden in subclasses to perform a check on permissions.
        :param user: user identity to authorize permissions for.
        :param tuple[PermissionBase] permissions: permissions to check for user authorization.
        :keyword dict user_info: user info to be used for authorization.
        :raises CoreNotImplementedError: core not implemented error.
        :rtype: bool
        """
        raise CoreNotImplementedError()

    @property
    def name(self):
        """
        gets the name of this authorizer.
        :rtype: str
        """
        return self._name
|
import logging
import numpy as np
import pandas as pd
import torch
from torch.nn.parameter import Parameter
from src.model.ae_modules import (
PrototypeAutoencoder,
PrototypeTransitionAutoencoder
)
from src.model.standard_autoencoder import StandardAutoencoderModel
from src.model.misc import (
diversity_loss,
interpretability_loss,
compute_squared_distances
)
from src.preprocess.dataloader import (
DataSet,
Random
)
LOGGER = logging.getLogger(__name__)
class PrototypeAutoencoderModel(StandardAutoencoderModel):
    """
    Autoencoder with a learnable prototype layer. Prototypes are regularized
    (diversity / clustering / evidence terms) and periodically projected onto
    the closest real hidden states from the training data.
    """
    def __init__(self, results_dir):
        super().__init__(results_dir)
        self.name = "PrototypeAE"

    def build_model(self, nb_classes, nb_measures, h_size,
                    h_drop, i_drop, mean, stds, n_prototypes, n_transition_prototypes):
        """Instantiate the PrototypeAutoencoder and move it to GPU when available."""
        self.ae_model = PrototypeAutoencoder(nb_classes=nb_classes, nb_measures=nb_measures, h_size=h_size,
                                             h_drop=h_drop, i_drop=i_drop, n_prototypes=n_prototypes,
                                             n_transition_prototypes=n_transition_prototypes)
        # stash normalization statistics on the model for later use
        setattr(self.ae_model, 'mean', mean)
        setattr(self.ae_model, 'stds', stds)
        device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.ae_model.to(device)

    def reg_loss(self, hidden, div_lambda=0.005, c_lamda=0.01, e_lambda=0.01):
        """
        Compute the three prototype regularization terms.
        Args:
            hidden: hidden states tensor, reshaped internally to (-1, h_size)
            div_lambda: weight of the diversity term
            c_lamda: weight of the clustering term (parameter name keeps the
                     original 'lamda' spelling for caller compatibility)
            e_lambda: weight of the evidence term
        Returns:
            (diversity, clustering_reg, evidence_reg) weighted penalty terms
        """
        diversity = diversity_loss(self.ae_model.prototype.prototypes)
        hidden = hidden.reshape(-1, self.ae_model.h_size)
        clustering_reg, evidence_reg = interpretability_loss(hidden,
                                                             self.ae_model.prototype.prototypes.data)
        #LOGGER.debug("Diversity {} clustering {} evidence {}".format(diversity, clustering_reg, evidence_reg))
        # normalize each term by prototype count (diversity/evidence) or batch size (clustering)
        diversity = div_lambda * diversity / self.ae_model.prototype.n_prototypes
        clustering_reg = c_lamda * clustering_reg / hidden.shape[0]
        evidence_reg = e_lambda * evidence_reg /self.ae_model.prototype.n_prototypes
        # LOGGER.debug("div {} clu {} ev {}".format(diversity, clustering_reg, evidence_reg))
        #evidence_reg = torch.tensor([0.0])
        return diversity, clustering_reg, evidence_reg

    def train_epoch(self, dataset:Random, epoch, w_ent=1, w_reg=0.5, projection_step=10):
        """Run one epoch; every projection_step epochs (skipping epoch 0) snap prototypes to real data points first."""
        # if epoch == 0:
        #     self.project_prototypes(dataset)
        if not epoch ==0 and epoch%projection_step == 0:
            self.project_prototypes(dataset)
        t = super().train_epoch(dataset, epoch, w_ent, w_reg)
        return t

    def fit(self, data: DataSet, epochs: int, seed: int, fold, w_ent=1, w_reg=1, hidden=False):
        """Train via the parent class, then persist prototype hidden states and their source ids per fold."""
        super().fit(data, epochs, seed, w_ent, w_reg, hidden)
        p = pd.DataFrame(self.ae_model.prototype.prototypes.detach().numpy())
        p.to_csv((self.results_dir / "prototype_hiddens_{}.csv".format(fold)), index=False)
        # columns: RID, time point, then the prototype hidden vector
        rids, tps, prototypes = self.get_prototypes(data.train)
        p = pd.DataFrame(np.concatenate((rids,tps,prototypes),axis=1))
        p.to_csv(self.results_dir / "prototype_ids_{}.csv".format(fold), index=False)

    def project_prototypes(self, dataset:DataSet):
        """Replace each learned prototype with its nearest real hidden state."""
        (rids, tps, prototypes) = self.get_prototypes(dataset)
        self.ae_model.prototype.prototypes.data = torch.from_numpy(prototypes)
        # two prototypes snapping to the same subject indicates a duplicate
        if len(np.unique(rids)) != self.ae_model.prototype.n_prototypes:
            LOGGER.debug("There is a duplicate prototype")
            LOGGER.debug("RIDS {}".format(rids.squeeze()))
            LOGGER.debug("TPs {}".format(tps.squeeze()))

    def get_prototypes(self, dataset:Random):
        """
        For every prototype, find the closest real hidden state in the dataset.
        Returns:
            (rids, tps, prototype_hidden): subject ids, time points, and the
            matched hidden vectors, each with n_prototypes rows
        """
        batch_x = self.collect_hidden_states(dataset)
        prototype_list = [None]*self.ae_model.prototype.n_prototypes
        rids = np.zeros((self.ae_model.prototype.n_prototypes, 1))
        tps = np.zeros((self.ae_model.prototype.n_prototypes, 1))
        # Only get time points that exist (not imputed values, check cat mask)
        # NOTE(review): assumes batch_x carries DX_mask/TP/DXCHANGE/RID columns
        # plus the hidden state in the trailing h_size columns — confirm
        # against collect_hidden_states
        mask = (batch_x.DX_mask == 1) & ~(batch_x.TP == 0.)
        trans_mask = mask & (batch_x.DXCHANGE)
        true_pts = batch_x[mask]
        true_pts_trans = batch_x[trans_mask]
        hiddens = true_pts.iloc[:, -(self.ae_model.h_size +1):-1]
        hiddens_trans = true_pts_trans.iloc[:, -(self.ae_model.h_size +1):-1]
        for i, p in enumerate(self.ae_model.prototype.prototypes):
            # the first n_transition_prototypes are matched against transitioning
            # subjects only; the rest against all true points
            h = hiddens_trans if i < self.ae_model.prototype.n_transition_prototypes else hiddens
            pts = true_pts_trans if i < self.ae_model.prototype.n_transition_prototypes else true_pts
            dist_mat = compute_squared_distances(torch.tensor(h.values), p.clone().unsqueeze(0))
            closest_dp = torch.argmin(dist_mat).item()
            rids[i] = pts.RID.iloc[closest_dp]
            tps[i] = pts.TP.iloc[closest_dp]
            prototype_list[i] = [h.iloc[closest_dp,:]]
        #prototype_hidden = (tup for tup in prototype_list)
        prototype_hidden = np.vstack(prototype_list)
        prototype_hidden = np.array(prototype_hidden)
        assert len(rids) == len(tps) == len(prototype_hidden) == self.ae_model.prototype.n_prototypes
        return (rids, tps, prototype_hidden)
class PrototypeTransitionAutoencoderModel(PrototypeAutoencoderModel):
def __init__(self, results_dir):
super().__init__(results_dir)
self.name = "PrototypeTransitionAE"
def build_model(self, nb_classes, nb_measures, h_size,
h_drop, i_drop, mean, stds, n_prototypes, n_transition_prototypes):
self.ae_model = PrototypeTransitionAutoencoder(nb_classes=nb_classes, nb_measures=nb_measures, h_size=h_size,
h_drop=h_drop, i_drop=i_drop, n_prototypes=n_prototypes,
n_transition_prototypes=n_transition_prototypes)
setattr(self.ae_model, 'mean', mean)
setattr(self.ae_model, 'stds', stds)
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
|
import tensorflow as tf
from heads import WriteHead, ReadHead
class NTM:
    """
    Neural Turing Machine wrapper: couples a controller network with
    read/write heads operating on an external memory matrix.
    NOTE(review): written against legacy TensorFlow (<1.0) APIs —
    tf.pack and the tf.concat(axis, values) argument order.
    """
    def __init__(self, scope_, controller_, read_heads_, write_heads_, num_nodes_, size_):
        # scope_ is accepted but not used in this constructor
        self.controller = controller_
        self.read_heads = read_heads_
        self.write_heads = write_heads_
        # output projection from controller state (num_nodes_) to output size_
        self.w_output = tf.get_variable("w_output", [num_nodes_, size_], tf.float32, tf.contrib.layers.xavier_initializer())
        self.b_output = tf.get_variable("b_output", [size_], tf.float32, tf.constant_initializer(1e-6))

    def step(self, input_, state, memory):
        """
        One NTM time step: read from memory, advance the controller,
        then apply erase/write operations from the write heads.
        Returns:
            (next_state, new_memory)
        """
        with tf.name_scope('ntm_step') as scope:
            # read phase: each read head produces an addressing weight over memory
            read_list = []
            for head in self.read_heads:
                read_list.append(head.generate_weight(memory, state))
            read_t = ReadHead.read(tf.concat(0, read_list), memory)
            # controller consumes the external input together with the read vector
            next_state = self.controller.step(input_, read_t)
            # write phase: each write head emits an erase and a write op
            erase_list = []
            write_list = []
            for head in self.write_heads:
                erase, write = head.build_weights(memory, state)
                erase_list.append(erase)
                write_list.append(write)
            erase_ops = tf.pack(erase_list)
            write_ops = tf.pack(write_list)
            # erase is applied before write; semantics are defined in WriteHead
            memory_tilde = WriteHead.apply_erase(memory, erase_ops)
            new_memory = WriteHead.apply_write(memory_tilde, write_ops)
            return next_state, new_memory

    def execute(self, state_list):
        """Project the concatenated controller states to the output space."""
        with tf.name_scope('ntm_output') as scope:
            states = tf.concat(0, state_list)
            return tf.matmul(states, self.w_output) + self.b_output
|
from dolfin import *
from xii import *

# Alternative loading path: mesh + physical volumes from an HDF5 checkpoint.
# mesh_file = 'geometry.h5'
# comm = mpi_comm_world()
# h5 = HDF5File(comm, mesh_file, 'r')
# mesh = Mesh()
# h5.read(mesh, 'mesh', False)
# volumes = MeshFunction('size_t', mesh, mesh.topology().dim())
# h5.read(volumes, 'physical')

# Load the mesh and its facet markers from legacy XML files.
mesh = Mesh('geometry_old.xml')
surfaces = MeshFunction('size_t', mesh, 'geometry_old_facet_region.xml')

# Extract the interfaces tagged 1 and 4 as lower-dimensional embedded meshes.
one = EmbeddedMesh(surfaces, 1)
two = EmbeddedMesh(surfaces, 4)

# Dump both interfaces for inspection in ParaView.
File('iface0_old.pvd') << one
# Bug fix: output filename was misspelled 'ifacae1_old.pvd'.
File('iface1_old.pvd') << two
|
# Generated by Django 3.1.1 on 2021-02-03 22:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer `api` field (default 0) to MeasurementDevice.

    Auto-generated by Django; do not edit the operations by hand.
    """

    dependencies = [
        ("temperature", "0001_squashed_0007_auto_20181125_2259"),
    ]

    operations = [
        migrations.AddField(
            model_name="measurementdevice",
            name="api",
            field=models.IntegerField(default=0, verbose_name="Device API version."),
        ),
    ]
|
__all__ = ["Component", "Composite", "TaskComponent", "TaskComposite"]
from icevision.imports import *
from icevision.core import tasks
class Component:
    """Smallest building block managed by a Composite.

    Components are sorted by their class-level `order`; each one keeps a
    back-reference to the composite that owns it.
    """

    # Sort key used when a composite orders its components.
    order = 0.5

    def __init__(self):
        # Owner composite; filled in by set_composite().
        self.composite = None

    def set_composite(self, composite):
        """Register `composite` as the owner of this component."""
        self.composite = composite
class TaskComponent(Component):
    """Component bound to a specific task (defaults to the common task)."""

    def __init__(self, task=tasks.common):
        # Bug fix: Component.__init__ was never called, so instances lacked
        # the `composite` attribute until set_composite() was invoked.
        super().__init__()
        self.task = task
class TaskComposite:
    """Groups TaskComponents by task, exposing one Composite per task.

    `self.task_composites` maps task name -> Composite. Attribute lookups
    are first delegated to the common task's composite, then resolved as a
    task-composite name.
    """

    # Component classes automatically instantiated for every new composite.
    base_components = set()

    def __init__(self, components: Sequence[TaskComponent]):
        components = set(components)
        components.update(set(comp() for comp in self.base_components))
        self.components = components
        self.set_task_components(self.components)

    def __getattr__(self, name):
        # Guard: during construction `task_composites` may not exist yet;
        # raising here avoids infinite __getattr__ recursion.
        if name == "task_composites":
            raise AttributeError(name)
        # TODO: Possible bug if no task with _default is passed
        try:
            return getattr(self.task_composites[tasks.common.name], name)
        except AttributeError:
            pass
        try:
            return self.task_composites[name]
        except KeyError:
            pass
        raise AttributeError(f"{self.__class__.__name__} has no attribute {name}")

    def add_component(self, component: TaskComponent):
        """Add a component and rebuild the per-task composites."""
        self.components.add(component)
        self.set_task_components(self.components)

    def remove_component_by_type(self, component_type: type):
        """Remove the first component that is an instance of `component_type`.

        Bug fix: the previous version removed whatever component the loop
        last visited even when no component matched (and raised NameError on
        an empty component set). A missing type is now a no-op.
        """
        for component in self.components:
            if isinstance(component, component_type):
                self.components.remove(component)
                self.set_task_components(self.components)
                return

    def set_task_components(self, components: Sequence[TaskComponent]):
        """Rebuild `self.task_composites` from `components`, grouped by task."""
        task_components = defaultdict(list)
        # example: task_components['detect'] = (LabelsComponent, BBoxesComponent, ...)
        for component in components:
            task_components[component.task].append(component)
        self.task_composites = OrderedDict()
        # Iterate tasks sorted by their `order` so composites are built in a
        # deterministic, priority-respecting order. (Renamed the inner loop
        # variable so it no longer shadows the `components` parameter.)
        for task, task_comps in sorted(
            task_components.items(), key=lambda o: o[0].order
        ):
            self.task_composites[task.name] = composite = Composite()
            composite.add_components(task_comps)
            if task != tasks.common:
                composite.set_parent(self)

    # TODO: rename reduce_on_all_tasks_components
    def reduce_on_components(
        self,
        fn_name: str,
        reduction: Optional[str] = None,
        **fn_kwargs,
    ) -> Dict[str, Any]:
        """Apply `reduce_on_components` to every task composite.

        Returns a dict mapping task name -> reduced result.
        """
        results = {}
        for task, composite in self.task_composites.items():
            result = composite.reduce_on_components(fn_name, reduction, **fn_kwargs)
            results[task] = result
        return results

    def reduce_on_task_components(
        self,
        fn_name: str,
        task_name: str,
        reduction: Optional[str] = None,
        **fn_kwargs,
    ) -> Any:
        """Apply `reduce_on_components` to the composite of `task_name` only."""
        composite = self.task_composites[task_name]
        return composite.reduce_on_components(fn_name, reduction, **fn_kwargs)
class Composite:
    """A sorted collection of Components with attribute delegation.

    Attribute lookups fall through, in order: the composite itself, each of
    its components (sorted by `order`), then an optional parent composite
    registered via `set_parent`.
    """

    # Component classes automatically instantiated for every new composite.
    base_components = set()

    def __init__(self, components: Optional[Sequence[Component]] = None):
        self._parent = None
        pool = set() if components is None else set(components)
        pool.update(comp() for comp in self.base_components)
        self.set_components(pool)

    def __getattr__(self, name):
        # avoid recursion https://nedbatchelder.com/blog/201010/surprising_getattr_recursion.html
        if name in ["components", "_parent"]:
            raise AttributeError(name)
        # First try each component, in order.
        for comp in self.components:
            try:
                return getattr(comp, name)
            except AttributeError:
                pass
        # Then fall back to the parent composite, if any.
        try:
            return getattr(self._parent, name)
        except AttributeError:
            pass
        raise AttributeError(f"{self.__class__.__name__} has no attribute {name}")

    def reduce_on_components(
        self, fn_name: str, reduction: Optional[str] = None, **fn_kwargs
    ) -> Any:
        """Call `fn_name(**fn_kwargs)` on every component.

        With `reduction`, folds the results by calling that method on the
        first result with each subsequent one; otherwise returns the list.
        """
        results = [getattr(comp, fn_name)(**fn_kwargs) for comp in self.components]
        if reduction is None or not results:
            return results
        head, *tail = results
        for item in tail:
            getattr(head, reduction)(item)
        return head

    def get_component_by_type(self, component_type) -> Union[Component, None]:
        """Return the first component that is an instance of `component_type`."""
        matches = (c for c in self.components if isinstance(c, component_type))
        return next(matches, None)

    def add_component(self, component):
        self.add_components([component])

    def add_components(self, components):
        self.set_components(set(components) | set(self.components))

    def set_components(self, components):
        """Sort components by `order`, cache their classes, back-link them."""
        self.components = sorted(components, key=lambda c: c.order)
        self.components_cls = [type(c) for c in self.components]
        for comp in self.components:
            comp.set_composite(self)

    def set_parent(self, parent):
        self._parent = parent
|
#=============================
# JointFontGAN
# Modified from https://github.com/azadis/MC-GAN
# By Yankun Xi
#=============================
import numpy as np
import os
import ntpath
import time
from . import XIutil
from . import XIhtml
import shutil
from scipy import misc
from skimage import data, img_as_float
import skimage.measure
import skvideo.io
class Visualizer():
    """Training/testing visualizer for JointFontGAN.

    Displays intermediate results via visdom, saves result images into a
    web/HTML directory, computes simple image metrics (L1/SSIM/MSE), and can
    assemble saved test images into videos with ffmpeg.
    """
    def __init__(self, opt):
        # opt: parsed experiment options (argparse-style namespace).
        self.opt = opt
        self.display_id = opt.display_id  # visdom base window id; <= 0 disables browser display
        self.stack_result = opt.stack_result  # stack all result images into one canvas
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        # Resolve the experiment directory, optionally under an auxiliary root.
        if self.opt.use_auxiliary:
            expr_dir = os.path.join(self.opt.auxiliary_root,
                                    self.opt.project_relative,
                                    self.opt.checkpoints_dir,
                                    self.opt.experiment_dir)
        else:
            expr_dir = os.path.join(".", self.opt.checkpoints_dir,
                                    self.opt.experiment_dir)
        # NOTE(review): dirname() yields the *parent* of expr_dir — confirm
        # this is the intended display name.
        self.name = os.path.dirname(expr_dir)
        self.rgb = opt.rgb or opt.rgb_out
        # Directories containing saved images, later consumed by save_video().
        self.video_dir = []
        if self.display_id > 0:
            import visdom
            self.vis = visdom.Visdom()
        if self.use_html:
            self.web_dir = os.path.join(expr_dir, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            if self.opt.use_auxiliary:
                print('create auxiliary web directory %s...' % self.web_dir)
            else:
                print('create web directory %s...' % self.web_dir)
            XIutil.mkdirs([self.web_dir, self.img_dir])
    def eval_current_result(self, visuals, op='fake_B',
                            gt='real_B'):
        """Return (l1, ssim, mse) between the `op` and `gt` entries of `visuals`.

        Both images are copied, promoted to a common float type, and scaled
        by the ground truth's max before L1/MSE are computed.
        """
        op = visuals[op].copy()
        gt = visuals[gt].copy()
        # print(op.max(), gt.max())
        ssim_score = skimage.measure.compare_ssim(op, gt,
                                                  data_range=gt.max() - gt.min())
        float_type = np.result_type(op.dtype, gt.dtype, np.float32)
        op = np.asarray(op, dtype=float_type)
        gt = np.asarray(gt, dtype=float_type)
        op /= gt.max()
        gt /= gt.max()
        # print(op.max(), gt.max())
        # print(op)
        l1_score = np.mean(np.absolute(op - gt))
        mse_score = np.mean((op - gt)**2)
        return l1_score, ssim_score, mse_score
    # |visuals|: dictionary of images to display or save
    # TO BE FINISHED
    def display_current_results(self, visuals, epoch_str, last=False):
        """Show `visuals` in visdom and/or save them to the HTML image dir.

        Labels starting with 'E' are treated as "extended" images and stacked
        separately when stack_result is on. NOTE(review): `last` is unused in
        the visible code.
        """
        if self.stack_result:
            # Concatenate all images (row-wise lists) into two stacks:
            # extended ('E*' labels) and regular.
            image_numpy_ = []
            image_numpy_e = []
            for label, image_numpy in visuals.items():
                if label[0] == 'E':
                    image_numpy_e = image_numpy_e + list(
                        image_numpy)
                else:
                    image_numpy_ = image_numpy_ + list(
                        image_numpy)
            image_numpy_e = np.asanyarray(image_numpy_e)
            image_numpy_ = np.asanyarray(image_numpy_)
        if self.display_id > 0:  # show images in the browser
            idx = 1
            if self.stack_result:
                if image_numpy_e.shape[0]:
                    # visdom expects CHW, hence the transpose from HWC.
                    self.vis.image(image_numpy_e.transpose([2, 0,
                                                            1]),
                                   opts=dict(title='ExtendedStack'),
                                   win=self.display_id + idx)
                    idx += 1
                if image_numpy_.shape[0]:
                    self.vis.image(image_numpy_.transpose([2, 0, 1]),
                                   opts=dict(title='Stack'),
                                   win=self.display_id + idx)
                    idx += 1
            else:
                for label, image_numpy in visuals.items():
                    # image_numpy = np.flipud(image_numpy)
                    self.vis.image(image_numpy.transpose([2, 0, 1]),
                                   opts=dict(title=label),
                                   win=self.display_id + idx)
                    idx += 1
        if self.use_html:  # save images to a html file
            if self.stack_result:
                if image_numpy_e.shape[0]:
                    # print("numpy_e.shape is %s" % image_numpy_e.shape)
                    # Save both an epoch-tagged copy and a 'latest' copy.
                    img_path = os.path.join(self.img_dir,
                                            'epoch%s_%s.png' % (epoch_str, 'ExtendedStack'))
                    XIutil.save_image(image_numpy_e, img_path,
                                      self.rgb)
                    img_path_ = os.path.join(self.img_dir,
                                             'latest_ExtendedStack.png')
                    XIutil.save_image(image_numpy_e, img_path_,
                                      self.rgb)
                if image_numpy_.shape[0]:
                    img_path = os.path.join(self.img_dir,
                                            'epoch%s_%s.png' % (epoch_str, 'Stack'))
                    XIutil.save_image(image_numpy_, img_path,
                                      self.rgb)
                    img_path_ = os.path.join(self.img_dir,
                                             'latest_Stack.png')
                    XIutil.save_image(image_numpy_, img_path_,
                                      self.rgb)
            else:
                for label, image_numpy in visuals.items():
                    img_path = os.path.join(self.img_dir,
                                            'epoch%s_%s.png' % (epoch_str, label))
                    XIutil.save_image(image_numpy, img_path, self.rgb)
                    img_path_ = os.path.join(self.img_dir,
                                             'latest_%s.png' % label)
                    XIutil.save_image(image_numpy, img_path_,
                                      self.rgb)
            # # update website
            # webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
            # for n in range(epoch, 0, -1):
            #     webpage.add_header('epoch [%d]' % n)
            #     ims = []
            #     txts = []
            #     links = []
            #
            #     for label, image_numpy in visuals.items():
            #         img_path = 'epoch%.3d_%s.png' % (n, label)
            #         ims.append(img_path)
            #         txts.append(label)
            #         links.append(img_path)
            #     webpage.add_images(ims, txts, links, width=self.win_size)
            # webpage.save()
    # errors: dictionary of error labels and values
    def plot_current_errors(self, epoch, counter_ratio, opt, errors):
        """Append the current errors and redraw the visdom loss curves."""
        if not hasattr(self, 'plot_data'):
            # Lazily initialise the accumulated plot data on first call.
            self.plot_data = {'X':[],'Y':[], 'legend':list(errors.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
        self.vis.line(
            X=np.stack([np.array(self.plot_data['X'])]*len(self.plot_data['legend']),1),
            Y=np.array(self.plot_data['Y']),
            opts={
                'title': self.name + ' loss over time',
                'legend': self.plot_data['legend'],
                'xlabel': 'epoch',
                'ylabel': 'loss'},
            win=self.display_id)
    # errors: same format as |errors| of plotCurrentErrors
    def print_current_errors(self, epoch, i, errors, t):
        """Print one formatted line of training errors to stdout."""
        message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
        for k, v in errors.items():
            message += '%s: %.3f ' % (k, v)
        print(message)
    # save image to the disk
    def save_images(self, webpage, visuals, image_path, tag="",
                    test_gt_dir=''):
        """Save `visuals` into the webpage's image dir and register them.

        When stack_result is on, saves one (optionally extended) stacked
        image; otherwise saves each labelled image, optionally routing
        ground-truth labels into sub-folders of `test_gt_dir`.
        """
        image_dir = webpage.get_image_dir()
        print("save to:", image_dir)
        short_path = ntpath.basename(image_path[0])
        name = os.path.splitext(short_path)[0] + tag
        webpage.add_header(name)
        ims = []
        txts = []
        links = []
        if self.stack_result:
            image_numpy_ = []
            image_numpy_e = []
            for label, image_numpy in visuals.items():
                if label[0] == 'E':
                    image_numpy_e = image_numpy_e + list(
                        image_numpy)
                else:
                    image_numpy_ = image_numpy_ + list(
                        image_numpy)
            image_numpy_e = np.asanyarray(image_numpy_e)
            image_numpy_ = np.asanyarray(image_numpy_)
            if sum(image_numpy_e.shape):
                image_name = '%s_%s.png' % (name, 'ExtendedStack')
                save_path = os.path.join(image_dir, image_name)
                XIutil.save_image(image_numpy_e, save_path, self.rgb)
                ims.append(image_name)
                # NOTE(review): `label` here is whatever the loop above saw
                # last — looks accidental; confirm intended caption.
                txts.append(label)
                links.append(image_name)
            if sum(image_numpy_.shape):
                image_name = '%s_%s.png' % (name, 'Stack')
                save_path = os.path.join(image_dir, image_name)
                XIutil.save_image(image_numpy_, save_path, self.rgb)
                ims.append(image_name)
                txts.append(label)
                links.append(image_name)
        else:
            for label, image_numpy in visuals.items():
                image_name = '%s_%s.png' % (name, label)
                save_path = os.path.join(image_dir, image_name)
                if test_gt_dir:
                    # if label == 'redbox':
                    #     XIutil.mkdirs(test_gt_dir + '/redbox')
                    #     save_path = os.path.join(test_gt_dir,
                    #                              'redbox', image_name)
                    # `temp` flags whether the image was already saved in the
                    # else-branch below ('+') or still needs saving.
                    temp = test_gt_dir
                    if label == 'real_A' or label == 'real_B':
                        XIutil.mkdirs(test_gt_dir + '/gt')
                        save_path = os.path.join(test_gt_dir, 'gt',
                                                 image_name)
                    elif label == 'Ereal_A' or label == 'Ereal_B':
                        XIutil.mkdirs(test_gt_dir + '/Egt')
                        save_path = os.path.join(test_gt_dir, 'Egt',
                                                 image_name)
                    elif label == 'real_Ask1' or label == 'real_Bsk1':
                        XIutil.mkdirs(test_gt_dir + '/gt_sk')
                        save_path = os.path.join(test_gt_dir, 'gt_sk',
                                                 image_name)
                    elif label == 'Ereal_Ask1' or label == 'Ereal_Bsk1':
                        XIutil.mkdirs(test_gt_dir + '/Egt_sk')
                        save_path = os.path.join(test_gt_dir,
                                                 'Egt_sk', image_name)
                    else:
                        XIutil.save_image(image_numpy, save_path,
                                          self.rgb)
                        temp = '+'
                    if not temp == '+':
                        XIutil.save_image(image_numpy, save_path,
                                          self.rgb)
                else:
                    XIutil.save_image(image_numpy, save_path,
                                      self.rgb)
                ims.append(image_name)
                txts.append(label)
                links.append(image_name)
        webpage.add_images(ims, txts, links, width=self.win_size)
        # Remember this directory so save_video() can find the frames later.
        self.video_dir.extend([image_dir])
    def copy_images(self, image_paths, image_path, dest_path_A, dest_path_B, target_path, observed):
        """Blank the `observed` tiles of a strip image, save copies to
        dest_path_A, and copy matching target images into dest_path_B."""
        name = os.path.splitext(image_path)[0]
        saved_path = os.path.join(image_paths, image_path)
        im_ = misc.imread(saved_path)
        # NOTE(review): `n_ch` is computed but unused in the visible code.
        n_ch = im_.shape[1]/im_.shape[0]
        # The strip is assumed to be square tiles laid out horizontally.
        target_size = im_.shape[0]
        for obs in observed:
            # White out each observed tile.
            im_[:,target_size*obs:(obs+1)*target_size,:] = 255
        for obs in observed:
            image_name = '%s_%s.png' % (name, obs)
            im_path = os.path.join(dest_path_A, image_name)
            misc.imsave(im_path, im_)
        target_ims = os.listdir(target_path)
        for im in target_ims:
            obs = im.split('.png')[0].split('_')[-1]
            image_name = '%s_%s.png' % (name, obs)
            shutil.copyfile(os.path.join(target_path,im), os.path.join(dest_path_B,image_name))
    def save_video(self,video_path):
        """Collect saved fake_B/real_A frames and encode them with ffmpeg.

        NOTE(review): relies on `os.system` with interpolated paths (shell
        injection risk if paths are untrusted) and on a fixed ~/tmp_ffmpeg
        scratch dir. `outputdata`, `video`, `num_imgs`, `im_size` are unused
        in the visible code.
        """
        outputdata = []
        os.system("mkdir ~/tmp_ffmpeg")
        os.system("mkdir ~/tmp_ffmpeg/fake_B")
        os.system("mkdir ~/tmp_ffmpeg/real_A")
        os.system("rm ~/tmp_ffmpeg/*")
        video =None
        inpout=['fake_B','real_A']
        for end_ in inpout:
            i=1
            for img_dir in self.video_dir:
                imgs = sorted(os.listdir(img_dir))
                num_imgs =len(imgs)/3
                for im in imgs:
                    if im.endswith('_%s.png'%end_):
                        img = misc.imread(img_dir+'/'+im)
                        im_size = img.shape[0]
                        epoch = im.split('_')[0]
                        print(epoch, i)
                        # shutil.copyfile(os.path.join(img_dir,im),"~/tmp_ffmpeg/im%04d.png"%i)
                        os.system("cp %s ~/tmp_ffmpeg/%s/im%04d.png 2>&1|tee ~/tmp_ffmpeg/log.txt"%(os.path.join(img_dir.replace("&","\&"),im),end_,i))
                        i+=1
        os.system("ffmpeg -r 6 -f image2 -s %dx%d -i ~/tmp_ffmpeg/fake_B/im%%04d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p %s/test_fake_B.mp4 2>&1|tee ~/tmp_ffmpeg/log2_fake_B.txt"%(img.shape[0],img.shape[1],video_path.replace("&","\&")))
        os.system("ffmpeg -r 6 -f image2 -s %dx%d -i ~/tmp_ffmpeg/real_A/im%%04d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p %s/test_real_A.mp4 2>&1|tee ~/tmp_ffmpeg/log2_real_A.txt"%(img.shape[0],img.shape[1],video_path.replace("&","\&")))
|
"""Testing for Symbolic Fourier Approximation."""
# Author: Johann Faouzi <johann.faouzi@gmail.com>
# License: BSD-3-Clause
import numpy as np
import pytest
from sklearn.feature_selection import f_classif
from pyts.approximation import MultipleCoefficientBinning
from pyts.approximation import SymbolicFourierApproximation
# Reproducible toy dataset shared by all tests in this module.
rng = np.random.RandomState(42)
n_samples, n_timestamps = 5, 8
X = rng.randn(n_samples, n_timestamps)  # 5 random time series of length 8
y = rng.randint(2, size=n_samples)  # binary labels (needed for the ANOVA path)
def _compute_expected_results(X, y=None, n_coefs=None, n_bins=4,
                              strategy='quantile', drop_sum=False, anova=False,
                              norm_mean=False, norm_std=False, alphabet=None):
    """Compute the expected results.

    Independently re-implements the SFA pipeline: optional per-sample
    normalization, real-FFT coefficient extraction, optional ANOVA- or
    truncation-based coefficient selection, then coefficient binning.
    """
    # Bug fix: np.asarray aliases the caller's array, so the in-place
    # normalizations below mutated the module-level X across test cases.
    # Take an explicit float copy instead.
    X = np.array(X, dtype=float)
    # Derive the sample count from the input instead of relying on the
    # module-level global, so the helper works for any X.
    n_samples = X.shape[0]
    if norm_mean:
        X -= X.mean(axis=1)[:, None]
    if norm_std:
        X /= X.std(axis=1)[:, None]
    X_fft = np.fft.rfft(X)
    X_fft = np.vstack([np.real(X_fft), np.imag(X_fft)])
    # Interleave real/imaginary parts per sample (Fortran order).
    X_fft = X_fft.reshape(n_samples, -1, order='F')
    if drop_sum:
        # Drop the zero-frequency (sum) coefficient and the last imaginary part.
        X_fft = X_fft[:, 2:-1]
    else:
        X_fft = np.hstack([X_fft[:, :1], X_fft[:, 2:-1]])
    if n_coefs is not None:
        if anova:
            # Keep the n_coefs coefficients with the smallest ANOVA p-values.
            _, p = f_classif(X_fft, y)
            support = np.argsort(p)[:n_coefs]
            X_fft = X_fft[:, support]
        else:
            X_fft = X_fft[:, :n_coefs]
    mcb = MultipleCoefficientBinning(n_bins=n_bins, strategy=strategy,
                                     alphabet=alphabet)
    arr_desired = mcb.fit_transform(X_fft)
    return arr_desired
@pytest.mark.parametrize(
    'params',
    [({}),
     ({'n_coefs': 3}),
     ({'n_bins': 2}),
     ({'strategy': 'uniform'}),
     ({'drop_sum': True}),
     ({'anova': True}),
     ({'norm_mean': True, 'drop_sum': True}),
     ({'norm_std': True}),
     ({'norm_mean': True, 'norm_std': True, 'drop_sum': True}),
     ({'n_coefs': 2, 'drop_sum': True, 'anova': True})]
)
def test_actual_results(params):
    """Test that the actual results are the expected ones."""
    transformer = SymbolicFourierApproximation(**params)
    arr_actual = transformer.fit_transform(X, y)
    arr_desired = _compute_expected_results(X, y, **params)
    np.testing.assert_array_equal(arr_actual, arr_desired)
@pytest.mark.parametrize(
    'params',
    [({}),
     ({'n_coefs': 3}),
     ({'n_bins': 2}),
     ({'strategy': 'uniform'}),
     ({'drop_sum': True}),
     ({'anova': True}),
     ({'norm_mean': True, 'drop_sum': True}),
     ({'norm_std': True}),
     ({'norm_mean': True, 'norm_std': True, 'drop_sum': True}),
     ({'n_coefs': 2, 'drop_sum': True, 'anova': True})]
)
def test_fit_transform(params):
    """Test that fit and transform yield the same results as fit_transform."""
    separate = SymbolicFourierApproximation(**params).fit(X, y).transform(X)
    combined = SymbolicFourierApproximation(**params).fit_transform(X, y)
    np.testing.assert_array_equal(separate, combined)
|
import os
import pickle
import argparse
from tqdm import trange
import torch
import torch.nn as nn
import pyro
import pyro.distributions as dist
import mlflow
from neural.baselines import BatchDesignBaseline
from neural.critics import CriticBA
from neural.aggregators import ConcatImplicitDAD
from neural.modules import Mlp
from experiment_tools.pyro_tools import auto_seed
from oed.design import OED
from estimators.bb_mi import BarberAgakov
from pharmacokinetic import Pharmacokinetic
def optimise_design_and_critic(
    posterior_loc,
    posterior_scale,
    experiment_number,
    device,
    batch_size,
    num_steps,
    lr,
    annealing_scheme,
):
    """Jointly optimise a one-experiment design and a Barber-Agakov critic.

    The current posterior (posterior_loc/posterior_scale) becomes the model's
    prior; a single-design baseline and a BA critic are trained by maximising
    the BA lower bound on mutual information.

    Returns (pharmaco, critic_net). `experiment_number` is unused here but
    kept for interface compatibility with the caller.
    """
    design_init = torch.distributions.Uniform(-5.0, 5.0)
    # Cleanup: these were previously defined twice with identical values.
    n = 1  # output dim / number of samples per design
    latent_dim = 3  # theta dimension
    design_dim = (n, 1)  # design is t (time)
    design_net = BatchDesignBaseline(
        T=1, design_dim=design_dim, design_init=design_init
    ).to(device)
    new_mean = posterior_loc
    new_covmat = torch.diag(posterior_scale.reshape(-1) ** 2)
    pharmaco = Pharmacokinetic(
        design_net=design_net,
        # Normal family -- new prior is still MVN but with different params
        theta_loc=new_mean,
        theta_covmat=new_covmat,
        T=1,
    )
    ### Set up model networks ###
    observation_dim = n
    hidden_dim = 512
    encoding_dim = 8
    hist_encoder_HD = [64, hidden_dim]
    hist_enc_critic_head_HD = [
        hidden_dim // 2,
        hidden_dim,
    ]
    ###### CRITIC NETWORKS #######
    ## history encoder
    critic_pre_pool_history_encoder = Mlp(
        input_dim=[*design_dim, observation_dim],
        hidden_dim=hist_encoder_HD,
        output_dim=encoding_dim,
    )
    critic_history_enc_head = Mlp(
        input_dim=encoding_dim,
        hidden_dim=hist_enc_critic_head_HD,
        output_dim=encoding_dim,
    )
    critic_history_encoder = ConcatImplicitDAD(
        encoder_network=critic_pre_pool_history_encoder,
        emission_network=critic_history_enc_head,
        T=1,
        empty_value=torch.ones(design_dim),
    )
    critic_net = CriticBA(
        history_encoder_network=critic_history_encoder, latent_dim=latent_dim
    ).to(device)
    ### Set-up loss ###
    mi_loss_instance = BarberAgakov(
        model=pharmaco.model,
        critic=critic_net,
        batch_size=batch_size,
        prior_entropy=pharmaco.log_theta_prior.entropy(),
    )
    ### Set-up optimiser ###
    optimizer = torch.optim.Adam
    # Annealed LR. Set gamma=1 if no annealing required
    annealing_freq, patience, factor = annealing_scheme
    scheduler = pyro.optim.ReduceLROnPlateau(
        {
            "optimizer": optimizer,
            "optim_args": {"lr": lr},
            "factor": factor,
            "patience": patience,
            "verbose": False,
        }
    )
    oed = OED(optim=scheduler, loss=mi_loss_instance)
    ### Optimise ###
    num_steps_range = trange(0, num_steps, desc="Loss: 0.000 ")
    for i in num_steps_range:
        loss = oed.step()
        # Refresh the progress bar and evaluate the loss every 100 steps.
        if i % 100 == 0:
            num_steps_range.set_description("Loss: {:.3f} ".format(loss))
            loss_eval = oed.evaluate_loss()
        # Step the plateau scheduler on the latest evaluated loss.
        if i % annealing_freq == 0:
            scheduler.step(loss_eval)
    return pharmaco, critic_net
def main_loop(
    run, mlflow_run_id, device, T, batch_size, num_steps, lr, annealing_scheme,
):
    """Run one sequential-design history of T experiments.

    Samples a ground-truth theta from the prior, then alternates between
    (re)optimising a design + critic against the current posterior and
    simulating an observation at the chosen design, updating the variational
    posterior after each step. Returns a dict with designs, observations and
    the true theta. NOTE(review): `mlflow_run_id` is unused in the visible
    code.
    """
    pyro.clear_param_store()
    latent_dim = 3
    # Log-normal-style prior: loc is the log of the nominal parameter values.
    theta_loc = theta_prior_loc = torch.tensor([1, 0.1, 20], device=device).log()
    theta_covmat = torch.eye(latent_dim, device=device) * 0.05
    prior = torch.distributions.MultivariateNormal(theta_loc, theta_covmat)
    # Sampling true theta from prior
    true_theta = prior.sample(torch.Size([1]))
    designs_so_far = []
    observations_so_far = []
    # Set posterior equal to the prior
    posterior_loc = theta_loc
    posterior_scale = torch.sqrt(theta_covmat.diag())
    for t in range(0, T):
        print(f"Step {t + 1}/{T} of Run {run + 1}")
        pyro.clear_param_store()
        # Re-optimise the design and critic for the current posterior.
        pharmaco, critic = optimise_design_and_critic(
            posterior_loc=posterior_loc,
            posterior_scale=posterior_scale,
            experiment_number=t,
            device=device,
            batch_size=batch_size,
            num_steps=num_steps,
            lr=lr,
            annealing_scheme=annealing_scheme,
        )
        # Simulate running the chosen design on the true parameters.
        design, observation = pharmaco.forward(log_theta=true_theta)
        # The critic's variational head yields the updated posterior params.
        posterior_loc, posterior_scale = critic.get_variational_params(
            *zip(design, observation)
        )
        posterior_loc, posterior_scale = (
            posterior_loc.detach(),
            posterior_scale.detach(),
        )
        designs_so_far.append(design[0])
        observations_so_far.append(observation[0])
    print(f"Fitted posterior: mean = {posterior_loc}, sd = {posterior_scale}")
    print("True theta = ", true_theta.reshape(-1))
    # Package the full history (1-indexed design/observation keys).
    data_dict = {}
    for i, xi in enumerate(designs_so_far):
        data_dict[f"xi{i + 1}"] = xi.cpu()
    for i, y in enumerate(observations_so_far):
        data_dict[f"y{i + 1}"] = y.cpu()
    data_dict["theta"] = true_theta.cpu()
    return data_dict
def main(
    seed, mlflow_experiment_name, num_histories, device, T, batch_size, num_steps, lr,
):
    """Run `num_histories` independent design histories and log to mlflow.

    Logs hyperparameters, runs main_loop repeatedly, pickles the collected
    results as an mlflow artifact, and prints the artifact path to use for
    later evaluation.
    """
    pyro.clear_param_store()
    # auto_seed resolves seed == -1 to a fresh random seed.
    seed = auto_seed(seed)
    pyro.set_rng_seed(seed)
    mlflow.set_experiment(mlflow_experiment_name)
    # Log everything
    mlflow.log_param("seed", seed)
    mlflow.log_param("num_steps", num_steps)
    mlflow.log_param("lr", lr)
    mlflow.log_param("num_histories", num_histories)
    mlflow.log_param("num_experiments", T)
    # [annealing_freq, patience, factor] for the LR plateau scheduler.
    annealing_scheme = [100, 5, 0.8]
    mlflow.log_param("annealing_scheme", str(annealing_scheme))
    results_vi = {
        "loop": [],
        "seed": seed,
        "meta": {"num_histories": num_histories, "model": "pharmacokinetic"},
    }
    for i in range(num_histories):
        results = main_loop(
            run=i,
            mlflow_run_id=mlflow.active_run().info.run_id,
            device=device,
            T=T,
            batch_size=batch_size,
            num_steps=num_steps,
            lr=lr,
            annealing_scheme=annealing_scheme,
        )
        results_vi["loop"].append(results)
    # Log the results dict as an artifact
    if not os.path.exists("./mlflow_outputs"):
        os.makedirs("./mlflow_outputs")
    with open("./mlflow_outputs/results_pharmaco_vi.pickle", "wb") as f:
        pickle.dump(results_vi, f)
    mlflow.log_artifact("mlflow_outputs/results_pharmaco_vi.pickle")
    print("Done.")
    ml_info = mlflow.active_run().info
    path_to_artifact = "mlruns/{}/{}/artifacts/results_pharmaco_vi.pickle".format(
        ml_info.experiment_id, ml_info.run_id
    )
    print("Path to artifact - use this when evaluating:\n", path_to_artifact)
# --------------------------------------------------------------------------
if __name__ == "__main__":
    # Command-line entry point: parse hyperparameters and run the baseline.
    parser = argparse.ArgumentParser(description="VI baseline: Pharmacokinetic model")
    parser.add_argument("--seed", default=-1, type=int)  # -1 => random seed
    parser.add_argument(
        "--num-histories", help="Number of histories/rollouts", default=128, type=int
    )
    parser.add_argument("--num-experiments", default=10, type=int)
    parser.add_argument("--batch-size", default=1024, type=int)
    parser.add_argument("--device", default="cuda", type=str)
    parser.add_argument(
        "--mlflow-experiment-name", default="pharmaco_variational", type=str
    )
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--num-steps", default=5000, type=int)
    args = parser.parse_args()
    main(
        seed=args.seed,
        num_histories=args.num_histories,
        device=args.device,
        T=args.num_experiments,
        lr=args.lr,
        batch_size=args.batch_size,
        num_steps=args.num_steps,
        mlflow_experiment_name=args.mlflow_experiment_name,
    )
|
#Utilize sympy for creating
#the matrices used within the
#chemical equation balancer
import sympy
#
#use the string module
#in order to get a list of
#all ascii_letters for the
#chemical equation balancer
import string
#Purpose: The purpose of the balance.py file is to provide other developers functionality that will
# balance a given chemical equation.
#Functionality: In order to successfully balance a chemical equation, developers must call the function
#               getEquation(); upon calling it, the program will prompt the user for a chemical equation.
#               One big rule that must always be followed is that no spaces can be in between elements that form
# a molecule, but spaces can be in between the symbols that join the reactants and products. For example
# H2+O2->H2O can also be represented with spaces as: H2 + O2 -> H2O, but not as H2 + O 2 -> H2 O.
# After the equation is provided as input, it is handed to the balanceEquation(equation) function
# and thoroughly analyzed for errors before being broken down into a system of linear equations
# that when solved, will provide the coefficients needed to balance the equation.
#notAppended() serves the same purpose
#as Notcontains2(), only it doesn't accept
#parameters and is ONLY used within the
#chemical equation balancing functions.
def notAppended(arrName, sub):
    """Return 1 if `sub` is not already in `arrName`, else 0.

    Kept as 0/1 ints (not bools) to match the original call sites.
    """
    # Idiomatic membership test instead of a manual scan.
    return 0 if sub in arrName else 1
#alphaInLine() checks a string
#for a presence of any letters.
def alphaInLine(line):
    """Return 1 if `line` contains at least one alphabetic character, else 0.

    Kept as 0/1 ints (not bools) to match the original call sites.
    """
    # any() short-circuits on the first letter found.
    return 1 if any(ch.isalpha() for ch in line) else 0
#The Notcontains2() function is used
#within the chemicalEquation function to check
#if a selected element symbol is within the arrName2
#history list.
def Notcontains2(component, data):
    """Return 0 if `data` contains `component`, else 1.

    Kept as 0/1 ints (not bools) to match the original call sites.
    """
    # If data does not contain component, return 1 to proceed with
    # row incrementing (membership test replaces the manual scan).
    return 0 if component in data else 1
#The EquationWrong() function is responsible for
#returning an error code should the chemical equation
#within the equation text box have the wrong format.
def EquationWrong(equation):
    """Validate the format of a chemical-equation string.

    Returns a tuple ``(code, arrName)``: ``code`` is 0 on success and a
    non-zero error code otherwise; ``arrName`` is the list of unique element
    symbols found (``None`` on any error).

    Error codes produced here:
      -2: missing '+' or '->'
      -3: more than one '->'
      -1: malformed placement of coefficients or '+', '-', '>' characters
       1: equation ends with '->'
       3: an element symbol starts with a lower-case letter
      12: reactant and product element sets differ
    Non-zero codes from BracketParentheseMatch are passed through as-is.
    """
    #initialize sub and arrName variables
    sub = ""
    arrName = []
    #get contents of text box
    chem = equation
    #Make sure + and -> components have a presence.
    #If not, immediately return an error code.
    if(not('+' in chem and '->' in chem)):
        return -2,None
    #Make sure there isn't more than one reaction symbol: ->
    #Perform a splice right after the first -> and see if another -> is present
    #in the second substring. If so, immediately return an error code.
    if(chem.index('->') + 2 < len(chem)):
        splice = chem[chem.index('->') + 2:len(chem)]
        if('->' in splice):
            return -3,None
    #Check for appropriate pairing/contents of parentheses if present. If
    #the contents or pairing is flawed, return an error code.
    code = BracketParentheseMatch(chem,'(',')')
    if(code != 0):
        return code,None
    #Check for appropriate pairing/contents/position of brackets if present.
    #If the pairing, contents, or positioning is wrong, return an error code.
    code = BracketParentheseMatch(chem,'[',']')
    if(code != 0):
        return code,None
    #bare bones equation must contain no coefficients, lone partial segments, or sequential +/-/> characters
    for BonJovie in range(len(chem)):
        #The very first element cannot be a digit, '-' or '>' character, and cannot be a '+' character
        if((chem[BonJovie].isdigit() or chem[BonJovie] == '-' or chem[BonJovie] == '>' or chem[BonJovie] == '+') and BonJovie == 0):
            return -1,None
        #The equation cannot end with a '+'
        elif(chem[BonJovie] == '+' and (BonJovie + 1 == len(chem))):
            return -1,None
        #The equation cannot have an inappropriate placement of +,-,> characters or inappropriate placement of coefficients
        elif(chem[BonJovie] == '+' and (chem[BonJovie + 1] == '+' or chem[BonJovie + 1].isdigit() or chem[BonJovie + 1] == '-' or chem[BonJovie + 1] == '>')):
            if(not('[' in chem and chem[BonJovie - 1] == '[')):
                return -1,None
        #The equation cannot end with a '-'
        elif(chem[BonJovie] == '-' and (BonJovie + 1 == len(chem))):
            return -1,None
        #The equation cannot have an inappropriate placement of +,-,> characters or inappropriate placement of coefficients
        elif(chem[BonJovie] == '-' and (chem[BonJovie + 1] == '+' or chem[BonJovie + 1].isdigit() or chem[BonJovie + 1] == '-' or chem[BonJovie + 1].isalpha())):
            if(not('[' in chem and chem[BonJovie - 1] == '[')):
                return -1,None
        #The equation cannot end with a > character
        elif(chem[BonJovie] == '>' and (BonJovie + 1 == len(chem))):
            #If the previous character is a '-' character
            #return the '1' error code
            if(chem[BonJovie - 1] == '-'):
                return 1,None
            #return the -1 error code
            return -1,None
        #If the current character is '>' and there is not a '-'
        #character before it, return a -1 error code
        elif(chem[BonJovie] == '>' and (chem[BonJovie - 1] != '-')):
            return -1,None
    #if all is good so far, loop through all elements with counter
    countMe = 0
    #all reactants elements...
    reactant = []
    #all product elements...
    product = []
    #indicator that the '->' symbol
    #has been passed by the for loop...
    dashA = False
    #loop through and put together a list of elements
    for k in chem:
        #only take interest if the current character represents a letter
        if(k.isalpha()):
            #if the character is an upper case letter and the current
            #length of the sub string variable is 0, append the letter to
            #to the sub variable.
            if(k.isupper() and len(sub) == 0):
                #append the upper case character to the sub variable
                sub += k
                #if the next element isn't a lower case letter, then an element has just been appended to the
                #sub variable. Check if the element has already been appended to arrName...if not, append it...
                #if yes, then ignore it.
                if((countMe + 1 == len(chem) or not(chem[countMe + 1].islower()))):
                    #check to see if the element is unique...if so, append
                    #it to the arrName structure... (which will be used for
                    #deciding how many equations will be needed during the
                    #balancing process)
                    if(notAppended(arrName,sub)):
                        arrName.append(sub)
                    #if the symbol '->' has been passed...
                    if(dashA == True and not(sub in product)):
                        #append to product...
                        product.append(sub)
                    #if the symbol -> hasn't been passed...
                    elif(not(sub in reactant)):
                        #append to reactants...
                        reactant.append(sub)
                    #when finished appending to arrName, clear the sub
                    #variable
                    sub = ""
                #if the current letter isn't a part of a two letter element symbol and is already appended
                #to the arrName list, erase the sub variable...
                elif((countMe + 1 == len(chem) or not(chem[countMe + 1].islower())) and not(notAppended(arrName,sub))):
                    #erase the sub variable by resetting to an empty string...
                    sub = ""
            #if the current character is a lowercase letter and the
            #length of the sub variable is equal to one (meaning an upper case letter is
            #already stored in it), then a full element has just been come across; the lower case
            #letter should be concatenated to the end of the sub variable and then the sub variable
            #should be appended to arrName if it's not already present...
            elif(k.islower() and len(sub) == 1):
                #concatenate the lower case letter to the end of sub
                sub += k
                #check if the current element is already within arrName
                if(notAppended(arrName,sub)):
                    #append sub to arrName if not present
                    #within arrName already
                    arrName.append(sub)
                #if the symbol '->' has been passed...
                if(dashA == True and not(sub in product)):
                    #append to product...
                    product.append(sub)
                #if the symbol -> hasn't been passed...
                elif(not(sub in reactant)):
                    #append to reactants...
                    reactant.append(sub)
                #after appending, clear the sub variable
                sub = ""
            #If the current letter is lower case and the sub variable
            #has a length of zero, return an error code since no element
            #begins with a lower case letter.
            elif(k.islower() and len(sub) == 0):
                #clear arrName for the next attempt
                arrName = []
                #return error code three
                return 3,None
        #if a component of '->' is met...
        elif(countMe+1 != len(chem) and k == '-' and chem[countMe + 1] == '>'):
            #set dash to true...
            dashA = True
        #increment countMe, which acts
        #as an index in each iteration
        countMe = countMe + 1
    #check to see if the equation is valid based on the elements...
    if(len(product) != len(reactant)):
        #return error code 12
        return 12,None
    else:
        #check all elements to see if they match...
        for x in reactant:
            #check to see if x is in product...
            if(not(x in product)):
                #return error code 12 if x is not in product...
                return 12,None
    #no errors found
    return 0,arrName
#BracketParentheseMatch ensures that all of the brackets and parentheses within
#the equation match. If not, an error code is returned...
def BracketParentheseMatch(chemLine,left,right):
    """Validate one pair-type of grouping symbols inside a chemical equation.

    chemLine -- the raw chemical-equation string typed by the user.
    left, right -- the opening/closing symbols to validate: '(' and ')' or
    '[' and ']'.

    Returns 0 when the symbols are balanced, correctly paired, legally
    positioned and hold legal contents; otherwise returns a non-zero error
    code (2, 5, 7, 8, 9, 10 or 11) interpreted by balanceEquation().
    Relies on the helper alphaInLine() defined elsewhere in this file.
    """
    #counters for the number of left and right symbols seen
    SamWinchester = 0
    DeanWinchester = 0
    #error out early when only one half of a potential pair appears at all
    if(right in chemLine and not(left in chemLine)):
        return 2
    elif(left in chemLine and not(right in chemLine)):
        return 2
    #both symbol kinds are present; analyse counts, pairing, contents, position
    elif(left in chemLine and right in chemLine):
        #count each symbol type
        for x in chemLine:
            if(x == left):
                SamWinchester = SamWinchester + 1
        for x in chemLine:
            if(x == right):
                DeanWinchester = DeanWinchester + 1
        #unequal counts -> unbalanced
        if(SamWinchester != DeanWinchester):
            return 2
        else:
            #one/two track an open left symbol and its matching right symbol
            one = 0
            two = 0
            #verify the symbols occur strictly as left-then-right pairs
            for x in chemLine:
                if(x == right or x == left):
                    if(x == left and one == 0):
                        one = 2
                    elif(x == right and one != 0 and two == 0):
                        two = 2
                    else:
                        #nested or out-of-order symbol
                        return 2
                    #a full pair was closed; reset for the next pair
                    if(one != 0 and two != 0):
                        one = 0
                        two = 0
            #collect the index of every left and right symbol
            indecesL = []
            indecesR = []
            for s in range(len(chemLine)):
                if(chemLine[s] == left):
                    indecesL.append(s)
                elif(chemLine[s] == right):
                    indecesR.append(s)
            #error code for bad contents: 5 for parentheses, 7 for brackets
            num = None
            if(left == '('):
                num = 5
            else:
                num = 7
            #inspect the substring enclosed by each pair
            for x in range(len(indecesL)):
                CaptJackSparrow = chemLine[indecesL[x] + 1: indecesR[x]]
                #the reaction arrow may not sit inside a pair
                if('>' in CaptJackSparrow):
                    return num
                #empty pair: error 9 for parentheses, 11 for brackets
                elif(CaptJackSparrow == ""):
                    if(left == '('):
                        return 9
                    else:
                        return 11
                #contents may not begin with a digit
                elif(CaptJackSparrow[0].isdigit()):
                    return num
                #'-' and '+' are illegal inside parentheses
                elif('-' in CaptJackSparrow and right == '('):
                    return num
                elif('+' in CaptJackSparrow and right == '('):
                    return num
                #a right parenthesis must be followed by a subscript digit,
                #e.g. Cu3(PO4)2 -- otherwise error 10
                elif(right == ')' and not(indecesR[x] + 1 != len(chemLine) and chemLine[indecesR[x] + 1].isdigit())):
                    return 10
                #no brackets inside parentheses, no parentheses inside brackets
                elif(('[' in CaptJackSparrow or ']' in CaptJackSparrow) and right == '('):
                    return num
                elif(('(' in CaptJackSparrow or ')' in CaptJackSparrow) and right == '['):
                    return num
                #brackets hold only charges (digits plus '+'/'-'), never letters
                elif(right == ']' and alphaInLine(CaptJackSparrow)):
                    return num
                #validate a '-' charge inside brackets
                elif('-' in CaptJackSparrow and right == '['):
                    #NOTE(review): the brackets themselves are excluded from the
                    #slice, so when '-' is the first character index('-') - 1 is
                    #-1 and the '[' comparison inspects the LAST character of the
                    #contents -- confirm this is the intended behavior.
                    if(CaptJackSparrow[CaptJackSparrow.index('-') - 1] == '[' and CaptJackSparrow[CaptJackSparrow.index('-') + 1].isdigit()):
                        #everything after the sign must be digits only
                        AlexRusso = CaptJackSparrow[CaptJackSparrow.index('-') + 1:]
                        if('-' in AlexRusso or '(' in AlexRusso or '+' in AlexRusso):
                            return num
                    #first condition not met -> bad charge syntax
                    else:
                        return num
                #validate a '+' charge inside brackets (mirror of the '-' case)
                elif('+' in CaptJackSparrow and right == '['):
                    if(CaptJackSparrow[CaptJackSparrow.index('+') - 1] == '[' and CaptJackSparrow[CaptJackSparrow.index('+') + 1].isdigit()):
                        #everything after the sign must be digits only
                        MaxRusso = CaptJackSparrow[CaptJackSparrow.index('+') + 1:]
                        if('-' in MaxRusso or '(' in MaxRusso or '+' in MaxRusso):
                            return num
                    #first condition not met -> bad charge syntax
                    else:
                        return num
            #positioning rules for bracket pairs (error code 8).
            #NOTE(review): no corresponding `num = 6` branch exists for
            #parentheses here, although balanceEquation() defines message 6
            #("Parenthese pair positioning is incorrect") -- verify whether
            #parenthesis positioning is checked elsewhere.
            if(left == '['):
                num = 8
                for x in range(len(indecesL)):
                    #a bracket may not open the equation nor follow '>', '+' or '-'
                    if(indecesL[x] == 0 or chemLine[indecesL[x] - 1] == '>' or chemLine[indecesL[x] - 1] == '+' or chemLine[indecesL[x] - 1] == '-'):
                        return num
                    #a closing bracket may not be followed by a digit, letter or
                    #'>' (the length check first avoids reading past the string)
                    elif(indecesR[x]+1 != len(chemLine) and (chemLine[indecesR[x] + 1].isdigit() or chemLine[indecesR[x] + 1].isalpha() or chemLine[indecesR[x] + 1] == '>')):
                        return num
    #return 0 if no error was found
    return 0
#check if element is in substring...
def inSubstring(el, substring):
    """Locate the element symbol el within substring.

    Returns the index of the first valid occurrence, or -1 when the symbol is
    absent. For one-letter symbols, a match immediately followed by a lowercase
    letter is rejected (it is really the start of a two-letter symbol).
    """
    if len(el) == 2:
        # A two-letter symbol cannot be mistaken for anything else, so a plain
        # substring search suffices; find() already yields -1 on a miss.
        return substring.find(el)
    # One-letter symbol: accept a match only when it is the final character or
    # the following character is not lowercase.
    final = len(substring) - 1
    for pos, ch in enumerate(substring):
        if ch == el and (pos == final or not substring[pos + 1].islower()):
            return pos
    return -1
#Perform the process of balancing the chemical equation given
#by the user...
def balanceEquation(equation):
    """Validate and balance the chemical equation string typed by the user.

    Prints an error message when EquationWrong() reports a problem, prints an
    explanation when the coefficient matrix cannot be solved, and otherwise
    prints the balanced equation prefixed with "Answer:". Always returns None.
    Depends on EquationWrong(), inSubstring() and IsUnbalanceable() from this
    file, and on the file-level `sympy` and `string` imports.
    """
    #get string value of text box
    chem = equation
    #initial validity check; also yields the list of unique element symbols
    code,arrName = EquationWrong(equation)
    #any of the listed codes means the input is invalid; print the matching
    #message and bail out
    if(code == -3 or code == -2 or code == -1 or code == 1 or code == 2 or code == 3 or code == 4 or code == 5 or code == 6 or code == 7 or code == 8 or code == 9 or code == 10 or code == 11 or code == 12):
        if(code == -3):
            print("Can't have more than one reaction symbol: ->. Try Again!")
        elif(code == -2):
            print("Missing + and/or -> components! Try Again!\nIf + and/or -> are missing, it is not a valid chemical reaction.")
        elif(code == -1):
            print("There is a coefficient present before a element/molecule and/or inapropriate placement of '+', '-', and/or '>' characters. Try Again!")
        elif(code == 1):
            print("Incomplete chemical equation. Try Again!")
        elif(code == 2):
            print("Either the number of parentheses of each type are unbalanced (there must be the same # of ( and ) parenthesese) or the parenthesese of each type are not paired correctly -> (pair). Try Again!")
        elif(code == 3):
            print("Invalid Element Input! Try Again!")
        elif(code == 4):
            print("Either the number of brackets of each type are unnbalanced (there must be the same # of [ and ] brackets) or the brackets of each type are not paired correctly -> [pair]. Try Again!")
        elif(code == 5):
            print("Parenthese pair contents is incorrect. Try Again!")
        elif(code == 6):
            print("Parenthese pair positioning is incorrect. Try Again!")
        elif(code == 7):
            print("Bracket pair contents is incorrect. Try Again!")
        elif(code == 8):
            print("Bracket pair positioning is incorrect. Try Again!")
        elif(code == 9):
            print("Parenthese pair has no contents. Try Again!")
        elif(code == 10):
            print("Incorrect use of Parenthese. There should be a subscript after the right parenthese. For instance:\nCu3(PO4)2")
        elif(code == 11):
            print("Bracket pair has no contents. Try Again!")
        elif(code == 12):
            print("This is an invalid equation due to individual elements not appearing on both sides of the equat\
ion. Try Again!")
        #exit after the correct error message has been displayed
        return
    #substring will hold every reactant/product segment found between the
    #'+' and '->' symbols
    substring = []
    #holds the index of every splitting character
    indeces = []
    #running cursor used while slicing out the substrings
    countMe = 0
    #record the position of every '+' (and of the '-' in '->'); a sign that
    #directly follows '[' belongs to a charge and is skipped
    for g in range(len(chem)):
        if(chem[g] == '+' or chem[g] == '-'):
            if(chem[g - 1] != '['):
                indeces.append(g)
    #slice chem into its segments using the recorded indeces
    for g in range(len(indeces) + 1):
        if(countMe == 0):
            substring.append(chem[:indeces[g]])
            countMe = indeces[g]
        elif(g < len(indeces)):
            substring.append(chem[countMe:indeces[g]])
            countMe = indeces[g]
        else:
            substring.append(chem[countMe:])
    #one coefficient-matrix row per unique element...
    rows = len(arrName)
    #...plus one extra row for charges when brackets are present
    if('[' in chem):
        rows += 1
    #one matrix column per extracted segment
    columns = len(substring)
    #build a rows x columns zero matrix
    matrix = [[]] * rows
    for x in range(rows):
        matrix[x] = [0] * columns
    #tempRow excludes the charge row while the element rows are filled in
    tempRow = None
    if('[' in chem):
        tempRow = rows - 1
    else:
        tempRow = rows
    #fill in one row per element
    for x in range(tempRow):
        #current element symbol
        el = arrName[x]
        #flag9 becomes 100 once the '->' symbol has been passed (products side)
        flag9 = 0
        for k in range(columns):
            #position of the element inside this segment (-1 if absent)
            nindex = inSubstring(el, substring[k])
            if(el in substring[k]):
                #one-letter element symbol
                if(len(el) == 1):
                    #a digit right after the symbol starts a subscript
                    if((nindex + 1 != len(substring[k])) and substring[k][nindex + 1].isdigit()):
                        #gather every consecutive digit of the subscript
                        #(usually one digit, but possibly more)
                        sty = substring[k][nindex + 2:]
                        subscript2 = substring[k][nindex + 1]
                        for gh in range(len(sty)):
                            if(sty[gh].isdigit()):
                                subscript2 += sty[gh]
                            #first non-digit ends the subscript
                            else:
                                break
                        #values on the products side are negated
                        if(('>' in substring[k]) or flag9 == 100):
                            matrix[x][k] = -1 * int(subscript2)
                            if(flag9 == 0):
                                flag9 = 100
                        #reactants side keeps the positive value
                        else:
                            matrix[x][k] = int(subscript2)
                    #no subscript: implicit count of one (negated on products side)
                    else:
                        if('>' in substring[k] or flag9 == 100):
                            matrix[x][k] = -1
                            if(flag9 == 0):
                                flag9 = 100
                        else:
                            matrix[x][k] = 1
                #two-letter element symbol: the subscript (if any) starts two
                #characters past the match position instead of one
                else:
                    if((nindex + 2 != len(substring[k])) and substring[k][nindex + 2].isdigit()):
                        #gather every consecutive digit of the subscript
                        sty = substring[k][nindex + 3:]
                        subscript3 = substring[k][nindex + 2]
                        for gh in range(len(sty)):
                            if(sty[gh].isdigit()):
                                subscript3 += sty[gh]
                            #first non-digit ends the subscript
                            else:
                                break
                        #negate on the products side
                        if('>' in substring[k] or flag9 == 100):
                            matrix[x][k] = -1 * int(subscript3)
                            if(flag9 == 0):
                                flag9 = 100
                        else:
                            matrix[x][k] = int(subscript3)
                    #no subscript: implicit count of one (negated on products side)
                    else:
                        if('>' in substring[k] or flag9 == 100):
                            matrix[x][k] = -1
                            if(flag9 == 0):
                                flag9 = 100
                        else:
                            matrix[x][k] = 1
                #apply a multiplier that follows a parenthesis pair,
                #for instance: (H2SO4)2
                if(')' in substring[k]):
                    #only when the element actually sits inside the parentheses
                    if((nindex > substring[k].index('(')) and (nindex < substring[k].index(')'))):
                        #gather every consecutive digit after ')'
                        stm = substring[k][substring[k].index(')') + 1:]
                        subscript4 = ""
                        for gh in range(len(stm)):
                            if(stm[gh].isdigit()):
                                subscript4 += stm[gh]
                            #first non-digit ends the multiplier
                            else:
                                break
                        #scale the cell by the multiplier
                        matrix[x][k] = matrix[x][k] * int(subscript4)
            #element absent from this segment; still track the '->' crossover
            else:
                if('>' in substring[k]):
                    flag9 = 100
    #fill in the dedicated charge row when brackets are present
    if('[' in chem):
        #reset the products-side flag
        flag9 = 0
        for y in range(columns):
            if('[' in substring[y]):
                #charge value between the brackets
                Megatron = int(substring[y][substring[y].index('[') + 1:substring[y].index(']')])
                #negate on the products side
                if('>' in substring[y] or flag9 == 100):
                    matrix[rows-1][y] = Megatron * -1
                    if(flag9 == 0):
                        flag9 = 100
                #keep positive on the reactants side
                else:
                    matrix[rows-1][y] = Megatron
            #track the '->' crossover
            elif('>' in substring[y]):
                flag9 = 100
    #turn the 2d list into a sympy matrix
    matrix = sympy.Matrix(matrix)
    #row-reduce; reduced is a tuple (rref matrix, pivot columns)
    reduced = matrix.rref()
    #bail out when no usable solution exists
    if(IsUnbalanceable(reduced,rows,columns)):
        print("Equation is unbalanceable.\nThis is most likely due to a mistyped/missing subscript or charge, but look for mistyped/missing elements too.\nGo over your chemical equation and try again. If there are more unique elements than there are reactants+products (a difference of two or more), it is possible that the equation is still solvable; the reason why it came back false is possibly because more than one free variable was detected. In the case of more than one free variable when balancing equations, the linear algebra method is no longer accurate.")
        return
    #provide the balanced equation
    else:
        #one textual equation of the form "<var>=<value>" per matrix row
        arrEquations = []
        #equation currently being built
        equ = ""
        #coefficient variables are named with consecutive lowercase letters
        alphas = string.ascii_lowercase
        #counter for alphas
        counter = 0
        #largest denominator met among fractional coefficients
        denominator = None
        #every denominator met (for the LCM computation below)
        denominators = []
        #walk the rref matrix linearly: a leading 1 opens an equation and a
        #negative entry closes the current row's equation
        for n in range(columns * rows):
            if(reduced[0][n] == 1):
                equ += alphas[counter]+'='
                counter = counter + 1
            elif(reduced[0][n] < 0):
                #invert the negative value and finish this equation
                equ += str(-1*reduced[0][n])
                arrEquations.append(equ)
                equ = ""
                #textual form of the inverted value, e.g. "3/2"
                temp = str(-1*reduced[0][n])
                #track the greatest denominator seen so far
                if(denominator == None and '/' in str(reduced[0][n])):
                    index = temp.index('/')
                    denominator = int(temp[index + 1:])
                elif('/' in str(reduced[0][n]) and int(temp[temp.index('/') + 1:]) > denominator):
                    denominator = int(temp[temp.index('/') + 1:])
                #remember every denominator for the LCM search
                if('/' in str(reduced[0][n])):
                    denominators.append(int(temp[temp.index('/') + 1:]))
        #with several denominators, scale by their least common multiple
        if(denominator != None and len(denominators) > 1):
            LCM = denominator
            BarneyStinson = 0
            #brute-force search upward from the largest denominator
            while(BarneyStinson == 0):
                for x in range(len(denominators)):
                    if(LCM % denominators[x] != 0):
                        break
                    elif(LCM % denominators[x] == 0 and (x + 1 == len(denominators))):
                        BarneyStinson = 10
                        break
                if(BarneyStinson == 0):
                    LCM += 1
            denominator = LCM
        #the balanced equation is rebuilt character by character into newE
        newE = ""
        #integer coefficients only
        if(denominator == None):
            coeff = 0
            #paste each coefficient in front of its segment
            for u2 in range(len(list(chem))):
                #a segment starts at position 0, or right after a '+' that is
                #not a charge sign, or right after '>'
                if(u2 == 0 or (((list(chem)[u2 - 1] == '+' and list(chem)[u2-2] != '[') or list(chem)[u2 - 1] == '>') and coeff != len(arrEquations))):
                    newE += str(arrEquations[coeff][arrEquations[coeff].index('=') + 1:])
                    newE += chem[u2]
                    coeff = coeff + 1
                else:
                    newE += chem[u2]
        #fractional coefficients: multiply everything by the common denominator
        else:
            coeff = 0
            #create the new balanced chemical equation
            for u2 in range(len(list(chem))):
                if(u2 == 0 or (((list(chem)[u2 - 1] == '+' and list(chem)[u2-2] != '[') or list(chem)[u2 - 1] == '>') and coeff != len(arrEquations))):
                    str1 = arrEquations[coeff][arrEquations[coeff].index('=') + 1:]
                    #whole-number coefficient: just scale it
                    if('/' not in str1):
                        newE += str(int(arrEquations[coeff][arrEquations[coeff].index('=') + 1:]) * denominator)
                        newE += chem[u2]
                    #fraction: evaluate numerator/denominator, then scale;
                    #a resulting coefficient of 1 stays implicit
                    else:
                        num1 = str1[:str1.index('/')]
                        num2 = str1[str1.index('/') + 1:]
                        num3 = int((int(num1) / int(num2)) * denominator)
                        if(num3 != 1):
                            newE += str(num3)
                        newE += chem[u2]
                    coeff = coeff + 1
                #segments beyond the recorded equations get the denominator
                #itself (the free variable's coefficient)
                elif((list(chem)[u2 - 1] == '+' and list(chem)[u2-2] != '[') or list(chem)[u2 - 1] == '>'):
                    newE += str(denominator)
                    newE += chem[u2]
                else:
                    newE += chem[u2]
        #display the balanced equation
        print("\nAnswer: "+newE)
#Check to see if the chemical equation is balanceable or not
#when given the corresponding coefficient matrix...
def IsUnbalanceable(matrix, rows, columns):
    """Decide whether the row-reduced coefficient matrix admits a usable solution.

    Returns 1 when the equation is unbalanceable and 0 otherwise, delegating
    the row-by-row inspection to FreeOrZero().
    """
    # Square matrix, or exactly one free variable: inspect the rows directly.
    if rows == columns or rows == columns - 1:
        return 1 if FreeOrZero(rows, columns, matrix) == 1 else 0
    # Two or more free variables straight away: not solvable by this method.
    if columns - rows > 1:
        return 1
    # More rows than columns: rref() leaves the trailing rows all zero, so the
    # matrix is inspected as if it were a regular columns x columns matrix.
    if rows - columns >= 1:
        return 1 if FreeOrZero(columns, columns, matrix) == 1 else 0
#If another free variable is found or a row
#contains the form, 0 = 1 (indicating there is no solution).
def FreeOrZero(row,column,matrix):
    """Scan rows of the flattened rref matrix for unsolvable patterns.

    row, column -- the dimensions to inspect.
    matrix -- matrix[0] is the row-reduced matrix, indexed linearly
    (row r, column c lives at matrix[0][r * column + c]).

    Returns 1 when a row of all zeros or a row with exactly one non-zero
    entry (i.e. variable = 0) is found, else 0.
    """
    #counts of zero and non-zero entries within the current row
    LordVoldemort = 0
    HarryPotter = 0
    #NOTE(review): for each t the indices below cover row t-1, so this visits
    #rows row-2 down to 0 and the FINAL matrix row is never inspected --
    #confirm whether that is intended before changing the range.
    for t in range(row - 1,0,-1):
        #tally zeros and non-zeros across the row's cells
        for x in range((t * column) - column, t * column):
            if(matrix[0][x] == 0):
                LordVoldemort = LordVoldemort + 1
            else:
                #Fixed: the original condition here was the tautology
                #`matrix[0][x] == 1 or matrix[0][x] != 1`; every non-zero
                #entry is simply counted, which is what the tautology did.
                HarryPotter = HarryPotter + 1
        #a whole row of zeros, or a row with a single non-zero entry,
        #marks the equation as unbalanceable
        if(LordVoldemort == column):
            return 1
        elif(HarryPotter == 1):
            return 1
        #reset the counters for the next row
        LordVoldemort = 0
        HarryPotter = 0
    #no unsolvable pattern found
    return 0
#get an equation from the user and submit to the balance equation function...
def getEquation():
    """Prompt for a chemical equation on stdin and forward it to balanceEquation()."""
    # Read the raw equation text, then delegate all validation and balancing.
    typed = input("Please type a chemical equation you would like to balance:\n")
    balanceEquation(typed)
|
import logging
from typing import Any, Dict
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class WorkflowState:
    """
    Represents a single Comala workflow state which a page can be in.
    """
    def __init__(self, json):  # type: (Dict[str, Any]) -> None
        """Build a state from one entry of the Comala workflow JSON payload.

        json -- a mapping with required keys 'name', 'initial' and 'final';
        'description' is optional and defaults to an empty string.
        Raises KeyError when a required key is missing.
        """
        # Display name of the state (required).
        self.name = json['name']  # type: str
        # Optional human-readable description; '' when the payload omits it.
        # (dict.get replaces the original if/else membership test.)
        self.description = json.get('description', '')  # type: str
        # Whether this state is the workflow's starting state (required).
        self.initial = json['initial']  # type: bool
        # Whether this state is a terminal state of the workflow (required).
        self.final = json['final']  # type: bool
    def __str__(self):
        return self.name
    def __repr__(self):
        # Aid debugging/logging; backward-compatible addition.
        return '{}({!r})'.format(type(self).__name__, self.name)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import pickle
import ujson, yaml
from collections import OrderedDict
from pathlib import Path
from typing import Union, Optional
def deserialize_yml(fpath: Union[Path, str]) -> dict:
    """Load a YAML file, constructing every mapping as an OrderedDict.

    Preserves key order per
    http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
    """
    def _load_ordered(stream):
        # Subclass SafeLoader so the constructor override stays local to
        # this call and does not leak into the global yaml loaders.
        class _OrderedSafeLoader(yaml.SafeLoader):
            pass

        def _construct_mapping(loader, node):
            loader.flatten_mapping(node)
            return OrderedDict(loader.construct_pairs(node))

        # Route every plain mapping tag through the OrderedDict constructor.
        # noinspection PyUnresolvedReferences
        _OrderedSafeLoader.add_constructor(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping)
        return yaml.load(stream, _OrderedSafeLoader)

    with open(fpath, 'r') as f:
        return _load_ordered(f)
def deserialize_json(fpath: Union[Path, str]) -> dict:
    """Read and parse the JSON file located at fpath."""
    with Path(fpath).open("r") as handle:
        return ujson.load(handle)
# std libraries
import os
import warnings
# third parties libraries
import numpy as np
import matplotlib.pyplot as plt
# we use this function to extract certain information in the file f that stores the log of the specified mathematical model
def read(f):
    """Parse a Gurobi solver log from the open file object f.

    Scans the log line by line, harvesting model sizes, presolve statistics,
    root relaxation results, cutting-plane counts, node counts, timings,
    warnings and the final objective/bound/gap values, and returns a summary
    list of (mostly averaged) numpy values. Closes f before returning.
    Parsing is positional: for each recognized line prefix, the start/finish
    dicts record the column offsets that delimit each value on that line.
    """
    lines = f.readlines()
    # start/finish map a field name to the columns where its value begins and
    # ends on the line currently being parsed; reset before each extraction.
    start=dict()
    finish=dict()
    data_names = [ 'init_row', 'init_column', 'init_nonzero', 'init_varcont', 'init_varint', 'init_varbin',
                   'pre_time', 'pre_lazy', 'root_obj', 'root_computed', 'root_time', 'nodes', 'nodes_time', 'n_warnings',
                   'lp_solution', 'lp_time', 'total_time', 'solution', 'best_bound', 'gap', 'warm_up' ]
    for name in data_names:
        start[name] = 0
        finish[name] = 0
    # One numpy accumulator per extracted quantity; a value is appended each
    # time its trigger line is met (logs may contain several runs).
    heuristic_result = np.array([])
    heuristic_time = np.array([])
    init_row = np.array([])
    init_column = np.array([])
    init_nonzero = np.array([])
    init_varcont=np.array([])
    init_varint=np.array([])
    init_varbin=np.array([])
    pre_time = np.array([])
    pre_lazy=np.array([])
    root_obj = np.array([])
    root_computed = np.array([]) # this records when the root obj has been computed or not
    root_time = np.array([])
    # NOTE(review): the per-cut arrays below are shadowed by the locals() hack
    # further down and are never read; all cut data flows through cut_dict.
    clique = np.array([])
    cover = np.array([])
    flow_cover = np.array([])
    gomory = np.array([])
    gub_cover = np.array([])
    implied_bound = np.array([])
    lazy_constraints = np.array([])
    mir = np.array([])
    mod_K = np.array([])
    rlt = np.array([])
    strongCG = np.array([])
    zero_half = np.array([])
    nodes=np.array([])
    nodes_time=np.array([])
    n_warnings = np.array([])
    lp_solution=np.array([])
    lp_time=np.array([])
    total_time = np.array([])
    solution=np.array([])
    best_bound = np.array([])
    gap = np.array([])
    warm_up = np.array([])
    # cut_dict maps each cut's short name to its accumulator array
    cut_dict = dict()
    cuts_names = ['clique', 'cover','flow_cover','gomory','gub_cover',
                  'implied_bound','lazy_constraints','mir','mod_K','rlt','strongCG', 'zero_half']
    for cutt in cuts_names:
        cut_dict[cutt] = np.array([])
        start[cutt] = 0
        finish[cutt] = 0
        # NOTE(review): assigning through locals() does not create real local
        # variables in CPython -- this line is a no-op; confirm it can go.
        locals()[cutt] = np.array([])
    general_list = []
    # sentinel values meaning "not read yet"
    n_init_row = -1000000
    n_pre_row = -1000000
    n_init_column = -1000000
    n_pre_column = -1000000
    gurobi_9_Big_preprocess_flag = 0
    # set to 1 once a usable root relaxation line has been parsed
    root_relax_flag = 0
    for line in lines:
        # a new Gurobi run starts: reset all per-run parsing state
        if line[0:8] == 'Gurobi 9':
            gurobi_9_Big_preprocess_flag = 0
            root_relax_flag = 0
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            n_init_row = -100000
            n_pre_row = -100000
            n_init_column = -100000
            n_pre_column = -100000
        # We record the initial data (rows, columns, nonzeros)
        if line[0:10] == 'Optimize a':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            # locate each numeric field by scanning for its delimiters
            for i,j in enumerate(line):
                if j=='h' and line[i+1]==' ':
                    start['init_row']=i+2
                if i>start['init_row'] and j==' ' and finish['init_row']==0 and start['init_row']!=0:
                    finish['init_row']=i
                if i>finish['init_row'] and j==' ' and start['init_column']==0 and finish['init_row']!=0:
                    start['init_column']=i+1
                if i>start['init_column'] and j==' ' and finish['init_column']==0 and start['init_column']!=0:
                    finish['init_column']=i
                if i>finish['init_column'] and j=='d' and start['init_nonzero']==0 and finish['init_column']!=0:
                    start['init_nonzero']=i+2
                if i>start['init_nonzero'] and j==' ' and finish['init_nonzero']==0 and start['init_nonzero']!=0:
                    finish['init_nonzero']=i
            n_init_row = int(line[start['init_row']:finish['init_row']])
            n_init_column = int(line[start['init_column']:finish['init_column']])
            init_row=np.append(init_row, n_init_row)
            init_column=np.append(init_column, n_init_column)
            init_nonzero=np.append(init_nonzero,int(line[start['init_nonzero']:finish['init_nonzero']]))
        # variable-type counts (continuous, integer, binary).
        # NOTE(review): oldline is only assigned at the bottom of this loop, so
        # this raises NameError if the very first line starts with 'Variable t'
        # -- confirm logs always open with a Gurobi banner.
        if line[0:10] == 'Variable t' and oldline[0:5] == 'Model':
            for i,j in enumerate(line):
                if j==':' and line[i+1]==' ':
                    start['init_varcont']=i+2
                if i>start['init_varcont'] and j==' ' and finish['init_varcont']==0 and start['init_varcont']!=0:
                    finish['init_varcont']=i
                if i>finish['init_varcont'] and j==',' and start['init_varint']==0 and finish['init_varcont']!=0:
                    start['init_varint']=i+2
                if i>start['init_varint'] and j==' ' and finish['init_varint']==0 and start['init_varint']!=0:
                    finish['init_varint']=i
                if i>finish['init_varint'] and j=='(' and start['init_varbin']==0 and finish['init_varint']!=0:
                    start['init_varbin']=i+1
                if i>start['init_varbin'] and j==' ' and finish['init_varbin']==0 and start['init_varbin']!=0:
                    finish['init_varbin']=i
            init_varcont=np.append(init_varcont,int(line[start['init_varcont']:finish['init_varcont']]))
            init_varint=np.append(init_varint,int(line[start['init_varint']:finish['init_varint']]))
            init_varbin=np.append(init_varbin,int(line[start['init_varbin']:finish['init_varbin']]))
        # presolve time
        if line[0:10] == 'Presolve t':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if j==':' and line[i+1]==' ':
                    start['pre_time']=i+2
                if i>start['pre_time'] and j=='s' and finish['pre_time']==0 and start['pre_time']!=0:
                    finish['pre_time']=i
            pre_time=np.append(pre_time,float(line[start['pre_time']:finish['pre_time']]))
        # If some lazy constraints are extracted, we record it
        if line[0:9] == 'Extracted':
            for i,j in enumerate(line):
                if j=='d' and line[i+1]==' ' and line[i-2]=='t':
                    start['pre_lazy']=i+2
                if i>start['pre_lazy'] and (j==' ') and finish['pre_lazy']==0 and start['pre_lazy']!=0: # or j=='e'
                    finish['pre_lazy']=i
            pre_lazy=np.append(pre_lazy,int(line[start['pre_lazy']:finish['pre_lazy']]))
        # root relaxation objective and time
        if line[0:10] == 'Root relax':
            root_relax_flag = 1
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if j=='e' and line[i+1]==' ' and line[i-1]=='v':
                    start['root_obj']=i+2
                if i>start['root_obj'] and (j==',') and finish['root_obj']==0 and start['root_obj']!=0: # or j=='e'
                    finish['root_obj']=i
                # "cutoff" printed instead of a number: mark start==finish
                elif j=='f' and line[i+1]==',' and line[i-1]=='f':
                    start['root_obj']=i
                    finish['root_obj']=i
                # "infeasible" printed instead of a number: mark start==finish
                elif j=='e' and line[i+1]==',' and line[i-1]=='l':
                    start['root_obj']=i
                    finish['root_obj']=i
                if i>finish['root_obj'] and start['root_obj']!=finish['root_obj'] and j==',' and start['root_time']==0 and finish['root_obj']!=0 and line[i-4:i]=='ions':
                    start['root_time']=i+2
                if i>start['root_time'] and j==' ' and finish['root_time']==0 and start['root_time']!=0:
                    finish['root_time']=i
            # start==finish signals a non-numeric root objective (cutoff/infeasible)
            if start['root_obj']==finish['root_obj']:
                root_relax_flag = 0
            else:
                root_obj = np.append(root_obj,float(line[start['root_obj']:finish['root_obj']]))
                root_time = np.append(root_time,float(line[start['root_time']:finish['root_time']]))
        # cutting-plane counts: match each log label to its short name
        cuts = [' Clique:',' Cover:',' Flow cover:',' Gomory:',' GUB cover:',' Implied bound:',
                ' Lazy constraints:',' MIR:',' Mod-K:',' RLT:',' StrongCG:',' Zero half:']
        short_names = ['clique', 'cover','flow_cover','gomory','gub_cover',
                       'implied_bound','lazy_constraints','mir','mod_K','rlt','strongCG', 'zero_half']
        for cut, name in zip(cuts,short_names):
            # we read each specific kind of cutting plane
            if line[0:len(cut)] == cut:
                start = dict.fromkeys(start, 0)
                finish = dict.fromkeys(finish, 0)
                for i,j in enumerate(line):
                    if j==':' and line[i+1]==' ':
                        start[name]=i+2
                cut_dict[name] = np.append(cut_dict[name], float(line[start[name]:]) )
        # number of explored nodes and the node-processing time
        if line[0:8] == 'Explored':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if j=='d' and line[i+1]==' ':
                    start['nodes']=i+2
                if i>start['nodes'] and j==' ' and finish['nodes']==0 and start['nodes']!=0:
                    finish['nodes']=i
                if i>finish['nodes'] and line[i-1]=='i' and j=='n' and start['nodes_time']==0 and finish['nodes']!=0:
                    start['nodes_time']=i+2
                if i>start['nodes_time'] and j==' ' and finish['nodes_time']==0 and start['nodes_time']!=0:
                    finish['nodes_time']=i
            nodes=np.append(nodes,int(line[start['nodes']:finish['nodes']]))
            nodes_time=np.append(nodes_time, float(line[start['nodes_time']:finish['nodes_time']]) )
        # warm-start (user MIP start) objective value
        if line[0:15] == 'Loaded user MIP':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if line[i-2]=='i' and line[i-1]=='v' and j=='e':
                    start['warm_up']=i+2
            warm_up = np.append(warm_up,float(line[start['warm_up']:]))
        # LP-only runs: solve time...
        if line[0:9] == 'Solved in':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if j=='n' and line[i+1]=='d' and line[i+2]==' ' and line[i-1]=='a':
                    start['lp_time']=i+3
                if i>start['lp_time'] and j==' ' and finish['lp_time']==0 and start['lp_time']!=0:
                    finish['lp_time']=i
            lp_time=np.append(lp_time,float(line[start['lp_time']:finish['lp_time']]))
        # ...and LP objective value
        if line[0:10] == 'Optimal ob':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if j=='e' and line[i+1]==' ' and line[i+2]==' ' and line[i-1]=='v':
                    start['lp_solution']=i+3
            lp_solution=np.append(lp_solution,float(line[start['lp_solution']:]))
        # total runtime
        if line[0:8] == 'Runtime:':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            for i,j in enumerate(line):
                if j==':' and line[i+1]==' ' and line[i-1]=='e':
                    start['total_time']=i+2
            total_time = np.append(total_time,float(line[start['total_time']:]))
        # count solver warnings
        if line[0:8] == 'Warning:':
            #print(line)
            n_warnings = np.append(n_warnings,1)
        # best objective, best bound and gap (end-of-run summary)
        if line[0:14] == 'Best objective':
            start = dict.fromkeys(start, 0)
            finish = dict.fromkeys(finish, 0)
            flag=0
            for i,j in enumerate(line):
                if j=='v' and line[i+1]=='e' and line[i+2]==' ':
                    start['solution'] = i+3
                if i>start['solution'] and j==',' and finish['solution']==0 and start['solution']!=0:
                    finish['solution'] = i
                if i>finish['solution'] and line[i-1]=='n' and j=='d' and start['best_bound']==0 and finish['solution']!=0:
                    start['best_bound'] = i+2
                if i>start['best_bound'] and j==',' and finish['best_bound']==0 and start['best_bound']!=0:
                    finish['best_bound'] = i
                if i>finish['best_bound'] and line[i-1]=='a' and j=='p' and start['gap']==0 and finish['best_bound']!=0:
                    start['gap'] = i+2
                if i>start['gap'] and j=='%' and finish['gap']==0 and start['gap']!=0:
                    finish['gap'] = i
            solution = np.append(solution,float(line[start['solution']:finish['solution']]))
            best_bound = np.append(best_bound,float(line[start['best_bound']:finish['best_bound']]))
            gap = np.append(gap,float(line[start['gap']:finish['gap']]))
            root_computed = np.append(root_computed, root_relax_flag ) # we append root_relax_flag
            # pad root_obj with 0 when no root relaxation was parsed this run
            if root_relax_flag==0:
                root_obj=np.append(root_obj,0)
        # remember the previous line for the 'Variable t' context check above.
        # NOTE(review): oldpresolveline is assigned but never read.
        oldline=line
        oldpresolveline=line
    f.close()
    # Summary list. NOTE(review): len(np.nonzero(np.round(gap,2))) is ALWAYS 1
    # because np.nonzero returns a tuple of index arrays; the count of nonzero
    # gaps was presumably meant (len(np.nonzero(...)[0])) -- left unchanged
    # here since callers may depend on the current value. short_names is the
    # loop-scope variable from the cuts section above (requires a non-empty
    # log file to be defined).
    return [int(np.mean(init_row)), int(np.mean(init_column)),
            format(np.round(np.mean(pre_time),2), '.2f') ,
            np.round(root_obj,2), format(np.round(np.mean(root_time),2), '.2f'),
            np.round(np.mean(nodes),1), len(np.nonzero(np.round(gap,2))),
            solution, format(np.round(np.mean(total_time),2), '.2f'),
            [np.round(np.mean(cut_dict[name]),1) for name in short_names],
            np.round(lp_solution,2), format(np.round(np.mean(lp_time),2), '.2f'),
            root_computed, np.round(np.mean(gap),1) ]
# Extract the heuristic objective value and runtime from the log file f.
def read_heur(f):
    """Parse a heuristic log file object and return
    [mean solution value (rounded), mean runtime formatted to 2 decimals].

    The file is closed before returning."""
    content = f.readlines()
    # Start offsets of each value inside its line; reset per matching line.
    offsets = {'heuristic_result': 0, 'heuristic_time': 0}
    heuristic_result = np.array([])
    heuristic_time = np.array([])
    for row in content:
        if row.startswith('solution:'):
            offsets = dict.fromkeys(offsets, 0)
            # Remember the position just after the last ": " on the line.
            for pos, ch in enumerate(row):
                if ch == ':' and row[pos + 1] == ' ':
                    offsets['heuristic_result'] = pos + 2
            heuristic_result = np.append(
                heuristic_result, float(row[offsets['heuristic_result']:-1]))
        if row.startswith('Runtime:'):
            offsets = dict.fromkeys(offsets, 0)
            for pos, ch in enumerate(row):
                if ch == ':' and row[pos + 1] == ' ':
                    offsets['heuristic_time'] = pos + 2
            heuristic_time = np.append(
                heuristic_time, float(row[offsets['heuristic_time']:-1]))
    f.close()
    return [np.round(np.mean(heuristic_result), 2),
            format(np.round(np.mean(heuristic_time), 2), '.2f')]
# we compute the root gap of each mathematical method
def compute_root_gap(root_array, solution, root_computed):
    """Average percentage gap between the root relaxation values and the
    best known solutions; entries whose root was not computed count as 0."""
    gaps = np.array([])
    for idx, computed in enumerate(root_computed):
        if computed == 1:
            best = solution[idx]
            if best != 0:
                gaps = np.append(gaps, abs(root_array[idx] - best) / best * 100)
            else:
                # No relative gap when the best value is zero: use the raw root value.
                gaps = np.append(gaps, root_array[idx])
        elif computed == 0:
            gaps = np.append(gaps, 0)
    return np.round(np.mean(gaps), 1)
# we compute the solution gap
def solution_gap(method_solution, solution):
    """Mean percentage deviation of a method's objective values from the
    best known values (raw method value is used when the best is zero)."""
    gaps = np.array([])
    for idx, best in enumerate(solution):
        if best != 0:
            gaps = np.append(gaps, abs(method_solution[idx] - best) / best * 100)
        else:
            gaps = np.append(gaps, method_solution[idx])
    return np.round(np.mean(gaps), 1)
# this function produces table 2. Takes as input three lists, the list of # of students, the # of colleges and number of extra funding
# for example
# init_n1 = [1000,2000]
# init_n2 = [5, 8, 10, 15]
# funding = [1,2,5,10,20,30]
def produce_data_table(init_n1, init_n2, funding):
    """Read the Gurobi/heuristic log files for every (n1, n2, funding)
    combination and print one LaTeX table row per combination."""
    for n1 in init_n1:
        current_path = os.getcwd()
        # NOTE(review): paths are built with backslashes, so this is Windows-only,
        # and '\D'/'\E' are invalid escapes in newer Python - consider os.path.join.
        folder_path = current_path +f'\Data\Experiments_n1={n1}'
        folder_path_heuristics = current_path +'\Data\Heuristics_sol'
        for n2 in init_n2:
            for f in funding:
                warnings.filterwarnings("ignore") # we add this command to avoid warnings about empty arrays
                # we read the data about the Integer Program
                IP_file = open(folder_path+"\m.miqp_st{}un{}fund{}.gurobi-log.txt".format(n1,n2,f),"r")
                IP_results=read(IP_file)
                IP_file.close()
                # we read the data about the aggregated linearization
                MC_agg_file = open(folder_path+"\m.McC_agg_st{}un{}fund{}.gurobi-log.txt".format(n1,n2,f),"r")
                MC_agg_results = read(MC_agg_file)
                MC_agg_file.close()
                # we read the data about the LP-based heuristic
                LP_based_file = open(folder_path_heuristics+"\\LP_based_st{}un{}fund{}.txt".format(n1,n2,f),"r")
                LP_based_results = read_heur(LP_based_file)
                LP_based_file.close()
                # we read the data about the Greedy heuristic
                Greedy_file = open(folder_path_heuristics+"\\greedy_st{}un{}fund{}.txt".format(n1,n2,f),"r")
                Greedy_results = read_heur(Greedy_file)
                Greedy_file.close()
                # we compute the array of the best solutions and the average best solution
                # (index 7 of read()'s result holds the solution array)
                solution = np.minimum(IP_results[7], MC_agg_results[7] )
                mean_solution = np.round(np.mean( solution),1)
                # we compute the average root gaps (index 3: root objective, index 12: root-computed flags)
                IP_root_gap = compute_root_gap(IP_results[3], solution, IP_results[12])
                Agg_root_gap = compute_root_gap(MC_agg_results[3], solution, MC_agg_results[12])
                # we compute the average solution gap
                IP_gap = solution_gap(IP_results[7], solution)
                Agg_gap = solution_gap(MC_agg_results[7], solution)
                # we compute the gap between the heuristics and the best upper bound
                LP_based_gap = abs(np.round( (( LP_based_results[0] - mean_solution)/mean_solution )*100 , 1))
                Greedy_based_gap = abs(np.round( (( Greedy_results[0] - mean_solution)/mean_solution )*100 , 1))
                # BIG TABLE, we produce the data (one LaTeX row per instance family)
                print(f' {f} & {n1} & {n2} & '\
                    f' {LP_based_gap} & {Greedy_based_gap} & '\
                    f'{IP_root_gap} & {IP_results[5]} & '\
                    f' {IP_results[6]} & {IP_results[13]} & {IP_gap} & {IP_results[8]} & '\
                    f'{Agg_root_gap} & {MC_agg_results[5]} & '\
                    f' {MC_agg_results[6]} & {MC_agg_results[13]} & {Agg_gap} & {MC_agg_results[8]} \\\\ \n \hline \n')
|
from heapq import heappush, heappop
#import collections
# Competitive-programming solution (reads from stdin).
# Pick k dishes maximizing total deliciousness + (distinct toppings)^2,
# then greedily try swapping duplicate-topping dishes for new toppings.
n, k = map(int, input().split())
c = []
for i in range(n):  # each dish: [topping id, deliciousness]
    c.append([int(m) for m in input().split()])
c.sort(key=lambda x:(x[0], x[1]))
# f: dishes sorted by deliciousness, highest first
f = sorted(c, key=lambda x:(x[1], x[0]), reverse=True)
donyoku = 0  # running total satisfaction
donset = set()  # distinct toppings among the chosen dishes
#pq = PriorityQueue()
notfirst = []  # min-heap of chosen deliciousness values whose topping is a duplicate
for i in range(k):
    if f[i][0] not in donset:
        donset.add(f[i][0])
    else:
        heappush(notfirst, f[i][1])
    donyoku += f[i][1]
donyoku += len(donset)**2
candidate = []
candidateset = set()
donyokulength = len(donset)  # len(donset) will change below, so record it now
for i in range(k, n):
    if f[i][0] not in donset:
        # heappop returns the smallest element, so store negated values
        # to pop the largest deliciousness first.
        # NOTE(review): values are appended in increasing order (f is sorted
        # descending), which happens to satisfy the heap invariant - confirm.
        candidate.append(-f[i][1])
        candidateset.add(f[i][0])
        donset.add(f[i][0])  # also add to donset so later duplicates are blocked
satili = [donyoku]
donyoku -= donyokulength**2
for i in range(len(candidateset)):
    if notfirst == []:
        # No duplicate dish left to swap out.
        break
    else:
        # Swap the cheapest duplicate for the best unused-topping candidate.
        donyoku -= heappop(notfirst)
        donyoku += -heappop(candidate)
        donyoku += (donyokulength + i + 1) ** 2
        satili.append(donyoku)
        donyoku -= (donyokulength + i + 1) ** 2
print(max(satili))
|
#!/usr/bin/python3
import time
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import re
from collections import Counter
from urllib.parse import urlparse
from fake_useragent import UserAgent
import argparse
# Crawl configuration (some values are overridden by CLI args in __main__).
WEBURL = "https://www.eniyiuygulama.com/"  # start URL / current target page
PROXY = "socks5://127.0.0.1:9050"  # SOCKS proxy (Tor's default port)
LIMIT = 5  # maximum number of visits before stopping
VISIT = 0  # visits performed so far
RUN = True  # main-loop flag
WAIT = 10  # seconds to sleep between visits
FOLLOW_URL = True  # whether to pick the next link from each visited page
HISTORY = [WEBURL]  # visited URLs, newest last
DEBUG = True
PARSE_URL = urlparse(WEBURL)  # parsed start URL, used for same-site checks
driver = None  # selenium webdriver, created in setup()
def referance():
    """Navigate the browser to the most recent URL in HISTORY via JS
    (no-op while HISTORY only contains the start URL)."""
    if len(HISTORY) < 2:
        return
    driver.execute_script('window.location.href = "{}";'.format(HISTORY[-1]))
    if DEBUG:
        print("History setup %s" % HISTORY[-1])
def setup():
    """Create a headless Chrome driver behind the proxy and load the start page."""
    global driver
    ua = UserAgent()
    # Presumably returns a random Chrome user-agent string via dict-style
    # access -- TODO confirm against fake_useragent documentation.
    user_agent = ua['google chrome']
    chrome_options = webdriver.ChromeOptions()
    # specify headless mode
    chrome_options.add_argument('headless')
    chrome_options.add_argument("--window-size=1920,1080")
    chrome_options.add_argument(f'user-agent={user_agent}')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--log-level=0')
    # Route all browser traffic through the configured (Tor) proxy.
    chrome_options.add_argument('--proxy-server=%s' % PROXY)
    driver = webdriver.Chrome(executable_path="/chromedriver/chromedriver",options=chrome_options)
    driver.get(WEBURL)
    if FOLLOW_URL:
        # Pick the first "next" URL while we are on the start page.
        search_url(driver)
def start():
    """Perform one visit: spoof the referrer, pick the next URL,
    bump the visit counter and wait."""
    global VISIT, WEBURL, FOLLOW_URL
    referance()
    if FOLLOW_URL:
        search_url(driver)
    VISIT += 1
    if DEBUG:
        # BUG FIX: get_log() drains the browser log buffer on every call,
        # so the previous code (calling it once in the `if` and again in
        # print) always printed an empty list.  Fetch it exactly once.
        logs = driver.get_log("browser")
        if logs:
            print(logs)
    time.sleep(WAIT)
def search_url(driver):
    """Choose the most frequent unvisited same-site link on the current
    page as the next target, recording it in HISTORY and WEBURL."""
    global HISTORY, WEBURL
    anchors = driver.find_elements_by_xpath("//a[@href]")
    links = []
    for anchor in anchors:
        href = anchor.get_attribute("href")
        # Drop any URL fragment ("...#section" -> "...").
        links.append(re.sub('#.*', '#', href).replace("#", ""))
    ranked = sorted(Counter(links).items(), key=lambda item: item[1], reverse=True)
    for candidate, _count in ranked:
        parsed = urlparse(candidate)
        same_site = parsed.netloc.find(PARSE_URL.netloc) > -1
        if candidate not in HISTORY and same_site:
            HISTORY.append(candidate)
            WEBURL = candidate
            break
    if DEBUG:
        print("next URL %s" % WEBURL)
def is_url(url):
    """Return True when url parses with both a scheme and a network location."""
    try:
        parsed = urlparse(url)
    except ValueError:
        return False
    return bool(parsed.scheme) and bool(parsed.netloc)
if __name__ == '__main__':
    # Command-line overrides for the module-level crawl configuration.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-u", "--url", default="https://www.eniyiuygulama.com/")
    # FIX: parse numeric options with type=int instead of int()-casting later.
    parser.add_argument("-w", "--wait", default=1, type=int)
    parser.add_argument("-l", "--limit", default=0, type=int)
    parser.add_argument("-d", "--debug", default=False)
    parser.add_argument("-loop", "--loop", default=1)
    args = parser.parse_args()
    if args.wait is not None:
        WAIT = args.wait
    if args.limit is not None:
        LIMIT = args.limit
    if args.debug is not None:
        # BUG FIX: bool("False") is True, so `-d False` used to enable
        # debugging.  Interpret the string value explicitly instead.
        DEBUG = str(args.debug).lower() in ("1", "true", "yes")
    if is_url(args.url):
        WEBURL = args.url
        print(args.url)
    setup()
    # Visit pages until the configured limit is exceeded.
    while RUN:
        start()
        if VISIT > LIMIT:
            RUN = False
    driver.close()
|
import qsm
def test_project_defines_author_and_version():
    """The qsm package must expose __author__ and __version__ metadata."""
    for attribute in ('__author__', '__version__'):
        assert hasattr(qsm, attribute)
|
#!/usr/bin/env python2.7
import numpy as np
import math as m
import matplotlib.pyplot as plt
import radio_beam
from spectral_cube import SpectralCube
from astropy.io import fits
from astropy import units as u
import os
#from reproject import reproject_interp
from astropy.wcs import WCS
"""
using data cubes and putting them at the same resolution
"""
def flatten(unflat, flux, name):
    """Multiply an unflattened image by its flux image and write the
    product to '<name>.image.flat.fits.gz' using the first image's header."""
    udata, uhd = fits.getdata(unflat, header=True)
    fdata, fhd = fits.getdata(flux, header=True)
    # Blanked (NaN) pixels become zero so the product is defined everywhere.
    udata[np.isnan(udata)] = 0
    fdata[np.isnan(fdata)] = 0
    flat = udata * fdata
    # Zeros (the originally blanked regions) go back to NaN in the output.
    flat[flat == 0] = np.nan
    outname = name + '.image.flat.fits.gz'
    return fits.writeto(outname, flat, uhd, clobber=True)
"""
def aligner(image_1, image_2, name_1='12CO', name_2='13CO'):
#align first image to second
data_1,hd_1=fits.getdata(image_1, header=True)
data_2,hd_2=fits.getdata(image_2, header=True)
array, footprint = reproject_interp(image_1, hd_2)
aligned='../GMC1/'+name_1+'.aligned.fits.gz'
fits.writeto(aligned,array,hd_1)
return aligned
"""
"""
From here, we plug the flattened images into myriad's regrid option
"""
def convolver(image_1, image_2, name_1='12CO', name_2='13CO', size=None):
    """Convolve spectral cubes to a common beam (Python 2 module).

    With `size` given, both cubes are convolved to a circular beam of
    `size` arcsec and a list of both output names is returned; otherwise
    image_1 is convolved to image_2's beam and [convolved, image_2] is
    returned."""
    resimages = []
    images = [image_1, image_2]
    names = [name_1, name_2]
    if size != None:
        # put both images at specified resolution
        newbeam = radio_beam.Beam(major=size*u.arcsec, minor=size*u.arcsec, pa=0*u.deg)
        print newbeam
        for i in range(2):
            cube = SpectralCube.read(images[i])
            print cube
            old_unit = cube._header['bunit']
            print cube.beam
            new_cube = cube.convolve_to(newbeam)
            # Keep the cube in single precision if it was originally
            if cube._header['bitpix'] == -32:
                y = new_cube._data.astype(np.float32)
                new_cube._data = y
            # Encode the beam size in the file name, e.g. 2.5 -> "2p5as".
            str1 = "{:.1f}".format(size)
            str2 = str1.replace(".0","")
            str3 = str2.replace(".","p")
            convolved = names[i] + '_' + str3 + 'as.image.fits.gz'
            new_cube.write(convolved,format='fits',overwrite=True)
            resimages.append(convolved)
            # fix brightness unit because spectralcube drops 'per beam'
            if old_unit == 'Jy/beam':
                hdulist = fits.open(convolved,mode='update')
                hdulist[0].header['BUNIT'] = 'Jy/beam'
                hdulist.close()
        return resimages
    elif size == None:
        # put first image at resolution of second image
        cube = SpectralCube.read(image_1)
        print cube
        old_unit = cube._header['bunit']
        print cube.beam
        newbeam = radio_beam.Beam.from_fits_header(image_2)
        print newbeam
        new_cube = cube.convolve_to(newbeam)
        # Keep the cube in single precision if it was originally
        if cube._header['bitpix'] == -32:
            y = new_cube._data.astype(np.float32)
            new_cube._data = y
        convolved = name_1 + '.convolved.fits.gz'
        new_cube.write(convolved,format='fits',overwrite=True)
        # fix brightness unit because spectralcube drops 'per beam'
        if old_unit == 'Jy/beam':
            hdulist = fits.open(convolved,mode='update')
            hdulist[0].header['BUNIT'] = 'Jy/beam'
            hdulist.close()
        return [convolved,image_2]
def vradspec(image, name):
    """Plot the integrated spectrum (flux vs. radio velocity) of a cube
    and save it as '<name>.vradplot.pdf'."""
    # spectrum of body, using radio velocity
    data, hd = fits.getdata(image, header=True)
    # replace nan with 0
    data[np.isnan(data)] = 0
    # amount of vrad intervals (spectral channels)
    nlayer = len(data)
    ncolumn, nrow = len(data[0]), len(data[0][0])
    # flux/beam accumulated per channel
    fpb_list = [0] * nlayer
    vrad_list, flux_list = [], []
    # starting radio velocity in [km/s]
    vrad_start = hd['CRVAL3'] / 1000
    # radio velocity interval size in [km/s]
    vrad_int = hd['CDELT3'] / 1000
    BMAJ, BMIN = hd['BMAJ'], hd['BMIN']
    CDELT1, CDELT2 = hd['CDELT1'], hd['CDELT2']
    # pixels/beam for a Gaussian beam
    pb = ((m.pi) / (4 * m.log(2))) * ((BMAJ * BMIN) / abs(CDELT1 * CDELT2))
    a = 0
    for i in range(nlayer):
        vrad_list.append(vrad_start + a)
        # BUG FIX: this line previously read `a += vrad_interval`, an
        # undefined name (the variable is called vrad_int), so the
        # function always raised NameError on its first iteration.
        a += vrad_int
        for j in range(ncolumn):
            for k in range(nrow):
                # (Jy*pixels)/beam
                fpb_list[i] += data[i][j][k]
    for i in range(len(fpb_list)):
        # Jy/beam -> Jy
        flux_list.append(fpb_list[i] / pb)
    # Flux v Vrad
    plt.plot(vrad_list, flux_list, 'b')
    plt.xlabel("Radio velocity [km/s]")
    plt.ylabel("Flux [Jy]")
    plt.savefig(name + '.vradplot.pdf')
    plt.close()
    return
|
import discord
import asyncio
from discord.ext import commands
from bin import zb_config
from bin import zb
# Shared configuration module handle.
_var = zb_config
# Joined view of users with their guild- and role-membership rows.
_query = """ SELECT g.guild_id,u.real_user_id,r.role_id,u.name,g.nick,u.int_user_id
FROM users u
LEFT JOIN guild_membership g ON u.int_user_id = g.int_user_id
LEFT JOIN role_membership r ON u.int_user_id = r.int_user_id """
# Hard-coded role ids; presumably one sub-list per removable role tier -- TODO confirm.
rmvRoles = [[562694658933653524],[562995654079545365],[562995727844900865],
            [562995775940984832],[562998542986379264],[562695065789530132],
            [562695118310604810],[562387019188273155]]
# Maps a Discord user id to the internal integer id used by the other tables.
_int_user_id = """ SELECT int_user_id
FROM users
WHERE real_user_id = {0} """
# Sets the punishment flag for one member within one guild.
_update_punish = """ UPDATE guild_membership
SET punished = {0}
WHERE int_user_id = {1}
AND guild_id = {2} """
def punish_user(member, number):
    """Record punishment level `number` for `member` in guild_membership."""
    lookup = _int_user_id.format(member.id)
    data, rows, string = zb.sql_query(lookup)
    internal_id = int(data[0])
    update = _update_punish.format(number, internal_id, member.guild.id)
    rows, string = zb.sql_update(update)
def is_outranked(member1, member2, role_perms):
    """Return True when member1's strongest role permission outranks
    member2's (lower role_perms value = stronger), or when member2 has
    no usable permission data."""
    # One template instead of the previous duplicated literal.
    query = """ SELECT MIN(role_perms)
FROM roles
WHERE guild_id = {0}
AND NOT role_perms = 0
AND role_perms <= {1}
AND role_id in {2} """

    def best_perm(member):
        # Lowest non-zero role permission among the member's roles.
        role_ids = [role.id for role in member.roles]
        sql = query.format(member.guild.id, role_perms, zb.sql_list(role_ids))
        return zb.sql_query(sql)

    data1, rows, string = best_perm(member1)
    data2, rows, string = best_perm(member2)
    try:
        # Narrowed from a bare `except:`: missing/None/non-numeric data
        # means member2 has no rank, so member1 outranks by default.
        int(data2[0])
    except (TypeError, ValueError, IndexError):
        return True
    if rows == 0:
        return True
    elif int(data1[0]) < int(data2[0]):
        return True
    else:
        return False
class BanesWeebECog(commands.Cog):
    """Guild-specific cog; currently only the guild-restriction check is
    active (the DM blacklist listener below is commented out)."""

    def __init__(self, bot):
        # Keep a reference to the bot for use by listeners/commands.
        self.bot = bot

    def is_in_guild(guild_id):
        """Command check factory restricting a command to one guild.

        NOTE(review): defined without `self`; it appears intended for use
        as a decorator argument rather than an instance method -- confirm.
        """
        async def predicate(ctx):
            return ctx.guild and ctx.guild.id == guild_id
        return commands.check(predicate)

    # @commands.Cog.listener()
    # async def on_message(self, message):
    #     # Ignore self
    #     if message.author == self.bot.user:
    #         return
    #     # Adds people to blacklist
    #     guild = self.bot.get_guild(562078425225887777)
    #     if(message.author in guild.members and
    #             message.guild is None):
    #         channel = guild.get_channel(562078853133107230)
    #         # Sets static values
    #         member = message.author
    #         carryMsg = message
    #         # Looks for last message in admin chats
    #         while True:
    #             async for message in channel.history(limit=500):
    #                 if message.author == member:
    #                     msg = message
    #                     break
    #             break
    #         # Try to get message ctx if found
    #         try:
    #             ctx = await self.bot.get_context(msg)
    #         except:
    #             return
    #         # If ctx found, test for permissions
    #         if(zb.is_trusted(ctx,4) and
    #                 zb.is_pattern(carryMsg.content,
    #                 '^([0-9]{14,})\s+(((\w+\s+)+(\w+)?)|(\w+)).+')):
    #             data = carryMsg.content.split(' ',1)
    #             sql = """ SELECT real_user_id
    #                       FROM blacklist
    #                       WHERE guild_id = {0}
    #                       AND real_user_id = {1} """
    #             sql = sql.format(guild.id,data[0])
    #             junk, rows, junk2 = zb.sql_query(sql)
    #             # Checks if already in list
    #             if rows > 0:
    #                 await carryMsg.author.send('Thank you for reporting ' +
    #                     f'`{data[0]}`, but it already exists for **{guild.name}**.')
    #                 return
    #             else:
    #                 await carryMsg.author.send(f'I have added `{data[0]}` ' +
    #                     f'to the blacklist for **{guild.name}**')
    #                 zb.add_blacklist(message,data[0],data[1])
def setup(bot):
    """Standard discord.py extension entry point: register the cog."""
    bot.add_cog(BanesWeebECog(bot))
|
# https://www.codewars.com/kata/find-the-next-perfect-square/train/python
# My solution: next perfect square after sq, or -1 when sq is not a perfect square.
def find_next_square(sq):
    root = int(sq ** 0.5)
    return (root + 1) ** 2 if root ** 2 == sq else -1
# Return the next square if sq is a square, -1 otherwise
# ...
def find_next_square(sq):
    """Alternative: rely on float.is_integer() to detect perfect squares."""
    root = sq ** 0.5
    return (root + 1) ** 2 if root.is_integer() else -1
# ...
import math


def find_next_square(sq):
    """Variant using math.sqrt with an early return for non-squares."""
    root = math.sqrt(sq)
    if not root.is_integer():
        return -1
    return (root + 1) ** 2
# ...
from math import sqrt


def find_next_square(sq):
    """Variant testing the fractional part of the square root."""
    r = sqrt(sq)
    # r % 1 is non-zero (truthy) exactly when sq is not a perfect square.
    return -1 if r % 1 else (r + 1) ** 2
|
from django.contrib import admin
from store.models import Product
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product."""
    # Columns shown on the product change-list page.
    list_display = ['title', 'user', 'price']
|
import random
from tests.utils import async_test
from donphan import Column, Table, SQLType
from unittest import TestCase
class _TestLikeTable(Table):
    """Single-text-column table used to exercise LIKE/ILIKE query building."""
    a: Column[SQLType.Text] = Column(primary_key=True)
class ViewTest(TestCase):
    """LIKE/ILIKE fetch tests.

    NOTE(review): the async tests pass None as the connection argument --
    presumably donphan falls back to a default pool; confirm before running.
    Method names are prefixed a..e to force execution order under unittest.
    """

    def test_query_fetch_null(self):
        # a__like should render as "a LIKE $1" with a positional parameter.
        where = _TestLikeTable._build_where_clause({"a__like": r"%foo"})
        assert (
            _TestLikeTable._build_query_fetch(where, None, None)
            == r"SELECT * FROM public.__test_like_table WHERE a LIKE $1"
        )

    @async_test
    async def test_a_table_create(self):
        await _TestLikeTable.create(None)

    @async_test
    async def test_b_table_insert(self):
        await _TestLikeTable.insert(None, a="foo")
        await _TestLikeTable.insert(None, a="bar")
        await _TestLikeTable.insert(None, a="foobar")

    @async_test
    async def test_c_table_fetch(self):
        # "foo" and "foobar" match foo%; "bar" does not.
        records = await _TestLikeTable.fetch(None, a__like=r"foo%")
        assert len(records) == 2  # type: ignore

    @async_test
    async def test_d_table_fetch_insensitive(self):
        # ILIKE is case-insensitive, so FoO% matches the same two rows.
        records = await _TestLikeTable.fetch(None, a__ilike=r"FoO%")
        assert len(records) == 2  # type: ignore

    @async_test
    async def test_e_table_delete(self):
        await _TestLikeTable.drop(None)
|
from tkinter import *
from utility import *
import os
from tkinter import messagebox
import threading
from os import listdir
from os.path import isfile, join
from pygame import mixer # Load the popular external library
import time
from mutagen.mp3 import MP3
def main(root, WIDTH, HEIGHT, wu, hu):
    """Build the music-player screen inside `root`.

    wu/hu are presumably per-unit width/height (1% of the window) used for
    relative placement -- TODO confirm against the caller.
    """
    # These widgets are updated from stop()/playSongs(), hence module globals.
    global songFolderEntry, currentFolderLabel, currentSongLabel
    root.config(bg = "black")
    heading = Label(root, text = "Music Player", font = ( "", int(calculateFontSize(WIDTH, hu*20) * 1.5)), fg = "black", bg = "#bdbdbd")
    heading.place(x = 0, y = 0, width = WIDTH, height = hu*20)
    instruction = Label(root, text = "Enter the path to your music folder below.", font = ( "", int(calculateFontSize(wu*70, hu*15) )), fg = "white", bg = "black", justify = LEFT)
    instruction.place(x = 5*wu, y = 25*hu, height = hu*15)
    songFolderEntry = Entry(root, font = ("", int(calculateFontSize(wu * 90, hu*10)*2/3)), highlightbackground="red")
    songFolderEntry.place(x = 5*wu, y = 45*hu, width = wu*90, height = hu*10)
    currentFolderLabel = Label(root, text = "Current Playing Folder : N/A", font = ( "", int(calculateFontSize(wu*50, hu*10) )), fg = "white", bg = "black", justify = LEFT)
    currentFolderLabel.place(x = 5*wu, y = 55*hu, height = hu*10)
    currentSongLabel = Label(root, text = "Current Playing Song : N/A", font = ( "", int(calculateFontSize(wu*50, hu*10) )), fg = "white", bg = "black", justify = LEFT)
    currentSongLabel.place(x = 5*wu, y = 65*hu, height = hu*10)
    # NOTE(review): the same local name `playButton` is reused for the
    # Play, Go Back and Stop buttons; harmless here since the references
    # are not needed after placement.
    playButton = RoundedButton(root, "black", 25*wu, 25*hu, "Play", int(calculateFontSize(25*wu, 25*hu)*1.2), playSongs, "#00ff6e")
    playButton.place(x = 5*wu, y = 75*hu, width = 25*wu, height = 25*hu)
    playButton = RoundedButton(root, "black", 30*wu, 25*hu, "Go Back", int(calculateFontSize(30*wu, 25*hu)*1.2), root.place_forget, "#00bbff")
    playButton.place(x = 35*wu, y = 75*hu, width = 30*wu, height = 25*hu)
    playButton = RoundedButton(root, "black", 25*wu, 25*hu, "Stop", int(calculateFontSize(25*wu, 25*hu)*1.2), stop, "#00ff6e")
    playButton.place(x = 70*wu, y = 75*hu, width = 25*wu, height = 25*hu)
    # Restore the previously playing folder/song labels, if any.
    userActivity = getUserActivity()
    if userActivity is not None:
        if "musicPlayer" in userActivity:
            currentFolderLabel['text'] = f"Current Playing Folder : {userActivity['musicPlayer']['currentFolder']}"
            currentSongLabel['text'] = f"Current Playing Song : {userActivity['musicPlayer']['currentSong']}"
    root.mainloop()
def stop():
    """Signal the playback thread to stop and reset the status labels."""
    modifyUserActivity({
        "musicPlayer": {"playingSongs": False}
    })
    try:
        currentFolderLabel["text"] = "Current Playing Folder : N/A"
        # BUG FIX: this label previously said "Current Playing Folder : N/A"
        # (copy-paste); it is the *song* label.
        currentSongLabel["text"] = "Current Playing Song : N/A"
    except Exception:
        # Labels may not exist yet (stop() before main() built the UI).
        pass
def playSongs():
    """Validate the entered folder and start a daemon thread that loops
    over its .mp3 files with pygame's mixer until stop() clears the
    playingSongs flag in the shared user-activity store."""
    songFolderPath = songFolderEntry.get()
    if not os.path.exists(songFolderPath):
        messagebox.showerror("Path Location Error", "The path entered is not a valid path.")
        return
    def play(songFolderPath):
        # Publish the playing state so stop() (and other screens) can see it.
        modifyUserActivity({
            "musicPlayer":{
                "playingSongs":True,
                "currentFolder": songFolderPath
            }
        })
        currentFolderLabel["text"] = f"Current Playing Folder : {songFolderPath}"
        mixer.init()
        while True:
            # Re-scan the folder on every pass so new files are picked up.
            files = [join(songFolderPath, f) for f in listdir(songFolderPath) if isfile(join(songFolderPath, f))]
            musicFiles = [file for file in files if file.endswith(".mp3")]
            if len(musicFiles) == 0:
                messagebox.showerror("No Song Found", "There were no mp3 files found in the path entered.")
                return
            continueToPlaySong = True
            for musicFile in musicFiles:
                musicFileAddedActivity = getUserActivity()
                musicFileAddedActivity["musicPlayer"]["currentSong"] = musicFile
                modifyUserActivity(musicFileAddedActivity)
                mixer.music.load(musicFile)
                mixer.music.play()
                audio = MP3(musicFile)
                lengthOfMusic = int(audio.info.length) # In Seconds
                start = time.time()
                currentSongLabel["text"] = f"Current Playing Song : {musicFile}"
                # Poll the shared flag while the song plays so stop() takes
                # effect within ~50ms instead of at the end of the track.
                while time.time() - start < lengthOfMusic:
                    userActivity = getUserActivity()["musicPlayer"]
                    if not userActivity["playingSongs"]:
                        continueToPlaySong = False
                    if not continueToPlaySong:
                        break
                    time.sleep(0.05)
                if not continueToPlaySong:
                    break
            if not continueToPlaySong:
                break
        mixer.quit()
    # Daemon thread so the app can exit without waiting for playback.
    playThread = threading.Thread(target = lambda : play(songFolderPath))
    playThread.daemon = True
    playThread.start()
|
import os
import sys
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
import torchvision.transforms as transforms
from PIL.Image import CUBIC
from time import time
from opts import parse_opts
from model import generate_model
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)
from temporal_transforms import LoopPadding, TemporalRandomCrop, TemporalCenterCrop
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_training_set, get_validation_set, get_test_set
from utils import Logger, get_hms
from train import train_epoch
from validation import val_epoch
import test
if __name__ == '__main__':
    # ---- options, model and loss -------------------------------------
    opt = parse_opts()
    torch.manual_seed(opt.manual_seed)
    model, parameters = generate_model(opt)
    #print(model)
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()
    if opt.bayesian:
        # Wrap the loss for variational (Bayesian) training.
        from models.BayesianLayers.BBBlayers import GaussianVariationalInference
        criterion = GaussianVariationalInference(criterion)
    # NOTE(review): normalization is hard-wired to identity (mean 0, std 1);
    # the original option-driven branches below are unreachable.
    #if opt.no_mean_norm and not opt.std_norm:
    if True:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)
    # ---- training data pipeline --------------------------------------
    if not opt.no_train:
        assert opt.train_crop in ['random', 'corner', 'center', 'rescale']
        assert opt.train_temporal_crop in ['center', 'random']
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(
                opt.scales, opt.sample_size, crop_positions=['c'])
        elif opt.train_crop == 'rescale':
            #crop_method = transforms.Resize((opt.sample_size, opt.sample_size), interpolation=CUBIC)
            crop_method = Scale(opt.sample_size, interpolation=CUBIC)
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        # Default temporal crop is center; switched to random on request.
        temporal_transform = TemporalCenterCrop(opt.sample_duration)
        if opt.train_temporal_crop == 'random':
            temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(
            training_data,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=opt.n_threads,
            pin_memory=True)
        # Loggers append (rather than truncate) when resuming.
        train_logger = Logger(
            opt.result_path_logs+'_train.log',
            ['epoch', 'loss', 'acc', 'lr'], bool(opt.resume_path))
        train_batch_logger = Logger(
            opt.result_path_logs+'_train_batch.log',
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'], bool(opt.resume_path))
        # Nesterov momentum requires zero dampening.
        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening
        if opt.optimizer == 'sgd':
            optimizer = optim.SGD(
                parameters,
                lr=opt.learning_rate,
                momentum=opt.momentum,
                dampening=dampening,
                weight_decay=opt.weight_decay,
                nesterov=opt.nesterov)
        elif opt.optimizer == 'adam' or opt.optimizer == 'amsgrad':
            optimizer = optim.Adam(
                parameters,
                lr=opt.learning_rate,
                weight_decay=opt.weight_decay,
                amsgrad=(opt.optimizer=='amsgrad'))
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, 'min', patience=opt.lr_patience)
        # Free the construction-time objects before the training loop.
        del training_data, target_transform, temporal_transform, spatial_transform, parameters, crop_method
    # ---- validation data pipeline ------------------------------------
    if not opt.no_val:
        if opt.train_crop == 'rescale':
            spatial_transform = Compose([
                #transforms.Resize((opt.sample_size, opt.sample_size), interpolation=CUBIC),
                Scale(opt.sample_size, interpolation=CUBIC),
                ToTensor(opt.norm_value), norm_method
            ])
        else:
            spatial_transform = Compose([
                Scale(opt.sample_size),
                CenterCrop(opt.sample_size),
                ToTensor(opt.norm_value), norm_method
            ])
        assert opt.val_temporal_crop in ['loop', 'random', 'center']
        temporal_transform = LoopPadding(opt.sample_duration)
        if opt.val_temporal_crop == 'center':
            temporal_transform = TemporalCenterCrop(opt.sample_duration)
        elif opt.val_temporal_crop == 'random':
            temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        validation_data = get_validation_set(
            opt, spatial_transform, temporal_transform, target_transform)
        val_loader = torch.utils.data.DataLoader(
            validation_data,
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=opt.n_threads,
            pin_memory=True)
        val_logger = Logger(
            opt.result_path_logs+'_val.log', ['epoch', 'loss', 'acc', 'acc_mean', 'acc_vote'], bool(opt.resume_path))
        uncertainty_logger = Logger(
            opt.result_path_logs+'_uncertainty.log', ['epoch',
            'epistemic', 'aleatoric', 'random_param_mean', 'random_param_log_alpha',
            'total_param_mean', 'total_param_log_alpha'], bool(opt.resume_path))
        del validation_data, target_transform, temporal_transform, spatial_transform
    # ---- optional checkpoint resume ----------------------------------
    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if not opt.no_train:
            optimizer.load_state_dict(checkpoint['optimizer'])
            # Reset the LR to the CLI value rather than the saved one.
            for param_group in optimizer.param_groups:
                param_group['lr'] = opt.learning_rate
        del checkpoint
    # ---- train / validate loop ---------------------------------------
    start_time = time()
    print('run')
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            train_epoch(i, train_loader, model, criterion, optimizer, opt,
                        train_logger, train_batch_logger)
        if not opt.no_val:
            validation_loss = val_epoch(i, val_loader, model, criterion, opt,
                                        val_logger, uncertainty_logger)
        if not opt.no_train and not opt.no_val:
            # Plateau scheduler keys off the validation loss.
            scheduler.step(validation_loss)
        elapsed_time = time() - start_time
        print('| Elapsed time : %d:%02d:%02d' %(get_hms(elapsed_time)))
    # ---- optional test pass ------------------------------------------
    if opt.test:
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = VideoID()
        test_data = get_test_set(opt, spatial_transform, temporal_transform,
                                 target_transform)
        test_loader = torch.utils.data.DataLoader(
            test_data,
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=opt.n_threads,
            pin_memory=True)
        test.test(test_loader, model, opt, test_data.class_names)
|
class Solution(object):
    """Best meeting point: given a 0/1 grid of homes, find the minimum total
    Manhattan distance from all homes to one meeting cell.

    The optimum is the median along each axis; for a sorted coordinate list
    the distance to the median equals the sum of gaps between symmetric
    extreme pairs, so no explicit median is needed.
    """

    def minDistance1D(self, points):
        """Total distance from all `points` (sorted ascending) to their median."""
        distance = 0
        i, j = 0, len(points) - 1
        while i < j:
            distance += points[j] - points[i]
            i += 1
            j -= 1
        return distance

    def minTotalDistance(self, grid):
        """Minimum total travel distance for all 1-cells in `grid`.

        :type grid: List[List[int]]
        :rtype: int
        """
        rows = self.collectRows(grid)
        cols = self.collectCols(grid)
        # BUG FIX: the old code computed rows[len(rows) / 2] and
        # cols[len(cols) / 2]; `/` yields a float on Python 3, so indexing
        # raised TypeError - and the results were never used anyway.
        return self.minDistance1D(rows) + self.minDistance1D(cols)

    def collectRows(self, grid):
        """Row indices of all 1-cells, in sorted (row-major scan) order."""
        rows = []
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                if grid[i][j] == 1:
                    rows.append(i)
        return rows

    def collectCols(self, grid):
        """Column indices of all 1-cells, in sorted (column-major scan) order."""
        cols = []
        for j in range(len(grid[0])):
            for i in range(len(grid)):
                if grid[i][j] == 1:
                    cols.append(j)
        return cols
|
"""Provide methodologies for creating the model from the data."""
import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from hoqunm.data_tools.analysis import Analyser, CartSpecs, analyse
from hoqunm.data_tools.base import (BEGIN, CART_COLUMNS, CURRENT_CLASS,
CURRENT_WARD, END, EXTERNAL, INTERNAL,
OUTPUT_DIR, SERVICE, and_query,
column_query, drop_week_arrival)
from hoqunm.data_tools.preprocessing import adjust_data
from hoqunm.simulation.evaluators import EvaluationResults
from hoqunm.simulation.hospital import HospitalSpecs
from hoqunm.utils.distributions import (Hypererlang, HypererlangSpecs,
distribution_to_rate, fit_expon,
fit_hypererlang, plot_distribution_fit,
rate_to_distribution)
from hoqunm.utils.utils import LOGGING_DIR, get_logger
pd.set_option('mode.chained_assignment', None)
class Modeller:
    """Class for creating the model with the help of Analyser class.

    A possible workflow would be:
    1. __init__ with analyser
    3. analyser.make_occupancy
    4. analyser.make_classes or analyser.set_classes
    5. self.inter_arrival_fit
    6. self.service_fit
    7. self.routing
    (8. self.adjust_fits)

    :param analyser: The analyser (already finished) to use.
    :param logger: Logger for logging.
    :param output_dir: Output directory for plots.
    """

    def __init__(self,
                 analyser: Analyser,
                 logger: Optional[logging.Logger] = None,
                 output_dir: Path = OUTPUT_DIR):
        self.analyser = analyser
        # The specs object is shared with (not copied from) the analyser and
        # is mutated in place by inter_arrival_fit/service_fit/routing below.
        self.hospital_specs = self.analyser.hospital_specs
        self.output_dir = output_dir
        self.logger = logger if logger is not None else get_logger(
            "modeller", self.output_dir.joinpath("modeller.log"))

    def copy(self) -> "Modeller":
        """Copy self.

        The analyser is copied via its own copy(); logger and output_dir are
        shared with the original instance.

        :return: New instance with same attributes.
        """
        out = Modeller(analyser=self.analyser.copy(),
                       logger=self.logger,
                       output_dir=self.output_dir)
        return out

    def make_cart(
            self,
            cart_specs: Optional[CartSpecs] = None
    ) -> Tuple[Dict[Any, Any], Any]:
        """Make cart analysis.

        :param cart_specs: Specifications for CART analysis.
        :return: Code map for columns converted to categorical, created CART graphs.
        """
        if cart_specs is None:
            cart_specs = CartSpecs(wards=self.analyser.wards)
        code_map, graphs = self.analyser.cart_classes(cart_specs=cart_specs)
        self.logger.info(f"CART mapping: \n{code_map}")
        return code_map, graphs

    # pylint: disable=too-many-nested-blocks
    def inter_arrival_fit(self,
                          classes: Optional[List[int]] = None,
                          distributions: Optional[List[Callable[
                              [Union[List[float], np.ndarray, pd.Series]],
                              Union[Hypererlang, scipy.stats.expon]]]] = None,
                          filename="inter_arrival_fit") -> List[HospitalSpecs]:
        """compute inter arrival fit distributions from data.

        :param classes: The classes to include, if empty include all.
        :param distributions: Callables which return fitted distributions to data.
        :param filename: Filename for plot.
        :return: A numpy array holding the distributions for each ward and class.
            If multiple distributions are given, a numpy.zero array will be returned.
        """
        if classes is None:
            # Fall back to the analyser's classes; a single dummy class 0 if
            # no class analysis was made.
            if hasattr(self.analyser, "classes"):
                classes = self.analyser.classes
            else:
                classes = [0]
        if distributions is None:
            distributions = [fit_expon]
        # One object array per candidate distribution, indexed by
        # (ward, class, origin) with origin 0 = EXTERNAL, 1 = INTERNAL.
        arrivals = [
            np.zeros((len(self.analyser.wards), len(classes), 2), dtype="O")
            for _ in range(len(distributions))
        ]
        for j, origin in enumerate([EXTERNAL, INTERNAL, [INTERNAL, EXTERNAL]]):
            for ward in self.analyser.wards:
                for i, class_ in enumerate(classes):
                    qry = and_query(column_query(CURRENT_WARD, ward),
                                    column_query(CURRENT_CLASS, class_))
                    class_data = self.analyser.data.query(qry).dropna(
                        subset=[BEGIN, END])
                    class_data["Arrival"] = self.analyser.make_inter_arrival(
                        class_data, pre_ward=[origin])
                    if ward == "PACU":
                        # NOTE(review): presumably drops weekend arrivals for
                        # the PACU ward -- confirm drop_week_arrival semantics.
                        class_data = drop_week_arrival(class_data, week=True)
                    arrival_data = class_data["Arrival"].dropna()
                    distribution_fits: List[Union[Hypererlang,
                                                  scipy.stats.expon]] = []
                    if not arrival_data.empty:
                        for k, distribution_ in enumerate(distributions):
                            distribution_fits.append(
                                distribution_(arrival_data))
                            if j in [0, 1]:
                                # Only the pure EXTERNAL/INTERNAL origins are
                                # stored; the combined origin (j == 2) is
                                # plot-only.
                                # NOTE(review): distribution_fits[0] writes the
                                # FIRST candidate's fit into every arrivals[k];
                                # distribution_fits[k] may have been intended.
                                arrivals[k][self.analyser.wards_map[ward], i,
                                            j] = distribution_fits[0]
                        title = f"ward: {ward}, class: {int(class_)}, origin: {origin}"
                        plot_distribution_fit(arrival_data,
                                              distribution_fits,
                                              title=title)
                        d = ", ".join([d.name for d in distribution_fits])
                        filename_ = filename + f" - distributions[{d}] - ward[{ward}] - " \
                                               f"class[{int(class_)}] - origin[{origin}].pdf"
                        plt.savefig(self.output_dir.joinpath(filename_))
                        plt.close()
        # The first candidate becomes the default arrival on
        # self.hospital_specs; one HospitalSpecs copy is returned per
        # candidate distribution.
        self.hospital_specs.set_arrival(arrivals[0])
        hospital_specs = [
            self.hospital_specs.copy() for _ in range(len(distributions))
        ]
        for specs, arrival in zip(hospital_specs, arrivals):
            specs.set_arrival(arrival)
        return hospital_specs

    def service_fit(
            self,
            classes: Optional[List[int]] = None,
            distributions: Optional[List[
                Callable[[Union[List[float], np.ndarray, pd.Series]],
                         Union[Hypererlang, scipy.stats.expon]]]] = None,
            filename="service_fit",
    ) -> List[HospitalSpecs]:
        """Compute service fit distributions from data.

        :param classes: The classes to include, if empty include all.
        :param distributions: Callables which return fitted distributions to data.
        :param filename: The filename for plot saving.
        :return: A numpy array holding the distributions for each ward and class.
            If multiple distributions are given, a numpy.zero array will be returned.
        """
        if classes is None:
            if hasattr(self.analyser, "classes"):
                classes = self.analyser.classes
            else:
                classes = [0]
        if distributions is None:
            distributions = [fit_expon]
        # One (ward, class) object array per candidate distribution.
        services = [
            np.zeros((len(self.analyser.wards), len(classes)), dtype="O")
            for _ in range(len(distributions))
        ]
        self.analyser.make_service()
        self.logger.info(f"Modell for service.")
        for ward in self.analyser.wards:
            for i, class_ in enumerate(classes):
                qry = and_query(column_query(CURRENT_WARD, ward),
                                column_query(CURRENT_CLASS, class_))
                class_data = self.analyser.data.query(qry)
                service_data = class_data[SERVICE].dropna()
                distribution_fits: List[Union[Hypererlang,
                                              scipy.stats.expon]] = []
                if not service_data.empty:
                    for j, distribution_ in enumerate(distributions):
                        distribution_fit = distribution_(service_data)
                        distribution_fits.append(distribution_fit)
                        title = f"Ward: {ward}, Class: {int(class_)}"
                        plot_distribution_fit(service_data, [distribution_fit],
                                              title=title)
                        # NOTE(review): the default `filename` contains no {}
                        # placeholders, so str.format is a no-op here --
                        # confirm the intended filename template.
                        filename_ = filename.format(distribution_fit.name,
                                                    ward, int(class_))
                        plt.savefig(
                            self.output_dir.joinpath(f"{filename_}.pdf"))
                        plt.close()
                        services[j][self.analyser.wards_map[ward],
                                    i] = distribution_fit
                    # Combined plot with all candidate fits for comparison.
                    title = f"ward: {ward}, class: {int(class_)}"
                    plot_distribution_fit(service_data,
                                          distribution_fits,
                                          title=title)
                    d = ", ".join([d.name for d in distribution_fits])
                    filename_ = filename + f" - distributions[{d}] - ward[{ward}] - " \
                                           f"class[{int(class_)}].pdf"
                    plt.savefig(self.output_dir.joinpath(filename_))
                    plt.close()
        # First candidate becomes the default on self.hospital_specs; one
        # HospitalSpecs copy is returned per candidate distribution.
        self.hospital_specs.set_service(services[0])
        hospital_specs = [
            self.hospital_specs.copy() for _ in range(len(distributions))
        ]
        for specs, service in zip(hospital_specs, services):
            specs.set_service(service)
        return hospital_specs

    def routing(self) -> np.ndarray:
        """Computing routing from data.

        :return: Routing matrix.
        """
        assert hasattr(self.analyser, "classes")
        # Shape: (ward_from, class_from, ward_to + 1, class_to); the extra
        # target ward index holds the probability of leaving the system.
        routing_ = np.zeros(
            (len(self.analyser.wards), len(self.analyser.classes),
             len(self.analyser.wards) + 1, len(self.analyser.classes)),
            dtype="float")
        data = self.analyser.data.copy()
        for ward1 in self.analyser.wards:
            for ward2 in self.analyser.wards:
                for c1, class1 in enumerate(self.analyser.classes):
                    for c2, class2 in enumerate(self.analyser.classes):
                        routing_[self.analyser.wards_map[ward1], c1,
                                 self.analyser.wards_map[ward2],
                                 c2] = self.analyser.compute_routing(
                                     data, ward1, ward2, class1, class2)
        # Remaining probability mass goes to the "exit" index, class 0.
        routing_[:, :, len(self.analyser.wards),
                 0] = 1 - routing_.sum(axis=(2, 3))
        self.hospital_specs.routing = routing_
        return routing_

    def adjust_fits(self) -> HospitalSpecs:
        """Adjust arrival and routing.

        Assume that arrival, service and routing exist all in
        hospital_specs.
        """
        # Transposed occupancy -- presumably shape (ward, occupancy level);
        # TODO confirm against Analyser.occupancy.
        occupancy = np.array(self.analyser.occupancy).T
        self.hospital_specs.arrival = self.adjust_arrival(
            arrival=self.hospital_specs.arrival,
            capacities=self.hospital_specs.capacities,
            occupancy=occupancy)
        self.hospital_specs.routing = self.adjust_routing(
            routing=self.hospital_specs.routing,
            capacities=self.hospital_specs.capacities,
            holdings=self.hospital_specs.holdings,
            occupancy=occupancy)
        return self.hospital_specs.copy()

    @staticmethod
    def adjust_routing(routing: np.ndarray,
                       capacities: np.ndarray,
                       holdings: List[bool],
                       occupancy: np.ndarray,
                       logger: Optional[logging.Logger] = None) -> np.ndarray:
        """Adjust routing.

        :param routing: Routing to consider.
        :param capacities: Capacities to consider.
        :param holdings: If a ward holds, do not adjust its routing.
        :param occupancy: Occupancy to consider.
        :param logger: Logger to use.
        :return: Adjusted routing matrix.
        """
        if logger is None:
            logger = get_logger("adjust_routing",
                                LOGGING_DIR.joinpath("adjust_routing.log"))
        I = len(capacities)
        # adjust routing!
        routing = routing.copy()
        routing_ = routing.copy()
        for index, val in np.ndenumerate(routing):
            # Scale transitions into non-exit target wards (index[2]) by the
            # probability of the target ward not being full.
            if (not holdings[index[0]]) and (index[2] != routing.shape[2] - 1):
                routing_[index] = val / (
                    1 - occupancy[index[2], capacities[index[2]]])
        # Re-normalise: the exit column absorbs the remaining mass.
        routing_[:, :, I, 0] = 1 - routing_[:, :, :I, :].sum(axis=(2, 3))
        if np.any(routing_ < 0) or np.any(routing_ > 1):
            # Scaling may produce invalid probabilities; clip at 0 and
            # renormalise each offending (ward, class) slice to sum to 1.
            for index in np.ndindex(routing_.shape[:2]):
                if np.any(routing_[index] < 0) or np.any(routing_[index] > 1):
                    logger.warning(
                        f"Routing issue on index: {index}\n"
                        f"Re-adjusting the matrix:\n{routing_[index]}")
                    routing_[index] = np.maximum(routing_[index],
                                                 0) / np.maximum(
                                                     routing_[index], 0).sum()
        return routing_

    @staticmethod
    def adjust_arrival(arrival: np.ndarray, capacities: np.ndarray,
                       occupancy: np.ndarray) -> np.ndarray:
        """Adjust arrival distributions.

        :param arrival: Arrival distributions to consider.
        :param capacities: Capacities to consider.
        :param occupancy: Occupancy to consider.
        :return: Adjusted arrival.
        """
        arrival = arrival.copy()
        arrival_ = arrival.copy()
        for index, val in np.ndenumerate(arrival):
            if val != 0:
                # Replace each non-zero fit by an exponential whose mean is
                # scaled by (1 - P(ward full)) for the ward index[0].
                # NOTE(review): scaling the mean DOWN increases the rate --
                # confirm this is the intended compensation direction.
                arrival_[index] = scipy.stats.expon(
                    scale=val.mean() *
                    (1 - occupancy[index[0], capacities[index[0]]]))
        return arrival_
class Service:
    """Holds possible service information regarding different distributions.

    :param expon: Service exponential distributions.
    :param hypererlang: Service hypererlang distributions.
    """

    def __init__(self,
                 expon: Optional[np.ndarray] = None,
                 hypererlang: Optional[np.ndarray] = None):
        self.expon = expon
        self.hypererlang = hypererlang

    def save_dict(self) -> Dict[str, List[Any]]:
        """Make class details into dict for saving.

        :return: Dict for saving.
        """
        serialised: Dict[str, List[Any]] = {}
        for key in ("expon", "hypererlang"):
            serialised[key] = HospitalSpecs.service_to_list(
                service=getattr(self, key))
        return serialised

    @staticmethod
    def load_dict(arguments: Dict[str, Any]) -> "Service":
        """Create class from Dict with arguments and values in it.

        :param arguments: The dict containing the parameter-argument pairs.
        :return: Class instance.
        """
        deserialised = {}
        for key, args in arguments.items():
            deserialised[key] = HospitalSpecs.service_from_list(service=args)
        return Service(**deserialised)
class WardModel:
    """Holds the ward model for different variants. This is basically intended
    to generate different models out of before made analysis.

    :param name: Ward name.
    :param arrival: Computed arrival distributions.
    :param service: Computed service distributions.
    :param routing: Computed routings.
    :param occupancy: Observed occupancy distribution for this ward.
    :param capacity: Original capacity.
    :param logger: Logger to use.
    """

    def __init__(self,
                 name: str,
                 arrival: np.ndarray,
                 service: Service,
                 routing: np.ndarray,
                 occupancy: np.ndarray,
                 capacity: int = 1,
                 logger: Optional[logging.Logger] = None):
        self.name = name
        self.arrival = arrival
        self.service = service
        self.routing = routing
        # Raw (un-truncated) occupancy; use occupancy() for a
        # capacity-truncated view.
        self._occupancy = occupancy
        self.capacity = capacity
        self.logger = logger if logger is not None else get_logger(
            "WardModel", LOGGING_DIR.joinpath("ward_model.log"))

    def specs(
            self,
            capacity: Optional[int] = None,
            service_name: str = "expon",
            adjust_arrival: bool = True,
            adjust_internal_rate: float = 0
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Create the specifications for the ward for the given parameters.

        :param capacity: Ward capacity to consider.
        :param service_name: Service distributions to use ("expon" or
            "hypererlang", i.e. an attribute of Service).
        :param adjust_arrival: If arrival should be adjusted via observed occupancy.
        :param adjust_internal_rate: Adjust internal arrival rate by given parameter.
        :return: Tuple (arrival, service, routing, occupancy).
        :raises NotImplementedError: If the requested service fit is not set.
        """
        if capacity is None:
            capacity = self.capacity
        arrival = self.arrival.copy()
        # Move a (1 - adjust_internal_rate) share of the internal arrival
        # rate (index 1) over to the external rate (index 0), keeping the
        # rest internal.
        for i, class_arrival in enumerate(arrival[0]):
            class_rates = distribution_to_rate(class_arrival)
            class_rates[0] += (1 - adjust_internal_rate) * class_rates[1]
            class_rates[1] = adjust_internal_rate * class_rates[1]
            arrival[0, i] = rate_to_distribution(class_rates)
        if adjust_arrival:
            # Compensate external arrivals for lost demand while full.
            arrival[:, :, 0] = Modeller.adjust_arrival(
                arrival=arrival[:, :, 0],
                capacities=np.array([capacity]),
                occupancy=self.occupancy(capacity=capacity).reshape((1, -1)))
        service_ = getattr(self.service, service_name)
        if service_ is not None:
            service = service_.copy()
        else:
            raise NotImplementedError("Service does not exist.")
        return arrival, service, self.routing.copy(), self.occupancy(
            capacity=capacity)

    def occupancy(self, capacity: Optional[int] = None) -> np.ndarray:
        """Compute occupancy distributions for given capacity.

        :param capacity: Capacity to use.
        :return: Adjusted occupancy.
        """
        if capacity is None:
            capacity = self.capacity
        # Truncate at the capacity and lump the probability mass of all
        # levels >= capacity into the last entry.
        occ = self._occupancy[:capacity + 1].copy()
        occ[capacity] = self._occupancy[capacity:].sum()
        return occ

    def save_dict(self) -> Dict[str, Any]:
        """Save the parameters and values of the class as a dict.

        :return: Dict for saving purposes.
        """
        arguments = {
            "name": self.name,
            "arrival": HospitalSpecs.arrival_to_list(arrival=self.arrival),
            "service": self.service.save_dict(),
            "routing": self.routing.tolist(),
            "occupancy": self._occupancy.tolist(),
            "capacity": self.capacity
        }
        return arguments

    @staticmethod
    def load_dict(arguments: Dict[str, Any]) -> "WardModel":
        """Load a class instance from saved dict.

        :param arguments: Arguments-value mapping for class instance.
        :return: Class instance for given parameters.
        :raises KeyError: If a mandatory key is missing.
        """
        if not all(arg in arguments
                   for arg in ["name", "arrival", "service", "routing"]):
            raise KeyError
        return WardModel(name=arguments["name"],
                         arrival=HospitalSpecs.arrival_from_list(
                             arrival=arguments["arrival"]),
                         service=Service.load_dict(arguments["service"]),
                         routing=np.array(arguments["routing"]),
                         occupancy=np.array(arguments["occupancy"]),
                         capacity=arguments.get("capacity", 1))
class HospitalModel:
    """Model of the hospital. Basically holds all wards to consider and logic
    to compute different models from it.

    :param wards: Wards to consider.
    :param logger: Logger to use.
    """

    def __init__(self,
                 wards: List[WardModel],
                 logger: Optional[logging.Logger] = None):
        self.logger = logger if logger is not None else get_logger(
            "HospitalModel", LOGGING_DIR.joinpath("hospital_model.log"))
        # Name -> model map plus index <-> name lookups that preserve the
        # order in which the wards were passed.
        self.wards = {ward.name: ward for ward in wards}
        self.ward_map_inv: Dict[int, str] = {
            i: ward.name
            for i, ward in enumerate(wards)
        }
        self.ward_map = {
            ward: index
            for index, ward in self.ward_map_inv.items()
        }

    @property
    def ward_names(self) -> List[str]:
        """Return used ward names (sorted).

        :return: Sorted ward names.
        """
        ward_names_ = list(self.wards)
        ward_names_.sort()
        return ward_names_

    def occupancy(self, **capacities) -> np.ndarray:
        """Compute occupancy distributions for given capacities.

        :param capacities: Capacities for each ward.
        :return: Adjusted occupancy distributions, one row per selected ward
            (sorted by name), zero-padded to the maximal capacity.
        """
        ward_occupancies = []
        max_index = max(list(capacities.values())) + 1
        for ward_name in self.ward_names:
            # NOTE: a capacity of 0 is treated like a missing ward here.
            if capacities.get(ward_name, None):
                ward_occupancies.append(
                    np.pad(
                        self.wards[ward_name].occupancy(capacities[ward_name]),
                        (0, max_index - capacities[ward_name])).reshape(1, -1))
        return np.concatenate(ward_occupancies)

    def occupancy_as_evaluator(self, **capacities) -> EvaluationResults:
        """Compute occupancy distributions and return them as
        EvaluationResults.

        :param capacities: Capacities for each ward.
        :return: Adjusted occupancy distributions as EvaluationResults.
        """
        occupancy = self.occupancy(**capacities)
        # Unadjusted specs: this wraps the *observed* occupancy, so neither
        # arrival nor routing adjustment is applied.
        hospital_specs = self.hospital_specs(capacities=capacities,
                                             adjust_arrival=False,
                                             adjust_routing=False)
        evaluation_results = EvaluationResults(hospital_specs=hospital_specs)
        evaluation_results.distributions.occupancy = occupancy
        evaluation_results.name = "Real observation"
        return evaluation_results

    def hospital_specs(self,
                       capacities: Optional[Dict[str, int]] = None,
                       service_name: str = "expon",
                       adjust_arrival: bool = True,
                       adjust_int_rates: Optional[Dict[str, float]] = None,
                       adjust_routing: bool = True) -> HospitalSpecs:
        """For the given parameters, compute a HospitalSpecs instance.

        :param capacities: Capacities to consider.
        :param service_name: Service to use ("expon" or "hypererlang").
        :param adjust_arrival: Adjust outside arrival from occupancy distributions.
        :param adjust_int_rates: Adjust internal arrival rate with given parameter.
        :param adjust_routing: Adjust internal routing by occupancy distributions.
        :return: Computed HospitalSpecs.
        :raises ValueError: If none of the given wards is known.
        """
        if capacities is None:
            capacities = {
                ward_name: ward.capacity
                for ward_name, ward in self.wards.items()
            }
        if len([ward for ward in capacities if ward in self.wards]) == 0:
            raise ValueError("No valid wards given.")
        if adjust_int_rates is None:
            adjust_int_rates = {ward: 0.0 for ward in capacities}
        specs = []
        for ward_name in self.ward_names:
            if capacities.get(ward_name, None):
                specs.append(self.wards[ward_name].specs(
                    capacity=capacities[ward_name],
                    service_name=service_name,
                    adjust_arrival=adjust_arrival,
                    adjust_internal_rate=adjust_int_rates.get(ward_name, 0)))
        # clean routing: keep only transitions between the selected wards.
        ward_indices = [
            self.ward_map[ward] for ward in capacities if ward in self.ward_map
        ]
        ward_indices.sort()
        arrival = np.concatenate([spec[0] for spec in specs])
        service = np.concatenate([spec[1] for spec in specs])
        routing = np.concatenate(
            [np.array([spec[2][:, ward_indices, :]]) for spec in specs])
        max_capacity = max(list(capacities.values())) + 1
        occupancy = np.concatenate([
            np.array([np.pad(spec[3], (0, max_capacity - spec[3].shape[0]))])
            for spec in specs
        ])
        # pylint: disable=unsubscriptable-object
        # Append the exit column: probability mass not routed to any of the
        # selected wards leaves the system.
        out_routing = 1 - routing.sum(axis=(2, 3), keepdims=True)
        out_routing = np.pad(out_routing,
                             pad_width=[
                                 (0, 0), (0, 0), (0, 0),
                                 (0, routing.shape[3] - out_routing.shape[3])
                             ])
        routing = np.concatenate([routing, out_routing], axis=2)
        if adjust_routing:
            routing = Modeller.adjust_routing(
                routing=routing,
                capacities=np.array(list(capacities.values())),
                holdings=[False] * len(capacities),
                occupancy=occupancy,
                logger=self.logger)
        ward_map = {
            index: ward
            for index, ward in self.ward_map_inv.items() if ward in capacities
        }
        capacities_ = np.zeros((len(ward_map)), dtype=int)
        for idx, ward in ward_map.items():
            capacities_[idx] = capacities[ward]
        return HospitalSpecs(capacities=capacities_,
                             arrival=arrival,
                             service=service,
                             routing=routing,
                             ward_map=ward_map)

    def get_model(self,
                  model: int = 1,
                  capacities: Optional[Dict[str, int]] = None,
                  service_name: str = "expon",
                  adjust_int_rates: Optional[Dict[str, float]] = None,
                  waitings: Optional[Dict[str, List[str]]] = None):
        """Create the model (models 1,2,3 are possible).

        Model 1 adjusts arrival and routing; models 2 and 3 adjust arrival
        only and let all wards hold; model 3 additionally applies the given
        internal-arrival adjust rates.

        :param model: Model to use.
        :param capacities: Capacities to use.
        :param service_name: Service distributions to use.
        :param adjust_int_rates: Internal arrival adjust rates to use.
        :param waitings: Waiting map to use.
        :return: Specific model HospitalSpecs.
        :raises ValueError: If model is not 1, 2 or 3.
        """
        if model not in [1, 2, 3]:
            raise ValueError(
                f"Model should be in [1,2,3]. Provided model: {model}.")
        adjust_arrival = model in [1, 2, 3]
        adjust_int_rates = None if model in [1, 2] else adjust_int_rates
        adjust_routing = model == 1
        hospital_specs = self.hospital_specs(capacities=capacities,
                                             service_name=service_name,
                                             adjust_arrival=adjust_arrival,
                                             adjust_int_rates=adjust_int_rates,
                                             adjust_routing=adjust_routing)
        if model in [2, 3]:
            hospital_specs.set_holdings(
                **{ward: True
                   for ward in hospital_specs.ward_map_inv})
        if waitings is not None:
            hospital_specs.set_waitings(**waitings)
        return hospital_specs

    def save_dict(self) -> Dict[str, List[Any]]:
        """Save class instance to dict.

        :return: Argument value mapping.
        """
        arguments = {
            "wards": [ward.save_dict() for ward in self.wards.values()]
        }
        return arguments

    def save(self, filepath: Path = Path()) -> None:
        """Save the HospitalModel to json via save_dict.

        :param filepath: Path to save to.
        """
        arguments_dict = self.save_dict()
        with open(filepath, "w") as f:
            json.dump(arguments_dict, f)

    @staticmethod
    def load_dict(arguments: Dict[str, Any]) -> "HospitalModel":
        """Create class instance from dict.

        :param arguments: Arguments value mapping for class call; may
            optionally contain a "logger" entry (see load()).
        :return: Class instance.
        """
        wards = [
            WardModel.load_dict(ward_arguments)
            for ward_arguments in arguments["wards"]
        ]
        # Bug fix: the optional logger injected by load() was previously
        # ignored, so loaded models always created their own default logger.
        return HospitalModel(wards=wards, logger=arguments.get("logger"))

    @staticmethod
    def load(filepath: Path = Path(),
             logger: Optional[logging.Logger] = None) -> "HospitalModel":
        """Load the HospitalModel from json via load_dict.

        :param filepath: Filepath to load from.
        :param logger: Logger to use.
        :return: Loaded instance of self.
        """
        if logger is None:
            logger = get_logger("hospital_model",
                                LOGGING_DIR.joinpath("hospital_model.log"))
        with open(filepath, "r") as f:
            arguments = json.load(f)
        arguments["logger"] = logger
        return HospitalModel.load_dict(arguments)
def make_hospital_model(
        filepath: Path,
        wards: List[str],
        capacities: List[int],
        startdate: datetime = datetime(2019, 1, 1),
        enddate: datetime = datetime(2019, 12, 1),
        cart_specs: Optional[CartSpecs] = None,
        hypererlang_specs: Optional[HypererlangSpecs] = None,
        adjust_pacu_occupancy: bool = True,
        output_dir: Path = OUTPUT_DIR,
        logger: Optional[logging.Logger] = None) -> List[HospitalModel]:
    """Make hospital model.

    Runs the full pipeline (preprocess -> analyse -> model) twice: once with
    a single patient class and once with CART-derived classes, saving one
    HospitalModel json per run.

    :param filepath: Path to excel file to analyse.
    :param wards: Wards to consider and their respective capacities.
    :param capacities: Capacities for wards.
    :param startdate: Startdate to use.
    :param enddate: Enddate to use.
    :param cart_specs: Specifications for CART analysis.
    :param hypererlang_specs: Specifications for hypererlang fit.
    :param adjust_pacu_occupancy: Adjust pacu occupancy because of weekends.
    :param output_dir: Output_dir to use for plots.
    :param logger: Logger to use for logging.
    :return: Created HospitalModels.
    """
    # Each run gets its own timestamped output directory.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = output_dir.joinpath("Modelling - " + timestamp)
    output_dir.mkdir()
    if logger is None:
        logger = get_logger(__file__,
                            file_path=output_dir.joinpath(
                                f"{Path(__file__).resolve().stem}.log"))
    if hypererlang_specs is None:
        hypererlang_specs = HypererlangSpecs()
    if cart_specs is None:
        cart_specs = CartSpecs(wards=wards)
    preprocessor = adjust_data(filepath=filepath,
                               wards=wards,
                               startdate=startdate,
                               enddate=enddate,
                               keep_internal=True,
                               logger=logger)
    analyser = analyse(preprocessor=preprocessor,
                       wards=wards,
                       capacities=capacities,
                       output_dir=output_dir,
                       adjust_pacu_occupancy=adjust_pacu_occupancy,
                       logger=logger)
    plt.close()
    hospital_models: List[HospitalModel] = []
    # cart=False: one class for all patients; cart=True: CART-derived classes.
    for cart in [False, True]:
        modeller = Modeller(analyser=analyser,
                            logger=logger,
                            output_dir=output_dir)
        c = "multiple classes" if cart else "one class"
        modeller.logger.info(f"Build model for {c}.")
        modeller = model_class_arrival_routing(modeller=modeller,
                                               make_cart=cart,
                                               cart_specs=cart_specs)
        plt.close()
        # Fit both candidate service distributions; result index 0 is the
        # exponential fit, index 1 the hypererlang fit.
        hospital_specs_service = modeller.service_fit(
            distributions=[
                fit_expon,
                lambda x: fit_hypererlang(x, specs=hypererlang_specs)
            ],
            filename=f"service_fit - cart[{cart}]")
        plt.close()
        ward_models = []
        for ward in wards:
            # Slice out this ward's arrival/service/routing rows.
            ward_index = hospital_specs_service[0].ward_map_inv[ward]
            arrival = hospital_specs_service[0].arrival[ward_index:ward_index +
                                                        1]
            service = Service(
                expon=hospital_specs_service[0].service[ward_index:ward_index +
                                                        1],
                hypererlang=hospital_specs_service[1].
                service[ward_index:ward_index + 1])
            occupancy = np.array(modeller.analyser.occupancies[0][ward])
            # Drop the exit column (last target index) from the routing.
            routing = hospital_specs_service[0].routing[ward_index, :, :-1, :]
            # NOTE(review): capacity is not passed and defaults to 1 --
            # confirm it is set elsewhere before the model is used.
            ward_models.append(
                WardModel(name=ward,
                          arrival=arrival,
                          service=service,
                          routing=routing,
                          occupancy=occupancy))
        plt.close()
        hospital_model = HospitalModel(wards=ward_models)
        filename = f"HospitalModel - cart[{cart}].json"
        hospital_model.save(filepath=output_dir.joinpath(filename))
        modeller.logger.info(
            f"Model for {c} saved in {output_dir.joinpath(filename)}.")
        hospital_models.append(hospital_model)
    return hospital_models
def model_class_arrival_routing(
        modeller: Modeller,
        make_cart: bool = False,
        cart_specs: Optional[CartSpecs] = None,
        output_dir: Path = OUTPUT_DIR,
) -> Modeller:
    """Model cart classes, arrival and routing.

    :param modeller: Modeller instance to use.
    :param make_cart: Make cart classes.
    :param cart_specs: Specifications for cart.
    :param output_dir: Output_dir to use for plots.
    :return: Modeller instance with analysed information.
    """
    if make_cart:
        if cart_specs is None:
            cart_specs = CartSpecs(wards=modeller.analyser.wards)
        modeller.logger.info("Make cart based on {}.".format(CART_COLUMNS))
        _, graphs = modeller.make_cart(cart_specs)
        # Rendering the .gv decision trees is best-effort (it needs a working
        # graphviz installation); failures are non-fatal but are now logged
        # instead of being silently swallowed.
        # pylint: disable=broad-except
        try:
            for ward, graph in graphs:
                filename = f"decision_tree - ward[{ward}].gv"
                graph.render(output_dir.joinpath(filename))
        except Exception as err:
            modeller.logger.warning(
                f"Could not render decision tree graphs: {err!r}")
    modeller.logger.info("Classes: {}.".format(modeller.analyser.classes))
    modeller.logger.info("Make routing")
    modeller.routing()
    modeller.logger.info("Make inter-arrival")
    modeller.logger.info("Inter-arrival fits")
    filename = f"inter_arrival_fit - cart[{make_cart}]"
    modeller.inter_arrival_fit(distributions=[fit_expon], filename=filename)
    return modeller
|
from __future__ import division
from openpyxl import Workbook
from array import array
#from openpyxl.utils import coordinate_from_string, column_index_from_string
from openpyxl.utils import column_index_from_string
from openpyxl.utils.cell import coordinate_from_string
from openpyxl.utils import get_column_letter
import urllib2,re
from bs4 import BeautifulSoup
import urllib,sys
from urllib2 import Request, urlopen, URLError
import openpyxl
# Python 2 only: re-expose setdefaultencoding (hidden by site.py) and make
# UTF-8 the default codec for implicit str/unicode conversions.
reload(sys)
sys.setdefaultencoding('UTF-8')
InputXlsxFile = "output/papers_authors.xlsx" #input file containing data to be processed.
AuthorkeyFile = "output/author-key map.txt" #reference output file for indexing authors
AuthorListFile = "output/author-list.txt"
wb = openpyxl.load_workbook(InputXlsxFile)
sh = wb.get_active_sheet()
# Cached sheet rows [paper_id, title, num_author, year, author1, ...];
# filled by the main loop below and read by SimilarityOfConf.
exceldata = []
# Here we calculate the similarity of two conferences which builds up the similarity matrix of conferences.
# Here we calculate the similarity of two conferences which builds up the similarity matrix of conferences.
def SimilarityOfConf(Conf1, Conf2):
    """Jaccard similarity of two conferences (years) based on their authors.

    Similarity = |authors in both| / |authors in either|; identical
    conferences are 1.0 by definition, and two conferences without any
    recorded author are 0.0 (previously a ZeroDivisionError).

    Reads the module-level `exceldata` rows
    [paper_id, title, num_author, year, author1, author2, ...].
    """
    if Conf1 == Conf2:
        return float(1.0)
    Conf1Auth = []
    Conf2Auth = []
    # Single pass over the cached sheet: gather the author URIs of every
    # paper published in either conference.
    for row in exceldata:
        num_author = row[2]
        year = row[3]
        if year == Conf1:
            Conf1Auth.extend(row[4:4 + num_author])
        if year == Conf2:
            Conf2Auth.extend(row[4:4 + num_author])
    containsBoth = set(Conf1Auth).intersection(Conf2Auth)
    containsEither = set(Conf1Auth).union(Conf2Auth)
    if not containsEither:
        # Neither conference has authors; avoid dividing by zero.
        return 0.0
    return float(len(containsBoth)) / len(containsEither)
#################******~~~~Execution starts here~~~~*******#######################
# Output files: author / conference vertex lists, the conference similarity
# matrix and the weighted author-conference bipartite graph.
f_auth = open("output/Author.txt", 'w+')
f_conf = open("output/Conf.txt",'w+')
f_conf_matrix = open("output/Conf_matrix.txt",'w+')
f_auth_conf_graph = open("output/Auth-Conf_graph.txt",'w+')
f = open(AuthorkeyFile, 'w+')
#g = open(OPMetisFile, 'w+')
h = open(AuthorListFile, 'w+')
counter=0 #row pointer of the input file.
author_count=0 #total number of authors.
author_dict = {} #list of all the authors.  Maps author URI -> 1-based index.
conf_list = [] #list of all the conferences.
for r in sh.rows: #loop over each line of input file
    eachrow = []
    counter+=1 #point to the next line of the input file
    if (counter==1): #First row is header row, so skip.
        continue
    num_author= int(r[2].value) #Get number of authors for the paper represented in that row.
    if (num_author > 0): # If there is atleast one entry of authorship for that paper.
        year = int(r[3].value)
        if year not in conf_list:
            conf_list.append(year)
        # Cache row as [paper_id, title, num_author, year, author1, ...].
        eachrow.extend([int(r[0].value),str(r[1].value.decode('utf-8')),num_author, int(r[3].value)])
        for num in range(1,num_author+1): #For each author of that paper
            auth_link = str(r[3+num].value) #get the author's URI.
            eachrow.append(auth_link)
            if auth_link not in author_dict: #If author not already in the author's dictionary then add him/her.
                author_count+=1 #update author count
                author_dict[auth_link] = author_count
        exceldata.append(eachrow)
conf_list.sort(key=int)
#Here we save the Author Vertices File.
BPGAuthorFile = ""
# Python 2 idiom: iterate authors sorted by their assigned 1-based index
# (tuple-parameter lambda is Py2-only syntax).
for key, value in sorted(author_dict.iteritems(), key=lambda (k,v): (v,k)):
    f.write("%s: %s\n" % (key, value)) #Print the entire author key mapping onto the AuthorKey file.
    BPGAuthorFile += "\n" + "A" + str(value)
    h.write(key)
    h.write("\n")
# Vertex count header precedes the vertex list.
BPGAuthorFile = str((author_count)) + BPGAuthorFile
f_auth.write(BPGAuthorFile)
del BPGAuthorFile
h.close()
f.close()
f_auth.close()
#Here we save the Conference Vertices File.
BPGconfFile = ""
for conf in conf_list:
    BPGconfFile += "\n" + "C" + str(conf)
BPGconfFile = str(len(conf_list)) + BPGconfFile
f_conf.write(BPGconfFile)
del BPGconfFile
f_conf.close()
#Here we save the Conference Similarity Matrix File.
# NOTE(review): all the "- 2001" indexing assumes conference years form a
# contiguous range starting at 2001 -- confirm for new input data.
BPGraph_ConfSimilarity = [[0 for x in range(len(conf_list))] for y in range(len(conf_list))]
for conf1 in conf_list:
    for conf2 in conf_list:
        if (conf2 >= conf1): #matrix is symmetric; compute upper triangle only.
            similarityValue = SimilarityOfConf(conf1,conf2)
            BPGraph_ConfSimilarity[conf1-2001][conf2-2001] = similarityValue
            BPGraph_ConfSimilarity[conf2-2001][conf1-2001] = BPGraph_ConfSimilarity[conf1-2001][conf2-2001]
BPGraph_string = ""
for i in range(len(conf_list)):
    for j in range(len(conf_list)):
        BPGraph_string += str(BPGraph_ConfSimilarity[i][j]) + " "
    BPGraph_string = BPGraph_string[:-1] #strip the trailing space of each row.
    BPGraph_string += "\n"
BPGraph_string = str(len(conf_list)) + "\n" + BPGraph_string
f_conf_matrix.write(BPGraph_string)
del BPGraph_ConfSimilarity
f_conf_matrix.close()
#Edges from authors to conferences. we find the weight of the edges depending on the number of times an author published in each conference.
BPGraph_dict=[[0 for x in range(len(conf_list))] for y in range(author_count)] #A Matrix of size equal to total number of authors by the number of conferences.
for counter in range(len(exceldata)):
    num_author = exceldata[counter][2]
    year = exceldata[counter][3]
    for num in range (1,num_author +1):
        auth_link = exceldata[counter][3+num]
        auth_value = author_dict.get(auth_link)
        auth_year = year - 2001 #column index; same year-offset assumption as above.
        BPGraph_dict[auth_value-1][auth_year] += 1
edgesInBPG = 0
BPGdata = ""
for auth_counter in range(author_count):
    for year_counter in range(len(conf_list)):
        BPGvalue = BPGraph_dict[auth_counter][year_counter]
        if (BPGvalue >0):
            edgesInBPG +=1
            # NOTE(review): auth_value below is computed but never used, and
            # the reverse lookup is O(n) per edge (Py2 list-returning
            # .keys()/.values()) -- candidate for removal.
            auth_value = author_dict.keys()[author_dict.values().index(auth_counter+1)]
            BPGdata += ('\nA{}\tC{}\tedge\t{}'.format((auth_counter+1), (year_counter + 2001), BPGvalue/10))
BPGdata = str(edgesInBPG) + BPGdata
f_auth_conf_graph.write(BPGdata)
del BPGdata
del BPGraph_dict
f_auth_conf_graph.close()
wb.close()
|
import os
import glob
import inspect
import warnings
import datetime
import functools
import contextlib
import collections
import numpy as np
import pandas as pd
import tqdm
from . import getdata
from . import getstatic
from .. import config
def _filter_df(df: pd.DataFrame, query: str) -> pd.DataFrame:
if '@' in query:
# In this case, we appear to be using
# external variables, so we need to
# pull in the local variables from the caller
frame = inspect.currentframe()
caller_locals = frame.f_back.f_locals
with contextlib.suppress(TypeError):
# We hop back two frames, so the caller and the caller's caller.
caller_locals.update(frame.f_back.f_back.f_locals)
else:
# Otherwise, leave it empty.
caller_locals = None
try:
return df.query(query, local_dict=caller_locals)
except TypeError:
# This may be triggered by an operation unsupported by Numexpr.
# Try to fall back to Python backend.
return df.query(query, local_dict=caller_locals, engine='python')
except pd.core.computation.ops.UndefinedVariableError:
msg = ('One of the keys does not seem to appear in some of the data. '
'This may be due to wrong spelling, or simply missing from one of the hours.')
warnings.warn(RuntimeWarning(msg))
return pd.DataFrame()
def _clear_duplicates(df: pd.DataFrame, feed: str) -> None:
if feed == 'TripUpdates' and not df.empty:
df.sort_values(by='timestamp', ascending=True)
# Not all feeds provide exactly the same fields, so this filters for it:
keys = list({'trip_id', 'direction_id', 'stop_sequence', 'stop_id'}.intersection(df.keys()))
df.drop_duplicates(subset=keys, keep='last', inplace=True)
def get_data_range(feed: str, company: str, start_date: str, start_hour: int = 0, end_date: str = None,
                   end_hour: int = 23, query: (str, None) = None, merge_static: bool = False,
                   clear_duplicates: bool = True) -> pd.DataFrame:
    """Get all data from start_date:start_hour to end_date:end_hour, inclusive, downloading if necessary.

    Date should be given in the format YYYY-MM-DD or YYYY_MM_DD.
    If end_date is not specified, it is the same as start_date.
    Use the parameter `query` to filter the data as it is being read if you want to minimise the memory footprint.
    merge_static includes some information from the static feed.
    """
    if feed not in ('VehiclePositions', 'TripUpdates', 'ServiceAlerts'):
        raise ValueError(f'Feed {feed} not recognised.')
    if company not in ('dt', 'klt', 'otraf', 'skane', 'sl', 'ul', 'varm', 'xt'):
        warnings.warn(RuntimeWarning(f'Company {company} is not recognised. Maybe the API will fail?'))
    frames = []
    if end_date is None:
        end_date = start_date
    start = start_date.replace('_', '-') + f' {start_hour}:00'
    end = end_date.replace('_', '-') + f' {end_hour}:00'
    # The cache stores one file per (company, feed, date, hour); walk every
    # hour in the requested window.
    for date_hour in tqdm.tqdm(pd.date_range(start, end, freq='h'), desc='Loading data'):
        date, time_code = str(date_hour).split()
        hour = int(time_code.split(':')[0])
        full_path = getdata._get_data_path(company, feed=feed, hour=hour, date=date)
        if not os.path.exists(full_path):
            # Download if it doesn't exist.
            try:
                getdata.get_data(date, hour, feed, company)
            except ValueError:
                warnings.warn(RuntimeWarning(f'The API did not return data for {date_hour}'))
                continue
        df = pd.read_feather(full_path)
        if clear_duplicates:
            _clear_duplicates(df, feed)
        if feed == 'VehiclePositions':
            if 'trip_id' in df:
                # For vehicle positions, skip data that does not correspond to routes.
                df = df.query('trip_id.notna()', engine='python')
            else:
                df = pd.DataFrame()
        if merge_static and not df.empty:
            this_static = load_static_data(company, date, remove_unused_stations=True)
            if this_static is None:
                # Static feed unavailable for this date; skip the hour.
                continue
            if feed == 'TripUpdates':
                # Overwrite some parameters already present in the static data
                df.drop(columns=['route_id', 'direction_id'], errors='ignore', inplace=True)
                # Rename some common columns so realtime ("observed") and
                # static ("scheduled") times can coexist after the merge.
                df.rename(
                    columns={'arrival_time': 'observed_arrival_time', 'departure_time': 'observed_departure_time'},
                    inplace=True)
                this_static.stop_times.rename(columns={'arrival_time': 'scheduled_arrival_time',
                                                       'departure_time': 'scheduled_departure_time'}, inplace=True)
                df = df.merge(this_static.stop_times, how='left',
                              on=('trip_id', 'stop_id', 'stop_sequence'),
                              validate='m:1')
                df = df.merge(this_static.trips.reset_index(level=['route_id', 'direction_id']), how='left',
                              on='trip_id', validate='m:1')
            elif feed == 'VehiclePositions':
                df.drop(columns=['route_id', 'direction_id'], errors='ignore', inplace=True)
                df = df.merge(this_static.trips.reset_index(level=['route_id', 'direction_id']), how='left',
                              on='trip_id', validate='m:1')
            elif feed == 'ServiceAlerts':
                warnings.warn(RuntimeWarning('ServiceAlerts cannot be merged with static data.'))
        if query is not None:
            df = _filter_df(df, query)
        # Drop the index, since it will be regenerated when concatenated
        df.drop(columns='index', errors='ignore', inplace=True)
        frames.append(df)
    df_merged = pd.concat(frames)
    if 'timestamp' in df_merged.keys():
        # Convenience column: timestamps are epoch seconds.
        df_merged['datetime'] = pd.to_datetime(df_merged.timestamp, unit='s')
    return df_merged
static_data = collections.namedtuple('StaticData', ['stop_times', 'stops', 'trips', 'shapes', 'routes'])
def _remove_unused_stations(stops_data: pd.DataFrame, stops_times: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):
parent_stations = stops_data.parent_station.dropna().values
used_stations = set(stops_times.stop_id).union(parent_stations)
stops_times = stops_times.query('stop_id in @used_stations')
stops_data = stops_data.query('stop_id in @used_stations')
return stops_data, stops_times
@functools.lru_cache(1)
def load_static_data(company: str, date: str, remove_unused_stations: bool = False) -> (static_data, None):
    """Load static data from the cache, downloading it if necessary.

    Date should be given in the format YYYY-MM-DD or YYYY_MM_DD.
    Returns a ``static_data`` namedtuple, or None (with a warning) when the
    static feed cannot be obtained. The size-1 LRU cache keeps only the most
    recently requested (company, date, remove_unused_stations) result.
    """
    output_folder = getstatic._get_static_data_path(company, date)
    if not os.path.isdir(output_folder):
        try:
            getstatic.get_static_data(company=company, date=date)
        except ValueError:
            warnings.warn(RuntimeWarning(f'Missing static data for {company} at {date}'))
            return
    if not os.path.exists(os.path.join(output_folder, 'stop_times.txt')):
        warnings.warn(RuntimeWarning(f'Missing static data for {company} at {date}'))
        return
    def parse_date(time_code: str):
        # GTFS allows times beyond 24:00 to describe trips running past midnight.
        time_code = list(map(int, time_code.split(':')))
        if time_code[0] < 24:
            return datetime.datetime(*map(int, date.replace('_', '-').split('-')), *time_code)
        else:
            # If it is over 24 h, roll over the next day.
            time_code[0] -= 24
            dt = datetime.datetime(*map(int, date.replace('_', '-').split('-')), *time_code)
            return dt + datetime.timedelta(days=1)
    stops_times = pd.read_csv(os.path.join(output_folder, 'stop_times.txt'), dtype={'trip_id': 'str', 'stop_id': 'str'},
                              parse_dates=['arrival_time', 'departure_time'], date_parser=parse_date)
    stops_data = pd.read_csv(os.path.join(output_folder, 'stops.txt'),
                             dtype={'stop_id': 'str', 'parent_station': 'str'})
    trips_data = pd.read_csv(os.path.join(output_folder, 'trips.txt'), dtype={'trip_id': 'str', 'route_id': 'str'})
    shapes_data = pd.read_csv(os.path.join(output_folder, 'shapes.txt'))
    routes = pd.read_csv(os.path.join(output_folder, 'routes.txt'), dtype={'route_id': 'str', 'agency_id': 'str',
                                                                           'route_short_name': 'str',
                                                                           'route_long_name': 'str'})
    if remove_unused_stations:
        stops_data, stops_times = _remove_unused_stations(stops_data, stops_times)
    # Denormalise: attach stop details to stop_times and route details to trips.
    stop_times = pd.merge(stops_times, stops_data, on='stop_id', how='left', validate='m:1')
    trips_data = pd.merge(trips_data, routes, on='route_id', how='left', validate='m:1')
    # Set indexes for faster querying and merging
    trips_data.set_index(['trip_id', 'route_id', 'direction_id'], inplace=True, drop=True, verify_integrity=True)
    stop_times.set_index(['trip_id', 'stop_id', 'stop_sequence'], inplace=True, drop=True, verify_integrity=True)
    shapes_data.set_index(['shape_id', 'shape_pt_sequence'], inplace=True, drop=True, verify_integrity=True)
    stops_data.set_index('stop_id', inplace=True, drop=True, verify_integrity=True)
    data = static_data(stop_times=stop_times, stops=stops_data, trips=trips_data, shapes=shapes_data, routes=routes)
    return data
def clean_cache() -> None:
    """Apply cleaning filters to all files in the cache, to fix issues with arrays downloaded with older versions."""
    cached_files = glob.glob(os.path.join(config.CACHE_DIR, '*feather'))
    for path in tqdm.tqdm(cached_files):
        frame = pd.read_feather(path)
        if 'ServiceAlerts' in path:
            frame = getdata.unpack_jsons(frame)
        getdata.sanitise_array(frame)
        if frame.empty:
            # Feather does not support a DF without columns, so add a dummy one
            frame['_'] = np.zeros(len(frame), dtype=np.bool_)
        frame.reset_index(drop=True).to_feather(path, compression='zstd', compression_level=9)
|
import json
import uvicorn
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import Response
from fastapi.templating import Jinja2Templates
from climate.model.load_production_model import Load_Prod_Model
from climate.model.prediction_from_model import Prediction
from climate.model.training_model import Train_model
from climate.validation_insertion.prediction_validation_insertion import Pred_Validation
from climate.validation_insertion.train_validation_insertion import Train_Validation
from utils.read_params import read_params
# FastAPI application wired with fully permissive CORS so a browser UI on any
# origin can reach the train/predict endpoints.
app = FastAPI()
config = read_params()  # project-wide parameter file
templates = Jinja2Templates(directory=config["templates"]["dir"])
# NOTE(review): wildcard origins with allow_credentials=True is very
# permissive — confirm this is acceptable outside development.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/")
async def index(request: Request):
    """Render the landing page template."""
    template_name = config["templates"]["index_html_file"]
    context = {"request": request}
    return templates.TemplateResponse(template_name, context)
@app.get("/train")
async def trainRouteClient():
    """Run the training pipeline: raw-data validation, model training, then
    promotion of the best models to production."""
    try:
        bucket = config["s3_bucket"]["climate_raw_data_bucket"]
        Train_Validation(bucket).training_validation()
        trainer = Train_model()
        cluster_count = trainer.training_model()
        Load_Prod_Model(num_clusters=cluster_count).load_production_model()
    except Exception as e:
        return Response("Error Occurred! %s" % e)
    return Response("Training successfull!!")
@app.get("/predict")
async def predictRouteClient():
    """Run the prediction pipeline and report where the prediction file landed."""
    try:
        raw_data_pred_bucket = config["s3_bucket"]["climate_raw_data_bucket"]
        pred_val = Pred_Validation(raw_data_pred_bucket)
        pred_val.prediction_validation()
        pred = Prediction()
        bucket, filename, json_predictions = pred.predict_model()
        # BUG FIX: the response previously hard-coded "(unknown)" and never
        # used the `filename` returned by predict_model(); interpolate it.
        return Response(
            f"prediction file created in {bucket} bucket with filename as {filename}, and few of the predictions are {str(json.loads(json_predictions))}"
        )
    except Exception as e:
        return Response("Error Occurred! %s" % e)
if __name__ == "__main__":
    # Standalone dev entry point; host/port come from the params file.
    host = config["app"]["host"]
    port = config["app"]["port"]
    uvicorn.run(app, host=host, port=port)
|
from flask import Flask, render_template, request, redirect, url_for
import json, time, serial
app = Flask(__name__)
# NOTE(review): hard-coded secret key and server name — these should come
# from configuration/environment in anything beyond a hobby deployment.
app.secret_key = 'shumugfdsghjk'
app.config['SERVER_NAME'] = '192.168.1.33'
# Serial link to the Arduino driving the pumps, opened once at import time.
ser = serial.Serial('/dev/ttyACM0', 9600)
@app.route('/')
def needToLogin():
    """Root URL: always send visitors to the login page."""
    login_url = url_for('login')
    return redirect(login_url)
@app.route('/home/<username>')
def index(username):
    """Home page: list every cocktail for the given user."""
    # BUG FIX: the original tested `'username' == ""` — a comparison of two
    # literals that is always False. Check the actual route parameter.
    if not username:
        return redirect(url_for('login'))
    # Load the cocktail/ingredient data from the JSON store.
    with open('drinkdata.txt') as data_file:
        data = json.load(data_file)
    return render_template('index.html', username=username, cocktails=data['cocktails'])
@app.route('/mix/<id>/<username>')
def mix(id, username):
    """Mix cocktail `id`: stream its ingredients' pump numbers to the Arduino
    and bump the cocktail's popularity counter."""
    with open('drinkdata.txt') as data_file:
        data = json.load(data_file)
    # Locate the requested cocktail; remember its position for the counter
    # update below. (Loop variable renamed: it used to shadow the `index`
    # view function.)
    whatToMix = None
    nameOfCocktail = None
    cocktail_idx = None
    for idx, cocktail in enumerate(data['cocktails']):
        if cocktail['id'] == id:
            whatToMix = cocktail['ingredients']
            nameOfCocktail = cocktail['name']
            cocktail_idx = idx
            break
    # BUG FIX: an unknown id previously left whatToMix/nameOfCocktail/index
    # unbound and crashed with NameError; send the user back to the menu.
    if whatToMix is None:
        return redirect(url_for('index', username=username))
    # Send each ingredient's pump number, newline-terminated, then the
    # '100' end-of-recipe marker.
    for alco in whatToMix:
        print(data['ingredients'][alco])
        ser.write(bytes(str(data['ingredients'][alco]), 'UTF-8'))
        ser.write(bytes("\n", 'UTF-8'))
        time.sleep(0.1)
    ser.write(bytes('100', 'UTF-8'))
    ser.write(bytes("\n", 'UTF-8'))
    data['cocktails'][cocktail_idx]['count'] += 1
    # Persist the updated counter.
    with open('drinkdata.txt', 'w') as outfile:
        json.dump(data, outfile)
    return render_template('mix.html', nameOfCocktail=nameOfCocktail, username=username)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login form; on POST, forward the chosen username to the home page."""
    if request.method != 'POST':
        return render_template('login.html')
    username = request.form['username']
    print('Setting Username' + username)
    return redirect(url_for('index', username=username))
@app.route('/make/<username>', methods=['GET', 'POST'])
def make(username):
    """Show the cocktail-creation form (GET) or store a new cocktail (POST)."""
    # BUG FIX: the original tested `'username' == ""` — a comparison of two
    # literals that is always False. Check the actual route parameter.
    if not username:
        return redirect(url_for('login'))
    with open('drinkdata.txt') as data_file:
        data = json.load(data_file)
    if request.method == 'POST':
        # Expand every ingredient by the requested count; the recipe is
        # capped at 8 doses below.
        cocking = []
        for ing in data['ingredients']:
            for _ in range(int(request.form[ing])):
                cocking.append(ing)
        # Timestamp prefix keeps ids unique even for duplicate names.
        drinkId = str(int(time.time())) + request.form['cocktailname']
        newcocktail = {'id': drinkId, 'name': request.form['cocktailname'],
                       'ingredients': cocking[:8],
                       'instructions': request.form['instructions'],
                       'count': 0, 'createdby': username}
        data['cocktails'].append(newcocktail)
        with open('drinkdata.txt', 'w') as outfile:
            json.dump(data, outfile)
        # Jump straight to the new cocktail via its fragment anchor.
        return redirect(url_for('index', username=username) + '#' + drinkId)
    else:
        return render_template('make.html', ingredients=data['ingredients'])
@app.route('/reset')
def reset():
    """Restore drinkdata.txt to a known default dataset."""
    defaults = {
        'ingredients' : {'Gin': 4, 'Whiskey': 3, 'Vodka' : 2, 'White Rum' : 1, 'Dark Rum':0, 'Passoa': -1, 'Blackcurrant Liqueur': -2, 'Triple Sec': -3, 'Coffee Liqueur': -4},
        'cocktails' : [
            {'id': '1 Test', 'name' : 'Test', 'createdby' : 'Stuart', 'ingredients' : ['Vodka', 'Vodka', 'White Rum'], 'instructions': 'Add Lime', 'count' : 0 }
        ]
    }
    # Debug: dump the keys of the seed cocktail to the console.
    for key in defaults['cocktails'][0]:
        print(key)
    with open('drinkdata.txt', 'w') as outfile:
        json.dump(defaults, outfile)
    return 'Reset'
if __name__ == '__main__':
    # Listen on every interface, port 80; threaded so slow serial writes do
    # not block other requests.
    app.run(host='0.0.0.0', port=80, threaded=True) #add when ready, threaded=True
    #debug=True,
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2016 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import client
from tests import EchecTest, test
# Name of this test group, printed to the console (the value itself is
# user-facing output and stays in French).
nom = "Test des créations de compte et des connexions"
class AfficherMOTD(test):
    """Check that the welcome banner (MOTD) is sent on connection."""
    nom = "AfficherMOTD"

    def test(self):
        self.cl = client.Client(self.smtp)
        message = self.cl.connecter()
        # Both fragments must appear in the greeting.
        for attendu in (b"Bienvenue sur", b"Votre compte"):
            try:
                message.index(attendu)
            except ValueError:
                raise EchecTest("Réponse attendu de la part de Kassie " \
                        "invalide", self.cl.com)
class EntrerNom(test):
    """Check the reaction to an account name at the login prompt."""
    nom = "EntrerNom"

    def sendNom(self, nom):
        # Open a fresh connection and submit `nom` at the first prompt.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        return self.cl.envoyer(nom.encode())

    def test(self):
        # "nouveau" starts account creation; an unknown name is refused.
        cas = (("nouveau", b"Votre nom de compte"),
               ("test", b"Ce compte n'existe pas."))
        for saisie, attendu in cas:
            message = self.sendNom(saisie)
            try:
                message.index(attendu)
            except ValueError:
                raise EchecTest("Réponse attendu de la part de Kassie " \
                        "invalide", self.cl.com)
class EntrerPass(test):
    """Check the password prompt after entering an existing account name."""
    nom = "EntrerPass"

    def test(self):
        # Create an account first so there is something to log into.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        try:
            self.cl.creer_compte("nicolas", "123456", "nicolas@orange.fr")
        except EchecTest as inst:
            raise EchecTest("Impossible de crée le compte : " + str(inst), \
                self.cl.com)
        # Reconnect and submit the freshly created account name.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        message = self.cl.envoyer(b"nicolas")
        try:
            # The expected fragment is still to be decided; an empty pattern
            # matches at index 0, so this never fails as written.
            message.index(b"")#TODO
        except ValueError:
            raise EchecTest("Réponse attendu de la part de Kassie " \
                "invalide", self.cl.com)
class NouveauNom(test):
    """Check which new account names are accepted or rejected."""
    nom = "NouveauNom"
    # Names expected to be accepted.
    nom_valide = ["Lea","Bastien","JeanneFrancoise","tictac42","Nitrate"]
    # Names expected to be rejected (reserved words, wrong length, accents,
    # hyphens, ...).
    nom_invalide = ["nouveau","kassie","Kassie","Al","Léa", \
        "MichelFrancois325","JeannineHuguette","Jean-Marc","François"]

    def mettre_nom(self,nom):
        # Start account creation and submit `nom` at the name prompt.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        return self.cl.envoyer(nom.encode())

    def test(self):
        # "/" must step back to the welcome screen.
        message = self.mettre_nom("/")
        try:
            message.index(b"Votre compte")
        except ValueError:
            raise EchecTest("Réponse attendu de la part de Kassie " \
                "invalide, pour revenir en arrière", self.cl.com)
        for nom in self.nom_valide:
            message = self.mettre_nom(nom)
            try:
                message.index(b"Choix de l'encodage")
            except ValueError:
                raise EchecTest("Le nom {0} n'a pas été accepté".format(nom), \
                    self.cl.com)
        for nom in self.nom_invalide:
            message = self.mettre_nom(nom)
            try:
                message.index(b"Votre nom de compte :")
            except ValueError:
                raise EchecTest("Le nom {0} a été accepté".format(nom), \
                    self.cl.com)
        # A name that already exists must be refused a second time.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        try:
            self.cl.creer_compte("Marie", "Blabla", "marie@orange.fr")
        except EchecTest as detail:
            raise EchecTest("Impossible de crée le compte : " + str(detail), \
                self.cl.com)
        message = self.mettre_nom("Marie")
        try:
            message.index(b"Votre nom de compte :")
        except ValueError:
            raise EchecTest("Un nom de compte a été accepté deux fois", \
                self.cl.com)
class ChangeEncodage(test):
    """Check the encoding-selection step of account creation."""
    nom = "ChangeEncodage"

    def mettre_encodage(self, encodage):
        # Walk the dialogue to the encoding prompt and submit `encodage`.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        self.cl.envoyer(b"Titus")
        return self.cl.envoyer(encodage.encode())

    def test(self):
        # (choice sent, byte sequence expected in the reply, failure message)
        cas = [
            ("0", b"Entrez le numero correspondant", "0 accepté comme encopage"),
            ("1", "prétexte".encode('Utf-8'), "Erreur lors du choix de l'encodage 1"),
            ("2", "prétexte".encode('Latin-1'), "Erreur lors du choix de l'encodage 2"),
            ("3", "prétexte".encode('cp850'), "Erreur lors du choix de l'encodage 3"),
            ("4", "prétexte".encode('cp1252'), "Erreur lors du choix de l'encodage 4"),
            ("5", b"Entrez le numero correspondant", "5 accepté comme encodage"),
        ]
        for choix, attendu, erreur in cas:
            message = self.mettre_encodage(choix)
            try:
                message.index(attendu)
            except ValueError:
                raise EchecTest(erreur, self.cl.com)
class ChoisirPass(test):
    """Check which passwords are accepted or rejected at account creation."""
    nom = "ChoisirPass"
    # Passwords expected to be accepted (including runs of punctuation).
    mdp_valide = ["123456","Bastien","tictac42","{"*8,"}"*8,"/"*8, \
        "["*8,"]"*8,"("*8,")"*8,"+"*8,"="*8,"$"*8,"_"*8,"*"*8,"@"*8,"^"*8, \
        "\""*8,"'"*8,"`"*8,"£"*8,"#"*8,"-"*8]
    # Passwords expected to be rejected.
    mdp_invalide = ["totor","oubli","flané32","\\"*8]

    def mettre_mdp(self, mdp):
        # Walk the dialogue to the password prompt and submit `mdp`.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        self.cl.envoyer(b"potter")
        self.cl.envoyer(b"1")
        return self.cl.envoyer(mdp.encode())

    def _verifier(self, message, fragment, erreur):
        # Raise EchecTest with `erreur` unless `fragment` appears in `message`.
        try:
            message.index(fragment)
        except ValueError:
            raise EchecTest(erreur, self.cl.com)

    def test(self):
        # "/" must step back to the encoding prompt.
        self._verifier(self.mettre_mdp("/"), b"Choix de l'encodage",
                       "Réponse attendu de la part de Kassie "
                       "invalide, pour revenir en arrière")
        for mdp in self.mdp_valide:
            self._verifier(self.mettre_mdp(mdp),
                           b"Confirmez le mot de passe :",
                           "Le mot de passe : {0} n'a pas été accepté".format(mdp))
        for mdp in self.mdp_invalide:
            self._verifier(self.mettre_mdp(mdp),
                           b"Votre mot de passe :",
                           "Le mot de passe : {0} a été accepté".format(mdp))
class ConfirmerPass(test):
    """Check the password-confirmation step of account creation."""
    nom = "ConfirmerPass"

    def mettre_mdp(self,mdp):
        # Walk the dialogue up to the confirmation prompt, using `mdp` as
        # both account name and password.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        self.cl.envoyer(mdp.encode())
        self.cl.envoyer(b"1")
        return self.cl.envoyer(mdp.encode())

    def test(self):
        # "/" must step back to the password prompt.
        self.mettre_mdp("test22")
        message = self.cl.envoyer(b"/")
        try:
            message.index(b"Votre mot de passe :")
        except ValueError:
            raise EchecTest("Réponse attendu de la part de Kassie " \
                "invalide, pour revenir en arrière", self.cl.com)
        # A matching confirmation must be accepted.
        self.mettre_mdp("test42")
        message = self.cl.envoyer(b"test42")
        try:
            message.index(b"Votre adresse mail :")
        except ValueError:
            raise EchecTest("Un mot de passe n'a pas été confirmé", self.cl.com)
        # A mismatching confirmation must be rejected.
        self.mettre_mdp("test21")
        message = self.cl.envoyer(b"test42")
        try:
            message.index(b"Confirmez le mot de passe :")
        except ValueError:
            # BUG FIX: pass the exchange log (self.cl.com) like every other
            # failure raised in this module.
            raise EchecTest("Un mot de passe a été confirmé", self.cl.com)
import asyncore
import time
class EntrerEmail(test):
    """Check validation of the e-mail address during account creation."""
    nom = "EntrerEmail"
    # Numeric counter used to generate unique account names per attempt.
    nom_compte = 100000
    # Addresses expected to be accepted.
    mail_valide = ["test@test.com", "bruno@maitredumonde.fr", \
        "blabla@un.grand.nom.de.domaine.fr","test@test.info"]
    # Addresses expected to be rejected.
    mail_invalide = ["essaye","essaye@encore","blabla@aa.a","test@fr", \
        "test@test.francais","français@test.fr","test@français.fr"]

    def mettre_mail(self,email):
        # Create a fresh numbered account and stop at the e-mail prompt.
        self.nom_compte += 1
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        self.cl.envoyer(str(self.nom_compte).encode())
        self.cl.envoyer(b"1")
        self.cl.envoyer(str(self.nom_compte).encode())
        self.cl.envoyer(str(self.nom_compte).encode())
        return self.cl.envoyer(email.encode())

    def test(self):
        # "/" must step back (prompt is shown again).
        message = self.mettre_mail("/")
        try:
            message.index(b"Votre adresse mail")
        except ValueError:
            raise EchecTest("Réponse attendu de la part de Kassie " \
                "invalide, en essayent de revenir en arrière", self.cl.com)
        # An address already in use must be refused.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        try:
            self.cl.creer_compte("bruno", "maitredumonde", "bruno@mdm.com")
        except EchecTest as detail:
            raise EchecTest("Impossible de crée le compte : " + str(detail), \
                self.cl.com)
        message = self.mettre_mail("bruno@mdm.com")
        try:
            message.index(b"Votre adresse mail")
        except ValueError:
            raise EchecTest("Réponse attendu de la part de Kassie " \
                "invalide, en essayent de revenir en arrière", self.cl.com)
        for mail in self.mail_valide:
            message = self.mettre_mail(mail)
            try:
                message.index(b"Code de validation :")
            except ValueError:
                raise EchecTest("Le mail : {0} n'a pas été " \
                    "accepté".format(mail), self.cl.com)
        # BUG FIX: this previously read `self.mailmail_invalide`, which does
        # not exist and raised AttributeError before any address was tested.
        for mail in self.mail_invalide:
            message = self.mettre_mail(mail)
            try:
                message.index(b"Votre mot de passe :")
            except ValueError:
                raise EchecTest("Le mail : {0} a été accepté".format(mail), \
                    self.cl.com)
class Validation(test):
    """Check the e-mail validation-code step of account creation."""
    nom = "Validation"
    # Numeric counter used to generate unique account names per attempt.
    nom_compte = 100000

    def ask_validation(self):
        # Create a fresh numbered account and stop at the validation-code prompt.
        self.nom_compte += 1
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.envoyer(b"nouveau")
        self.cl.envoyer(str(self.nom_compte).encode())
        self.cl.envoyer(b"1")
        self.cl.envoyer(str(self.nom_compte).encode())
        self.cl.envoyer(str(self.nom_compte).encode())
        return self.cl.envoyer((str(self.nom_compte) + "@free.fr").encode())

    def test(self):
        # "/" must step back to the e-mail prompt.
        self.ask_validation()
        message = self.cl.envoyer(b"/")
        try:
            message.index(b"Votre adresse mail :")
        except ValueError:
            raise EchecTest("Réponse attendu de la part de Kassie " \
                "invalide, pour revenir en arrière", self.cl.com)
        # Straightforward validation with the code received by mail.
        self.ask_validation()
        adresse = (str(self.nom_compte) + "@free.fr").encode()
        mail = self.smtp.attendre_message_de(1,adresse)
        if mail is None:
            raise EchecTest("Mail de validation non reçue",self.cl.com)
        code = self.cl.extraire_code(mail)
        message = self.cl.envoyer(str(code).encode())
        try:
            message.index(b"Choix du personnage")
        except ValueError:
            raise EchecTest("Validation classique impossible", self.cl.com)
        # Validation after disconnecting and logging back in.
        self.ask_validation()
        adresse = (str(self.nom_compte) + "@free.fr").encode()
        mail = self.smtp.attendre_message_de(1,adresse)
        if mail is None:
            raise EchecTest("Mail de validation non reçue",self.cl.com)
        self.cl = client.Client(self.smtp)
        self.cl.connecter(str(self.nom_compte),str(self.nom_compte))
        code = self.cl.extraire_code(mail)
        message = self.cl.envoyer(str(code).encode())
        try:
            message.index(b"Choix du personnage")
        except ValueError:
            raise EchecTest("Validation après reconnexion impossible", \
                self.cl.com)
        # Validation must still work after three empty attempts (a new code
        # is mailed out).
        self.ask_validation()
        adresse = (str(self.nom_compte) + "@free.fr").encode()
        mail = self.smtp.attendre_message_de(1,adresse)
        if mail is None:
            raise EchecTest("Mail de validation non reçue",self.cl.com)
        code = self.cl.extraire_code(mail)
        # BUG FIX: every other exchange sends bytes; these three used to send
        # str ("" instead of b"").
        message = self.cl.envoyer(b"")
        message = self.cl.envoyer(b"")
        message = self.cl.envoyer(b"")
        mail = self.smtp.attendre_message_de(1,adresse)
        if mail is None:
            raise EchecTest("Mail de validation non reçue",self.cl.com)
        code = self.cl.extraire_code(mail)
        message = self.cl.envoyer(str(code).encode())
        try:
            message.index(b"Choix du personnage")
        except ValueError:
            raise EchecTest("Validation après 3 tentatives impossible", \
                self.cl.com)
class Connexion(test):
    """Create accounts and verify the corresponding logins succeed."""
    nom = "Connexion"
    nom_compte = ["Bastien","JeanneFrancoise","tictac42","Nitrate"]
    mdp = ["123456","Bastien","tictac42"]
    email = ["test@test.com"]
    # Counter used to generate unique throwaway account names.
    rand_nom = 100000

    def testCompte(self, nom, mdp, mail):
        # Create the account on one connection, then log in on a fresh one.
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.creer_compte(nom, mdp, mail)
        self.cl = client.Client(self.smtp)
        self.cl.connecter()
        self.cl.connexion(nom, mdp)

    def test(self):
        # Vary the account name, the password, then the e-mail address.
        for compte in self.nom_compte:
            self.testCompte(compte, compte, compte + "@free.fr")
        for mot in self.mdp:
            self.rand_nom += 1
            compte = str(self.rand_nom)
            self.testCompte(compte, mot, compte + "@free.fr")
        for adresse in self.email:
            self.rand_nom += 1
            compte = str(self.rand_nom)
            self.testCompte(compte, compte, adresse)
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Collapse runs of equal values in a sorted singly-linked list."""
        if head is None:
            return head
        node = head
        while node.next is not None:
            if node.val == node.next.val:
                # Splice out the duplicate node.
                node.next = node.next.next
            else:
                node = node.next
        return head
|
from trnlp import TrnlpWord
from SimurgKelimeTemizle import kelime_temizle
# Sample Turkish sentence used when exercising ozneYuklem from this module;
# the commented alternatives cover other agreement cases.
text = "İnsanlar buraya geldiler."
#text = "İnsanlar buraya geldi."
#text = "Birkaç kişiler geldiler."
#text = "Bizimkilerle sizin eve geldim."
def ozneYuklem(text):
    """Check subject-predicate (özne-yüklem) number agreement in a Turkish sentence.

    Returns 1 when a plural subject (suffix "-lar"/"-ler") is combined with a
    3rd-person-plural verb ending ({Ke3ç}) — flagged here as an agreement
    problem — and 0 otherwise.
    """
    def cumle_ayirici(text):
        # Morphologically analyse each (cleaned) word with trnlp and return
        # the analysis strings.
        ayrilmis_metin = []
        obj = TrnlpWord()
        kelime_listesi = text.split(" ")
        for i in kelime_listesi:
            i = kelime_temizle(i)
            obj.setword(str(i))
            ayrilmis_metin.append(str(obj))
        return ayrilmis_metin
    ayrilmis_kelimeler = []
    ayrilmis_kelimeler = cumle_ayirici(text)
    #print(ayrilmis_kelimeler)
    # ------------- VERB ("fiil")
    def fiil_yazdir():
        """Return the words analysed as verbs ("fiil"), as a list."""
        fiil_listesi = []
        fiil = "fiil"
        counter = 0
        for i in ayrilmis_kelimeler:
            i = str(i)
            if fiil in i:
                #print("fiil bulunan indeks:", str(counter))
                fiil_listesi.append(ayrilmis_kelimeler[counter])
            counter += 1
        if len(fiil_listesi) > 0 :
            return fiil_listesi
        # Implicitly returns None when no verb was found; handled below.
    fiil = fiil_yazdir()
    #print(fiil)
    def fiil_kisi_ekini_bul(fiil): # currently `fiil` is a one-element list; revisit if it can hold two elements
        # Find which personal-ending tag the verb carries.
        fiil_koku = ""
        kisi_ekleri = ["{Ke1t}", "{Ke2t}", "{Ke3t}", "{Ke1ç}", "{Ke2ç}", "{Ke3ç}"]
        for i in kisi_ekleri:
            if not fiil:
                fiil_koku="None"
            else:
                for j in fiil:
                    if i in j:
                        fiil_koku = i
        return fiil_koku
    fiil_koku = fiil_kisi_ekini_bul(fiil)
    #print(fiil_koku)
    # ------------- NOUN ("isim")
    def isim_yazdir():
        """Return the words analysed as nouns ("isim"), as a list."""
        isim_listesi = []
        isim = "isim"
        counter = 0
        for i in ayrilmis_kelimeler:
            i = str(i)
            if isim in i:
                #print("isim buldu")
                #print("isim bulunan indeks:", str(counter))
                isim_listesi.append(ayrilmis_kelimeler[counter])
            counter += 1
        #return ayrilmis_kelimeler[counter-1]
        return isim_listesi
    isim = isim_yazdir()
    #print(isim)
    def isim_kisi_eki_cogul_mu(isim):
        # Detect a plural suffix ("lar"/"ler") on any of the noun analyses.
        cekim_ekleri = ["lar", "ler"]
        isim_koku = ""
        for i in cekim_ekleri:
            for j in isim:
                if i in j:
                    isim_koku = i
        return isim_koku
    isim_koku = isim_kisi_eki_cogul_mu(isim)
    #print(isim_koku)
    #print("İsim çekim eki: {",isim_koku,"} Fiil şahıs eki: ",fiil_koku)
    # Plural subject + 3rd-person-plural verb ending => agreement problem.
    if (isim_koku == "lar" or isim_koku == "ler") and fiil_koku == "{Ke3ç}":
        #print(text," ( Özne-Yüklem arasında ek sorunu vardır. Lütfen değişiklik yapın. )")
        return 1
    else:
        #print(text," ( Özne-Yüklem arasında ek sorunu yoktur. ) ")
        return 0
#print(ozneYuklem(text)) |
"""Unit test module for Intervention API"""
from datetime import datetime, timedelta
import json
import os
from dateutil.relativedelta import relativedelta
from flask_webtest import SessionScope
import pytest
from portal.extensions import db
from portal.models.audit import Audit
from portal.models.clinical_constants import CC
from portal.models.group import Group
from portal.models.identifier import Identifier
from portal.models.intervention import (
INTERVENTION,
Intervention,
UserIntervention,
)
from portal.models.intervention_strategies import AccessStrategy
from portal.models.message import EmailMessage
from portal.models.organization import Organization
from portal.models.qb_timeline import invalidate_users_QBT
from portal.models.questionnaire_bank import QuestionnaireBank
from portal.models.research_study import ResearchStudy
from portal.models.role import ROLE
from portal.models.user import add_role
from portal.models.user_consent import UserConsent
from portal.system_uri import DECISION_SUPPORT_GROUP, SNOMED
from tests import TEST_USER_ID, associative_backdate
from tests.test_assessment_status import (
metastatic_baseline_instruments,
mock_qr,
mock_questionnairebanks,
)
@pytest.fixture
def admin_user(test_user, promote_user):
    """Fixture: the standard test user promoted to the admin role."""
    role = ROLE.ADMIN.value
    promote_user(role_name=role)
    return test_user
@pytest.fixture
def patient_user(test_user, promote_user):
    """Fixture: the standard test user promoted to the patient role."""
    role = ROLE.PATIENT.value
    promote_user(role_name=role)
    return test_user
def test_intervention_wrong_service_user(
        service_user, login, client, deepen_org_tree):
    """A service user not tied to the intervention's client gets a 401."""
    service_user = db.session.merge(service_user)
    login(user_id=service_user.id)
    payload = json.dumps({'user_id': TEST_USER_ID, 'access': 'granted'})
    response = client.put(
        '/api/intervention/sexual_recovery',
        content_type='application/json',
        data=payload)
    assert response.status_code == 401
def test_intervention(
        test_client, service_user,
        login, client, deepen_org_tree):
    """A full PUT persists every supplied UserIntervention field."""
    test_client = db.session.merge(test_client)
    test_client.intervention = INTERVENTION.SEXUAL_RECOVERY
    test_client.application_origins = 'http://safe.com'
    service_user = db.session.merge(service_user)
    login(user_id=service_user.id)
    data = {
        'user_id': TEST_USER_ID,
        'access': "granted",
        'card_html': "unique HTML set via API",
        'link_label': 'link magic',
        'link_url': 'http://safe.com',
        'status_text': 'status example',
        'staff_html': "unique HTML for /patients view"
    }
    response = client.put(
        '/api/intervention/sexual_recovery',
        content_type='application/json',
        data=json.dumps(data))
    assert response.status_code == 200
    ui = UserIntervention.query.one()
    # Every submitted field must round-trip unchanged.
    for field, expected in data.items():
        assert getattr(ui, field) == expected
def test_music_hack(
        test_client, service_user,
        login, client, deepen_org_tree):
    """Granting access to the MUSIC intervention is coerced to 'subscribed'."""
    test_client = db.session.merge(test_client)
    test_client.intervention = INTERVENTION.MUSIC
    test_client.application_origins = 'http://safe.com'
    service_user = db.session.merge(service_user)
    login(user_id=service_user.id)
    payload = {'user_id': TEST_USER_ID, 'access': "granted"}
    response = client.put(
        '/api/intervention/music',
        content_type='application/json',
        data=json.dumps(payload))
    assert response.status_code == 200
    ui = UserIntervention.query.one()
    assert ui.user_id == payload['user_id']
    assert ui.access == 'subscribed'
def test_intervention_partial_put(
        test_client, service_user,
        login, client, deepen_org_tree):
    """A partial PUT updates only the supplied fields and keeps the rest."""
    test_client = db.session.merge(test_client)
    test_client.intervention = INTERVENTION.SEXUAL_RECOVERY
    test_client.application_origins = 'http://safe.com'
    service_user = db.session.merge(service_user)
    login(user_id=service_user.id)
    # Seed a fully-populated UserIntervention row to partially update below.
    data = {
        'user_id': TEST_USER_ID,
        'access': "granted",
        'card_html': "unique HTML set via API",
        'link_label': 'link magic',
        'link_url': 'http://safe.com',
        'status_text': 'status example',
        'staff_html': "unique HTML for /patients view"
    }
    seeded = UserIntervention(
        intervention_id=INTERVENTION.SEXUAL_RECOVERY.id, **data)
    with SessionScope(db):
        db.session.add(seeded)
        db.session.commit()
    # Update just two fields; the stored row must contain the merged set.
    update = {
        'user_id': TEST_USER_ID,
        'access': "forbidden",
        'card_html': "no access for YOU"
    }
    response = client.put(
        '/api/intervention/sexual_recovery',
        content_type='application/json',
        data=json.dumps(update))
    assert response.status_code == 200
    ui = UserIntervention.query.one()
    expected = {**data, **update}
    for field, value in expected.items():
        assert getattr(ui, field) == value
def test_intervention_bad_access(
        test_client, service_user,
        login, client, deepen_org_tree):
    """PUT with an unrecognized 'access' value should return 400."""
    test_client = db.session.merge(test_client)
    test_client.intervention = INTERVENTION.SEXUAL_RECOVERY
    service_user = db.session.merge(service_user)
    login(user_id=service_user.id)
    data = {
        'user_id': TEST_USER_ID,
        # 'enabled' is not a valid access value -- expect validation failure
        'access': 'enabled',
    }
    response = client.put(
        '/api/intervention/sexual_recovery',
        content_type='application/json',
        data=json.dumps(data))
    assert response.status_code == 400
def test_intervention_validation(
        test_client, service_user,
        login, client, deepen_org_tree):
    """link_url outside the client's application_origins is rejected (400)."""
    test_client = db.session.merge(test_client)
    test_client.intervention = INTERVENTION.SEXUAL_RECOVERY
    test_client.application_origins = 'http://safe.com'
    service_user = db.session.merge(service_user)
    login(user_id=service_user.id)
    data = {
        'user_id': TEST_USER_ID,
        # origin doesn't match application_origins above
        'link_url': 'http://un-safe.com',
    }
    response = client.put(
        '/api/intervention/sexual_recovery',
        content_type='application/json',
        data=json.dumps(data))
    assert response.status_code == 400
def test_clinc_id(initialize_static, test_user):
    """Org-identifier access strategy grants access once user joins an org.

    NOTE(review): function name misspells "clinic" -- renaming would be
    cosmetic only; left as is.
    """
    # Create several orgs with identifier
    org1 = Organization(name='org1')
    org2 = Organization(name='org2')
    org3 = Organization(name='org3')
    identifier = Identifier(value='pick me', system=DECISION_SUPPORT_GROUP)
    for org in (org1, org2, org3):
        org.identifiers.append(identifier)
    # Add access strategy to the care plan intervention
    cp = INTERVENTION.CARE_PLAN
    cp.public_access = False  # turn off public access to force strategy
    cp_id = cp.id
    with SessionScope(db):
        db.session.add(org1)
        db.session.add(org2)
        db.session.add(org3)
        db.session.commit()
    d = {
        'function': 'limit_by_clinic_w_id',
        'kwargs': [{'name': 'identifier_value',
                    'value': 'pick me'}]
    }
    strat = AccessStrategy(
        name="member of org with identifier",
        intervention_id=cp_id,
        function_details=json.dumps(d))
    with SessionScope(db):
        db.session.add(strat)
        db.session.commit()
    cp = INTERVENTION.CARE_PLAN
    user = db.session.merge(test_user)
    # Prior to associating user with any orgs, shouldn't have access
    assert not cp.display_for_user(user).access
    assert not cp.quick_access_check(user)
    # Add association and test again
    org3 = db.session.merge(org3)
    user.organizations.append(org3)
    with SessionScope(db):
        db.session.commit()
    user, cp = map(db.session.merge, (user, cp))
    assert cp.display_for_user(user).access
    assert cp.quick_access_check(user)
def test_diag_stategy(
        initialize_static, test_user,
        login, deepen_org_tree):
    """Test strategy for diagnosis: observation_check on a PCa dx grants access."""
    # Add access strategies to the care plan intervention
    cp = INTERVENTION.CARE_PLAN
    cp.public_access = False  # turn off public access to force strategy
    cp_id = cp.id
    with SessionScope(db):
        d = {'function': 'observation_check',
             'kwargs': [
                 {
                     'name': 'display',
                     'value': CC.PCaDIAG.codings[0].display
                 },
                 {'name': 'boolean_value', 'value': 'true'}]}
        strat = AccessStrategy(
            name="has PCa diagnosis",
            intervention_id=cp_id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    cp = INTERVENTION.CARE_PLAN
    user = db.session.merge(test_user)
    # Prior to PCa dx, user shouldn't have access
    assert not cp.display_for_user(user).access
    assert not cp.quick_access_check(user)
    # Bless the test user with PCa diagnosis
    login()
    user.save_observation(
        codeable_concept=CC.PCaDIAG, value_quantity=CC.TRUE_VALUE,
        audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
        status='registered', issued=None)
    with SessionScope(db):
        db.session.commit()
    user, cp = map(db.session.merge, (user, cp))
    assert cp.display_for_user(user).access
    assert cp.quick_access_check(user)
def test_diag_changed_stategy(test_user, login, deepen_org_tree):
    """Test strategy for altered diagnosis"""
    # Add access strategies to the care plan intervention
    cp = INTERVENTION.CARE_PLAN
    cp.public_access = False  # turn off public access to force strategy
    cp_id = cp.id
    with SessionScope(db):
        d = {'function': 'observation_check',
             'kwargs': [
                 {'name': 'display',
                  'value': CC.PCaDIAG.codings[0].display},
                 {'name': 'boolean_value', 'value': 'true'}]}
        strat = AccessStrategy(
            name="has PCa diagnosis",
            intervention_id=cp_id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    cp = INTERVENTION.CARE_PLAN
    user = db.session.merge(test_user)
    # Prior to PCa dx, user shouldn't have access
    assert not cp.display_for_user(user).access
    assert not cp.quick_access_check(user)
    # Bless the test user with PCa diagnosis
    login()
    now = datetime.utcnow()
    before = now - relativedelta(hours=1)
    user.save_observation(
        codeable_concept=CC.PCaDIAG, value_quantity=CC.TRUE_VALUE,
        audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
        status='registered', issued=before)
    with SessionScope(db):
        db.session.commit()
    user, cp = map(db.session.merge, (user, cp))
    assert cp.display_for_user(user).access
    assert cp.quick_access_check(user)
    # Now post a *NEW* value taking away PCa dx, should eclipse old value
    # and take away access
    user.save_observation(
        codeable_concept=CC.PCaDIAG, value_quantity=CC.FALSE_VALUE,
        audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
        status='registered', issued=now)
    with SessionScope(db):
        db.session.commit()
    user, cp = map(db.session.merge, (user, cp))
    assert not cp.display_for_user(user).access
    assert not cp.quick_access_check(user)
def test_no_tx(initialize_static, test_user, add_procedure):
    """Test strategy for not starting treatment"""
    # Add access strategies to the care plan intervention
    cp = INTERVENTION.CARE_PLAN
    cp.public_access = False  # turn off public access to force strategy
    cp_id = cp.id
    with SessionScope(db):
        d = {'function': 'tx_begun',
             'kwargs': [{'name': 'boolean_value', 'value': 'false'}]}
        strat = AccessStrategy(
            # NOTE(review): "stared" typo in strategy name; cosmetic only,
            # the name is never asserted on
            name="has not stared treatment",
            intervention_id=cp_id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    cp = INTERVENTION.CARE_PLAN
    user = db.session.merge(test_user)
    # Prior to declaring TX, user should have access
    assert cp.display_for_user(user).access
    assert cp.quick_access_check(user)
    add_procedure(
        code='424313000', display='Started active surveillance')
    with SessionScope(db):
        db.session.commit()
    user, cp = map(db.session.merge, (user, cp))
    # Declaring they started a non TX proc, should still have access
    assert cp.display_for_user(user).access
    assert cp.quick_access_check(user)
    add_procedure(
        code='26294005',
        display='Radical prostatectomy (nerve-sparing)',
        system=SNOMED)
    with SessionScope(db):
        db.session.commit()
    user, cp = map(db.session.merge, (user, cp))
    # Declaring they started a TX proc, should lose access
    assert not cp.display_for_user(user).access
    assert not cp.quick_access_check(user)
def test_exclusive_stategy(initialize_static, test_user):
    """Test exclusive intervention strategy"""
    user = db.session.merge(test_user)
    ds_p3p = INTERVENTION.DECISION_SUPPORT_P3P
    ds_wc = INTERVENTION.DECISION_SUPPORT_WISERCARE
    ds_p3p.public_access = False
    ds_wc.public_access = False
    # p3p is only shown if the user is NOT in wisercare
    with SessionScope(db):
        d = {'function': 'allow_if_not_in_intervention',
             'kwargs': [{
                 'name': 'intervention_name', 'value': ds_wc.name}]}
        strat = AccessStrategy(
            name="exclusive decision support strategy",
            intervention_id=ds_p3p.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, ds_p3p, ds_wc = map(db.session.merge, (user, ds_p3p, ds_wc))
    # Prior to associating user w/ decision support, the strategy
    # should give access to p3p
    assert ds_p3p.display_for_user(user).access
    assert ds_p3p.quick_access_check(user)
    assert not ds_wc.display_for_user(user).access
    assert not ds_wc.quick_access_check(user)
    # Add user to wisercare, confirm it's the only w/ access
    ui = UserIntervention(user_id=user.id, intervention_id=ds_wc.id,
                          access='granted')
    with SessionScope(db):
        db.session.add(ui)
        db.session.commit()
    user, ds_p3p, ds_wc = map(db.session.merge, (user, ds_p3p, ds_wc))
    assert not ds_p3p.display_for_user(user).access
    assert not ds_p3p.quick_access_check(user)
    assert ds_wc.display_for_user(user).access
    assert ds_wc.quick_access_check(user)
def test_not_in_role_or_sr(initialize_static, test_user):
    """SELF_MANAGEMENT shown only if NOT in SR and NOT in WRITE_ONLY role."""
    user = db.session.merge(test_user)
    sm = INTERVENTION.SELF_MANAGEMENT
    sr = INTERVENTION.SEXUAL_RECOVERY
    sm.public_access = False
    sr.public_access = False
    d = {
        'function': 'combine_strategies',
        'kwargs': [
            {'name': 'strategy_1',
             'value': 'allow_if_not_in_intervention'},
            {'name': 'strategy_1_kwargs',
             'value': [{
                 'name': 'intervention_name',
                 'value': INTERVENTION.SEXUAL_RECOVERY.name}]},
            {'name': 'strategy_2',
             'value': 'not_in_role_list'},
            {'name': 'strategy_2_kwargs',
             'value': [{
                 'name': 'role_list',
                 'value': [ROLE.WRITE_ONLY.value]}]}
        ]
    }
    with SessionScope(db):
        strat = AccessStrategy(
            name="SELF_MANAGEMENT if not SR and not in WRITE_ONLY",
            intervention_id=sm.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, sm, sr = map(db.session.merge, (user, sm, sr))
    # Prior to granting user WRITE_ONLY role, the strategy
    # should give access to SELF_MANAGEMENT
    assert sm.display_for_user(user).access
    assert sm.quick_access_check(user)
    # Add WRITE_ONLY to user's roles
    add_role(user, ROLE.WRITE_ONLY.value)
    with SessionScope(db):
        db.session.commit()
    user, sm, sr = map(db.session.merge, (user, sm, sr))
    assert not sm.display_for_user(user).access
    assert not sm.quick_access_check(user)
    # Revert role change for next condition
    user.roles = []
    with SessionScope(db):
        db.session.commit()
    user, sm, sr = map(db.session.merge, (user, sm, sr))
    assert sm.display_for_user(user).access
    assert sm.quick_access_check(user)
    # Grant user sr access, they should lose sm visibility
    ui = UserIntervention(
        user_id=user.id,
        intervention_id=INTERVENTION.SEXUAL_RECOVERY.id,
        access='granted')
    with SessionScope(db):
        db.session.add(ui)
        db.session.commit()
    user, sm, sr = map(db.session.merge, (user, sm, sr))
    assert not sm.display_for_user(user).access
    assert not sm.quick_access_check(user)
def test_in_role(initialize_static, test_user):
    """in_role_list strategy grants access only once user has the role."""
    user = db.session.merge(test_user)
    sm = INTERVENTION.SELF_MANAGEMENT
    sm.public_access = False
    d = {
        'function': 'in_role_list',
        'kwargs': [
            {'name': 'role_list',
             'value': [ROLE.PATIENT.value]}]
    }
    with SessionScope(db):
        strat = AccessStrategy(
            name="SELF_MANAGEMENT if PATIENT",
            intervention_id=sm.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, sm = map(db.session.merge, (user, sm))
    # Prior to granting user PATIENT role, the strategy
    # should not give access to SM
    assert not sm.display_for_user(user).access
    assert not sm.quick_access_check(user)
    # Add PATIENT to user's roles
    add_role(user, ROLE.PATIENT.value)
    with SessionScope(db):
        db.session.commit()
    user, sm = map(db.session.merge, (user, sm))
    assert sm.display_for_user(user).access
    assert sm.quick_access_check(user)
def test_card_html_update(
        client, initialize_static, initialized_patient_logged_in):
    """Confirm assessment status state affects AE card on /home view.

    Completes the baseline + indefinite questionnaires for a metastatic
    user and confirms the card text and user-timezone date rendering.
    """
    test_user = initialized_patient_logged_in
    test_user_id = test_user.id
    # Need a current date, adjusted slightly to test UTC boundary
    # date rendering, one day back so the assessment period will have
    # started.
    dt = datetime.utcnow().replace(
        hour=20, minute=0, second=0, microsecond=0) - relativedelta(
        days=1)
    # generate questionnaire banks and associate user with
    # metastatic organization
    mock_questionnairebanks()
    metastatic_org = Organization.query.filter_by(name='metastatic').one()
    test_user = db.session.merge(test_user)
    # Fix: iterate over a snapshot -- removing from the live relationship
    # list while iterating it skips elements when multiple orgs exist.
    for o in list(test_user.organizations):
        test_user.organizations.remove(o)
    test_user.organizations.append(metastatic_org)
    consent = UserConsent.query.filter(
        UserConsent.user_id == test_user_id).one()
    consent.organization_id = metastatic_org.id
    consent.acceptance_date = dt
    # Fetch home page, which should include Assessment Engine "card"
    results = client.get("/home")
    # without completing an assessment, card_html should include username
    assert bytes(test_user.display_name, 'utf-8') in results.data
    # Add a fake assessments and see a change
    m_qb = QuestionnaireBank.query.filter(
        QuestionnaireBank.name == 'metastatic').filter(
        QuestionnaireBank.classification == 'baseline').one()
    for i in metastatic_baseline_instruments:
        mock_qr(instrument_id=i, timestamp=dt, qb=m_qb)
    mi_qb = QuestionnaireBank.query.filter_by(
        name='metastatic_indefinite').first()
    mock_qr(instrument_id='irondemog', timestamp=dt, qb=mi_qb)
    invalidate_users_QBT(test_user_id, research_study_id='all')
    results = client.get("/home")
    assert bytes("Thank you", 'utf-8') in results.data
    ae = INTERVENTION.ASSESSMENT_ENGINE
    assert ae.quick_access_check(test_user)
    # test datetime display based on user timezone
    today = datetime.strftime(dt, '%e %b %Y')
    assert bytes(today, 'utf-8') in results.data
    test_user = db.session.merge(test_user)
    # 20:00 UTC is already "tomorrow" in Asia/Tokyo (UTC+9)
    test_user.timezone = "Asia/Tokyo"
    with SessionScope(db):
        db.session.commit()
    tomorrow = datetime.strftime(
        dt + timedelta(days=1), '%e %b %Y')
    results = client.get("/home")
    assert bytes(tomorrow, 'utf-8') in results.data
def test_expired(client, initialized_patient_logged_in):
    """If baseline expired check message.

    Backdates the user's consent beyond the baseline window and confirms
    the /home view reports the assessment is no longer available.
    """
    test_user = initialized_patient_logged_in
    # backdate so baseline is expired
    backdate, nowish = associative_backdate(
        now=datetime.utcnow(), backdate=relativedelta(months=3))
    # generate questionnaire banks; associate and consent user with
    # localized organization
    mock_questionnairebanks()
    localized_org = Organization.query.filter_by(name='localized').one()
    test_user = db.session.merge(test_user)
    # Fix: iterate over a snapshot -- removing from the live relationship
    # list while iterating it skips elements when multiple orgs exist.
    for o in list(test_user.organizations):
        test_user.organizations.remove(o)
    test_user.organizations.append(localized_org)
    consent = UserConsent.query.filter(
        UserConsent.user_id == test_user.id).one()
    consent.organization_id = localized_org.id
    consent.acceptance_date = backdate
    results = client.get("/home")
    assert bytes(
        "The assessment is no longer available", 'utf-8') in results.data
def test_strat_from_json(initialize_static):
    """Create access strategy from json"""
    d = {
        'name': 'unit test example',
        'description': 'a lovely way to test',
        'function_details': {
            'function': 'allow_if_not_in_intervention',
            'kwargs': [{'name': 'intervention_name',
                        'value': INTERVENTION.SELF_MANAGEMENT.name}]
        }
    }
    acc_strat = AccessStrategy.from_json(d)
    # round-trip: name survives, function_details stored as JSON text
    assert d['name'] == acc_strat.name
    assert d['function_details'] == json.loads(acc_strat.function_details)
def test_strat_view(admin_user, login, client, deepen_org_tree):
    """Test strategy view functions"""
    login()
    d = {
        'name': 'unit test example',
        'function_details': {
            'function': 'allow_if_not_in_intervention',
            'kwargs': [{'name': 'intervention_name',
                        'value': INTERVENTION.SELF_MANAGEMENT.name}]
        }
    }
    # POST a new access rule for the intervention
    response = client.post(
        '/api/intervention/sexual_recovery/access_rule',
        content_type='application/json',
        data=json.dumps(d))
    assert response.status_code == 200
    # fetch it back and compare
    response = (client
                .get('/api/intervention/sexual_recovery/access_rule'))
    assert response.status_code == 200
    data = response.json
    assert len(data['rules']) == 1
    assert d['name'] == data['rules'][0]['name']
    assert d['function_details'] == data['rules'][0]['function_details']
def test_strat_dup_rank(admin_user, login, client, deepen_org_tree):
    """Rank must be unique"""
    login()
    d = {
        'name': 'unit test example',
        'rank': 1,
        'function_details': {
            'function': 'allow_if_not_in_intervention',
            'kwargs': [{'name': 'intervention_name',
                        'value': INTERVENTION.SELF_MANAGEMENT.name}]
        }
    }
    # first rule at rank 1 succeeds
    response = client.post(
        '/api/intervention/sexual_recovery/access_rule',
        content_type='application/json',
        data=json.dumps(d))
    assert response.status_code == 200
    d = {
        'name': 'unit test same rank example',
        'rank': 1,
        'description': 'should not take with same rank',
        'function_details': {
            'function': 'allow_if_not_in_intervention',
            'kwargs': [{'name': 'intervention_name',
                        'value': INTERVENTION.SELF_MANAGEMENT.name}]
        }
    }
    # second rule reusing rank 1 must be rejected
    response = client.post(
        '/api/intervention/sexual_recovery/access_rule',
        content_type='application/json',
        data=json.dumps(d))
    assert response.status_code == 400
def test_and_strats(initialize_static, test_user):
    """combine_strategies defaults to logical AND of its sub-strategies."""
    # Create a logical 'and' with multiple strategies
    ds_p3p = INTERVENTION.DECISION_SUPPORT_P3P
    ds_p3p.public_access = False
    user = db.session.merge(test_user)
    identifier = Identifier(
        value='decision_support_p3p', system=DECISION_SUPPORT_GROUP)
    uw = Organization(name='UW Medicine (University of Washington)')
    uw.identifiers.append(identifier)
    INTERVENTION.SEXUAL_RECOVERY.public_access = False
    with SessionScope(db):
        db.session.add(uw)
        db.session.commit()
    user, uw = map(db.session.merge, (user, uw))
    uw_child = Organization(name='UW clinic', partOf_id=uw.id)
    with SessionScope(db):
        db.session.add(uw_child)
        db.session.commit()
    user, uw, uw_child = map(db.session.merge, (user, uw, uw_child))
    d = {
        'name': 'not in SR _and_ in clinc UW',
        'function': 'combine_strategies',
        'kwargs': [
            {'name': 'strategy_1',
             'value': 'allow_if_not_in_intervention'},
            {'name': 'strategy_1_kwargs',
             'value': [{'name': 'intervention_name',
                        'value': INTERVENTION.SEXUAL_RECOVERY.name}]},
            {'name': 'strategy_2',
             'value': 'limit_by_clinic_w_id'},
            {'name': 'strategy_2_kwargs',
             'value': [{'name': 'identifier_value',
                        'value': 'decision_support_p3p'}]}
        ]
    }
    with SessionScope(db):
        strat = AccessStrategy(
            name=d['name'],
            intervention_id=INTERVENTION.DECISION_SUPPORT_P3P.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # first strat true, second false. therefore, should be False
    assert not ds_p3p.display_for_user(user).access
    # Add the child organization to the user, which should be included
    # due to default behavior of limit_by_clinic_w_id
    user.organizations.append(uw_child)
    with SessionScope(db):
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # first strat true, second true. therefore, should be True
    assert ds_p3p.display_for_user(user).access
    ui = UserIntervention(
        user_id=user.id,
        intervention_id=INTERVENTION.SEXUAL_RECOVERY.id,
        access='granted')
    with SessionScope(db):
        db.session.add(ui)
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # first strat false, second true. AND should be false
    assert not ds_p3p.display_for_user(user).access
def test_p3p_conditions(
        test_user, add_procedure, login, deepen_org_tree):
    """Full nested combine_strategies ruleset expected for P3P access."""
    # Test the list of conditions expected for p3p
    ds_p3p = INTERVENTION.DECISION_SUPPORT_P3P
    ds_p3p.public_access = False
    user = db.session.merge(test_user)
    p3p_identifier = Identifier(
        value='decision_support_p3p', system=DECISION_SUPPORT_GROUP)
    wc_identifier = Identifier(
        value='decision_support_wisercare', system=DECISION_SUPPORT_GROUP)
    ucsf = Organization(name='UCSF Medical Center')
    uw = Organization(
        name='UW Medicine (University of Washington)')
    ucsf.identifiers.append(wc_identifier)
    uw.identifiers.append(p3p_identifier)
    with SessionScope(db):
        db.session.add(ucsf)
        db.session.add(uw)
        db.session.commit()
    user = db.session.merge(user)
    user.organizations.append(ucsf)
    user.organizations.append(uw)
    INTERVENTION.SEXUAL_RECOVERY.public_access = False
    with SessionScope(db):
        db.session.commit()
    ucsf, user, uw = map(db.session.merge, (ucsf, user, uw))
    # Full logic from story #127433167
    # NOTE(review): description says "does NOT have PCaMETASTASIZE" while
    # strategy_4 actually checks PCaLocalized == true -- confirm intent
    description = (
        "[strategy_1: (user NOT IN sexual_recovery)] "
        "AND [strategy_2 <a nested combined strategy>: "
        "((user NOT IN list of clinics (including UCSF)) OR "
        "(user IN list of clinics including UCSF and UW))] "
        "AND [strategy_3: (user has NOT started TX)] "
        "AND [strategy_4: (user does NOT have PCaMETASTASIZE)]")
    d = {
        'function': 'combine_strategies',
        'kwargs': [
            # Not in SR (strat 1)
            {'name': 'strategy_1',
             'value': 'allow_if_not_in_intervention'},
            {'name': 'strategy_1_kwargs',
             'value': [{'name': 'intervention_name',
                        'value': INTERVENTION.SEXUAL_RECOVERY.name}]},
            # Not in clinic list (UCSF,) OR (In Clinic UW and UCSF) (#2)
            {'name': 'strategy_2',
             'value': 'combine_strategies'},
            {'name': 'strategy_2_kwargs',
             'value': [
                 {'name': 'combinator',
                  'value': 'any'},  # makes this combination an 'OR'
                 {'name': 'strategy_1',
                  'value': 'not_in_clinic_w_id'},
                 {'name': 'strategy_1_kwargs',
                  'value': [{'name': 'identifier_value',
                             'value': 'decision_support_wisercare'}]},
                 {'name': 'strategy_2',
                  'value': 'limit_by_clinic_w_id'},
                 {'name': 'strategy_2_kwargs',
                  'value': [{'name': 'identifier_value',
                             'value': 'decision_support_p3p'}]},
             ]},
            # Not Started TX (strat 3)
            {'name': 'strategy_3',
             'value': 'tx_begun'},
            {'name': 'strategy_3_kwargs',
             'value': [{'name': 'boolean_value', 'value': 'false'}]},
            # Has Localized PCa (strat 4)
            {'name': 'strategy_4',
             'value': 'observation_check'},
            {'name': 'strategy_4_kwargs',
             'value': [{'name': 'display',
                        'value': CC.PCaLocalized.codings[0].display},
                       {'name': 'boolean_value', 'value': 'true'}]},
        ]
    }
    with SessionScope(db):
        strat = AccessStrategy(
            name='P3P Access Conditions',
            description=description,
            intervention_id=INTERVENTION.DECISION_SUPPORT_P3P.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # only first two strats true so far, therefore, should be False
    assert not ds_p3p.display_for_user(user).access
    add_procedure(
        code='424313000', display='Started active surveillance')
    user = db.session.merge(user)
    login()
    user.save_observation(
        codeable_concept=CC.PCaLocalized, value_quantity=CC.TRUE_VALUE,
        audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
        status='preliminary', issued=None)
    with SessionScope(db):
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # All conditions now met, should have access
    assert ds_p3p.display_for_user(user).access
    # Remove all clinics, should still have access
    user.organizations = []
    with SessionScope(db):
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    assert len(user.organizations) == 0
    assert ds_p3p.display_for_user(user).access
def test_eproms_p3p_conditions(
        test_user, add_procedure, login, deepen_org_tree):
    """P3P access ruleset for eproms: truenth ruleset plus NOT WRITE_ONLY."""
    # Test the list of conditions expected for p3p on eproms
    # very similar to truenth p3p, plus ! role write_only
    ds_p3p = INTERVENTION.DECISION_SUPPORT_P3P
    ds_p3p.public_access = False
    user = db.session.merge(test_user)
    p3p_identifier = Identifier(
        value='decision_support_p3p', system=DECISION_SUPPORT_GROUP)
    wc_identifier = Identifier(
        value='decision_support_wisercare', system=DECISION_SUPPORT_GROUP)
    ucsf = Organization(name='UCSF Medical Center')
    uw = Organization(
        name='UW Medicine (University of Washington)')
    ucsf.identifiers.append(wc_identifier)
    uw.identifiers.append(p3p_identifier)
    with SessionScope(db):
        db.session.add(ucsf)
        db.session.add(uw)
        db.session.commit()
    user = db.session.merge(user)
    user.organizations.append(ucsf)
    user.organizations.append(uw)
    INTERVENTION.SEXUAL_RECOVERY.public_access = False
    with SessionScope(db):
        db.session.commit()
    ucsf, user, uw = map(db.session.merge, (ucsf, user, uw))
    # Full logic from story #127433167
    # NOTE(review): "startegy_5"/"roll" typos live inside the stored
    # description string; left untouched as they're runtime data
    description = (
        "[strategy_1: (user NOT IN sexual_recovery)] "
        "AND [strategy_2 <a nested combined strategy>: "
        "((user NOT IN list of clinics (including UCSF)) OR "
        "(user IN list of clinics including UCSF and UW))] "
        "AND [strategy_3: (user has NOT started TX)] "
        "AND [strategy_4: (user does NOT have PCaMETASTASIZE)] "
        "AND [startegy_5: (user does NOT have roll WRITE_ONLY)]")
    d = {
        'function': 'combine_strategies',
        'kwargs': [
            # Not in SR (strat 1)
            {'name': 'strategy_1',
             'value': 'allow_if_not_in_intervention'},
            {'name': 'strategy_1_kwargs',
             'value': [{'name': 'intervention_name',
                        'value': INTERVENTION.SEXUAL_RECOVERY.name}]},
            # Not in clinic list (UCSF,) OR (In Clinic UW and UCSF) (#2)
            {'name': 'strategy_2',
             'value': 'combine_strategies'},
            {'name': 'strategy_2_kwargs',
             'value': [
                 {'name': 'combinator',
                  'value': 'any'},  # makes this combination an 'OR'
                 {'name': 'strategy_1',
                  'value': 'not_in_clinic_w_id'},
                 {'name': 'strategy_1_kwargs',
                  'value': [{'name': 'identifier_value',
                             'value': 'decision_support_wisercare'}]},
                 {'name': 'strategy_2',
                  'value': 'combine_strategies'},
                 {'name': 'strategy_2_kwargs',
                  'value': [
                      {'name': 'strategy_1',
                       'value': 'limit_by_clinic_w_id'},
                      {'name': 'strategy_1_kwargs',
                       'value': [{'name': 'identifier_value',
                                  'value': 'decision_support_wisercare'}]},
                      {'name': 'strategy_2',
                       'value': 'limit_by_clinic_w_id'},
                      {'name': 'strategy_2_kwargs',
                       'value': [{'name': 'identifier_value',
                                  'value': 'decision_support_p3p'}]},
                  ]},
             ]},
            # Not Started TX (strat 3)
            {'name': 'strategy_3',
             'value': 'tx_begun'},
            {'name': 'strategy_3_kwargs',
             'value': [{'name': 'boolean_value', 'value': 'false'}]},
            # Has Localized PCa (strat 4)
            {'name': 'strategy_4',
             'value': 'observation_check'},
            {'name': 'strategy_4_kwargs',
             'value': [{'name': 'display',
                        'value': CC.PCaLocalized.codings[0].display},
                       {'name': 'boolean_value', 'value': 'true'}]},
            # Does NOT have role WRITE_ONLY (strat 5)
            {'name': 'strategy_5',
             'value': 'not_in_role_list'},
            {'name': 'strategy_5_kwargs',
             'value': [{'name': 'role_list',
                        'value': [ROLE.WRITE_ONLY.value]}]}
        ]
    }
    with SessionScope(db):
        strat = AccessStrategy(
            name='P3P Access Conditions',
            description=description,
            intervention_id=INTERVENTION.DECISION_SUPPORT_P3P.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # only first two strats true so far, therefore, should be False
    assert not ds_p3p.display_for_user(user).access
    add_procedure(
        code='424313000', display='Started active surveillance')
    user = db.session.merge(user)
    login()
    user.save_observation(
        codeable_concept=CC.PCaLocalized, value_quantity=CC.TRUE_VALUE,
        audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
        status='final', issued=None)
    with SessionScope(db):
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    # All conditions now met, should have access
    assert ds_p3p.display_for_user(user).access
    # Remove all clinics, should still have access
    user.organizations = []
    with SessionScope(db):
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    assert len(user.organizations) == 0
    assert ds_p3p.display_for_user(user).access
    # Finally, add the WRITE_ONLY group and it should disappear
    add_role(user, ROLE.WRITE_ONLY.value)
    with SessionScope(db):
        db.session.commit()
    user, ds_p3p = map(db.session.merge, (user, ds_p3p))
    assert not ds_p3p.display_for_user(user).access
def test_truenth_st_conditions(
        initialize_static, test_user,
        add_procedure, login, deepen_org_tree):
    """Symptom Tracker ruleset: not in SR AND role PATIENT AND has BIOPSY."""
    # Test the list of conditions expected for SymptomTracker in truenth
    sm = INTERVENTION.SELF_MANAGEMENT
    sm.public_access = False
    user = db.session.merge(test_user)
    add_role(user, ROLE.PATIENT.value)
    sm_identifier = Identifier(
        value='self_management', system=DECISION_SUPPORT_GROUP)
    uw = Organization(
        name='UW Medicine (University of Washington)')
    uw.identifiers.append(sm_identifier)
    with SessionScope(db):
        db.session.add(uw)
        db.session.commit()
    user = db.session.merge(user)
    user.organizations.append(uw)
    INTERVENTION.SEXUAL_RECOVERY.public_access = False
    with SessionScope(db):
        db.session.add(user)
        db.session.commit()
    user, uw = map(db.session.merge, (user, uw))
    # Full logic from story #150532380
    description = (
        "[strategy_1: (user NOT IN sexual_recovery)] "
        "AND [strategy_2: (user has role PATIENT)] "
        "AND [strategy_3: (user has BIOPSY)]")
    d = {
        'function': 'combine_strategies',
        'kwargs': [
            # Not in SR (strat 1)
            {'name': 'strategy_1',
             'value': 'allow_if_not_in_intervention'},
            {'name': 'strategy_1_kwargs',
             'value': [{'name': 'intervention_name',
                        'value': INTERVENTION.SEXUAL_RECOVERY.name}]},
            # Has role PATIENT (strat 2)
            {'name': 'strategy_2',
             'value': 'in_role_list'},
            {'name': 'strategy_2_kwargs',
             'value': [{'name': 'role_list',
                        'value': [ROLE.PATIENT.value]}]},
            # Has BIOPSY observation (strat 3)
            {'name': 'strategy_3',
             'value': 'observation_check'},
            {'name': 'strategy_3_kwargs',
             'value': [{'name': 'display',
                        'value': CC.BIOPSY.codings[0].display},
                       {'name': 'boolean_value', 'value': 'true'}]}
        ]
    }
    with SessionScope(db):
        strat = AccessStrategy(
            name='Symptom Tracker Conditions',
            description=description,
            intervention_id=INTERVENTION.SELF_MANAGEMENT.id,
            function_details=json.dumps(d))
        db.session.add(strat)
        db.session.commit()
    user, sm = map(db.session.merge, (user, sm))
    # only first two strats true so far, therefore, should be False
    assert not sm.display_for_user(user).access
    add_procedure(
        code='424313000', display='Started active surveillance')
    user = db.session.merge(user)
    login()
    user.save_observation(
        codeable_concept=CC.BIOPSY, value_quantity=CC.TRUE_VALUE,
        audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
        status='unknown', issued=None)
    with SessionScope(db):
        db.session.commit()
    user, sm = map(db.session.merge, (user, sm))
    # All conditions now met, should have access
    assert sm.display_for_user(user).access
    # Remove all clinics, should still have access
    user.organizations = []
    with SessionScope(db):
        db.session.commit()
    user, sm = map(db.session.merge, (user, sm))
    assert len(user.organizations) == 0
    assert sm.display_for_user(user).access
    # Finally, remove the PATIENT role and it should disappear
    user.roles.pop()
    with SessionScope(db):
        db.session.add(user)
        db.session.commit()
    user, sm = map(db.session.merge, (user, sm))
    assert not sm.display_for_user(user).access
def test_get_empty_user_intervention(
        test_user_login, client, deepen_org_tree):
    """GET for a user without a UserIntervention row returns only user_id."""
    # Get on user w/o user_intervention
    response = client.get('/api/intervention/{i}/user/{u}'.format(
        i=INTERVENTION.SELF_MANAGEMENT.name, u=TEST_USER_ID))
    assert response.status_code == 200
    assert len(response.json.keys()) == 1
    assert response.json['user_id'] == TEST_USER_ID
def test_get_user_intervention(
        initialize_static, test_user,
        login, client, deepen_org_tree):
    """GET returns every populated field of an existing UserIntervention."""
    intervention_id = INTERVENTION.SEXUAL_RECOVERY.id
    ui = UserIntervention(intervention_id=intervention_id,
                          user_id=TEST_USER_ID,
                          access='granted',
                          card_html='custom ch',
                          link_label='link magic',
                          link_url='http://example.com',
                          status_text='status example',
                          staff_html='custom ph')
    with SessionScope(db):
        db.session.add(ui)
        db.session.commit()
    login()
    response = client.get('/api/intervention/{i}/user/{u}'.format(
        i=INTERVENTION.SEXUAL_RECOVERY.name, u=TEST_USER_ID))
    assert response.status_code == 200
    # all 7 populated fields come back
    assert len(response.json.keys()) == 7
    assert response.json['user_id'] == TEST_USER_ID
    assert response.json['access'] == 'granted'
    assert response.json['card_html'] == "custom ch"
    assert response.json['link_label'] == "link magic"
    assert response.json['link_url'] == "http://example.com"
    assert response.json['status_text'] == "status example"
    assert response.json['staff_html'] == "custom ph"
def test_communicate(add_user, login, client, deepen_org_tree):
    """POST /communicate emails every member of the named group."""
    email_group = Group(name='test_email')
    foo = add_user(username='foo@example.com')
    boo = add_user(username='boo@example.com')
    foo, boo = map(db.session.merge, (foo, boo))
    foo.groups.append(email_group)
    boo.groups.append(email_group)
    data = {
        'protocol': 'email',
        'group_name': 'test_email',
        'subject': "Just a test, ignore",
        'message':
            'Review results at <a href="http://www.example.com">here</a>'
    }
    login()
    response = client.post('/api/intervention/{}/communicate'.format(
        INTERVENTION.DECISION_SUPPORT_P3P.name),
        content_type='application/json',
        data=json.dumps(data))
    assert response.status_code == 200
    assert response.json['message'] == 'sent'
    # both group members should be listed as recipients
    message = EmailMessage.query.one()
    set1 = {foo.email, boo.email}
    set2 = set(message.recipients.split())
    assert set1 == set2
def test_dynamic_intervention_access():
    """Interventions added at runtime are reachable via INTERVENTION.<name>."""
    # Confirm interventions dynamically added still accessible
    newbee = Intervention(
        name='newbee', description='test', subscribed_events=0)
    with SessionScope(db):
        db.session.add(newbee)
        db.session.commit()
    assert INTERVENTION.newbee == db.session.merge(newbee)
def test_bogus_intervention_access(
        test_user, login, promote_user, client, deepen_org_tree):
    """Unknown intervention names raise AttributeError and 404 via the API."""
    with pytest.raises(AttributeError):
        INTERVENTION.phoney
    login()
    promote_user(role_name=ROLE.SERVICE.value)
    data = {'user_id': TEST_USER_ID, 'access': "granted"}
    response = client.put('/api/intervention/phoney', data=data)
    assert response.status_code == 404
@pytest.fixture
def setUp(initialize_static):
    """Load the minimal eproms persistence needed by access_strategy tests."""
    from portal.config.model_persistence import ModelPersistence
    from portal.config.site_persistence import models
    from portal.models.coding import Coding
    from portal.models.research_protocol import ResearchProtocol
    eproms_config_dir = os.path.join(
        os.path.dirname(__file__), "../portal/config/eproms")
    # Load minimal set of persistence files for access_strategy, in same
    # order defined in site_persistence
    needed = {
        ResearchStudy,
        ResearchProtocol,
        Coding,
        Organization,
        AccessStrategy,
        Intervention}
    for model in models:
        if model.cls not in needed:
            continue
        mp = ModelPersistence(
            model_class=model.cls, sequence_name=model.sequence_name,
            lookup_field=model.lookup_field, target_dir=eproms_config_dir)
        # keep_unmentioned=False drops rows absent from the config files
        mp.import_(keep_unmentioned=False)
def test_self_mgmt(setUp, patient_user, test_user):
    """Patient w/ Southampton org should get access to self_mgmt"""
    southampton = Organization.query.filter_by(name='Southampton').one()
    # merge to attach the fixture user to the current session
    test_user = db.session.merge(test_user)
    test_user.organizations.append(southampton)
    self_mgmt = Intervention.query.filter_by(name='self_management').one()
    assert self_mgmt.quick_access_check(test_user)
def test_self_mgmt_user_denied(setUp, test_user):
    """Non-patient w/ Southampton org should NOT get self_mgmt access"""
    southampton = Organization.query.filter_by(name='Southampton').one()
    test_user.organizations.append(southampton)
    self_mgmt = Intervention.query.filter_by(name='self_management').one()
    assert not self_mgmt.quick_access_check(test_user)
def test_self_mgmt_org_denied(setUp, patient_user, test_user):
    """Patient w/o Southampton org should NOT get self_mgmt access"""
    self_mgmt = Intervention.query.filter_by(name='self_management').one()
    user = db.session.merge(test_user)
    assert not self_mgmt.quick_access_check(user)
|
from __future__ import division
from builtins import object
import pytest
import numpy as np
import os
import platform
from sporco import util
def fn(prm):
    """Scalar test objective with its minimum at x = 0.1."""
    return (prm[0] - 0.1) ** 2
def fnnan(prm):
    """Like `fn`, but undefined (NaN) for negative arguments."""
    value = prm[0]
    if value < 0.0:
        return np.nan
    return (value - 0.1) ** 2
def fnv(prm):
    """Vector-valued test objective with component minima at 0.1 and 0.5."""
    value = prm[0]
    return tuple((value - target) ** 2 for target in (0.1, 0.5))
class TestSet01(object):
    """Smoke tests for assorted helpers in `sporco.util`."""

    def setup_method(self, method):
        # fixed seed so random dictionaries are reproducible
        np.random.seed(12345)

    def test_03(self):
        # 2D dictionary tiled with an explicit block size
        D = np.random.randn(64, 64)
        im = util.tiledict(D, sz=(8, 8))

    def test_04(self):
        D = np.random.randn(8, 8, 64)
        im = util.tiledict(D)

    def test_05(self):
        # heterogeneous filter sizes
        D = np.random.randn(8, 8, 64)
        im = util.tiledict(D, sz=((6, 6, 32), (8, 8, 32)))

    def test_06(self):
        # multi-channel (colour) dictionary
        D = np.random.randn(8, 8, 3, 64)
        im = util.tiledict(D)

    def test_12(self):
        # grid search over a scalar objective with minimum at x = 0.1
        x = np.linspace(-1, 1, 21)
        sprm, sfvl, fvmx, sidx = util.grid_search(fn, (x,))
        assert np.abs(sprm[0] - 0.1) < 1e-14
        assert sidx[0] == 11

    def test_13(self):
        # objective returning NaN for part of the domain must be handled
        x = np.linspace(-1, 1, 21)
        sprm, sfvl, fvmx, sidx = util.grid_search(fnnan, (x,))
        assert np.abs(sprm[0] - 0.1) < 1e-14
        assert sidx[0] == 11

    def test_14(self):
        # vector-valued objective: one optimum reported per component
        x = np.linspace(-1, 1, 21)
        sprm, sfvl, fvmx, sidx = util.grid_search(fnv, (x,))
        assert np.abs(sprm[0][0] - 0.1) < 1e-14
        assert np.abs(sprm[0][1] - 0.5) < 1e-14
        assert sidx[0][0] == 11
        assert sidx[0][1] == 15

    def test_15(self):
        D = util.convdicts()['G:12x12x72']
        assert D.shape == (12, 12, 72)

    def test_16(self):
        # ExampleImages accessors: listing, grouping, and image loading
        ei = util.ExampleImages()
        im = ei.images()
        assert len(im) > 0
        gp = ei.groups()
        assert len(gp) > 0
        gi = ei.groupimages(gp[0])
        assert len(gi) > 0
        im1 = ei.image('sail.png')
        im2 = ei.image('sail.png', scaled=True, dtype=np.float32,
                       idxexp=np.s_[:, 10:-10], zoom=0.5)
        im3 = ei.image('sail.png', dtype=np.float32,
                       idxexp=np.s_[:, 10:-10], zoom=2.0, gray=True)

    def test_17(self):
        # ExampleImages with an explicit data path
        pth = os.path.join(os.path.dirname(util.__file__), 'data')
        ei = util.ExampleImages(pth=pth)
        im = ei.images()
        assert len(im) > 0

    def test_18(self):
        # unlabelled Timer: elapsed time is non-negative and monotone
        t = util.Timer()
        t.start()
        t0 = t.elapsed()
        t.stop()
        t1 = t.elapsed()
        assert t0 >= 0.0
        assert t1 >= t0
        assert len(t.__str__()) > 0
        assert len(t.labels()) > 0

    def test_19(self):
        # labelled Timer: individual and grouped start/stop
        t = util.Timer('a')
        t.start(['a', 'b'])
        t0 = t.elapsed('a')
        t.stop('a')
        t.stop('b')
        t.stop(['a', 'b'])
        assert t.elapsed('a') >= 0.0
        assert t.elapsed('b') >= 0.0
        assert t.elapsed('a', total=False) == 0.0

    def test_20(self):
        # reset a single label, then reset all labels
        t = util.Timer('a')
        t.start(['a', 'b'])
        t.reset('a')
        assert t.elapsed('a') == 0.0
        t.reset('all')
        assert t.elapsed('b') == 0.0

    def test_21(self):
        t = util.Timer()
        with util.ContextTimer(t):
            t0 = t.elapsed()
        assert t.elapsed() >= 0.0

    def test_22(self):
        # ContextTimer stopping an already-running timer, restarting on exit
        t = util.Timer()
        t.start()
        with util.ContextTimer(t, action='StopStart'):
            t0 = t.elapsed()
        t.stop()
        assert t.elapsed() >= 0.0

    def test_23(self):
        # maxtry=0 is rejected up front
        with pytest.raises(ValueError):
            dat = util.netgetdata('http://devnull', maxtry=0)

    def test_24(self):
        # unreachable host surfaces as URLError
        with pytest.raises(util.urlerror.URLError):
            dat = util.netgetdata('http://devnull')

    def test_25(self):
        val = util.in_ipython()
        assert val is True or val is False

    def test_26(self):
        val = util.in_notebook()
        assert val is True or val is False

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='Feature not supported under Windows')
    def test_27(self):
        assert util.idle_cpu_count() >= 1
|
from django.shortcuts import render
import logging
# Create your views here.
def inicio(request):
    """Render the site landing page.

    The context travels down through the nested templates:
    inicio.html -> base.html -> navbar.html.
    """
    # Flag read by the navbar template to highlight the "index" entry.
    context = {"indexNavActiveClass": "active"}
    # Dead code removed: unused `logger`/`var` locals and commented-out
    # logging calls added no behavior.
    return render(request, 'inicio/inicio.html', context)
|
# std libs
import os
import glob
import json
import mmap
import pickle
import shutil
import tempfile
import fnmatch as fnm
import itertools as itt
from pathlib import Path
from contextlib import contextmanager
# local libs
import docsplice as doc
from recipes.bash import brace_expand_iter
from recipes.string import sub
from recipes.string.brackets import braces
# relative libs
from ..functionals import echo0
FORMATS = {'json': json,
'pkl': pickle} # dill, sqlite
FILEMODES = {pickle: 'b', json: ''}
def guess_format(filename):
    """Guess the serialization module from the file extension.

    Parameters
    ----------
    filename : str or Path
        Filename whose final suffix selects the format.

    Returns
    -------
    module
        `json` or `pickle`.

    Raises
    ------
    ValueError
        If no known format matches the extension (or there is none).
    """
    # use filename to guess format; guard against extension-less names,
    # which previously raised IndexError instead of ValueError
    suffixes = Path(filename).suffixes
    ext = suffixes[-1].lstrip('.') if suffixes else ''
    formatter = FORMATS.get(ext, None)
    if formatter is None:
        # BUG FIX: message was a plain string, so the literal text
        # '{filename!r}' was emitted instead of the offending filename
        raise ValueError(
            'Could not guess file format from filename. Please provide the '
            f'expected format for deserialization of file: {filename!r}'
        )
    return formatter
def deserialize(filename, formatter=None, **kws):
    """Load a serialized object from `filename` (json or pickle)."""
    path = Path(filename)
    if formatter is None:
        formatter = guess_format(path)
    # pickle needs binary mode; json text mode
    mode = 'r' + FILEMODES[formatter]
    with path.open(mode) as stream:
        return formatter.load(stream, **kws)
def serialize(filename, data, formatter=None, **kws):
    """
    Data serialization wrapper that outputs to either json or native pickle
    formats.

    Parameters
    ----------
    filename : str, Path
        Destination of the serialized data.
    data : object
        The object to serialize.
    formatter : module {json, pickle}, optional
        If formatter argument is not explicitly provided (default), it is chosen
        based on the extension of the input filename.
    """
    path = Path(filename)
    # ROBUSTNESS: previously `mkdir()` failed when more than one directory
    # level was missing; `parents=True, exist_ok=True` also removes the
    # check-then-create race of the old `if not exists(): mkdir()` pattern
    path.parent.mkdir(parents=True, exist_ok=True)
    formatter = formatter or guess_format(path)
    with path.open(f'w{FILEMODES[formatter]}') as fp:
        formatter.dump(data, fp, **kws)
def load_pickle(filename, **kws):
    """Deserialize a pickle file."""
    return deserialize(filename, formatter=pickle, **kws)
def save_pickle(filename, data, **kws):
    """Serialize `data` to a pickle file."""
    serialize(filename, data, formatter=pickle, **kws)
def load_json(filename, **kws):
    """Deserialize a json file."""
    return deserialize(filename, formatter=json, **kws)
def save_json(filename, data, **kws):
    """Serialize `data` to a json file."""
    serialize(filename, data, formatter=json, **kws)
def iter_files(path, extensions='*', recurse=False, ignore=()):
    """Yield files under `path`, skipping any matching an `ignore` glob pattern."""
    if isinstance(ignore, str):
        ignore = (ignore, )
    for found in _iter_files(path, extensions, recurse):
        name = str(found)
        # drop the file if any ignore pattern matches (case-sensitive)
        if not any(fnm.fnmatchcase(name, pattern) for pattern in ignore):
            yield found
def _iter_files(path, extensions='*', recurse=False):
    """
    Generator that yields all files in a directory tree with given file
    extension(s), optionally recursing down the directory tree. Brace expansion
    syntax from bash is supported, allowing multiple directory trees to be
    traversed with a single statement.

    Parameters
    ----------
    path : str or Path
        Location of the root folder. Can also be a glob pattern of
        filenames to load eg: '/path/SHA_20200715.000[1-5].fits'
        Pattern can also contain brace expansion patterns
        '/path/SHA_202007{15..18}.000[1-5].fits' in which case all valid
        files and directories in the range will be traversed.
    extensions : str or tuple or list
        The filename extensions to consider. All files with any of these
        extensions will be included. The same functionality as is provided by
        this parameter can be achieved by including the list of file extensions
        in the expansion pattern. eg: '/path/*.{png,jpg}' will get all png and
        jpg files from path directory.
    recurse : bool, default=False
        Whether or not to recurse down the directory tree. The same as using
        ".../**/..." in the glob pattern.

    Examples
    --------
    >>> iter_files()

    Yields
    ------
    pathlib.Path
        system path pointing to the file

    Raises
    ------
    ValueError
        If the given base path does not exist
    """
    path = str(path)
    # handle brace expansion first
    special = bool(braces.match(path, False, must_close=True))
    wildcard = glob.has_magic(path)  # handle glob patterns
    if special | wildcard:
        # expand the pattern and recurse on each expanded path
        itr = (brace_expand_iter(path) if special else
               glob.iglob(path, recursive=recurse))
        for path in itr:
            yield from iter_files(path, extensions, recurse)
        return

    path = Path(path)
    if path.is_dir():
        # iterate all files with given extensions
        if isinstance(extensions, str):
            extensions = (extensions, )
        # build a brace pattern '{ext1,ext2,...}' and delegate to the glob branch
        extensions = f'{{{",".join((ext.lstrip(".") for ext in extensions))}}}'
        yield from iter_files(f'{path!s}/{"**/" * recurse}*.{extensions}',
                              recurse=recurse)
        return

    if not path.exists():
        raise ValueError(f"'{path!s}' is not a directory or a glob pattern")

    # break the recurrence
    yield path
def iter_ext(files, extensions='*'):
    """
    Yield all the files that exist with the same root and stem but different
    file extension(s).

    Parameters
    ----------
    files : Container or Iterable
        The files to consider.
    extensions : str or Container of str
        All file extensions to consider.

    Yields
    ------
    Path
        Matching sibling files.
    """
    if isinstance(extensions, str):
        extensions = (extensions, )
    for path in files:
        for ext in extensions:
            pattern = f'{path.stem}.{ext.lstrip(".")}'
            yield from path.parent.glob(pattern)
def iter_lines(filename, *section, mode='r', strip=None):
    """
    File line iterator for text files. Optionally return only a section of the
    file. Trailing newline characters are stripped by default.

    Two basic function signatures are accepted:
        iter_lines(filename, stop)
        iter_lines(filename, start, stop[, step])

    Parameters
    ----------
    filename : str, Path
        File system location of the file to read.
    *section
        The [start], stop, [step] lines.
    mode : str
        Mode used for opening files, by default 'r'.
    strip : str, optional
        Characters to strip from lines. The default value depends on the
        `mode` parameter. For text mode ('r', 'rt'), strip '\\n'; for binary
        mode ('b'), strip system specific newlines. Note that python
        automatically translates system specific newlines in the file to
        '\\n' for files opened in text mode. Use `strip=''` or `strip=False`
        to leave lines unmodified.

    Yields
    ------
    str
        Lines from the file.
    """
    # python translates system newlines to '\n' in text mode, but not in
    # binary mode: https://stackoverflow.com/a/38075790/1098683
    chars = os.linesep if strip is None else strip
    chars = chars or ''
    if 'b' in mode and isinstance(chars, str):
        # binary-mode lines require a bytes argument for strip()
        chars = chars.encode()
    with open(str(filename), mode) as stream:
        window = section or (None, )
        for line in itt.islice(stream, *window):
            yield line.strip(chars)
@doc.splice(iter_lines)
def read_lines(filename, *section, mode='r', strip=None, filtered=None,
               echo=False):
    """
    Read a subset of lines from a given file.

    {Extended Summary}

    {Parameters}
    filtered : callable or None, optional
        A function that will be used to filter out unwanted lines. Filtering
        occurs after stripping unwanted characters. The default behaviour
        (filtered=None) removes all blank lines from the results.
    echo : bool, optional
        Whether to print a summary of the read content to stdout,
        by default False.

    Returns
    -------
    list of str
        Lines from the file.
    """
    # Read file content
    content = iter_lines(filename, *section, mode=mode, strip=strip)
    if filtered is not False:
        # filter(None, ...) drops falsy (blank) lines; pass filtered=False
        # to disable filtering entirely
        content = filter(filtered, content)
    content = list(content)

    # Optionally print the content
    if echo:
        print(_show_lines(filename, content))
    return content
def read_line(filename, nr, mode='r', strip=None):
    """Return the single line with index `nr` from the file."""
    lines = iter_lines(filename, nr, nr + 1, mode=mode, strip=strip)
    return next(lines)
def _show_lines(filename, lines, n=10, dots='.\n' * 3):
"""Create message for `read_lines`"""
n_lines = len(lines)
n = min(n, n_lines)
if n_lines and n:
msg = (f'Read file {filename!r} containing:'
f'\n\t'.join([''] + lines[:n]))
# Number of ellipsis dots (one per line)
ndot = dots.count('\n')
# TODO: tell nr omitted lines
if n_lines > n:
msg += ('.\n' * ndot)
if n_lines > n + ndot:
msg += ('\n'.join(lines[-ndot:]))
else:
msg = f'File {filename!r} is empty!'
return msg
def count_lines(filename):
    """Fast line count for files using a read-only memory map.

    Parameters
    ----------
    filename : str or Path
        File whose lines are counted.

    Returns
    -------
    int

    Raises
    ------
    ValueError
        If the file does not exist.
    """
    filename = str(filename)  # convert path objects
    if not os.path.exists(filename):
        raise ValueError(f'No such file: {filename!r}')
    if os.path.getsize(filename) == 0:
        return 0
    # BUG FIX: open read-only ('rb') and map with ACCESS_READ — the previous
    # 'r+' mode required *write* permission on the file; the mmap is now
    # also closed deterministically via the context managers
    with open(filename, 'rb') as fp:
        with mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) as buffer:
            return sum(1 for _ in iter(buffer.readline, b''))
def write_lines(stream, lines, eol='\n'):
    """
    Write multiple lines to a file-like output stream.

    Parameters
    ----------
    stream : file-like
        Object with a `write` method.
    lines : iterable of str
        Sequence of lines to be written to the stream.
    eol : str, optional
        End-of-line character to be appended to each line, by default '\\n'.
        Pass the empty string to write lines unmodified.
    """
    assert isinstance(eol, str)
    # `echo0` returns its first argument unchanged, so an empty eol writes
    # the lines verbatim
    append = str.__add__ if eol else echo0
    for line in lines:
        stream.write(append(line, eol))
@contextmanager
def backed_up(filename, mode='w', backupfile=None, exception_hook=None):
    """
    Context manager for doing file operations under backup. This will backup
    your file before any read / writes are attempted. If something goes
    terribly wrong during the attempted operation, the original content will
    be restored.

    Parameters
    ----------
    filename : str or Path
        The file to be edited.
    mode : str, optional
        File mode for opening, by default 'w'.
    backupfile : str or Path, optional
        Location of the backup file, by default None. The default location is
        a temporary file created by `tempfile.mkstemp`, using the prefix
        "backup." and suffix being the original `filename`.
    exception_hook : callable, optional
        Hook to run on the event of an exception if you wish to modify the
        error message. The default, None, will leave the exception unaltered.

    Examples
    --------
    >>> Path('foo.txt').write_text('Important stuff')
    ... with backed_up('foo.txt') as fp:
    ...     fp.write('Some additional text')
    ...     raise Exception('Catastrophy!')
    ... Path('foo.txt').read_text()
    'Important stuff'

    In the example above, the original content was restored upon exception.
    Catastrophy averted!

    Raises
    ------
    Exception
        The type and message of exceptions raised by this context manager are
        determined by the optional `exception_hook` function.
    """
    # backup and restore on error!
    path = Path(filename).resolve()
    backup_needed = path.exists()
    if backup_needed:
        if backupfile is None:
            # BUG FIX: close the mkstemp descriptor right away — previously
            # it was only closed on the error path, which leaked the fd on
            # success and raised NameError when a custom `backupfile` was
            # supplied (the descriptor variable was never bound)
            fid, backupfile = tempfile.mkstemp(prefix='backup.',
                                               suffix=f'.{path.name}')
            os.close(fid)
        else:
            backupfile = Path(backupfile)
        # create the backup
        shutil.copy(str(path), backupfile)

    # hand the open file to the caller; restore the backup on any exception
    with path.open(mode) as fp:
        try:
            yield fp
        except Exception as err:
            if backup_needed:
                fp.close()
                shutil.copy(backupfile, filename)
            if exception_hook:
                raise exception_hook(err, filename) from err
            raise
@doc.splice(backed_up, 'summary', omit='Parameters[backupfile]',
            replace={'operation': 'write',
                     'read / ': ''})  # FIXME: replace not working here
def safe_write(filename, lines, mode='w', eol='\n', exception_hook=None):
    """
    {Parameters}
    lines : list
        Lines of content to write to file.
    """
    assert isinstance(eol, str)
    # `echo0` leaves the line untouched when eol is the empty string
    append = str.__add__ if eol else echo0
    with backed_up(filename, mode, exception_hook=exception_hook) as fp:
        # write lines
        try:
            for i, line in enumerate(lines):
                fp.write(append(line, eol))
        except Exception as err:
            if exception_hook:
                # NOTE(review): `line` and `i` are unbound here if the
                # exception fires before the first iteration — confirm
                # hooks tolerate that (or that it cannot happen)
                raise exception_hook(err, filename, line, i) from err
            raise
def write_replace(filename, replacements):
    """Apply the `replacements` substitution mapping to the file, in place."""
    if not replacements:
        return  # nothing to do

    # read-modify-write under backup so a failure restores the original
    with backed_up(filename, 'r+') as fp:
        content = fp.read()
        fp.seek(0)
        fp.write(sub(content, replacements))
        fp.truncate()
@contextmanager
def working_dir(path):
    """
    Temporarily change working directory to the given `path` with this context
    manager.

    Parameters
    ----------
    path : str or Path
        File system location of the temporary working directory.

    Examples
    --------
    >>> with working_dir('/path/to/folder/that/exists') as wd:
    ...     file = wd.with_name('myfile.txt')
    ...     file.touch()

    After the context manager returns, we will be switched back to the
    original working directory, even if an exception occurred.

    Raises
    ------
    ValueError
        If `path` is not a valid directory.
    """
    if not Path(path).is_dir():
        # BUG FIX: message was a plain string, so the literal '{path!s}'
        # was emitted instead of the offending path
        raise ValueError(f"Invalid directory: '{path!s}'")

    original = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        # always restore the original working directory (the previous
        # `except Exception: raise err` clause was a no-op and is removed)
        os.chdir(original)
def walk_level(dir_, depth=1):
    """
    Like `os.walk`, but only descend `depth` levels below `dir_`.
    """
    # http://stackoverflow.com/a/234329/1098683
    dir_ = dir_.rstrip(os.path.sep)
    assert os.path.isdir(dir_)

    base_depth = dir_.count(os.path.sep)
    for root, folders, files in os.walk(dir_):
        yield root, folders, files
        # prune the sub-directory list in place once we are deep enough,
        # which stops os.walk from descending further
        if root.count(os.path.sep) >= base_depth + depth:
            folders[:] = []
|
class Solution(object):
    """Best time to buy and sell stock (single transaction).

    NOTE(review): all six implementations share the name `maxProfit`, so only
    the *last* definition survives on the class; the earlier ones remain for
    reference/comparison only. (Python 2 code — uses `xrange`.)
    """

    # Greedy: track the running minimum price and the best profit so far
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        if(prices==None or len(prices)==0):
            return 0
        min_v = prices[0]
        max_v = 0
        for i in range(len(prices)):
            min_v = min(min_v,prices[i])
            max_v = max(max_v,prices[i]-min_v)
        return max_v

    # Dynamic programming, 2D array:
    # dp[i][0] = best profit on day i holding no stock
    # dp[i][1] = best profit on day i holding stock
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        n = len(prices)
        dp = [[0]*2 for _ in xrange(n)]
        dp[0][0] = 0
        dp[0][1] = -prices[0]
        res = 0
        for i in xrange(1,n):
            dp[i][0] = max(dp[i-1][0],dp[i-1][1]+prices[i])
            dp[i][1] = max(dp[i-1][1],-prices[i])
            res = max(res,dp[i][0],dp[i][1])
        return res

    # Dynamic programming, two 1D arrays (sell/buy states)
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        n = len(prices)
        sell = [0 for _ in xrange(n)]
        buy = [0 for _ in xrange(n)]
        sell[0] = 0
        buy[0] = -prices[0]
        res = 0
        for i in xrange(1,n):
            sell[i] = max(sell[i-1],buy[i-1]+prices[i])
            buy[i] = max(buy[i-1],-prices[i])
            res = max(res,sell[i],buy[i])
        return res

    # Dynamic programming, O(1) extra space
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        dp0 = 0
        dp1 = -prices[0]
        n = len(prices)
        res = 0
        for i in xrange(1,n):
            dp0 = max(dp0,dp1+prices[i])
            dp1 = max(dp1,-prices[i])
            res = max(res,dp0,dp1)
        return res

    # Brute-force recursion (times out on large inputs)
    def maxProfit(self, prices):
        if not prices:
            return 0
        self.res = 0
        n = len(prices)
        def dfs(index):
            if n==index:
                return
            for i in xrange(index+1,n):
                if (prices[i]-prices[index])>self.res:
                    self.res = prices[i]-prices[index]
            dfs(index+1)
        dfs(0)
        return self.res

    # Recursion + memoization (still times out)
    def maxProfit(self, prices):
        if not prices:
            return 0
        self.res = 0
        n = len(prices)
        mem = [-1 for _ in xrange(n)]
        def dfs(index):
            if index==n:
                return
            if mem[index]>-1:
                return mem[index]
            for i in xrange(index+1,n):
                if prices[i]-prices[index]>self.res:
                    self.res = prices[i]-prices[index]
            mem[index] = self.res
            dfs(index+1)
        dfs(0)
        return self.res
|
import socket
s = socket.socket()  # TCP client socket shared by all helpers below
port = 12348  # server port; must match the companion server script
s.connect(('localhost', port))
def math():
    """Option 1: send a math expression to the server and print the result.

    NOTE(review): this function shadows the stdlib `math` module name.
    """
    s.send(b'1')  # tell the server which service we want
    data = input("Enter mathematical operation :")
    s.send(str(data).encode())
    r = s.recv(1024).decode()
    print("Result is :", r)
    s.close()
def ftp():
    """Option 2: upload a local file to the server in 1 KiB chunks."""
    s.send(b'2')  # select the upload service
    fname = input("Enter Filename :")
    # `with` guarantees the file handle is closed even if send() raises
    with open(fname, "rb") as f:
        chunk = f.read(1024)
        while chunk:
            print("sending....")
            s.send(chunk)
            chunk = f.read(1024)
    s.close()
def ftp2():
    """Option 3: receive a file from the server into server2client.txt."""
    s.send(b'3')  # select the download service
    # `with` guarantees the output file is flushed and closed on any error
    with open("server2client.txt", 'wb') as f:
        chunk = s.recv(1024)
        while chunk:
            f.write(chunk)
            chunk = s.recv(1024)
    print("Success")
    s.close()
# One-shot menu: perform the chosen action once, then exit.
while True :
    opt = int(input("1. Mathematical Operation - press 1\n2. Send File - press 2\n3. Recieve file - press 3\n4. Quit - press 4\n----> "))
    if opt == 1 :
        math()
        break
    elif opt == 2:
        ftp()
        break
    elif opt == 3:
        ftp2()
        break
    else :
        # any other input (including 4) quits
        break
s.close()
|
"""Tests for `pypendency` package."""
import uuid
import pytest
import pypendency.models.graph as pmg
from pypendency.models.generics import BaseNode, Relation, Direction
def test_graph_context():
    """Nodes created inside a Graph context register themselves on the graph."""
    graph = pmg.Graph(str(uuid.uuid4()))
    with graph:
        node = BaseNode("TestNode",
                        slug="t1",
                        type="Service",
                        id=str(uuid.uuid4()),
                        description="a Testnode")
        # node should know its graph, and the graph should own the node
        pytest.assume(node.graph)
        pytest.assume(node == graph.nodes.pop())
def test_relation():
    """Linking two nodes records an equivalent outgoing Relation."""
    graph = pmg.Graph(str(uuid.uuid4()))
    with graph:
        first = BaseNode("TestNode1",
                         slug="t1",
                         type="Service",
                         id=str(uuid.uuid4()),
                         description="a Testnode")
        second = BaseNode("TestNode2",
                          slug="t2",
                          type="Service",
                          id=str(uuid.uuid4()),
                          description="a Testnode")
        first.link_to(second, "link")
    expect = Relation(first, second, "link", direction=Direction.Link)
    actual = first.relations.pop()
    assert expect == actual
|
from .d4pg.engine import Engine as D4PGEngine
def load_engine(config):
    """Factory returning the configured D4PG engine instance."""
    engine = D4PGEngine(config)
    return engine
|
from yowsup.layers import YowProtocolLayer
from .protocolentities import *
import logging
logger = logging.getLogger(__name__)
class YowContactsIqProtocolLayer(YowProtocolLayer):
    """Protocol layer translating contact-related iq/notification tree nodes
    to and from their protocol entities."""

    def __init__(self):
        # map node tags to (receive, send) handler pairs
        handleMap = {
            "iq": (self.recvIq, self.sendIq),
            "notification": (self.recvNotification, None)
        }
        super(YowContactsIqProtocolLayer, self).__init__(handleMap)

    def __str__(self):
        return "Contact Iq Layer"

    def recvNotification(self, node):
        """Dispatch an incoming contacts notification node to the upper layer."""
        if node["type"] == "contacts":
            if node.getChild("remove"):
                self.toUpper(RemoveContactNotificationProtocolEntity.fromProtocolTreeNode(node))
            elif node.getChild("add"):
                self.toUpper(AddContactNotificationProtocolEntity.fromProtocolTreeNode(node))
            elif node.getChild("update"):
                self.toUpper(UpdateContactNotificationProtocolEntity.fromProtocolTreeNode(node))
            elif node.getChild("sync"):
                self.toUpper(ContactsSyncNotificationProtocolEntity.fromProtocolTreeNode(node))
        else:
            # lazy %-style args: formatting is skipped when the level is disabled
            logger.warning("Unsupported notification type: %s ", node["type"])
            logger.debug("Unsupported notification node: %s", node)

    def recvIq(self, node):
        """Forward contact-sync results to the upper layer."""
        if node["type"] == "result" and node.getChild("sync"):
            self.toUpper(ResultSyncIqProtocolEntity.fromProtocolTreeNode(node))

    def sendIq(self, entity):
        """Serialize outgoing contact-sync iq entities to the lower layer."""
        if entity.getXmlns() == "urn:xmpp:whatsapp:sync":
            self.toLower(entity.toProtocolTreeNode())
|
from easydict import EasyDict as edict
class Config:
    """Hyper-parameter namespace; values are read as `config.MODEL.*`."""
    # dataset
    # dataloader
    # model
    MODEL = edict()
    MODEL.SCALE = 4  # upscaling factor -- presumably super-resolution; verify
    MODEL.IN_CHANNEL = 3  # input image channels
    MODEL.OUT_CHANNEL = 3  # output image channels
    MODEL.N_FEATURE = 64
    MODEL.N_BLOCK = 23
    MODEL.GROWTH_CHANNEL = 32
    MODEL.DOWN = 1
    MODEL.DEVICE = 'cuda'
    # solver
    # initialization
    # log and save
    # validation
# module-level singleton consumed by importers of this module
config = Config()
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/24 17:51
# @Author : islander
# @File : model.py
from pprint import pprint
from typing import Dict, List
import tensorflow as tf
from . import layers
from .utils import *
from . import utils
class NetNotBuiltError(Exception):
    """Raised when a graph key is requested before `build_graph_` created it."""
    pass
class Din(object):
"""Deep Interest Net wrapper
The output of this model is expected to indicate probabilities (e.g., after a softmax)
Args:
input_config: the embedding configuration dict
shared_emb_config: specify shared embedding layers, format: emb_name -> tup of feat_names
feat_names is allowed to be not contained in input_config
attention_layers: list of layers.AirLayer
list attention layer operating seq, (each seq associated with one attention layer)
forward_net: layers.AirLayer
forward net producing score (CTR is the softmaxed score)
required_feat_names: keys in features passed to the build_net func, by default, [].
for each registered feat, the model will map the input to a named input via identity.
Attributes:
tensor_name_dict: maps keys in features to the tensorflow name in the computation graph,
additionally, the following tensor names will also be added:
- probs: computed probabilities
- labels: true labels
- loss: loss in session
note that this attribute will be updated in-place
features_ph: placeholder for the input features, if multiple graphs built, consistent with the current graph
labels_ph: placeholder for the labels, if multiple graphs built, consistent with the current graph
outputs: placeholder for the outputs, if multiple graphs built, consistent with the current graph
session: running session for the model, if multiple graphs built, consistent with the current graph
saver: checkpoint saver for the graph
current_graph: key of the current_graph
Keyword Args:
batch_norm: one of: bn (batchnorm), ln (layernorm), None (do not norm)
include_his_sum: whether to feed a naive history embedding sum into forward net, *besides* the attention output
epsilon: arithmetic robustness
use_seq: whether feed a specific seq info into forward net
this is given as a dict mapping seq_name to bool
use_vec: whether feed vec info into forward net (True is overrided if no VEC category appears in input_config)
use_moving_statistics: whether to use moving statistics for batchnorm in test phase,
note that this actually degrade the performance in practice
can be True or False or 'always' (use in train and eval mode)
Raises:
ValueError:
- when the number of attention layers is not the number of seqs
- when the given attention layers or forward_net is not subclass of layers.AirLayer
- when the given configuration violate rules, e.g, different shape for a shared embedding
"""
    def __init__(self, input_config: Dict, shared_emb_config=None, attention_layers=None, forward_net=None,
                 required_feat_names: List[str] = None,
                 **kwargs):
        """See the class docstring for argument semantics."""
        super().__init__()

        # kwargs
        self._batch_norm = kwargs.pop('batch_norm', 'bn')
        self._include_his_sum = kwargs.pop('include_his_sum', True)
        self._use_moving_statistics = kwargs.pop('use_moving_statistics', True)
        self._epsilon = kwargs.pop('epsilon', 1e-7)
        # will be overridden later while parsing the config
        self._use_vec = False
        self._use_seq = False

        if required_feat_names is None:
            required_feat_names = []
        self._required_feat_names = required_feat_names

        # generate tensor_name_dict (logical key -> graph tensor name)
        self.tensor_name_dict = {
            'probs': 'predict/probabilities',
            'labels': 'labels',
            'loss': 'compute_loss/loss',
        }
        for feat_name in self._required_feat_names:
            self.tensor_name_dict[feat_name] = feat_name

        # check whether the input_config and shared_emb_config is valid
        utils.check_config(input_config=input_config, shared_emb_config=shared_emb_config)

        # declare index member variables
        self._input_config_raw = input_config
        # map feature name to configurations
        # feat_name -> config
        self._input_config = utils.get_full_input_config(input_config=input_config)
        # map feature name to embedding layer name (for shared embedding config)
        # feat_name -> emb_layer_name (feat_name if not in shared_emb_config else emb_name)
        self._emb_dict = dict()
        # map embedding name to whether it is trainable
        # emb_name -> bool
        self._emb_trainable_dict = dict()
        # ordered list of seq_names
        # [seq_name_0, seq_name_1, ...]
        self._seq_names = set()

        # parse input_config (input configurations)
        for feat_name, config in input_config.items():
            # judge whether to use vec or seq, according to whether there exists such features
            input_category = config[constant.InputConfigKeys.CATEGORY]
            if utils.is_category(input_category, constant.InputCategoryPlace.VEC):
                self._use_vec = True
            if utils.is_category(input_category, constant.InputCategoryPlace.SEQ):
                self._use_seq = True
            # fill _emb_dict with feat_name
            if utils.is_category(input_category, constant.InputCategoryType.EMB):
                self._emb_dict[feat_name] = feat_name
            # for sequence inputs (val_seq or emb_seq), generate mask config
            if constant.InputConfigKeys.SEQ_NAME in config:
                seq_name = config[constant.InputConfigKeys.SEQ_NAME]
                self._seq_names.add(seq_name)
        self._seq_names = sorted(list(self._seq_names))
        # expand the scalar flag into a per-sequence dict
        self._use_seq = {seq_name: self._use_seq for seq_name in self._seq_names}

        # replace shared embeddings in _emb_dict with emb_name
        if shared_emb_config is not None:
            for emb_name, feat_names in shared_emb_config.items():
                for feat_name in feat_names:
                    if feat_name in self._emb_dict:  # feat_names is allowed to be not contained
                        self._emb_dict[feat_name] = emb_name

        # config trainable
        for feat_name, emb_name in self._emb_dict.items():
            self._emb_trainable_dict[emb_name] = True
        self._bn_before_forward_net_trainable = True

        # override use_vec and use_seq from keyed args
        if self._use_vec:
            self._use_vec = kwargs.pop('use_vec', True)
        _use_seq = kwargs.pop('use_seq', self._use_seq)
        for seq_name, is_using in self._use_seq.items():
            if is_using:
                self._use_seq[seq_name] = _use_seq[seq_name]

        # create default layers
        self._attention_layers, self._forward_net = self.__get_net(attention_layers, forward_net)

        # any remaining kwargs are unrecognized -> fail fast
        if len(kwargs) != 0:
            raise ValueError(f"Unrecognized kwargs: {kwargs}")

        # per-graph bookkeeping, keyed by the `key` given to build_graph_
        self._current_graph = None
        self._features_phs = {}
        self._labels_phs = {}
        self._outputss = {}
        self._sessions = {}
        self._graphs = {}
        self._savers = {}
    @property
    def features_ph(self):
        """Feature placeholders of the currently selected graph."""
        return self._features_phs[self._current_graph]
    @property
    def labels_ph(self):
        """Label placeholder of the currently selected graph."""
        return self._labels_phs[self._current_graph]
    @property
    def outputs(self):
        """Output tensor dict of the currently selected graph."""
        return self._outputss[self._current_graph]
    @property
    def session(self) -> tf.Session:
        """Session bound to the currently selected graph."""
        return self._sessions[self._current_graph]
    @property
    def saver(self) -> tf.train.Saver:
        """Checkpoint saver of the currently selected graph."""
        return self._savers[self._current_graph]
    @property
    def graph(self) -> tf.Graph:
        """The currently selected tf.Graph."""
        return self._graphs[self._current_graph]
    @property
    def current_graph(self):
        """Key of the currently selected graph (None before any build)."""
        return self._current_graph
def switch_graph(self, key):
"""switch the current graph
Args:
key: key for the new graph, consistent with the one provided for Din.build_graph_
"""
if not self.has_graph(key=key):
raise NetNotBuiltError('attempting to switch to graph {}, but has built only the following graphs: {}'.format(
key, self._graphs.keys()
))
self._current_graph = key
def has_graph(self, key):
return key in self._graphs
def load_from_to(self, from_key, to_key):
"""encapsulation of load_from, load values from from_key to to_key
"""
tmp = self.current_graph
self.switch_graph(key=to_key)
self.load_from(from_key)
self.switch_graph(tmp) # to avoid change current graph
    def load_from(self, key):
        """load the variables from another graph given by key

        Args:
            key: source graph key
        """
        source_sess = self._sessions[key]

        def _get_var_name(variable):
            # vars are indicated by name, discard the ':\d' on the right
            return variable.name.rsplit(':', 1)[0]

        # get values from the source session
        def _get_val_var(sess):
            with sess.graph.as_default():
                # var_name -> (var_value, var_dtype)
                return {_get_var_name(_var): (sess.run(_var), _var.dtype) for _var in tf.global_variables()}

        source_val_var = _get_val_var(source_sess)

        # apply values to the current session, matching variables by name
        def _load_values(sess, values):
            with sess.graph.as_default():
                for variable in tf.global_variables():
                    val, dtype = values[_get_var_name(variable)]
                    variable.load(val, sess)

        _load_values(self.session, source_val_var)
def _get_emb_name2feat_name(self):
emb_name2feat_name = dict()
for feat_name, emb_name in self._emb_dict.items():
emb_name2feat_name[emb_name] = feat_name
return emb_name2feat_name
    def build_graph_(self, key, *,
                     mode, device='gpu', optimizer=None, seed=None):
        """build the graph in self, and initializing pars with random values

        Args:
            mode: tf.estimator.ModeKeys.TRAIN or EVAL
            device: 'cpu' or 'gpu'
            key: indicator for the current graph, when switch mode, will use the key
                suggested to be set to 'train' and 'eval'
            optimizer: optimizer for the model, None means do not build the optimization part
            seed: random seed for graph initialization
        """
        graph = tf.Graph()
        with graph.as_default():
            if seed is not None:
                tf.random.set_random_seed(seed)
            with tf.device(device):
                # generate input placeholders
                features_ph, labels_ph = utils.get_inputs_ph(input_config=self._input_config_raw, batch_size=None)
                outputs = self.build_graph(features=features_ph, labels=labels_ph, mode=mode,
                                           params={'optimizer': optimizer})
            # allow_soft_placement lets ops without a kernel on `device` fall back
            session_config = tf.ConfigProto(allow_soft_placement=True,
                                            gpu_options=tf.GPUOptions(allow_growth=True))
            session = tf.Session(config=session_config)
            session.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=None)

        # register everything under `key` so switch_graph can find it
        self._features_phs[key] = features_ph
        self._labels_phs[key] = labels_ph
        self._outputss[key] = outputs
        self._sessions[key] = session
        self._graphs[key] = graph
        self._savers[key] = saver
        if self._current_graph is None:  # first graph, set current graph to this graph
            self._current_graph = key
def build_graph(self, features: Dict[str, tf.Tensor], labels: tf.Tensor, mode, params=None):
    """build the training graph (including optimization)

    Args: the same with model_fn
    Returns:
        a dict of result tensors: labels, logits, probs, classes, loss and
        (in TRAIN mode) gradient and train_op
    """
    ret = dict()
    # name the inputs so they can be fetched by name from a restored graph
    tf.identity(labels, name=self.tensor_name_dict['labels'])
    for feat_name in self._required_feat_names:
        feature = features[feat_name]
        tf.identity(feature, name=self.tensor_name_dict[feat_name])
    ret["labels"] = labels
    ret["logits"] = self.forward(features=features, mode=mode)
    with tf.name_scope('predict'):
        ret["probs"] = tf.nn.softmax(ret["logits"], name="probabilities")
        ret["classes"] = tf.argmax(input=ret["logits"], axis=1)
    # Calculate Loss (for both TRAIN and EVAL modes)
    with tf.name_scope('compute_loss'):
        # tf.losses.sparse_softmax_cross_entropy produces strange results only in odps, seems like a bug?
        # hand-rolled cross entropy instead: one-hot the labels, take the mean
        # of the true-class negative log probabilities; epsilon guards log(0)
        labels_one_hot = tf.one_hot(ret["labels"], depth=ret["logits"].get_shape()[-1], name='one_hot')
        neg_log_probs = - tf.log(ret["probs"] + self._epsilon, name='log_probabilities')
        loss_sum = tf.reduce_sum(labels_one_hot * neg_log_probs, axis=1, name='true_label_neg_log_prob')
        ret["loss"] = tf.reduce_mean(loss_sum, axis=0, name='loss')
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        with tf.name_scope('optimize'):
            optimizer = params.pop('optimizer')
            optimizer: tf.train.Optimizer
            # clip gradients to global norm 5 before applying them
            ret['gradient'] = tf.gradients(ret["loss"], tf.trainable_variables())
            ret['gradient'], _ = tf.clip_by_global_norm(ret['gradient'], 5)
            ret['gradient'] = list(zip(ret['gradient'], tf.trainable_variables()))
            train_op = optimizer.apply_gradients(
                ret['gradient'], global_step=tf.train.get_or_create_global_step()
            )
            # group with UPDATE_OPS so e.g. batch-norm moving stats update too
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            ret["train_op"] = tf.group([train_op, update_ops])
    return ret
def model_fn(self, features, labels, mode, params):
    """model_fn for estimator

    Builds the graph via ``build_graph`` and wraps its outputs in the
    EstimatorSpec appropriate for PREDICT / TRAIN / EVAL.

    Raises:
        ValueError: when ``mode`` is not one of tf.estimator.ModeKeys
    """
    outputs = self.build_graph(features=features, labels=labels, mode=mode, params=params)
    predictions = {
        "classes": outputs['classes'],
        "probs": outputs['probs'],
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        return tf.estimator.EstimatorSpec(mode=mode, loss=outputs['loss'], train_op=outputs['train_op'])
    if mode == tf.estimator.ModeKeys.EVAL:
        with tf.name_scope('evaluate'):
            eval_metric_ops = {
                "accuracy": tf.metrics.accuracy(labels=outputs['labels'], predictions=predictions['classes']),
            }
            if outputs['probs'].shape[1] == 2:  # only log binary aucs
                # use tf auc is calculated on multi-classes, but it diverges from binary auc, so have to transform
                true_probs = outputs['probs'][:, 1]
                num_thresholds = 10000
                eval_metric_ops.update({
                    # careful_interpolation is the recommended approximation;
                    # minoring/majoring bracket it from below and above
                    "auc": tf.metrics.auc(labels=outputs['labels'], predictions=true_probs, name='auc',
                                          num_thresholds=num_thresholds, summation_method='careful_interpolation'),
                    # lower bound of auc approximation
                    "auc_min": tf.metrics.auc(labels=outputs['labels'], predictions=true_probs,
                                              num_thresholds=num_thresholds,
                                              summation_method='minoring', name='auc_min'),
                    # upper bound of auc approximation
                    "auc_max": tf.metrics.auc(labels=outputs['labels'], predictions=true_probs,
                                              num_thresholds=num_thresholds,
                                              summation_method='majoring', name='auc_max'),
                })
        return tf.estimator.EstimatorSpec(mode=mode, loss=outputs['loss'], eval_metric_ops=eval_metric_ops)
    raise ValueError(f'mode should be tf.estimator.ModeKeys, but got {mode}')
def forward(self, features, mode):
    """forward pass and produce logits

    Pipeline: split features by category -> create embedding variables ->
    look up embeddings -> concat embedding/value features -> attention and
    optional sum-pooling over sequences -> optional normalization ->
    forward net producing logits.

    Raises:
        RuntimeError: when config parsing failed
    """
    # make sure a global step exists in the current graph
    tf.train.get_or_create_global_step()
    # NOTE(review): this message is not referenced in the visible code — confirm
    _INPUT_PARSE_FAIL_MSG = 'failure when parsing inputs for Din'
    # separate inputs by category; SEQ categories are further keyed by sequence name
    separated_features = {
        constant.InputCategory.EMB_VEC: dict(),
        constant.InputCategory.EMB_SEQ: {seq_name: dict() for seq_name in self._seq_names},
        constant.InputCategory.EMB_TGT: dict(),
        constant.InputCategory.VAL_VEC: dict(),
        constant.InputCategory.VAL_SEQ: {seq_name: dict() for seq_name in self._seq_names},
        constant.InputCategory.VAL_TGT: dict(),
        constant.InputCategory.MASK: dict(),
    }
    with tf.variable_scope('embedding', reuse=False) as embedding_scope:  # create embedding variables, reuse=False
        # parse inputs into the category buckets above
        for feat_name in self._input_config:
            feature = features[feat_name]
            config = self._input_config[feat_name]
            category = config[constant.InputConfigKeys.CATEGORY]
            if utils.is_category(category, constant.InputCategory.MASK):
                separated_features[category][feat_name] = feature
            elif utils.is_category(category, constant.InputCategoryPlace.SEQ):
                seq_name = config[constant.InputConfigKeys.SEQ_NAME]
                separated_features[category][seq_name][feat_name] = feature
            else:
                separated_features[category][feat_name] = feature
        # get variables (one embedding table per embedding name; trainability
        # is controlled per-table via _emb_trainable_dict, see freeze_embeddings)
        emb_name2emb_variable = dict()
        for emb_name, _feat_name in self._get_emb_name2feat_name().items():
            feat_name = constant.FEATURE_SEPARATOR.join((constant.FeaturePrefix.FEAT, _feat_name))
            emb_shape = self._input_config[feat_name][constant.InputConfigKeys.EMB_SHAPE]
            trainable = self._emb_trainable_dict[emb_name]
            emb_var = tf.get_variable(emb_name, shape=emb_shape, dtype=tf.float32, trainable=trainable)
            emb_name2emb_variable[emb_name] = emb_var
        # fetch embeddings: replace the id tensors with looked-up vectors
        emb_feat = self.__get_embeddings(separated_features[constant.InputCategory.EMB_VEC], emb_name2emb_variable)
        separated_features[constant.InputCategory.EMB_VEC] = emb_feat
        emb_feat = self.__get_embeddings(separated_features[constant.InputCategory.EMB_TGT], emb_name2emb_variable)
        separated_features[constant.InputCategory.EMB_TGT] = emb_feat
        for seq_name in self._seq_names:
            emb_feat = self.__get_embeddings(separated_features[constant.InputCategory.EMB_SEQ][seq_name],
                                             emb_name2emb_variable)
            separated_features[constant.InputCategory.EMB_SEQ][seq_name] = emb_feat
    # concat tgt and vec (embedding features together with raw value features)
    if self._use_vec:
        vec_cat = concat_emb_val(separated_features[constant.InputCategory.EMB_VEC],
                                 separated_features[constant.InputCategory.VAL_VEC], name='concat_vec')
    if self._use_seq:
        seq_cat = [concat_emb_val(separated_features[constant.InputCategory.EMB_SEQ][seq_name],
                                  separated_features[constant.InputCategory.VAL_SEQ][seq_name],
                                  name=f'concat_seq_{seq_name}')
                   for seq_name in self._seq_names]
    tgt_cat = concat_emb_val(separated_features[constant.InputCategory.EMB_TGT],
                             separated_features[constant.InputCategory.VAL_TGT], name='concat_tgt')
    # apply attention and sum pool, to prepare inputs for forward_net
    forward_net_inps = []
    if self._use_vec:
        forward_net_inps.append(vec_cat)
    if self._use_seq:
        # masks are ordered to line up with self._seq_names / seq_cat
        ordered_masks = get_ordered_dict_values(separated_features[constant.InputCategory.MASK])
        for seq_name, seq, mask in zip(self._seq_names, seq_cat, ordered_masks):
            # _use_seq doubles as a per-sequence enable map here
            if self._use_seq[seq_name]:
                att = self._attention_layers[seq_name]
                att_seq = att({
                    constant.InputCategoryPlace.TGT: tgt_cat,
                    constant.InputCategoryPlace.SEQ: seq,
                    constant.InputCategory.MASK: mask,
                }, mode=mode, name=f'attention_{seq_name}')
                forward_net_inps.append(att_seq)
                if self._include_his_sum:
                    # optionally also feed the plain (unattended) history sum
                    sum_seq = tf.reduce_sum(seq, axis=1, name=f'sum_{seq_name}')
                    forward_net_inps.append(sum_seq)
    forward_net_inps.append(tgt_cat)
    # prepare forward net input
    forward_net_inps = tf.concat(forward_net_inps, axis=-1, name='concat_for_forward_net')
    # forward net op: optional normalization before the MLP
    if self._batch_norm is not None:
        if self._batch_norm == 'bn':
            bn = tf.layers.BatchNormalization(name='forward_net_bn', trainable=self._bn_before_forward_net_trainable)
            # _use_moving_statistics: True -> moving stats in eval only,
            # False -> always batch stats, 'always' -> always moving stats
            if self._use_moving_statistics is True:
                training = (mode == tf.estimator.ModeKeys.TRAIN)
            elif self._use_moving_statistics is False:
                training = True
            elif self._use_moving_statistics == 'always':
                training = False
            else:
                raise ValueError('unrecognized _use_moving_statistics attribute: {}'.format(
                    self._use_moving_statistics)
                )
            forward_net_inps = bn(forward_net_inps, training=training)
        elif self._batch_norm == 'ln':
            bn = layers.LayerNormalization()
            forward_net_inps = bn(forward_net_inps, name='forward_net_bn', mode=mode)
        else:
            raise ValueError('batch_norm should be None or bn or ln, but got %s' % self._batch_norm)
    # pick the mode passed to the forward net (same moving-statistics policy)
    if self._use_moving_statistics == 'always':
        _mode = tf.estimator.ModeKeys.EVAL  # always use moving statistics, mode as train
    else:
        _mode = mode if self._use_moving_statistics else tf.estimator.ModeKeys.TRAIN
    logits = self._forward_net(forward_net_inps, name='forward_net', mode=_mode)
    return logits
def freeze_embeddings(self, emb_names=None):
    """freeze the embeddings specified in layer_names
    Args:
        emb_names:
            - str: freeze the embedding layer
            - list: freeze embedding layers specified
            - None: freeze all embedding layers
    """
    if isinstance(emb_names, str):
        targets = [emb_names]
    elif emb_names is None:
        targets = list(self._emb_trainable_dict)
    else:
        targets = emb_names
    for emb_name in targets:
        self._emb_trainable_dict[emb_name] = False
def freeze_bn(self):
    """Mark the batch-norm layer before forward_net as non-trainable."""
    self._bn_before_forward_net_trainable = False
def freeze_forward_net(self, layer_names=None):
    """Freeze forward_net layers (keys: dense_l{idx}, activation_l{idx}).

    ``layer_names=None`` freezes every layer; the actual work is delegated
    to the forward net's own ``freeze``.
    """
    self._forward_net.freeze(layer_names)
def freeze_attention(self, seq_names=None, layer_names=None):
    """freeze the attention layers specified by seq_name and layer_names
    - when seq_names is None, freeze all attention layers with layer_names
    - when layer_names is None, freeze all layers in the attention layer corresponding to seq_name
    """
    targets = self._seq_names if seq_names is None else seq_names
    for name in targets:
        self._attention_layers[name].freeze(layer_names)
def freeze_all(self):
    """Freeze every trainable part: bn, attention, embeddings, forward net."""
    # keep the original call order: bn, attention, embeddings, forward net
    for freeze in (self.freeze_bn, self.freeze_attention,
                   self.freeze_embeddings, self.freeze_forward_net):
        freeze()
def __get_embeddings(self, feat_dict, emb_name2emb_variable):
    """Look up embedding vectors for every id feature in ``feat_dict``.

    Args:
        feat_dict: feature name -> integer id tensor
        emb_name2emb_variable: embedding name -> embedding table variable
    Returns:
        dict mapping the same feature names to looked-up embedding tensors
    """
    emb_feats = dict()
    for feat_name in feat_dict:
        # strip the "feat" prefix to recover the raw feature name
        _feat_name = feat_name.split(constant.FEATURE_SEPARATOR)[-1]
        emb_indices = feat_dict[feat_name]
        emb_name = self._emb_dict[_feat_name]
        emb_shape = self._input_config[feat_name][constant.InputConfigKeys.EMB_SHAPE]
        # ids outside [0, vocab_size) are mapped to id 0 instead of crashing the lookup
        emb_indices = utils.replace_out_of_ranges(
            value=emb_indices, target_range=(0, emb_shape[0]), name='remove_invalid_emb_id', replace_value=0
        )
        embeddings = emb_name2emb_variable[emb_name]
        emb_feat = tf.nn.embedding_lookup(embeddings, emb_indices, name=f'lookup_{_feat_name}')
        emb_feats[feat_name] = emb_feat
    return emb_feats
def __get_net(self, attention_layers, forward_net):  # create default layers and apply check
    """Fill in default attention/forward-net layers and validate their types.

    Returns the (attention_layers, forward_net) pair, constructing defaults
    when either argument is None and raising ValueError on mismatches.
    """
    if attention_layers is None:
        # default: one DIN attention layer per sequence
        attention_layers = {seq_name: layers.DinAttention() for seq_name in self._seq_names}
    elif len(attention_layers) != len(self._seq_names):
        # the number of attention layers must match the number of sequences
        raise ValueError(f"the number of attention layers should match the number of sequences, "
                         f"but got {len(attention_layers)} layers and {len(self._seq_names)} sequences")
    if forward_net is None:
        # default forward net: small MLP with Dice activations, 2-class output
        forward_net = layers.MLP(layer_dims=[200, 80, 2], activations=layers.Dice())
    # every layer must derive from the project's AirLayer base class
    for seq_name, layer in attention_layers.items():
        if not isinstance(layer, layers.AirLayer):
            raise ValueError(f"attention_layers should subcalss layers.AirLayer, "
                             f"but the {seq_name} attention layer is {type(layer)}")
    if not isinstance(forward_net, layers.AirLayer):
        raise ValueError(f"forward_net should subclass layers.AirLayer, but find {type(forward_net)}")
    return attention_layers, forward_net
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 14:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable boolean ``warehouse`` flag to the Office model."""

    dependencies = [
        ('houses', '0091_auto_20170109_1536'),
    ]

    operations = [
        migrations.AddField(
            model_name='office',
            name='warehouse',
            # NOTE(review): NullBooleanField is deprecated since Django 2.1
            # (removed in 4.0) in favour of BooleanField(null=True). Kept as-is
            # because applied migrations should remain byte-stable.
            field=models.NullBooleanField(),
        ),
    ]
|
import numpy as np
from scipy.special import erfinv
from scipy.stats import norm
from mmu.lib._mmu_core_tests import norm_ppf
from mmu.lib._mmu_core_tests import erfinv as cpp_erfinv
from mmu.lib._mmu_core_tests import binomial_rvs
from mmu.lib._mmu_core_tests import multinomial_rvs
def test_erfinv():
    """Compare the C++ erfinv implementation against scipy on random inputs."""
    n_samples = 1000
    # stay strictly inside (-0..1) to avoid the +/-1 infinities
    samples = np.random.uniform(1e-12, 1 - 1e-12, n_samples)
    matches = 0
    for value in samples:
        matches += np.isclose(erfinv(value), cpp_erfinv(value), atol=1e-15)
    assert matches == n_samples
def test_erfinv_special_cases():
    """Test special cases of erfinv."""
    # NaN and arguments outside [-1, 1] by a representable margin -> NaN
    assert np.isnan(cpp_erfinv(np.nan))
    assert np.isnan(cpp_erfinv(-1.01))
    assert np.isnan(cpp_erfinv(1.01))
    assert np.isnan(cpp_erfinv(1. + 1e-12))
    assert np.isnan(cpp_erfinv(-1. - 1e-12))
    # exactly +/-1 -> +/-infinity
    assert np.isinf(cpp_erfinv(1.))
    assert np.isneginf(cpp_erfinv(-1.))
    # 1e-16 is below double precision near 1, so these round to exactly +/-1
    assert np.isinf(cpp_erfinv(1. + 1e-16))
    assert np.isinf(cpp_erfinv(-1. - 1e-16))
def test_norm_ppf():
    """Test C++ implemenation PPF of Normal dist compared to scipy."""
    N = 1000
    cnt = 0
    for _ in range(N):
        p = np.random.uniform(1e-12, 1 - 1e-12)
        mu = np.random.uniform(-1000, 1000)
        # NOTE(review): low=1e5 > high=1000 — numpy still draws (from the
        # inverted interval), but this looks like a typo for 1e-5; confirm.
        sigma = np.random.uniform(1e5, 1000)
        scp_ppf = norm(mu, sigma).ppf(p)
        ppf = norm_ppf(mu, sigma, p)
        # atol=1e-15 is negligible at these magnitudes; agreement effectively
        # relies on np.isclose's default relative tolerance
        cnt += np.isclose(ppf, scp_ppf, atol=1e-15)
    assert cnt == N
def test_binomial_rvs():
    """Check mean and support of the C++ binomial sampler for two (p, n) pairs.

    The two cases were previously duplicated code blocks; loop over them
    instead so the assertions stay in one place.
    """
    size = 100000
    for p, n in ((0.23, 10), (0.78, 1000)):
        outp = binomial_rvs(size, n, p, seed=890714, stream=0)
        probas = outp / n
        # empirical success rate should match p closely at this sample size
        assert np.isclose(probas.mean(), p, rtol=5e-4)
        # a draw can never exceed the number of trials
        assert np.max(outp) <= n
def test_multinomial_rvs():
    """Check marginal probabilities and support of the C++ multinomial sampler."""
    seed = 87326134
    rng = np.random.default_rng(seed)
    # random 4-category probability vector that sums to one
    probs = rng.dirichlet(np.ones(4))
    n_trials = 10
    n_draws = 400_000
    draws = multinomial_rvs(n_draws, n_trials, probs, seed=seed, stream=0)
    empirical = (draws / n_trials).mean(0)
    assert np.isclose(empirical, probs, rtol=1e-3).all()
    assert np.max(draws) <= n_trials
|
class Module(object):
    """Minimal base class for pluggable modules.

    Subclasses are expected to set ``__key__`` and override ``start``.
    """

    # registry key — None means the module is unkeyed (presumably set by
    # subclasses; confirm against the registry that consumes it)
    __key__ = None

    def start(self):
        """Startup hook; the default implementation does nothing."""
        pass
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkmeshimplicitdistance.py,v $
## Language: Python
## Date: $Date: 2014/10/24 16:35:13 $
## Version: $Revision: 1.10 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this class was contributed by
## Marco Fedele (marco.fedele@polimi.it)
## Politecnico di Milano
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vtkvmtk
from vmtk import pypes
class vmtkMeshImplicitDistance(pypes.pypeScript):
    """Compute the (signed) distance from a reference surface for an input
    mesh by delegating to vmtkSurfaceImplicitDistance."""

    def __init__(self):

        pypes.pypeScript.__init__(self)

        # inputs / options
        self.ReferenceSurface = None    # surface the distance is measured from
        self.Input = None               # NOTE(review): appears unused — Mesh is the actual input
        self.Mesh = None
        self.ArrayName = 'ImplicitDistance'
        self.Array = None               # NOTE(review): appears unused in this script
        self.ComputeSignedDistance = 1  # signed vs. unsigned distance
        self.DistanceThreshold = None   # clamp points beyond this distance
        self.Binary = 0                 # write inside/outside values instead of distances
        self.OutsideValue = 1.0
        self.InsideValue = 0.0
        self.CellData = 0               # output as Cell Data instead of Point Data
        self.OverwriteOutsideValue = 1

        self.SetScriptName('vmtkmeshimplicitdistance')
        self.SetScriptDoc('compute distance from a reference surface in an input surface')
        self.SetInputMembers([
            ['Mesh','i','vtkUnstructuredGrid',1,'','the input mesh','vmtkmeshreader'],
            ['ReferenceSurface','r','vtkPolyData',1,'','the reference surface','vmtksurfacereader'],
            ['ArrayName','array','str',1,'','name of the array of the surface where the computed values are stored'],
            ['ComputeSignedDistance','signeddistance','bool',1,'','if true compute signed distance, else unsigned distance'],
            ['DistanceThreshold','distancethreshold','float',1,'(0.0,)','if set, point more distant than this threshold are taken constant'],
            ['Binary','binary','bool',1,'','fill the distance array with inside/outside values instead of distance values (overwrite the signeddistance value) '],
            ['InsideValue','inside','float',1,'','value with which the surface is filled where the distance is negative (binary only)'],
            ['OutsideValue','outside','float',1,'','value with which the surface is filled where the distance is positive (binary only)'],
            ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when the array already exists in the input surface (binary only)'],
            ['CellData','celldata','bool',1,'','output in a Cell Data array (instead of a Point Data array)']
        ])
        self.SetOutputMembers([
            ['Mesh','o','vtkUnstructuredGrid',1,'','the output mesh','vmtkmeshwriter']
        ])

    def Execute(self):
        """Validate inputs and run vmtkSurfaceImplicitDistance on the mesh."""
        if self.Mesh == None:
            self.PrintError('Error: No Mesh.')
        if self.ReferenceSurface == None:
            self.PrintError('Error: No ReferenceSurface.')

        from vmtk import vmtkscripts
        implicitDistance = vmtkscripts.vmtkSurfaceImplicitDistance()
        # forward every option to the surface-level implementation
        implicitDistance.Input = self.Mesh
        implicitDistance.ReferenceSurface = self.ReferenceSurface
        implicitDistance.ArrayName = self.ArrayName
        implicitDistance.ComputeSignedDistance = self.ComputeSignedDistance
        implicitDistance.DistanceThreshold = self.DistanceThreshold
        implicitDistance.Binary = self.Binary
        implicitDistance.OutsideValue = self.OutsideValue
        implicitDistance.InsideValue = self.InsideValue
        implicitDistance.CellData = self.CellData
        implicitDistance.OverwriteOutsideValue = self.OverwriteOutsideValue
        implicitDistance.Update()
        # NOTE(review): nothing is copied back to self.Mesh here — presumably
        # the delegate adds the distance array to the input mesh in place; confirm.
# standard vmtk entry point: hand the command-line arguments to a pype runner
if __name__=='__main__':
    main = pypes.pypeMain()
    main.Arguments = sys.argv
    main.Execute()
|
from datetime import datetime, timedelta
import os
import random
import names
from dateutil.relativedelta import relativedelta
from django.conf import settings
from db.models import Branch, CulturalFit, JobType, Language, LanguageLevel, Skill, SoftSkill, ProfileState, Benefit, \
JobRequirement, JobPostingState, ProjectType, Topic, Keyword, ProjectPostingState
# pylint: disable=R0902
# pylint: disable=C0200
# pylint: disable=R0904
class Random:
    """Random test-data generator used for seeding the database.

    Loads fixture file names and reference-table ids once at construction
    time, then exposes small accessor methods that draw random values from
    those pools.
    """

    def __init__(self):
        self._gender_data = ['male', 'female']
        # fixture files (avatars, documents, moods, logos) under MEDIA_FIXTURE_ROOT
        path = os.path.join(settings.MEDIA_FIXTURE_ROOT, 'student', 'avatars', 'male')
        self._male_avatars = self._load_files(path)
        path = os.path.join(settings.MEDIA_FIXTURE_ROOT, 'student', 'avatars', 'female')
        self._female_avatars = self._load_files(path)
        path = os.path.join(settings.MEDIA_FIXTURE_ROOT, 'student', 'documents')
        self._documents = self._load_files(path)
        path = os.path.join(settings.MEDIA_FIXTURE_ROOT, 'company', 'moods')
        self._moods = self._load_files(path)
        path = os.path.join(settings.MEDIA_FIXTURE_ROOT, 'company', 'avatars')
        self._logos = self._load_files(path)
        # id pools pulled from the reference tables (queried once here)
        self._branches = list(Branch.objects.all().values_list('id', flat=True))
        self._cultural_fits = list(CulturalFit.objects.all().values_list('id', flat=True))
        self._job_types = list(JobType.objects.all().values_list('id', flat=True))
        self._languages = list(Language.objects.all().values_list('id', flat=True))
        self._short_list_languages = list(
            Language.objects.filter(short_list=True).values_list('id', flat=True))
        self._language_levels = list(LanguageLevel.objects.all().values_list('id', flat=True))
        self._skills = list(Skill.objects.all().values_list('id', flat=True))
        self._soft_skills = list(SoftSkill.objects.all().values_list('id', flat=True))
        self._benefits = list(Benefit.objects.all().values_list('id', flat=True))
        self._requirements = list(JobRequirement.objects.all().values_list('id', flat=True))
        self._project_types = list(ProjectType.objects.all().values_list('id', flat=True))
        self._topics = list(Topic.objects.all().values_list('id', flat=True))
        self._keywords = list(Keyword.objects.all().values_list('id', flat=True))
        self._addresses = self._load_address_data()
        # static choice pools
        self._hobby_data = [
            'Gamen', 'Fussball', 'Programmieren', 'Kochen', 'Jodeln', 'Wandern', 'Handball', 'Lego',
            'Gitarre', 'Flöte', 'mit dem Hund spazieren', 'Kollegen treffen', 'Ausgang', 'Bowling',
            'Malen', 'Zeichnen'
        ]
        self._state_data = [ProfileState.PUBLIC, ProfileState.ANONYMOUS]
        self._job_posting_state_data = [JobPostingState.PUBLIC, JobPostingState.DRAFT]
        self._project_posting_state_data = [ProjectPostingState.PUBLIC, ProjectPostingState.DRAFT]
        self._titles = [
            'Praktikant*in Applikationsentwicklung', 'Praktikant*in Systemtechnik',
            'Praktikant*in DevOps', 'Praktikant*in Frontendentwicklung', 'Praktikant*in HTML / CSS',
            'Praktikant*in Design', 'Praktikant*in UX', 'Praktikant*in Grafik',
            'Praktikant*in User Experience', 'Praktikant*in Social Media',
            'Praktikant*in Datenbanken', 'Praktikant*in PHP', 'Praktikant*in Python',
            'Praktikant*in Javascript', 'Praktikant*in Vue.js / React.js'
        ]
        self._project_titles = [
            'Projekt KI', 'Projekt AI', 'Projekt ABC', 'Projekt DEF', 'Projekt GHI', 'Projekt JKL',
            'Projekt MNO', 'Projekt Künstliche Intelligenz im Alltag'
        ]
        self._workloads = [50, 60, 70, 80, 90, 100]

    def _random(self, items, count):
        """Draw ``count`` random items from ``items``.

        NOTE: shuffles ``items`` in place (mutates the stored pools, which is
        harmless here since their order is irrelevant). Returns a single item
        when ``count == 1``, otherwise a list of ``count`` items.
        """
        random.shuffle(items)
        if count == 1:
            return items[0]
        return items[:count]

    def _load_files(self, path):
        """Return the image/video file names found directly inside ``path``.

        The path is re-rooted under MEDIA_ROOT; if ``path`` is already
        absolute, os.path.join leaves it unchanged.
        """
        path = os.path.join(settings.MEDIA_ROOT, path)
        file_names = [
            file_name for file_name in os.listdir(path)
            if os.path.isfile(os.path.join(path, file_name))
        ]
        files = []
        for file_name in file_names:
            # only accept known image/video extensions
            if not file_name.lower().endswith(('.png', '.jpg', '.jpeg', '.mp4')):
                continue
            files.append(file_name)
        return files

    def _load_address_data(self):
        """Parse address fixtures: "street, zip - city" lines into tuples."""
        address_list = []
        with open('db/seed/data/address_list.txt', encoding='utf-8') as address_file:
            lines = address_file.readlines()
            for line in lines:
                parts = line.split(',')
                address = parts[0].strip()
                parts2 = parts[1].split(' - ')
                # (street, zip, city)
                address_list.append((address, parts2[0].strip(), parts2[1].strip()))
        return address_list

    def _date(self, date):
        """Parse an ISO ``YYYY-MM-DD`` string into a date."""
        return datetime.strptime(date, '%Y-%m-%d').date()

    def _date_between(self, start, end):
        """Return a random datetime between two ``YYYY-MM-DD`` strings."""
        start = self._date(start)
        end = self._date(end)
        delta = end - start
        int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
        random_second = random.randrange(int_delta)
        return start + timedelta(seconds=random_second)

    def _has_german(self, languages):
        # 5 is presumably the Language pk for German — confirm against fixtures
        for language in languages:
            if language == 5:
                return True
        return False

    def gender(self):
        return self._random(self._gender_data, 1)

    def name(self, gender):
        return names.get_full_name(gender=gender)

    def avatar(self, gender):
        """Return a random avatar file name matching ``gender``."""
        if gender == 'male':
            return self._random(self._male_avatars, 1)
        return self._random(self._female_avatars, 1)

    def branch(self):
        return self._random(self._branches, 1)

    def branches(self):
        count = random.randint(2, 5)
        return self._random(self._branches, count)

    def address(self):
        return self._random(self._addresses, 1)

    def cultural_fits(self):
        return self._random(self._cultural_fits, 6)

    def date_of_birth(self):
        return self._date_between('1978-01-01', '2005-12-31')

    def hobbies(self):
        count = random.randint(2, 5)
        return self._random(self._hobby_data, count)

    def job_type(self):
        return self._random(self._job_types, 1)

    def project_type(self):
        return self._random(self._project_types, 1)

    def topic(self):
        return self._random(self._topics, 1)

    def keywords(self):
        count = random.randint(2, 3)
        return self._random(self._keywords, count)

    def project_from_date(self):
        # projects reuse the job date range
        return self.job_from_date()

    def job_from_date(self):
        return self._date_between('2020-01-01', '2021-12-21')

    def job_to_date(self, from_date):
        """Return a random end date within 12 months after ``from_date``."""
        end_date = from_date + relativedelta(months=12)
        return self._date_between(datetime.strftime(from_date, '%Y-%m-%d'),
                                  datetime.strftime(end_date, '%Y-%m-%d'))

    def languages(self):
        """Return 3 random languages (plus German if missing) with random levels."""
        languages = self._random(self._languages, 3)
        if not self._has_german(languages):
            languages.append(5)  # german
        levels = self._random(self._language_levels, len(languages))
        result = []
        for i in range(0, len(languages)):
            obj = {"language": languages[i], "language_level": levels[i]}
            result.append(obj)
        return result

    def languages_shortlist(self):
        """Same as ``languages`` but drawn from the short-listed languages."""
        languages = self._random(self._short_list_languages, 3)
        if not self._has_german(languages):
            languages.append(5)  # german
        levels = self._random(self._language_levels, len(languages))
        result = []
        for i in range(0, len(languages)):
            obj = {"language": languages[i], "language_level": levels[i]}
            result.append(obj)
        return result

    def online_projects(self):
        # NOTE(review): range(1, count) yields count - 1 urls — confirm whether
        # exactly ``count`` projects were intended.
        count = random.randint(2, 10)
        online_projects = []
        for i in range(1, count):
            online_projects.append(f'http://www.project-{i}.lo')
        return online_projects

    def skills(self):
        count = random.randint(2, 8)
        return self._random(self._skills, count)

    def soft_skills(self):
        return self._random(self._soft_skills, 6)

    def documents(self):
        count = random.randint(2, 3)
        return self._random(self._documents, count)

    def mobile(self):
        # fixed dummy number — not random on purpose
        return '+41791234567'

    def phone(self):
        return '+41791234567'

    def distinction(self):
        return 'Distinction'

    def state(self):
        return self._random(self._state_data, 1)

    def job_posting_state(self):
        return self._random(self._job_posting_state_data, 1)

    def project_posting_state(self):
        return self._random(self._project_posting_state_data, 1)

    def uid(self):
        """Return a fake Swiss company UID like ``CHE-123.456.789``."""
        numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        numbers = self._random(numbers, len(numbers))
        return f'CHE-{numbers.pop(0)}{numbers.pop(0)}{numbers.pop(0)}.' \
            f'{numbers.pop(0)}{numbers.pop(0)}{numbers.pop(0)}.{numbers.pop(0)}{numbers.pop(0)}{numbers.pop(0)}'

    def description(self):
        return 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut ' \
               'labore et dolore magna aliqua. Ac tincidunt vitae semper quis lectus nulla at volutpat. Ultrices ' \
               'tincidunt arcu non sodales neque sodales ut. Velit sed ullamcorper morbi tincidunt ornare massa. Et ' \
               'tortor at risus viverra adipiscing at. Diam quis enim lobortis scelerisque fermentum.'

    def services(self):
        return 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut ' \
               'labore et dolore magna aliqua. Laoreet id donec ultrices tincidunt arcu non sodales. Ut eu sem ' \
               'integer vitae. Est velit egestas dui id ornare arcu odio.'

    def benefits(self):
        count = random.randint(2, 8)
        return self._random(self._benefits, count)

    def moods(self):
        count = random.randint(2, 7)
        return self._random(self._moods, count)

    def mood(self):
        return self._random(self._moods, 1)

    def logo(self):
        return self._random(self._logos, 1)

    def title(self):
        return self._random(self._titles, 1)

    def project_title(self):
        return self._random(self._project_titles, 1)

    def workload(self):
        return self._random(self._workloads, 1)

    def number(self):
        return random.randint(1, 5)

    def requirements(self):
        count = random.randint(2, 5)
        return self._random(self._requirements, count)

    def graduation(self):
        return self._date_between('2020-01-01', '2021-12-21')
|
from keras.preprocessing import image
from keras.models import Sequential,load_model
from keras.layers import Dense,Conv2D,Flatten,BatchNormalization,MaxPool2D,Dropout
import cv2
import numpy as np
from sklearn.utils import shuffle
import os
# dataset roots: one sub-directory per class under each of these
# NOTE(review): absolute /data paths suggest a container/volume mount — confirm
train_dir = "/data/train"
test_dir = "/data/test"
def data_loader(dir):
    """Load images and binary labels from a class-per-folder directory tree.

    Args:
        dir: root directory containing one sub-directory per class; the
            "with_mask" folder is labelled 1, every other folder 0.
            (name kept for backward compatibility although it shadows the
            builtin ``dir``)

    Returns:
        (pictures, labels) shuffled in unison with a fixed random_state:
        a list of 224x224 BGR images and the matching list of int labels.
    """
    labels = []
    pictures = []
    for class_name in os.listdir(dir):
        # binary label: 1 for "with_mask", 0 for anything else
        label = 1 if class_name == "with_mask" else 0
        class_dir = os.path.join(dir, class_name)
        for pic in os.listdir(class_dir):
            img = cv2.imread(os.path.join(class_dir, pic))
            img = cv2.resize(img, (224, 224))
            pictures.append(img)
            labels.append(label)
    # fixed seed keeps the shuffle reproducible across runs
    return shuffle(pictures, labels, random_state=10000)
# load and convert the datasets to numpy arrays for keras
x_train,y_train = data_loader(train_dir)
x_test,y_test = data_loader(test_dir)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
# simple CNN: stacked conv blocks with pooling/batch-norm, then a dense head
# with dropout and a 2-way softmax (mask / no mask)
model = Sequential()
model.add(Conv2D(124,(3,3),activation="relu",input_shape=(224,224,3)))
model.add(Conv2D(124,(3,3),activation="relu"))
model.add(MaxPool2D((2,2)))
model.add(Conv2D(64,(3,3),activation="relu"))
model.add(Conv2D(64,(3,3),activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(32,(3,3),activation="relu"))
model.add(Conv2D(32,(3,3),activation="relu"))
model.add(MaxPool2D((2,2)))
model.add(Conv2D(32,(3,3),activation="relu"))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(64,activation="relu"))
model.add(Dense(32,activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(2,activation="softmax"))
# sparse_categorical_crossentropy because labels are ints, not one-hot
model.compile(optimizer="adam",loss="sparse_categorical_crossentropy",metrics=['accuracy'])
# NOTE(review): the test split is used as validation data here — there is no
# separate held-out set; confirm this is intended.
save = model.fit(x_train,y_train,validation_data=(x_test,y_test),epochs=50)
model.save("./model.h5")
|
# initialise a list
prime_list = [2, 3, 5, 7, 11, 13]
# lists may contain arbitrary objects, mixed and repeated
stuff = [5, 1.56363, 5, False, prime_list, None, False]
# reference individual elements
first_prime = prime_list[0]  # 2
third_prime = prime_list[2]  # 5
last_prime = prime_list[-1]  # 13
# elements can also be written
prime_list[0] = 42
# append new elements
prime_list.append(15)
print(prime_list)  # [42, 3, 5, 7, 11, 13, 15]  (index 0 was overwritten above)
# remove elements by value
prime_list.remove(11)
print(prime_list)  # [42, 3, 5, 7, 13, 15]
# remove (and return) the LAST element — pop() without an index
last_prime = prime_list.pop()  # 15
print(prime_list)  # [42, 3, 5, 7, 13]
|
from flask import Blueprint, render_template, redirect, url_for, flash
from solarvibes.models import User, Agripump, Agrimodule, Field
from flask_login import current_user
from flask_security import login_required
# blueprint for the user-facing Agripump pages (templates under ./templates)
agripump_bp = Blueprint(
    'agripump_bp',
    __name__,
    template_folder="templates"
)
##################
# USER AGRIPUMP
##################
@agripump_bp.route('/', methods=['GET'])
@agripump_bp.route('/<agripump_id>', methods=['GET'])
@login_required
def show(agripump_id = None):
    """Render the detail page for a single Agripump.

    Validates the id, loads the related agrimodule/field/farm/crop/pump
    objects for the current user, converts the pump specs to display units
    and estimates the daily energy consumption in kWh.
    """
    # Validation — reuse the single query result instead of querying twice
    if agripump_id is None:
        flash('page not allowed')
        return redirect(url_for('main.index'))
    agripump = Agripump.query.filter_by(id=agripump_id).first()
    if agripump is None:
        flash('That agripump do NOT exist')
        return redirect(url_for('main.index'))
    # objects query
    user = current_user
    agrimodule = Agrimodule.query.filter_by(id=agripump.agrimodule_id).first()
    field = Field.query.filter_by(id=agrimodule.field_id).first()
    farm = user.farms.filter_by(id=field.farm_id).first()
    crop = field.crops.first()
    pump = user.pumps.filter_by(id=agripump.pump_id).first()

    # unit conversions for display: mL/min -> L/s, cm -> m, W -> kW
    def mlpm_to_lps(mlpm):
        return mlpm / (1000 * 60)

    def cm_to_m(cm):
        return cm / 100

    def w_to_kw(w):
        return w / 1000

    pump_info = {
        'pump_name': pump.pump_name,
        'pump_brand': pump.pump_brand,
        'pump_flow_rate': mlpm_to_lps(pump.pump_flow_rate),
        'pump_watts': w_to_kw(pump.pump_watts),
        'pump_head': cm_to_m(pump.pump_head),
    }

    # Energy for a full daily watering cycle:
    # watts -> watt-minutes, times the minutes the pump must run, -> kWh
    def w_to_wm(w):
        return w * 60

    def wm_to_kwh(wm):
        return wm / (60 * 60 * 1000)

    pump_wmin_consumption = w_to_wm(pump.pump_watts)
    # minutes needed to deliver the field's daily water requirement
    pump_minutes_on = field.field_water_required_day / pump.pump_flow_rate
    pump_consumption_kwh_per_day = wm_to_kwh(pump_wmin_consumption * pump_minutes_on)

    return render_template('agripump/show.html',
                           pump = pump_info,
                           agripump = agripump,
                           farm = farm,
                           field = field,
                           crop = crop,
                           pump_consumption_kwh_per_day=pump_consumption_kwh_per_day,
                           sensortype = 'Agripump',
                           system_name = agrimodule.name)
# AGRIPUMP
# TIME USAGE
# start_hour_per_day = db.Column(db.Integer)
# qty_hour_per_day = db.Column(db.Integer)
# time_per_hour = db.Column(db.Float)
# time_per_day = db.Column(db.Float)
# time_per_cycle = db.Column(db.Float)
# WATER USAGE
# water_per_hour = db.Column(db.Integer)
# water_per_day = db.Column(db.Integer)
# water_per_cycle = db.Column(db.Integer)
# ENERGY USAGE
# energy_per_hour = db.Column(db.Integer)
# energy_per_day = db.Column(db.Integer)
# energy_per_cycle = db.Column(db.Integer)
# PUMP
# brand = db.Column(db.String(25))
# flow_rate = db.Column(db.Float(precision=2), nullable=False)
# height_max = db.Column(db.Float(presicion=2), nullable=False)
# wh = db.Column(db.Float(precision=2), nullable=False)
|
import tensorflow as tf
# one-hot encoding demo
classes = 3
labels = tf.constant([1, 0, 2])  # input element values range from 0 to 2
# each label becomes a one-hot vector of length `classes`
output = tf.one_hot(labels, depth=classes)
print("result of labels1:", output)
print("\n")
|
#!/usr/bin/env python
# coding: utf-8
import time
from operator import itemgetter
from aiocache import SimpleMemoryCache
from aiocache.serializers import JsonSerializer
from sanic import Sanic
from sanic.exceptions import NotFound
from sanic.response import json
from sanic_jinja2 import SanicJinja2
from tinydb import TinyDB
from config.config import (
match_type, site_name, upload_image_path,
cpu_count, knn_match_num, bf_match_num
)
from mlibs.orb_features import generate_image_feature
from mlibs.orb_matches import knn_match, bf_match
# Application setup: serve ./static under /static and attach Jinja2 templating.
app = Sanic(__name__)
app.static('/static', './static')
jinja = SanicJinja2(app)
# image_uid -> ORB feature cache, populated from TinyDB in the __main__ block.
cache_features = {}
@app.exception(NotFound)
async def ignore_404s(request, exception):
    # NotFound is also raised by the upload handler for invalid files; reply
    # with a minimal JSON body instead of Sanic's default HTML error page.
    return json({'code': '404'})
@app.route("/")
@jinja.template('index.html')
async def index(request):
response_dict = {
'site_name': site_name,
}
return response_dict
@app.route("/search")
@jinja.template('search.html')
async def view_search_result(request):
cache = SimpleMemoryCache(serializer=JsonSerializer())
response_dict = await cache.get("response_dict")
if not response_dict:
response_dict = {}
return response_dict
@app.route("/search", methods=["POST"])
@jinja.template('search.html')
async def new_search(request):
upload_image = request.files.get("image")
if not upload_image:
raise NotFound(message='not image file')
image_types = ['image/jpeg', 'image/jpg', 'image/png']
if upload_image.type not in image_types:
raise NotFound(message='not image file')
upload_image_type = upload_image.type.split('/')[-1]
file_name = str(time.time())[:10] + '.' + upload_image_type
file_path = upload_image_path + file_name
with open(file_path, "wb") as f:
f.write(request.files["image"][0].body)
search_results = image_search(file_path)[:5]
cache = SimpleMemoryCache(serializer=JsonSerializer())
response_dict = {
'site_name': site_name,
'upload_image': file_name,
'search_results': search_results
}
await cache.set("response_dict", response_dict)
return response_dict
def image_search(image_path=None):
    """Match the query image's ORB features against every cached image.

    Returns (image_uid, match_count) pairs sorted by match count, descending.
    """
    global cache_features
    search_feature = generate_image_feature(image_path, False)[1]
    results = []
    for image_uid, feature in cache_features.items():
        if match_type == 1:
            # KNN matching; images below knn_match_num matches are skipped.
            count = knn_match(search_feature, feature)
            threshold = knn_match_num
        else:
            # Brute-force matching; images below bf_match_num are skipped.
            count = bf_match(search_feature, feature)
            threshold = bf_match_num
        if count >= threshold:
            results.append((image_uid, count))
    return sorted(results, key=itemgetter(1), reverse=True)
if __name__ == '__main__':
    # Load the precomputed ORB features from TinyDB into the module-level
    # cache before serving; table.all()[0] holds the image_uid -> feature map.
    db = TinyDB('./static/dataset.db')
    table = db.table('feature')
    cache_features = table.all()[0]
    app.run(host='0.0.0.0', port=5555, workers=cpu_count)
|
from typing import List, Optional
from src.utils import print_util
# https://yzhong-cs.medium.com/serialize-and-deserialize-complex-json-in-python-205ecc636caa
class ShortcutParam(object):
    """A single named parameter of a Shortcut.

    Dynamic parameters are filled in by the user at invocation time;
    static ones carry a pre-configured value.
    """

    def __init__(self, name: str, dynamic: bool = False, value: Optional[str] = ''):
        self.name = name
        self.dynamic = dynamic
        self.value = value

    @classmethod
    def from_json(cls, data):
        """Build a ShortcutParam from a plain dict of keyword fields."""
        return cls(**data)
class Shortcut(object):
    """A named alias that tasks an agent with either a module or a shell command."""

    def __init__(self, name: str, module: Optional[str] = None, shell: Optional[str] = None, params: List[ShortcutParam] = None):
        if not module and not shell:
            print(print_util.color('Must provide either module name or shell command to a shortcut', color_name='red'))
            raise TypeError
        self.name = name
        self.shell = None if not shell else shell
        self.module = module
        self.params = [] if not params else params

    def get_dynamic_params(self) -> List[ShortcutParam]:
        """Parameters whose value is supplied by the user at invocation time."""
        return [p for p in self.params if p.dynamic]

    def get_dynamic_param_names(self) -> List[str]:
        return [p.name for p in self.get_dynamic_params()]

    def get_static_params(self) -> List[ShortcutParam]:
        """Parameters with fixed, pre-configured values."""
        return [p for p in self.params if not p.dynamic]

    def get_static_param_names(self) -> List[str]:
        return [p.name for p in self.get_static_params()]

    def get_param(self, name: str) -> Optional[ShortcutParam]:
        """Return the parameter called *name*, or None when absent."""
        for candidate in self.params:
            if candidate.name == name:
                return candidate
        return None

    def get_usage_string(self) -> str:
        """Usage line 'name <dyn1> <dyn2> ' (note the trailing space)."""
        parts = [self.name] + [f'<{n}>' for n in self.get_dynamic_param_names()]
        return ' '.join(parts) + ' '

    def get_help_description(self) -> str:
        """Human-readable help text for this shortcut."""
        if self.shell:
            return print_util.text_wrap(f"Tasks an agent to run the shell command '{self.shell}'")
        description = f"Tasks the agent to run module {self.module}."
        defaults = [f"{p.name}: {p.value}" for p in self.get_static_params()]
        if defaults:
            description += ' Default parameters include:\n'
            description += '\n'.join(defaults)
        return print_util.text_wrap(description)

    @classmethod
    def from_json(cls, data):
        """Deserialize, converting any 'params' entries into ShortcutParam objects."""
        if 'params' not in data or data['params'] is None:
            data['params'] = []
        else:
            data['params'] = list(map(ShortcutParam.from_json, data['params']))
        return cls(**data)
|
import os
class XYZNamingScheme:
    """Builds directory and file names for XYZ map tiles under a base directory."""

    # Class-level defaults; always overridden per instance in __init__.
    file_type = 'png'
    basedir = './'

    def __init__(self, file_type, basedir="./"):
        self.file_type = file_type
        self.basedir = basedir

    def bound_name(self, lonTS, latTS):
        """Generate the name reference for the given bound."""
        return f"{lonTS}/{latTS}"

    def adjust_file_type(self):
        """Change file extension to fit the file format ('jpeg' -> 'jpg')."""
        return 'jpg' if self.file_type == 'jpeg' else self.file_type

    def name_for(self, scale, lonTS, latTS):
        """Return a string for the file name for the given tile within a metatile."""
        return self.dir_for(scale, lonTS, latTS) + "." + str(self.adjust_file_type())

    def dir_for(self, scale, lonTS, latTS):
        """Return a string for the directory that contains the given metatile bounds."""
        return os.path.join(self.basedir, "xyz", str(scale), self.bound_name(lonTS, latTS))
|
import json
import boto3
from flask import Flask, request
app = Flask(__name__)
@app.route('/devices/<device_id>', methods=['GET', 'POST'])
def manage_devices(device_id):
    """GET the AWS IoT thing shadow, or POST a contact-details update to it."""
    client = boto3.client('iot-data')
    if request.method == 'GET':
        response = client.get_thing_shadow(thingName=device_id)
        return response['payload'].read()
    # POST: record the caller's contact details under their user id in the
    # shadow's desired state.
    body = request.get_json()
    desired_update = {body['user_id']: body['contact_details']}
    shadow_doc = {'state': {'desired': desired_update}}
    response = client.update_thing_shadow(
        thingName=device_id, payload=json.dumps(shadow_doc))
    return response['payload'].read()
|
from .toms import convert_toms_str
from .toms import replace_millis_str
from .toms import split_timestamp
__version__ = '0.4.5'
__all__ = ['convert_toms_str', 'split_timestamp', 'replace_millis_str']
|
#!/usr/bin/env python3
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Docker launch script for Alphafold docker image."""
import os
import pathlib
import signal
from typing import Tuple
import shutil
import subprocess
from absl import app
from absl import flags
from absl import logging
import docker
from docker import types
# ---- Command-line flags --------------------------------------------------
# Hardware / runtime selection.
flags.DEFINE_bool(
    'use_gpu', True, 'Enable NVIDIA runtime to run with GPUs.')
flags.DEFINE_string(
    'gpu_devices', 'all',
    'Comma separated list of devices to pass to NVIDIA_VISIBLE_DEVICES.')
# Inputs and outputs.
flags.DEFINE_list(
    'fasta_paths', None, 'Paths to FASTA files, each containing a prediction '
    'target that will be folded one after another. If a FASTA file contains '
    'multiple sequences, then it will be folded as a multimer. Paths should be '
    'separated by commas. All FASTA paths must have a unique basename as the '
    'basename is used to name the output directories for each prediction.')
flags.DEFINE_list(
    'is_prokaryote_list', None, 'Optional for multimer system, not used by the '
    'single chain system. This list should contain a boolean for each fasta '
    'specifying true where the target complex is from a prokaryote, and false '
    'where it is not, or where the origin is unknown. These values determine '
    'the pairing method for the MSA.')
flags.DEFINE_string(
    'output_dir', '/tmp/alphafold',
    'Path to a directory that will store the results.')
flags.DEFINE_string(
    'data_dir', None,
    'Path to directory with supporting data: AlphaFold parameters and genetic '
    'and template databases. Set to the target of download_all_databases.sh.'
    'If use_templates is False and use_precomputed_msas is True, and all MSAs '
    'have indeed been precomputed, only the AlphaFold parameters'
    '(not the databases) are required.')
flags.DEFINE_string(
    'docker_image_name', 'alphafold', 'Name of the AlphaFold Docker image.')
# Model / pipeline configuration.
flags.DEFINE_string(
    'max_template_date', None,
    'Maximum template release date to consider (ISO-8601 format: YYYY-MM-DD). '
    'Important if folding historical test sets.')
flags.DEFINE_enum(
    'db_preset', 'full_dbs', ['full_dbs', 'reduced_dbs'],
    'Choose preset MSA database configuration - smaller genetic database '
    'config (reduced_dbs) or full genetic database config (full_dbs)')
flags.DEFINE_enum(
    'model_preset', 'monomer',
    ['monomer', 'monomer_casp14', 'monomer_ptm', 'multimer'],
    'Choose preset model configuration - the monomer model, the monomer model '
    'with extra ensembling, monomer model with pTM head, or multimer model')
flags.DEFINE_boolean(
    'benchmark', False,
    'Run multiple JAX model evaluations to obtain a timing that excludes the '
    'compilation time, which should be more indicative of the time required '
    'for inferencing many proteins.')
flags.DEFINE_integer(
    'random_seed', None, 'The random seed for the data '
    'pipeline. By default, this is randomly generated. Note '
    'that even if this is set, Alphafold may still not be '
    'deterministic, because processes like GPU inference are '
    'nondeterministic.')
flags.DEFINE_boolean(
    'use_precomputed_msas', False,
    'Whether to read MSAs that have been written to disk. WARNING: This will '
    'not check if the sequence, database or configuration have changed.')
flags.DEFINE_boolean(
    'only_msas', False,
    'Whether to only build MSAs, and not do any prediction.')
flags.DEFINE_boolean(
    'amber', True,
    'Whether to do an Amber relaxation of the models.')
flags.DEFINE_boolean(
    'use_templates', True,
    'Whether to search for template structures.')
# Alternative runtimes / development mode.
flags.DEFINE_string(
    'singularity_image_path', None,
    'Complete path to the singularity image.')
flags.DEFINE_boolean(
    'dev', False, 'Run inside alphafold-dev Docker container. '
    'This is meant for modifying AlphaFold without re-building the Docker image '
    'The AlphaFold source code is bound to the container '
    'from an external location, defined as environment variable ALPHAFOLD_DIR.'
    'ALPHAFOLD_DIR should be set to <AlphaFold git repo dir>/alphafold.'
)
FLAGS = flags.FLAGS
# All host paths are mounted under this directory inside the container.
_ROOT_MOUNT_DIRECTORY = '/mnt/'
def _create_mount(mount_name: str, path: str) -> Tuple[types.Mount, str]:
  """Mount the parent directory of *path* read-only under the container's
  mount root and return (mount, path-of-*path*-inside-the-container)."""
  abs_path = os.path.abspath(path)
  host_dir = os.path.dirname(abs_path)
  container_dir = os.path.join(_ROOT_MOUNT_DIRECTORY, mount_name)
  logging.info('Mounting %s -> %s', host_dir, container_dir)
  bind = types.Mount(container_dir, host_dir, type='bind', read_only=True)
  return bind, os.path.join(container_dir, os.path.basename(abs_path))
def main(argv):
  """Assemble mounts and CLI args, then launch AlphaFold in Docker or Singularity."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  if FLAGS.use_templates:
    if FLAGS.max_template_date is None:
      raise app.UsageError(
          'If templates are used, max_template_date must be defined')
  # Databases may be skipped only in the precomputed-MSA, no-template,
  # prediction-only configuration.
  require_all_databases = True
  if FLAGS.use_precomputed_msas and not FLAGS.use_templates \
      and not FLAGS.only_msas:
    require_all_databases = False
  # You can individually override the following paths if you have placed the
  # data in locations other than the FLAGS.data_dir.
  # Path to the Uniref90 database for use by JackHMMER.
  uniref90_database_path = os.path.join(
      FLAGS.data_dir, 'uniref90', 'uniref90.fasta')
  # Path to the Uniprot database for use by JackHMMER.
  uniprot_database_path = os.path.join(
      FLAGS.data_dir, 'uniprot', 'uniprot.fasta')
  # Path to the MGnify database for use by JackHMMER.
  mgnify_database_path = os.path.join(
      FLAGS.data_dir, 'mgnify', 'mgy_clusters_2018_12.fa')
  # Path to the BFD database for use by HHblits.
  bfd_database_path = os.path.join(
      FLAGS.data_dir, 'bfd',
      'bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt')
  # Path to the Small BFD database for use by JackHMMER.
  small_bfd_database_path = os.path.join(
      FLAGS.data_dir, 'small_bfd', 'bfd-first_non_consensus_sequences.fasta')
  # Path to the Uniclust30 database for use by HHblits.
  uniclust30_database_path = os.path.join(
      FLAGS.data_dir, 'uniclust30', 'uniclust30_2018_08', 'uniclust30_2018_08')
  # Path to the PDB70 database for use by HHsearch.
  pdb70_database_path = os.path.join(FLAGS.data_dir, 'pdb70', 'pdb70')
  # Path to the PDB seqres database for use by hmmsearch.
  pdb_seqres_database_path = os.path.join(
      FLAGS.data_dir, 'pdb_seqres', 'pdb_seqres.txt')
  # Path to a directory with template mmCIF structures, each named <pdb_id>.cif.
  template_mmcif_dir = os.path.join(FLAGS.data_dir, 'pdb_mmcif', 'mmcif_files')
  # Path to a file mapping obsolete PDB IDs to their replacements.
  obsolete_pdbs_path = os.path.join(FLAGS.data_dir, 'pdb_mmcif', 'obsolete.dat')
  if not FLAGS.dev:
    # Guard against placing the databases inside the repo (slow image builds).
    alphafold_path = pathlib.Path(__file__).parent.parent
    data_dir_path = pathlib.Path(FLAGS.data_dir)
    if alphafold_path == data_dir_path or alphafold_path in data_dir_path.parents:
      raise app.UsageError(
          f'The download directory {FLAGS.data_dir} should not be a subdirectory '
          f'in the AlphaFold repository directory. If it is, the Docker build is '
          f'slow since the large databases are copied during the image creation.')
  mounts = []
  command_args = []
  # Mount each fasta path as a unique target directory.
  target_fasta_paths = []
  for i, fasta_path in enumerate(FLAGS.fasta_paths):
    mount, target_path = _create_mount(f'fasta_path_{i}', fasta_path)
    mounts.append(mount)
    target_fasta_paths.append(target_path)
  command_args.append(f'--fasta_paths={",".join(target_fasta_paths)}')
  database_paths = [
      ('uniref90_database_path', uniref90_database_path),
      ('mgnify_database_path', mgnify_database_path),
      ('data_dir', FLAGS.data_dir),
      ('template_mmcif_dir', template_mmcif_dir),
      ('obsolete_pdbs_path', obsolete_pdbs_path),
  ]
  if FLAGS.model_preset == 'multimer':
    database_paths.append(('uniprot_database_path', uniprot_database_path))
    database_paths.append(('pdb_seqres_database_path',
                           pdb_seqres_database_path))
  else:
    database_paths.append(('pdb70_database_path', pdb70_database_path))
  if FLAGS.db_preset == 'reduced_dbs':
    database_paths.append(('small_bfd_database_path', small_bfd_database_path))
  else:
    database_paths.extend([
        ('uniclust30_database_path', uniclust30_database_path),
        ('bfd_database_path', bfd_database_path),
    ])
  if not require_all_databases:
    # Keep only the databases actually present on disk; HHblits databases are
    # detected via their *_hhm.ffindex companion file.
    database_paths2 = []
    for db_name, db_path in database_paths:
      if db_name == "data_dir" or os.path.exists(db_path) \
          or os.path.exists(db_path + "_hhm.ffindex"):
        database_paths2.append((db_name, db_path))
    database_paths = database_paths2
  for name, path in database_paths:
    if path:
      mount, target_path = _create_mount(name, path)
      mounts.append(mount)
      command_args.append(f'--{name}={target_path}')
  # The output mount is the only writable one.
  output_target_path = os.path.join(_ROOT_MOUNT_DIRECTORY, 'output')
  output_dir = os.path.abspath(FLAGS.output_dir)
  mounts.append(types.Mount(output_target_path, output_dir, type='bind'))
  command_args.extend([
      f'--output_dir={output_target_path}',
      f'--max_template_date={FLAGS.max_template_date}',
      f'--db_preset={FLAGS.db_preset}',
      f'--model_preset={FLAGS.model_preset}',
      f'--benchmark={FLAGS.benchmark}',
      f'--use_precomputed_msas={FLAGS.use_precomputed_msas}',
      f'--only_msas={FLAGS.only_msas}',
      f'--amber={FLAGS.amber}',
      f'--use_templates={FLAGS.use_templates}',
      '--logtostderr',
  ])
  if FLAGS.random_seed is not None:
    command_args.append(
        f'--random_seed={FLAGS.random_seed}')
  if FLAGS.is_prokaryote_list:
    command_args.append(
        f'--is_prokaryote_list={",".join(FLAGS.is_prokaryote_list)}')
  docker_image_name = FLAGS.docker_image_name
  if FLAGS.dev:
    # Bind the live AlphaFold checkout into the container instead of the copy
    # baked into the image, and switch to the dev image by default.
    alphafold_dir = os.environ.get("ALPHAFOLD_DIR")
    if alphafold_dir is None:
      raise app.UsageError('ALPHAFOLD_DIR is undefined')
    run_alphafold_py = os.path.join(alphafold_dir, "run_alphafold.py")
    if not os.path.exists(run_alphafold_py):
      raise app.UsageError('ALPHAFOLD_DIR must contain "run_alphafold.py"')
    mounts.append(types.Mount(
        "/app/alphafold", alphafold_dir,
        type='bind', read_only=True)
    )
    if docker_image_name == "alphafold":
      docker_image_name = "alphafold-dev"
  singularity_image_path = FLAGS.singularity_image_path
  if singularity_image_path:
    # Execute with Singularity, translating the Docker mounts into --bind args.
    if shutil.which("singularity") is None:
      raise app.UsageError('Could not find path to the "singularity" binary. '
                           'Make sure it is installed on your system.')
    if not os.path.exists(singularity_image_path):
      raise app.UsageError('Could not find singularity image.')
    singularity_command = ["singularity", "run", "--cleanenv"]
    singularity_command += ["--env", "TF_FORCE_UNIFIED_MEMORY=1"]
    singularity_command += ["--env", "XLA_PYTHON_CLIENT_MEM_FRACTION=4.0"]
    singularity_command += ["--env", "OPENMM_CPU_THREADS=8"]
    if FLAGS.use_gpu:
      singularity_command += ["--nv"]
      if FLAGS.gpu_devices != "all":
        os.environ["SINGULARITYENV_CUDA_VISIBLE_DEVICES"] = FLAGS.gpu_devices
    for mount in mounts:
      mount_readonly = "ro" if mount["ReadOnly"] else "rw"
      singularity_command += ['--bind', f'{mount["Source"]}:{mount["Target"]}:{mount_readonly}']
    singularity_command += [singularity_image_path]
    singularity_command += command_args
    print(" ".join(singularity_command))
    subprocess.run(singularity_command)
  else:  # execute with Docker
    client = docker.from_env()
    container = client.containers.run(
        image=docker_image_name,
        command=command_args,
        runtime='nvidia' if FLAGS.use_gpu else None,
        remove=True,
        detach=True,
        mounts=mounts,
        environment={
            'NVIDIA_VISIBLE_DEVICES': FLAGS.gpu_devices,
            # The following flags allow us to make predictions on proteins that
            # would typically be too long to fit into GPU memory.
            'TF_FORCE_UNIFIED_MEMORY': '1',
            'XLA_PYTHON_CLIENT_MEM_FRACTION': '4.0',
        })
    # Add signal handler to ensure CTRL+C also stops the running container.
    signal.signal(signal.SIGINT,
                  lambda unused_sig, unused_frame: container.kill())
    for line in container.logs(stream=True):
      logging.info(line.strip().decode('utf-8'))
if __name__ == '__main__':
  # Both flags are mandatory; absl rejects the invocation before main() runs.
  flags.mark_flags_as_required([
      'data_dir',
      'fasta_paths'
  ])
  app.run(main)
|
import telegram
from telegram import ParseMode
import time
class TelegramBot:
    """Thin wrapper around telegram.Bot that retries sends on flood control."""

    def __init__(self, token, chatId, botEnabled=True, monitor=None):
        self.token = token
        self.chatId = chatId
        self.bot = telegram.Bot(token=self.token)
        # When False, messages are only printed locally, never sent.
        self.botEnabled = botEnabled
        # Optional object notified via sendMessage("Telegram", exc, text)
        # whenever a send attempt fails.
        self.monitor = monitor

    def getChatId(self):
        return self.chatId

    def sendMessage(self, text,chatId=None):
        """Send *text* (Markdown, no link previews) to chatId, defaulting to
        the configured chat.

        Retries up to 10 times. RetryAfter errors sleep for the advised delay
        (capped at 60s) before retrying; other exceptions are reported to the
        monitor and retried immediately. Stops on the first success.
        """
        print(f"----\nBot Telegram (chat id: {chatId})\n{text}")
        if self.botEnabled:
            for __ in range(10):
                try:
                    self.bot.send_message(chat_id=self.getChatId() if chatId is None else chatId, text=text,
                                          parse_mode=ParseMode.MARKDOWN,disable_web_page_preview=True)
                except telegram.error.RetryAfter as ra:
                    if self.monitor is not None:
                        self.monitor.sendMessage("Telegram", ra, text)
                    if int(ra.retry_after) > 60:
                        print("Flood control exceeded. Retry in 60 seconds")
                        time.sleep(60)
                    else:
                        print(ra)
                        time.sleep(int(ra.retry_after))
                    continue
                except Exception as e:
                    if self.monitor is not None:
                        self.monitor.sendMessage("Telegram", e, text)
                else:
                    # Sent successfully: stop retrying.
                    break
import colored
import pandas as pd
import matplotlib.pyplot as plt

print(colored.stylize("\n---- | Plotting COVID-19 cases in specified country | ----\n", colored.fg("red")))

# Full OWID dataset, then only the columns this script needs.
df = pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
small_df = df[["location", "new_cases", "total_deaths", "population", "date"]]
small_df_values = small_df.values

country = input("Enter the country: ")

# Keep only the rows for the requested country.
rows = small_df_values[small_df_values[:, 0] == country]

print(f"\nPopulation of {country}: {int(rows[-1:, 3][0])}")
print(f"Total deaths: {int(rows[-1:, 2][0])}\n")

# The 14 days preceding the most recent report; labels keep day-of-month only.
dates = [d[8:] for d in rows[:, 4][-15:-1]]
cases = rows[:, 1][-15:-1]

fig = plt.figure("Plotting with Pyplot")
plt.scatter(dates, cases)
plt.plot(dates, cases)
plt.title("COVID-19 cases of the last 14 days")
plt.xlabel("Date - 2020")
plt.ylabel("Cases")
plt.show()
|
import numpy as np
from sklearn.linear_model import LinearRegression
import math
from itertools import combinations
def print_LR(X, w, y, add_bias = True, n = 5, verbose = False):
    """Solve the weighted least-squares system for design matrix X, sample
    weights w and targets y, and return every coefficient except the last
    (the bias column when add_bias is True)."""
    W = np.diag(w)
    if verbose:
        print(W)
    # Normal equations: beta = (X^T W X)^-1 X^T W y
    XtW = np.dot(X.T, W)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(XtW, X)), XtW), y)
    if verbose:
        print("Solution of regression:", beta)
        print(beta[-2])
    # Sum excludes the bias term when a bias column was appended.
    weight_sum = np.sum(beta[:-1]) if add_bias else np.sum(beta)
    if verbose:
        print("Sum of weights (w.o. bias) :", weight_sum)
    if add_bias and verbose:
        print("Bias :", beta[-1])
    return beta[:-1]
def generate_X(n, interactions = [], add_bias = True):
    """Enumerate all 2^n on/off coalitions of n elements as design-matrix rows.

    n : number of elements
    interactions : [(0,1), (2,3), ..., (4,5)] -- each pair gets an extra
        indicator column that is set when both members are present
    add_bias : append a constant 1 column when True

    NOTE(review): the [] default is shared across calls; it is only read here,
    so harmless, but confirm before ever mutating it.
    """
    num_cols = n + len(interactions) + int(add_bias)
    rows = []
    for subset_size in range(n + 1):
        for subset in combinations(range(n), subset_size):
            row = np.zeros(num_cols)
            if subset:
                row[np.array(list(subset))] = 1
            for col, pair in enumerate(interactions, start=n):
                if set(pair).issubset(set(subset)):
                    row[col] = 1
            if add_bias:
                row[-1] = 1
            rows.append(row)
    return np.array(rows)
def nCr(n,r):
    """Binomial coefficient C(n, r).

    Fix: use exact integer floor division. The original
    int(f(n) / f(r) / f(n-r)) went through float true division, which loses
    precision once the factorials exceed 2**53 and returns wrong values for
    moderately large n.
    """
    f = math.factorial
    return f(n) // (f(r) * f(n - r))
def kernel_shap_weight(n, inf = 100000):
    """Kernel SHAP regression weights for the 2^n coalition rows produced by
    generate_X: the empty and full coalitions get the huge weight *inf* so
    the regression is pinned through them, and each coalition of size s gets
    1 / (C(n, s) * (n - s) * s)."""
    weights = [inf]
    for size in range(1, n):
        coalition_weight = float(1) / nCr(n, size) / (n - size) / size
        weights.extend([coalition_weight] * nCr(n, size))
    weights.append(inf)
    return weights
def uniform_weight(n, inf = 100000):
    """Weights pinning only the first and last rows: [inf, 1 * (n-2), inf]."""
    return [inf] + [1] * (n - 2) + [inf]
def getshap(n, y):
    """Exact kernel SHAP values for payoff vector y over all 2^n coalitions."""
    design = generate_X(n , [], True)
    weights = kernel_shap_weight(n)
    return print_LR(design, weights, y, False, n)
def getshap_sample(n, X, y):
    """SHAP estimate from a pre-sampled coalition matrix X, uniform weights."""
    return print_LR(X, uniform_weight(n), y, False, n)
import argparse
import sys
import hashlib
import hmac
from binascii import hexlify
try:
from scapy.all import *
except ImportError:
print "please install scapy: http://www.secdev.org/projects/scapy/ "
sys.exit()
def get_challenge_response(cfg_pcap_file):
    """Extract the S7 authentication challenge and response hex strings from
    a pcap capture, first verifying that the capture shows a successful login.

    Frame sizes and payload magics (hex chars 14:24 of the TCP payload):
      108-byte frame, magic 7202002732 -> challenge (server -> client)
      141-byte frame, magic 7202004831 -> response  (client -> server)
       84-byte frame, magic 7202000f32 -> auth OK; 92 bytes -> auth failed
    Python 2 only (uses print statements and list-returning map/keys).
    """
    r = rdpcap(cfg_pcap_file)
    # Map packet index -> captured length.
    lens = map(lambda x: x.len, r)
    pckt_lens = dict([(i, lens[i]) for i in range(0,len(lens))])
    # try to find challenge packet
    pckt_108 = 0 #challenge packet (from server)
    for (pckt_indx, pckt_len) in pckt_lens.items():
        if pckt_len+14 == 108 and hexlify(r[pckt_indx].load)[14:24] == '7202002732':
            pckt_108 = pckt_indx
            break
    # try to find response packet (search only packets after the challenge)
    pckt_141 = 0 #response packet (from client)
    _t1 = dict([ (i, lens[i]) for i in pckt_lens.keys()[pckt_108:] ])
    for pckt_indx in sorted(_t1.keys()):
        pckt_len = _t1[pckt_indx]
        if pckt_len+14 == 141 and hexlify(r[pckt_indx].load)[14:24] == '7202004831':
            pckt_141 = pckt_indx
            break
    # try to find auth result packet
    pckt_84 = 0 # auth answer from plc: pckt_len==84 -> auth ok
    pckt_92 = 0 # auth answer from plc: pckt_len==92 -> auth bad
    for pckt_indx in sorted(_t1.keys()):
        pckt_len = _t1[pckt_indx]
        if pckt_len+14 == 84 and hexlify(r[pckt_indx].load)[14:24] == '7202000f32':
            pckt_84 = pckt_indx
            break
        if pckt_len+14 == 92 and hexlify(r[pckt_indx].load)[14:24] == '7202001732':
            pckt_92 = pckt_indx
            break
    print "found packets indeces: pckt_108=%d, pckt_141=%d, pckt_84=%d, pckt_92=%d" % (pckt_108, pckt_141, pckt_84, pckt_92)
    if pckt_84:
        print "auth ok"
    else:
        # Without a successful login the response cannot validate a password.
        print "auth bad. for brute we need right auth result. exit"
        sys.exit()
    challenge = None
    response = None
    # Challenge: 20 bytes at a fixed offset, preceded by marker 100214.
    raw_challenge = hexlify(r[pckt_108].load)
    if raw_challenge[46:52] == '100214' and raw_challenge[92:94] == '00':
        challenge = raw_challenge[52:92]
        print "found challenge: %s" % challenge
    else:
        print "cannot find challenge. exit"
        sys.exit()
    # Response: 20 bytes at a fixed offset, preceded by marker 100214.
    raw_response = hexlify(r[pckt_141].load)
    if raw_response[64:70] == '100214' and raw_response[110:112] == '00':
        response = raw_response[70:110]
        print "found response: %s" % response
    else:
        print "cannot find response. exit"
        sys.exit()
    return challenge, response
def calculate_s7response(password, challenge):
    """Compute the expected S7 auth response: HMAC-SHA1 over the raw
    challenge, keyed with SHA1(password). Python 2 only (str.decode('hex'))."""
    challenge = challenge.decode("hex")
    return hmac.new( hashlib.sha1(password).digest(), challenge, hashlib.sha1).hexdigest()
if __name__ == '__main__':
    # Dictionary attack: derive the expected response for each candidate
    # password and compare with the response sniffed from the capture.
    parser = argparse.ArgumentParser(description='Crack S7 password using a dictionary')
    parser.add_argument('file', metavar='FILE', type=str, help='A PCAP file containing S7 traffic')
    parser.add_argument('dict', metavar='DICT', type=str, help='Dictionary to use when cracking')
    args = parser.parse_args()
    print "using pcap file: %s" % args.file
    challenge, response = get_challenge_response(args.file)
    print "start password bruteforsing ..."
    for p in open(args.dict):
        p = p.strip()
        if response == calculate_s7response(p, challenge):
            print "found password: %s" % p
            sys.exit()
    print "password not found. try another dictionary."
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
# Shared DAG defaults; afroot05 owns the task.
default_args = {'owner': 'afroot05'}
# Runs daily at 06:00; catchup disabled so missed intervals are not backfilled.
dag = DAG('stock_3minute_deploy_production',
          default_args=default_args,
          schedule_interval='0 6 * * *',
          catchup=False,
          start_date=datetime(2021, 5, 24, 6, 0))
# Single task: shell out to the deploy script installed on the worker host.
stock_3minute_deploy_production = BashOperator(task_id="stock_3minute_deploy_production",
                                               bash_command="sh /usr/lib/carter/stock-3minute-source/stock_3minute_source/scripts/call_stock_3minute.sh ",
                                               dag=dag)
|
# 5 seti 0 8 4 E = 0
# 6 bori 4 65536 3 D = E | 65536 (1000000000000000..)
# 7 seti 707129 0 4 E = 707129
# 8 bani 3 255 2 C = D & 255
# 9 addr 4 2 4 E = E + C
# 10 bani 4 16777215 4 E = E & 11111111111111
# 11 muli 4 65899 4 E *= 4
# 12 bani 4 16777215 4 E = E & 11111111111111
# 13 gtir 256 3 2 if 256 > D:
# Value Out
# go to 6
# 17 seti 0 7 2 C = 0
# 18 addi 2 1 1 B = C + 1
# 19 muli 1 256 1 B *= 256
# 20 gtrr 1 3 1 if B > D:
# 26 setr 2 4 3 D = C
# 27 seti 7 4 5 go to 8
# 24 addi 2 1 2 C += 1
# 25 seti 17 1 5 go to 18
#
# Python re-implementation of the register machine transcribed in the
# comments above; registers A..E, with D/E driving a hash-like loop.
prev = []
A=B=C=D=E = 0
D = E | 65536
E = 707129
X = 0  # iteration counter, used only as a safety cutoff
while True:
    X += 1
    C = D & 255
    # One hash round: add low byte of D, mask to 24 bits, multiply, mask again.
    # NOTE(review): the assembly comment 'muli 4 65899' suggests multiplying
    # by 65899, but this code multiplies by 4 -- confirm which is intended.
    E = (((E + C) & 16777215) * 4) & 16777215
    print(A,B,C,D,E)
    if X > 5000: break
    if 256 > D:
        # Inner loop exhausted: E is a candidate answer. A repeated E means
        # the sequence has cycled, so report it and keep collecting.
        if E in prev:
            print("Found")
            print(E)
        prev.append(E)
        D = E | 65536
        E = 707129
        if len(prev) > 10: break
        continue
    # Divide D by 256 the slow way, exactly as the original program does.
    C = 0
    while True:
        B = (C + 1) * 256
        if B > D:
            D = C
            break
        else:
            C += 1
            continue
|
import unittest
import pytest
import spikeextractors as se
from spikesorters import KlustaSorter
from spikesorters.tests.common_tests import SorterCommonTestSuite
# This run several tests
# Runs the shared sorter test-suite against Klusta; skipped entirely when the
# klusta backend is not installed.
@pytest.mark.skipif(not KlustaSorter.installed, reason='klusta not installed')
class KlustaCommonTestSuite(SorterCommonTestSuite, unittest.TestCase):
    # The common suite parameterizes its tests on this sorter class.
    SorterClass = KlustaSorter

if __name__ == '__main__':
    # Manual invocation of a few suite tests outside of pytest.
    KlustaCommonTestSuite().test_on_toy()
    KlustaCommonTestSuite().test_several_groups()
    KlustaCommonTestSuite().test_with_BinDatRecordingExtractor()
|
"""
parser.py
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
def parse_args(description, defaults=None):
    """Parse the standard training command-line arguments.

    Args:
        description: program description shown in --help.
        defaults: optional mapping overriding the built-in defaults for the
            short option keys 'e' (error), 'f' (format), 'l' (lrate) and
            'n' (numsteps).

    Returns:
        The parsed argparse.Namespace.
    """
    # Fix: avoid a mutable default argument; the original `defaults={}`
    # shared one dict object across every call to this function.
    d = {'e': 0.1, 'f': '.3f', 'l': 0.1, 'n': 100}
    d.update(defaults or {})
    parser = ArgumentParser(
        description=description,
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-e', '--error', metavar='ERR', type=float,
                        default=d['e'], help='error (noise) applied to targets')
    parser.add_argument('-f', '--format', metavar='FMT', type=str,
                        default=d['f'], help='output float format ')
    parser.add_argument('-l', '--lrate', metavar='LRT', type=float,
                        default=d['l'], help='learning rate')
    parser.add_argument('-n', '--numsteps', metavar='N', type=int,
                        default=d['n'], help='number of steps to train')
    parser.add_argument('-p', '--showplots', action='store_true',
                        help='show plots (requires matplotlib)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='enable verbose output')
    return parser.parse_args()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeLibsvm op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow
tensorflow.compat.v1.disable_eager_execution()
from tensorflow import errors # pylint: disable=wrong-import-position
from tensorflow import dtypes # pylint: disable=wrong-import-position
from tensorflow import sparse # pylint: disable=wrong-import-position
from tensorflow import test # pylint: disable=wrong-import-position
import tensorflow_io.libsvm as libsvm_io # pylint: disable=wrong-import-position
class DecodeLibsvmOpTest(test.TestCase):
  """Tests decoding libsvm-formatted strings into dense labels and sparse
  features (densified for comparison). Uses TF v1 graph sessions."""

  def test_basic(self):
    """Decode a flat batch of three records, including inf/nan feature values."""
    with self.cached_session() as sess:
      content = [
          "1 1:3.4 2:0.5 4:0.231", "1 2:2.5 3:inf 5:0.503",
          "2 3:2.5 2:nan 1:0.105"
      ]
      sparse_features, labels = libsvm_io.decode_libsvm(
          content, num_features=6)
      features = sparse.to_dense(
          sparse_features, validate_indices=False)
      # Static shape is known before running the graph.
      self.assertAllEqual(labels.get_shape().as_list(), [3])
      features, labels = sess.run([features, labels])
      self.assertAllEqual(labels, [1, 1, 2])
      self.assertAllClose(
          features, [[0, 3.4, 0.5, 0, 0.231, 0], [0, 0, 2.5, np.inf, 0, 0.503],
                     [0, 0.105, np.nan, 2.5, 0, 0]])

  def test_n_dimension(self):
    """Decode a 2-D batch and read the labels back as float64."""
    with self.cached_session() as sess:
      content = [["1 1:3.4 2:0.5 4:0.231", "1 1:3.4 2:0.5 4:0.231"],
                 ["1 2:2.5 3:inf 5:0.503", "1 2:2.5 3:inf 5:0.503"],
                 ["2 3:2.5 2:nan 1:0.105", "2 3:2.5 2:nan 1:0.105"]]
      sparse_features, labels = libsvm_io.decode_libsvm(
          content, num_features=6, label_dtype=dtypes.float64)
      features = sparse.to_dense(
          sparse_features, validate_indices=False)
      self.assertAllEqual(labels.get_shape().as_list(), [3, 2])
      features, labels = sess.run([features, labels])
      self.assertAllEqual(labels, [[1, 1], [1, 1], [2, 2]])
      self.assertAllClose(
          features, [[[0, 3.4, 0.5, 0, 0.231, 0], [0, 3.4, 0.5, 0, 0.231, 0]], [
              [0, 0, 2.5, np.inf, 0, 0.503], [0, 0, 2.5, np.inf, 0, 0.503]
          ], [[0, 0.105, np.nan, 2.5, 0, 0], [0, 0.105, np.nan, 2.5, 0, 0]]])

  def test_dataset(self):
    """Stream the bundled sample file one record at a time until exhaustion."""
    libsvm_file = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_libsvm", "sample")
    dataset = libsvm_io.make_libsvm_dataset(
        libsvm_file, num_features=6)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    sparse_features, labels = iterator.get_next()
    features = sparse.to_dense(sparse_features, validate_indices=False)
    with self.cached_session() as sess:
      sess.run(init_op)
      f, l = sess.run([features, labels])
      self.assertAllEqual(l, [1])
      self.assertAllClose(f, [[0, 3.4, 0.5, 0, 0.231, 0]])
      f, l = sess.run([features, labels])
      self.assertAllEqual(l, [1])
      self.assertAllClose(f, [[0, 0, 2.5, np.inf, 0, 0.503]])
      f, l = sess.run([features, labels])
      self.assertAllEqual(l, [2])
      self.assertAllClose(f, [[0, 0.105, np.nan, 2.5, 0, 0]])
      # A fourth pull must signal end-of-dataset.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run([features, labels])

if __name__ == "__main__":
  test.main()
|
# coding: utf-8
import sys
import grpc
import grpc_nlu_pb2
import grpc_nlu_pb2_grpc
if len(sys.argv) != 2:
print('Usage: python test_grpc.py "address:port"')
exit(1)
address = sys.argv[1]
channel = grpc.insecure_channel(address)
stub = grpc_nlu_pb2_grpc.RouteNLUStub(channel)
domains = stub.GetDomains(grpc_nlu_pb2.Empty())
print("***")
print("GetDomains:")
print("***")
print(domains)
text_to_nlu = grpc_nlu_pb2.TextToParse(
text="pizza à Rennes",
count=2,
lang="fr",
domain="POI")
def generate_to_nlu():
    """Yield a small request stream: the shared ``text_to_nlu`` twice,
    followed by one extra POI query."""
    extra_request = grpc_nlu_pb2.TextToParse(
        text="le café de la gare en bretagne",
        count=2,
        lang="fr",
        domain="POI")
    yield from (text_to_nlu, text_to_nlu, extra_request)
# Unary call: parse a single request.
parsed = stub.GetNLU(text_to_nlu)
print("***\nGetNLU:\n***")
print(parsed)

# Streaming call: parse the generated request stream, printing each response.
responses = stub.StreamNLU(generate_to_nlu())
print("***\nStreamNLU:\n***")
for response in responses:
    print(response)
|
"""Normalize SVG icons in-place: tighten default viewBoxes, strip comments,
noisy root attributes and <title> elements, and force fills to currentcolor."""
import os
import re
import math
from xml.dom.minidom import *
from svgelements import *

# Directory holding the icons, relative to this script.
iconRoot = ".."
# Icons that must keep their original 60x60 viewBox.
blockList = ["dot"]

for filename in os.listdir(iconRoot):
    if not filename.endswith(".svg"):
        continue
    filePath = os.path.join(iconRoot, filename)
    # BUGFIX: the handle from open(...).read() was never closed; use a
    # context manager so the file is released deterministically.
    with open(filePath, 'r') as infile:
        source = infile.read()
    if 'viewBox="0 0 60 60"' in source and filename.replace(".svg", "") not in blockList:
        # Replace the default viewBox with the drawing's actual bounding
        # box, padded by one unit on the top/left and rounded outward.
        svg = SVG.parse(filePath)
        viewBox = [math.ceil(v) for v in svg.bbox()]
        x = int(viewBox[0]) - 1
        y = int(viewBox[1]) - 1
        w = math.ceil(viewBox[2] - x)
        h = math.ceil(viewBox[3] - y)
        source = re.sub(r'viewBox=".*?"', f'viewBox="{x} {y} {w} {h}"', source)
    # Remove XML comments before parsing.
    source = re.sub(r'<!--.*?-->', "", source)
    dom = parseString(source)
    # Strip editor/export noise from the root <svg> element; minidom raises
    # when the attribute is absent, which is the expected common case.
    for attribute in ["id", "data-name", "enable-background", "version", "x", "y", "xml:space", "title", "xmlns:xlink", "style"]:
        try:
            dom.firstChild.removeAttribute(attribute)
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt
            pass
    # Remove all <title> elements (tooltips are unwanted in icon fonts).
    titles = dom.getElementsByTagName("title")
    for title in titles:
        print(title)
        title.parentNode.removeChild(title)
    # Force every explicitly-filled shape to inherit the current text color.
    elements = []
    elements.extend(dom.getElementsByTagName("path"))
    elements.extend(dom.getElementsByTagName("rect"))
    elements.extend(dom.getElementsByTagName("polygon"))
    elements.extend(dom.getElementsByTagName("ellipse"))
    elements.extend(dom.getElementsByTagName("circle"))
    for path in elements:
        if path.getAttribute("fill"):
            path.setAttribute("fill", "currentcolor")
    source = dom.toxml()
    print(filePath)
    source = source.replace('<?xml version="1.0" ?>', '')
    # BUGFIX: open(...).write(...) leaked the handle (and bound the write()
    # return value, None, to a misleading name ``f``).
    with open(filePath, 'w') as outfile:
        outfile.write(source)
|
import urllib
import urllib.parse
def _translation_entry(sentence, direct):
    """Reduce a raw sentence dict to its text/id/lang fields, tagged with
    whether it is a direct translation."""
    entry = {key: sentence[key] for key in ('text', 'id', 'lang')}
    entry["direct"] = direct
    return entry


def get_sentence_from_source(source):
    """Build a sentence record from a raw API result.

    ``source`` is a 3-element sequence: the sentence itself, its direct
    translations, and its indirect translations.  Returns a dict carrying
    the sentence's text/lang/id plus a flat ``translations`` list where
    each entry has a ``direct`` boolean.

    (Decomposed from two nested dict comprehensions the original author
    flagged as unreadable.)
    """
    sentence = source[0]
    translations = [_translation_entry(s, True) for s in source[1]]
    translations += [_translation_entry(s, False) for s in source[2]]
    return {
        "translations": translations,
        "text": sentence['text'],
        "lang": sentence['lang'],
        "id": sentence['id'],
    }
def build_search_url_from_request_data(request_data):
    """Translate request parameters into a Tatoeba advanced-search URL.

    Returns None when no search text is supplied; every other parameter
    falls back to a default so the query string is always complete.
    """
    search_text = request_data.get("text")
    if search_text is None:
        return None
    params = {
        "from": request_data.get("from", "und"),
        "to": request_data.get("to", "und"),
        "page": request_data.get("page", "1"),
        "user": request_data.get("user", ""),
        "orphans": request_data.get("orphans", "no"),
        "unapproved": request_data.get("unapproved", "no"),
        "has_audio": request_data.get("has_audio", ""),
        "tags": request_data.get("tags", ""),
        "list": request_data.get("list", ""),
        "native": request_data.get("native", ""),
        "trans_filter": request_data.get("trans_filter", "limit"),
        "trans_to": request_data.get("trans_to") or request_data.get("to", "und"),
        "trans_link": request_data.get("trans_link", ""),
        "trans_user": request_data.get("trans_user", ""),
        "trans_orphan": request_data.get("trans_orphan", ""),
        "trans_unapproved": request_data.get("trans_unapproved", ""),
        "trans_has_audio": request_data.get("trans_has_audio", ""),
        "sort": request_data.get("sort", "relevance"),
        "sort_reverse": request_data.get("sort_reverse", ""),
    }
    # urlencode replaces the original quadratic "&k=v" string-concat loop.
    # quote_via=quote with safe='/' reproduces the original percent-encoding
    # exactly (%20 for spaces, '/' untouched) instead of urlencode's default
    # plus-encoding.
    query_string = urllib.parse.urlencode(
        params, safe='/', quote_via=urllib.parse.quote)
    return (
        "https://www.tatoeba.org/eng/sentences/search?query="
        + urllib.parse.quote(search_text)
        + "&" + query_string
    )
|
#!/usr/bin/python
'''
Defines a non-blocking lock context for threading.Lock objects
'''
import time
class NBLockContext(object):
    '''
    Context manager for threading.Lock-like objects that reports, rather
    than blocks on, acquisition: ``__enter__`` returns whether the lock was
    taken, retrying non-blocking acquire() calls until ``timeout`` elapses
    (a timeout of 0 means a single attempt).

    Example usage (fixed from the original docstring, whose examples used
    Python 2 ``print`` statements and an unescaped apostrophe):

        # No timeout: try once and report.
        with NBLockContext(mylock) as acquired:
            if acquired:
                pass  # do stuff, lock is held
            else:
                pass  # lock NOT acquired

        # With timeout and polling interval.
        with NBLockContext(mylock, timeout=5, sleep_interval=0.1) as acquired:
            if acquired:
                print("Acquired within 5s")
            else:
                print("Did not lock within 5s")
    '''

    def __init__(self, lock, timeout=0, sleep_interval=0.01):
        """
        :param lock: any object exposing acquire()/release(), e.g. threading.Lock
        :param timeout: max seconds to keep retrying; 0 means a single attempt
        :param sleep_interval: seconds slept between acquisition attempts
        """
        self.lock = lock
        self.timeout = timeout
        self.sleep_interval = sleep_interval
        # Fail fast on objects that cannot possibly behave like a lock.
        assert hasattr(self.lock, 'acquire')
        assert hasattr(self.lock, 'release')
        self.acquired = False

    def __enter__(self):
        """Try to acquire without blocking; return True iff the lock was taken."""
        if self.timeout != 0:
            # monotonic() instead of time(): immune to wall-clock adjustments,
            # so the timeout window cannot jump or go negative.
            start = time.monotonic()
        self.acquired = self.lock.acquire(False)
        # Poll until acquired or the timeout window closes; the short-circuit
        # on ``self.timeout != 0`` keeps ``start`` unreferenced when unset.
        while (not self.acquired) and (self.timeout != 0) and (time.monotonic() - start < self.timeout):
            time.sleep(self.sleep_interval)
            self.acquired = self.lock.acquire(False)
        return self.acquired

    def __exit__(self, exc_type, exc_value, traceback):
        """Release the lock, but only if __enter__ actually acquired it.

        (Parameter renamed from ``type``, which shadowed the builtin.)
        """
        if self.acquired:
            self.lock.release()
            self.acquired = False
|
from os.path import join
import json
import re
import string
from collections import OrderedDict
from markdown import Markdown
class Term(dict):
    """A glossary entry mapping language codes to {item: text} dicts."""

    # Identifier of the entry, taken from the ``key`` keyword argument.
    id = None

    def __init__(self, *args, key="", **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.id = key

    def set_item(self, item, data, lang="de", append=False):
        """Store *data* under self[lang][item]; with append=True, extend an
        existing value with a space-separated suffix instead."""
        bucket = self.setdefault(lang, {})
        if append and item in bucket:
            bucket[item] = f"{bucket[item]} {data}"
        else:
            bucket[item] = data

    def merge(self, term):
        """Copy entries from *term* for languages not yet present here."""
        for lang, payload in term.items():
            self.setdefault(lang, payload)
class Glossary(dict):
    """Maps term ids to Term entries and builds per-language A-Z glossaries."""

    # Matches an explicit id annotation paragraph: "[ id=some-id ]".
    DEF_RE = re.compile(r"^[ ]{0,3}\[[ ]*id=([^\]]*)[ ]*\]")

    # Class-level default kept only for backward compatibility with code
    # reading Glossary.localized_glossaries; instances get their own dict
    # in __init__ (see BUGFIX below).
    localized_glossaries = {}

    def __init__(self, json_file=None):
        super(Glossary, self).__init__()
        # BUGFIX: localized_glossaries used to be only a mutable class
        # attribute, so every Glossary instance shared (and clobbered) the
        # same dict; make it per-instance.
        self.localized_glossaries = {}
        if json_file:
            self.parse_json(json_file)

    def get_localized_glossary(self, lang):
        """Return an OrderedDict A-Z -> {term: definition} for *lang*.

        Terms without an entry for *lang* are skipped; letters with no
        terms map to an empty dict.
        """
        glossary = OrderedDict((c, {}) for c in string.ascii_uppercase)
        for t in self.values():
            if lang in t:
                term = t[lang]["term"]
                glossary[term[0].upper()][term] = t[lang]["definition"]
        return glossary

    def parse_markdown(self, filename, lang="de"):
        """Parse a markdown glossary: each h2 starts a term, following
        paragraphs are its definition, and an "[id=...]" paragraph sets the
        term's id.  Terms with an id already present are merged."""
        md = Markdown()
        with open(filename, "r", encoding="utf-8") as fh:
            lines = fh.readlines()
        etree = md.parser.parseDocument(lines)
        terms = []
        current_term = None
        for item in etree.iter():
            if item.tag == "h2":
                current_term = Term()
                terms.append(current_term)
                current_term.set_item("term", item.text.strip(), lang=lang)
            elif item.tag == "p" and current_term:
                m = self.DEF_RE.search(item.text)
                if m:
                    current_term.id = m.groups()[0].strip()
                else:
                    current_term.set_item(
                        "definition", item.text, append=True, lang=lang
                    )
        for t in terms:
            if t.id in self:
                self[t.id].merge(t)
            else:
                self[t.id] = t
        self.localized_glossaries[lang] = self.get_localized_glossary(lang)

    def parse_json(self, filename):
        """Load terms from a JSON file keyed by term id, then rebuild the
        localized glossaries for every language seen."""
        # utf-8 added for consistency with parse_markdown's explicit encoding.
        with open(filename, encoding="utf-8") as fh:
            raw_data = json.load(fh)
        languages = set()
        for k, v in raw_data.items():
            self[k] = Term(key=k, **v)
            languages.update(v.keys())
        for lang in languages:
            self.localized_glossaries[lang] = self.get_localized_glossary(lang)

    @staticmethod
    def from_markdown(basedir="."):
        """Build a bilingual glossary from Glossar.md (de) and Glossary.md
        (en) located in *basedir*."""
        g = Glossary()
        for lang, fname in {'de': 'Glossar.md', 'en': 'Glossary.md'}.items():
            g.parse_markdown(join(basedir, fname), lang=lang)
        return g
if __name__ == "__main__":
    # Build the glossary from the markdown sources and dump it as JSON with
    # top-level keys sorted, so the output file diffs stably.
    glossary = Glossary.from_markdown()
    with open("glossary.json", "w") as fh:
        json.dump(dict(sorted(glossary.items())), fh, indent=2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.