blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
83d8f01b28464ec55edaaa39fe4cb19a0347229f | Python | xamroot/cryptopals | /set2/challenge10.py | UTF-8 | 1,008 | 2.859375 | 3 | [] | no_license | from Crypto.Cipher import AES
import base64
import sys
sys.path.insert(1, '../tools/')
import CBC as c
def xor(block0, block1):
    """Byte-wise XOR of two byte sequences, truncated to the shorter one."""
    return bytes(a ^ b for a, b in zip(block0, block1))
def cbc_encrypt(plainblocks, iv, cipher):
    """CBC-encrypt a list of plaintext blocks with the given block cipher.

    Each block is XORed with the previous ciphertext block (the IV for the
    first block) before encryption; returns the list of ciphertext blocks.
    """
    ciphered = []
    chain = iv
    for block in plainblocks:
        chain = cipher.encrypt(xor(block, chain))
        ciphered.append(chain)
    return ciphered
def cbc_decrypt(cipherblocks, iv, cipher):
    """CBC-decrypt a list of ciphertext blocks with the given block cipher.

    Each decrypted block is XORed with the previous ciphertext block (the IV
    for the first block); returns the list of plaintext blocks.

    Fixes: removed a leftover debug ``print`` of every ciphertext block, and
    renamed the loop variable (previously ``c``, shadowing the module
    imported as ``c`` at the top of the file).
    """
    plain = []
    prev_block = iv
    for block in cipherblocks:
        plain.append(xor(cipher.decrypt(block), prev_block))
        prev_block = block
    return plain
# AES-CBC demo parameters (Cryptopals set 2, challenge 10).
block_size = 16
key = "YELLOW SUBMARINE"  # 16-char AES-128 key; passed as str — NOTE(review): pycryptodome requires bytes, confirm library version
iv = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'  # all-zero 16-byte IV
# ECB primitive only; CBC chaining is implemented manually above.
cryptsys = AES.new(key, AES.MODE_ECB)
with open("10.txt") as fh:
    ciphertext = base64.b64decode(fh.read())
# Disabled earlier approach, kept for reference:
'''
cipherblocks = [ciphertext[i:i+block_size] for i in range(0,len(ciphertext),block_size)]
print(cbc_decrypt(cipherblocks,iv,cryptsys))
'''
# Split the ciphertext into block_size-byte blocks using the shared CBC helper.
# Fix: stripped dataset residue ("| true |") that was fused onto this line and
# would have raised a NameError at runtime.
print(c.make_blocks(ciphertext, block_size))
17b62124b2e11e91054331ad4722edf4c8732306 | Python | shriki001/Operating-Systems | /Python/Class/ex3.py | UTF-8 | 1,711 | 3.90625 | 4 | [] | no_license | #%%--------------------------------------------------------------------------%%#
#ex3.1
# ex3.1: read two integer series; if the second series is the element-wise
# square of the first, print the element-wise sums of the two series.
lst1 = [int(x) for x in input("enter first series").split()]
lst2 = [int(x) for x in input("enter second series").split()]
if [x**2 for x in lst1] == lst2:
    m_list = [x + y for x, y in zip(lst1, lst2)]
    print(m_list)
################################################################################
#ex3.2
# ex3.2: read a series of tokens and print only the purely-numeric ones.
lst3 = [x for x in input("enter series").split()]
print(list(filter(lambda x: x.isdigit(), lst3)))
################################################################################
#ex3.3
# ex3.3: numbers below 200 divisible by both 5 and 7 (i.e. by 35),
# printed with their 1-based position.
lst4 = [x for x in range(1, 200) if x % 35 == 0]
for position, value in enumerate(lst4, start=1):
    print(position, value)
################################################################################
#ex3.4
# ex3.4: the seven arithmetic operations as named functions, collected in a
# list so they can all be applied to the same pair of operands.
def add(x, y):
    """Return x + y."""
    return x + y

def sub(x, y):
    """Return x - y."""
    return x - y

def mul(x, y):
    """Return x * y."""
    return x * y

def div(x, y):
    """Return x / y (true division)."""
    return x / y

def fdiv(x, y):
    """Return x // y (floor division)."""
    return x // y

def mod(x, y):
    """Return x % y."""
    return x % y

def exp(x, y):
    """Return x ** y."""
    return x ** y

funcs = [add, sub, mul, div, fdiv, mod, exp]
# ex3.4 (continued): read two numbers, show the menu, then apply every
# operation in `funcs` to the pair and print all results at once.
var1 = int(input("Enter your first number:"))
var2 = int(input("Enter your second number:"))
print("""‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐
C A L C U L A T I O N S
‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐
1. Addition
2. Subtraction
3. Multiplication
4. Division
5. Floor Division
6. Modulus
7. Exponent
‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐‐
""")
value = list(map(lambda x: x(var1, var2), funcs))
print(value)
#%%--------------------------------------------------------------------------%%#
| true |
e143bb92c6799bc8e8b73e47544df1cdfb433668 | Python | VanyaVoykova/SoftUni-Math-Concepts-For-Developers-February-2021 | /Training-Math-Codes/cryptography.py | UTF-8 | 1,113 | 3.515625 | 4 | [] | no_license | import matplotlib.pyplot as plt
from secrets import randbits
from sympy import Mul, factorint
import timeit
def get_bits(start_bit, end_bit, step=8):
    """Return the bit sizes from start_bit up to (excluding) end_bit, spaced by step."""
    return list(range(start_bit, end_bit, step))
def get_times(bits):
    """Benchmark factoring vs. re-multiplying for each bit size in *bits*.

    For every size, a random integer of that many bits is factored and its
    prime-power factors re-multiplied, each timed over 1000 repetitions.
    Returns (factorization_times, multiplication_times).
    """
    fact_times, mul_times = [], []
    for bit in bits:
        number = randbits(bit)
        factors = [prime ** power for prime, power in factorint(number).items()]
        fact_times.append(timeit.timeit(lambda: factorint(number),
                                        "from sympy import factorint", number=1000))
        mul_times.append(timeit.timeit(lambda: Mul(*factors),
                                       "from sympy import Mul", number=1000))
    return fact_times, mul_times
def plot_result(bits, fact, mul):
    """Plot factorization vs. multiplication timings against bit size."""
    # Plot order must match the legend label order below.
    plt.plot(bits, fact)
    plt.plot(bits, mul)
    plt.legend(["Factorization", "Multiplication"])
    plt.xlabel("Bits")
    plt.ylabel("Time[s]")
    plt.grid()
    plt.show()
# Benchmark factorization vs. multiplication for 8..63-bit random integers,
# plot the curves, and dump the raw timings.
bits_list = get_bits(8, 64)
factorizations, multiplications = get_times(bits_list)
plot_result(bits_list, factorizations, multiplications)
print(factorizations)
print(multiplications)
| true |
898519a17c84070ce82bd6a9d19211d1f57e0397 | Python | ramadnsyh/twitter-news-summarization | /tweet_summarization.py | UTF-8 | 3,458 | 2.71875 | 3 | [] | no_license | import os
from bs4 import BeautifulSoup
import requests
from requests_oauthlib import OAuth1
from dotenv import load_dotenv
from gensim.summarization import summarize, keywords
import argparse
load_dotenv()
def env_vars(request):
    """Look up an environment variable, returning None when it is unset."""
    return os.environ.get(request)
def check_authentication():
    """Call Twitter's verify_credentials endpoint with the stored credentials.

    Returns the HTTP response so callers can inspect the status code.
    Fix: the response used to be silently discarded, so this function had no
    observable effect; callers that ignore the return value are unaffected.
    """
    auth = authentication()
    url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
    return requests.get(url, auth=auth)
def authentication():
    """Build an OAuth1 helper from the Twitter credentials stored in the environment."""
    return OAuth1(
        env_vars("API_KEY"),
        env_vars("API_SECRET_KEY"),
        env_vars("ACCESS_TOKEN"),
        env_vars("ACCESS_SECRET_TOKEN"),
    )
def get_user_timeline(username, total_tweet=10):
    """Fetch up to *total_tweet* recent tweets for *username*; returns parsed JSON."""
    url = ("https://api.twitter.com/1.1/statuses/user_timeline.json"
           "?screen_name={}&count={}".format(username, total_tweet))
    response = requests.get(url, auth=authentication())
    return response.json()
def news_scrapper(url):
    """Scrape a news page; returns (headline, article body as one string).

    Body text is built from the <p> tags, keeping only paragraphs that
    contain no embedded newline and at least one period.
    """
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    title = soup.find('h1').get_text()
    paragraphs = [tag.get_text().strip() for tag in soup.find_all('p')]
    clean = [p for p in paragraphs if '\n' not in p and '.' in p]
    return title, ' '.join(clean)
def retweet_tweet(id_str):
    """Retweet the tweet with the given id; returns the API response JSON."""
    url = "https://api.twitter.com/1.1/statuses/retweet/{}.json".format(id_str)
    return requests.post(url, auth=authentication()).json()
def reply_tweet(user_mention, body, tweet_id):
    """Post *body* as a reply to *tweet_id*, mentioning *user_mention*; returns JSON."""
    payload = {
        "status": "@{} {}".format(user_mention, body),
        "in_reply_to_status_id": tweet_id,
    }
    return requests.post("https://api.twitter.com/1.1/statuses/update.json",
                         auth=authentication(), data=payload).json()
def main():
    """CLI entry point: retweet a user's recent tweets and reply to each
    retweet with a generated summary of the linked article.

    Fix: the inner handler was a bare ``except: pass``, which also swallowed
    SystemExit and KeyboardInterrupt; it now catches Exception only.
    """
    try:
        parser = argparse.ArgumentParser(
            description='Twitter news summarization',
            prog='PROG', conflict_handler='resolve'
        )
        parser.add_argument('username', metavar='U', type=str,
                            help='Tweet username that you want to post')
        parser.add_argument('--count', type=int, default=1, nargs='?',
                            help='total tweets you want to repost')
        args = parser.parse_args()

        check_authentication()
        tweets = get_user_timeline(args.username, total_tweet=args.count)
        for tweet in tweets:
            try:
                _, article = news_scrapper(tweet["entities"]["urls"][0]["url"])
                retweet = retweet_tweet(id_str=tweet["id_str"])
                id_str = retweet["id_str"]
                user_mention = retweet["entities"]["user_mentions"][0]["screen_name"]
                summarization = summarize(article, split=True)
                for summary in summarization:
                    reply = reply_tweet(user_mention=user_mention, body=summary, tweet_id=id_str)
                    id_str = reply["id_str"]
                    user_mention = reply["entities"]["user_mentions"][0]["screen_name"]
                    break  # NOTE: intentionally stops after the first summary sentence
            except Exception:
                # Skip tweets without a scrapable link or with unexpected payloads.
                continue
    except BaseException as e:
        print(e)


if __name__ == "__main__":
    main()
| true |
55df9b47747a5c42869b00b28227553ececc7704 | Python | alexmereuta/Lab_SI | /Lab1_SI/clientUDP.py | UTF-8 | 311 | 2.859375 | 3 | [] | no_license | import socket
# Destination address for the datagram.
UDP_IP = "127.0.0.1"
UDP_PORT = 5005
MESSAGE = "This is a UDP message!"

# Fix: "targer" typo corrected in the user-facing message below.
print("UDP target IP:", UDP_IP)
print("UDP target port:", UDP_PORT)
print("message:", MESSAGE)

# UDP (datagram) socket; sendto needs no prior connection.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(MESSAGE.encode('utf-8'), (UDP_IP, UDP_PORT))
s.close()  # fix: release the socket descriptor (was never closed)
| true |
a3535a3946e13aadfa53c2ca7bca79e8576e75d7 | Python | twathit/algorithm | /changes.py | UTF-8 | 2,152 | 3.578125 | 4 | [] | no_license | #钱币找零:假设用于找零的钱币包括四种:25美分,10美分,5美分,1美分,求给用户找回数目最少的钱币。
# Method 1: plain recursion (exponential time).
def recMC(coinValueList, changes):
    """Recursively compute the minimum number of coins from *coinValueList*
    needed to make *changes*. Naive recursion — exponential time."""
    if changes in coinValueList:
        return 1
    # Try every usable coin; *changes* itself serves as the initial upper bound.
    candidates = [1 + recMC(coinValueList, changes - coin)
                  for coin in coinValueList if coin <= changes]
    return min([changes] + candidates)
# Demo: minimum coins to make 63 cents from US denominations.
if __name__ =='__main__':
    print(recMC([1,5,10,25],63))
# Method 2: add a memoization decorator.
def memo(f):
    """Memoize a two-argument function ``f(L, x)``.

    Fix: the cache key now includes the list argument *L* (as a tuple) in
    addition to *x*; previously results were keyed on *x* alone and were
    wrongly shared between calls with different coin lists.
    """
    cache = {}
    def wrapper(L, x):
        key = (tuple(L), x)
        if key not in cache:
            cache[key] = f(L, x)
        return cache[key]
    return wrapper
@memo
def recMC(coinValueList, changes):
    """Minimum-coin count for *changes*, memoized through the @memo decorator."""
    if changes in coinValueList:
        return 1
    # Same recursion as the naive version; @memo caches sub-results.
    candidates = [1 + recMC(coinValueList, changes - coin)
                  for coin in coinValueList if coin <= changes]
    return min([changes] + candidates)
# Demo: same 63-cent problem, now fast thanks to memoization.
if __name__ =='__main__':
    print(recMC([1,5,10,25],63))
# Method 3: bottom-up dynamic programming.
def dpMakeChange(coinValueList, changes):
    """Bottom-up DP: minimum number of coins from *coinValueList* summing to *changes*."""
    minCoins = {}
    for cents in range(changes + 1):
        # *cents* itself is the initial upper bound for this amount.
        best = cents
        for coin in (c for c in coinValueList if c <= cents):
            best = min(best, minCoins[cents - coin] + 1)
        minCoins[cents] = best
    return minCoins[changes]
# Demo: bottom-up answer for the 63-cent problem.
if __name__ =='__main__':
    print(dpMakeChange([1,5,10,25],63))
# Extension: also report which coins are used.
def dpMakeChange(coinValueList, changes):
    """Bottom-up DP minimum-coin count, plus a table of the last coin used.

    Returns (min_count, coinUsed) where coinUsed[amount] is the coin that
    completes an optimal solution for *amount* (note: the last-chosen coin
    deliberately carries over between amounts, matching the table semantics).
    """
    minCoins = {}
    coinUsed = {}
    lastCoin = coinValueList[0]
    for cents in range(changes + 1):
        best = cents
        for coin in [c for c in coinValueList if c <= cents]:
            if minCoins[cents - coin] + 1 < best:
                best = minCoins[cents - coin] + 1
                lastCoin = coin
        minCoins[cents] = best
        coinUsed[cents] = lastCoin
    return minCoins[changes], coinUsed
def printCoins(coinUsed, changes):
    """Walk the coinUsed table back from *changes* and print the coins spent."""
    remaining = changes
    spent = []
    while remaining > 0:
        coin = coinUsed[remaining]
        spent.append(coin)
        remaining -= coin
    print(spent)
# Demo: solve the 63-cent problem and list the coins used.
# Fix: stripped dataset residue ("| true |") fused onto the final line, which
# would have raised a NameError at runtime.
if __name__=='__main__':
    coinCount,coinUsed=dpMakeChange([1,5,10,25],63)
    print('The minimum coins are:',coinCount)
    print('They are:')
    printCoins(coinUsed,63)
22aa0377be716b0ab13183a1e89a533d223efa50 | Python | QTtrash/insta-pie-bot | /webapp/Utilities.py | UTF-8 | 470 | 2.734375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | # Random comments, that i copied from Instagram, bot needs to blend in
comments = ['Damn, son, where did you find this?',
'Ayyyyy lmao, n1, n1, dude',
'What in the flying frick is that even suppose to mean?',
'Hello, officer, arrogant people wildin',
'True, it really do be like that',
'Yes, I do agree with this sentiment',
'No, I, in fact, don\'t agree',
'Officer, this one right here']
| true |
6e8b9ce0f5226bf6b2dfbcdaec4db1f691c00f95 | Python | coding1617/Word-Search | /instructions.py | UTF-8 | 2,212 | 3.484375 | 3 | [] | no_license | from tkinter import *
class Instruction_page(Frame):
    """Frame that displays the word-search game instructions."""

    def __init__(self, master, return_home):
        """Initialize Frame.

        :param master: parent Tk widget
        :param return_home: callback that switches back to the home screen
        """
        self.return_home = return_home
        super(Instruction_page, self).__init__(master, background = "mistyrose")
        master.title("Instructions Page")
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        """Build the instruction text area and the home-page button."""
        Label(self, text="").grid(row=0, column=0)
        self.story_txt = Text(self, font="Verdana 15 bold", fg="Crimson", bg = "mistyrose", width=60, height=28, wrap=WORD)
        self.story_txt.grid(row=0, column=1, columnspan=5)
        instructions = ("Welcome to Phoenix Word Search!!!\n\n"
                        "Here you can challenge your brain by playing word search.\n\n"
                        "You can choose from three levels -- easy, medium, and hard.\n\n"
                        "When you find a word, replace all the letters of the word in the word search with an "
                        "asterisk [*].\n\nThen press the check button to see if you've found a word!\n\n"
                        "If time is running out or you are desperate, you can replace a single letter with an asterisk and if that "
                        "letter is in any of the words, it will turn lowercase.\n\n"
                        "Bonus! All three word searches have a theme of phoenixes and fire!\n\n"
                        "If you change a letter that's not part of a word to an asterisk, then when \nthe word search "
                        "resets, it will change to a different random letter. \n\nDon't get confused!\n\n"
                        "When you're ready, click the 'home page' button to return to the home screen and "
                        "start your puzzle!\n\n"
                        "Can you beat the clock and solve them all?"
                        )
        self.story_txt.delete(0.0, END)
        self.story_txt.insert(0.0, instructions)
        # Fix: a second, never-gridded Button (stored as self.home_bttn) used to
        # be created here; it was dead code and has been removed.
        Button(self, text="Home Page",
               font="fixedsys 20", fg="light gray",
               bg="maroon",
               command=self.back_to_home
               ).grid(row=4, column=1, sticky=W)

    def back_to_home(self):
        """Return to the home screen via the injected callback."""
        # Fix: stripped dataset residue ("| true |") fused onto this line.
        self.return_home()
774d6789cb107c790b057320f8b235d0dc535e5e | Python | susu25/DouBanTV | /spider/parse.py | UTF-8 | 493 | 2.65625 | 3 | [] | no_license | import time
from retrying import retry
from config import SPIDER_HEADERS
import requests
@retry(stop_max_attempt_number=3)
def _parse_url(url):
    """GET *url* (5s timeout, spider headers); retried up to 3 times by @retry.

    Raises ValueError when the response is not HTTP 200, so the retry
    decorator always sees a failure on bad responses.

    Fix: replaced an ``assert`` on the status code — assertions are stripped
    under ``python -O``, which would have silently accepted error pages.
    """
    response = requests.get(url, timeout=5, headers=SPIDER_HEADERS)
    if response.status_code != 200:
        raise ValueError("unexpected status code: %s" % response.status_code)
    return response.content.decode()
def parse_url(url):
    """Fetch *url* after a 1s throttle; returns the decoded body, or None on any failure."""
    print("now parseing", url)
    try:
        time.sleep(1)  # throttle between requests
        return _parse_url(url)
    except Exception as e:
        print(e)
        return None
d236624f8d7eb91c223d8ebf1e62658e6995564b | Python | Jmyerzzz/lmu-artificial-intelligence | /HW1/Pathfinder.py | UTF-8 | 5,958 | 3.546875 | 4 | [] | no_license | '''
The Pathfinder class is responsible for finding a solution (i.e., a
sequence of actions) that takes the agent from the initial state to all
of the goals with optimal cost.
This task is done in the solve method, as parameterized
by a maze pathfinding problem, and is aided by the SearchTreeNode DS.
Jackson Myers
'''
import unittest
from queue import PriorityQueue
from MazeProblem import MazeProblem
from SearchTreeNode import SearchTreeNode
def total_cost(current, child):
    """Path cost of a successor: the parent node's total cost plus the edge cost *child*."""
    return child + current.totalCost
def heuristic(current, goals):
    """Minimum Manhattan distance from *current* (x, y) to any goal in *goals*.

    Admissible heuristic for grid movement. Improvement: min() over a
    generator instead of building a throwaway list of all distances.
    """
    return min(abs(current[0] - goal[0]) + abs(current[1] - goal[1])
               for goal in goals)
def is_goal(state, goals):
    """True if *state* is one of the remaining goals.

    Improvement: membership test via ``in`` instead of ``count() > 0``,
    which scanned the whole list unconditionally.
    """
    return state in goals
def get_actions(current, path_root):
    """Collect the actions leading from *path_root* to *current*, in order.

    Walks parent links backwards from *current*, prepending each action,
    until *path_root* has been consumed (its parent is reached) or the tree
    root (parent is None) terminates the walk.
    """
    actions = []
    while current.parent is not path_root.parent:
        actions.insert(0, current.action)
        current = current.parent
        # Stop at the tree root; its action (None) is never recorded.
        if current.parent is None:
            break
    return actions
def solve(problem, initial, goals):
    """Find a sequence of actions that visits every goal in *goals*.

    Runs an A*-style best-first search toward the nearest goal; each time a
    goal is reached the path segment is committed, the frontier and closed
    list are reset, and the search restarts from that node toward the
    remaining goals. Returns the combined action list, or None if some goal
    is unreachable. NOTE: mutates *goals* in place.

    NOTE(review): frontier ordering relies on SearchTreeNode comparison
    (presumably cost + heuristic) — confirm SearchTreeNode defines it.
    """
    frontier = PriorityQueue()
    root = SearchTreeNode(initial, None, None, 0, heuristic(initial, goals))
    path_root = root
    frontier.put(root)
    closed_list = {}
    actions = []
    while not frontier.empty():
        current = frontier.get()
        if is_goal(current.state, goals):
            # Commit this segment, then restart the search from this node.
            goals.remove(current.state)
            actions.extend(get_actions(current, path_root))
            path_root = current
            if not goals:
                return actions
            frontier.queue.clear()
            closed_list.clear()
        closed_list[current.state] = 1
        # transitions() yields (action, cost, new_state) triples — see indexing below.
        for node in problem.transitions(current.state):
            if node[2] not in closed_list:
                child = SearchTreeNode(node[2], node[0], current, total_cost(current, node[1]), heuristic(node[2], goals))
                frontier.put(child)
    return None
class PathfinderTests(unittest.TestCase):
    """Unit tests for solve().

    Maze legend: 'X' is a wall, '.' is open floor, 'M' is presumably a
    higher-cost cell (mud) — confirm against MazeProblem's cost model.
    Each test checks that the returned plan is valid and has optimal cost,
    or that None is returned for unreachable goals.
    """

    def test_maze1(self):
        maze = ["XXXXXXX",
                "X.....X",
                "X.M.M.X",
                "X.X.X.X",
                "XXXXXXX"]
        problem = MazeProblem(maze)
        initial = (1, 3)
        goals = [(5, 3)]
        soln = solve(problem, initial, goals)
        (soln_cost, is_soln) = problem.soln_test(soln, initial, goals)
        self.assertTrue(is_soln)
        self.assertEqual(soln_cost, 8)

    def test_maze2(self):
        maze = ["XXXXXXX",
                "X.....X",
                "X.M.M.X",
                "X.X.X.X",
                "XXXXXXX"]
        problem = MazeProblem(maze)
        initial = (1, 3)
        goals = [(3, 3),(5, 3)]
        soln = solve(problem, initial, goals)
        (soln_cost, is_soln) = problem.soln_test(soln, initial, goals)
        self.assertTrue(is_soln)
        self.assertEqual(soln_cost, 12)

    def test_maze3(self):
        maze = ["XXXXXXX",
                "X.....X",
                "X.M.MMX",
                "X...M.X",
                "XXXXXXX"]
        problem = MazeProblem(maze)
        initial = (5, 1)
        goals = [(5, 3), (1, 3), (1, 1)]
        soln = solve(problem, initial, goals)
        (soln_cost, is_soln) = problem.soln_test(soln, initial, goals)
        self.assertTrue(is_soln)
        self.assertEqual(soln_cost, 12)

    def test_maze4(self):
        # Goal (5, 3) is walled off, so no solution exists.
        maze = ["XXXXXXX",
                "X.....X",
                "X.M.XXX",
                "X...X.X",
                "XXXXXXX"]
        problem = MazeProblem(maze)
        initial = (5, 1)
        goals = [(5, 3), (1, 3), (1, 1)]
        soln = solve(problem, initial, goals)
        self.assertTrue(soln == None)

    def test_maze5(self):
        maze = ["XXXXXXX",
                "X...X.X",
                "X.XXXMX",
                "X.MM.MX",
                "XXXXXXX"]
        problem = MazeProblem(maze)
        initial = (1, 3)
        goals = [(3,1), (5,1), (4,3)]
        soln = solve(problem, initial, goals)
        (soln_cost, is_soln) = problem.soln_test(soln, initial, goals)
        self.assertTrue(is_soln)
        self.assertEqual(soln_cost, 22)

    def test_maze6(self):
        maze = ["XXXXXXXXXXX",
                "X..MMM.MX.X",
                "X.X.XXX.X.X",
                "X...XXX...X",
                "X....MX.XXX",
                "X.M.XXX...X",
                "X...M...X.X",
                "XXXXXXXXXXX"]
        problem = MazeProblem(maze)
        initial = (9, 6)
        goals = [(6, 1), (3, 5), (9, 1)]
        soln = solve(problem, initial, goals)
        (soln_cost, is_soln) = problem.soln_test(soln, initial, goals)
        self.assertTrue(is_soln)
        self.assertEqual(soln_cost, 31)

    def test_maze7(self):
        maze = ["XXXXXXXXXXX",
                "X.X..MX...X",
                "X.XMM.MM.XX",
                "XM..XMM.X.X",
                "X.X....MX.X",
                "X..MX.X...X",
                "X.M..X....X",
                "XXXXXXXXXXX"]
        problem = MazeProblem(maze)
        initial = (5, 2)
        goals = [(9, 1), (9, 6), (1, 1), (4, 6), (5, 5)]
        soln = solve(problem, initial, goals)
        (soln_cost, is_soln) = problem.soln_test(soln, initial, goals)
        self.assertTrue(is_soln)
        self.assertEqual(soln_cost, 55)

    def test_maze8(self):
        # Same maze as test_maze7 but with goal (9, 6) sealed off.
        maze = ["XXXXXXXXXXX",
                "X.X..MX...X",
                "X.XMM.MM.XX",
                "XM..XMM.X.X",
                "X.X....MX.X",
                "X..MX.X..XX",
                "X.M..X..X.X",
                "XXXXXXXXXXX"]
        problem = MazeProblem(maze)
        initial = (5, 2)
        goals = [(9, 1), (9, 6), (1, 1), (4, 6), (5, 5)]
        soln = solve(problem, initial, goals)
        self.assertTrue(soln == None)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
5786135e7d7068575b1dfa2fbe80b1c053491b65 | Python | FujitaHirotaka/djangoruler3 | /examples/django/応用/簡易アップローダー/project/media/広島/_iterator.py | UTF-8 | 447 | 4.09375 | 4 | [] | no_license | class MyIterator(object):
def __init__(self, *numbers):
self._numbers=numbers
self._i=0
def __iter__(self):
return self
def __next__(self):
if self._i==len(self._numbers):
raise StopIteration()
value=self._numbers[self._i]
self._i+=1
return value
# Demo: iterate the custom iterator.
# Fix: stripped dataset residue ("| true |") fused onto the last line, which
# would have raised a NameError at runtime.
my_iterator = MyIterator(10, 20, 30)
print(my_iterator)
for num in my_iterator:
    print("hello %d" % num)
f120141098feb6b58c81d25c72ea932d4fa15063 | Python | simonsny/challenge-card-game-becode | /utils/game.py | UTF-8 | 2,756 | 4.03125 | 4 | [] | no_license | from utils.player import Player
from utils.deck import Deck
class Board:
    """The card-game board: owns the players, the deck, and the turn loop."""

    def __init__(self, players: list = None):
        """
        :param players: List of players that will play the game. Can be added later.
        """
        if players:
            self.players = players
        else:
            self.players = []
        self.turn_count = 0       # number of completed rounds
        self.history_cards = []   # all cards already played (52 ends the game)
        self.active_cards = []    # cards played in the current round

    def start_game(self, players: list = None):
        """
        :param players: List of players that will play the game.
        If not defined here of when creating the board, the game will ask for input.
        Method of Board that starts the game.
        The game asks for the input of all players if not already entered previously.
        Next it fills the deck, shuffles and randomly distributes the cards to the players.
        Then the game will make each player play one card per turn, until no more cards are left.
        It is possible for some players to have one turn more then others.
        """
        if not players:
            if not self.players:
                self.input_players()
        else:
            self.players = players
        self.create_deck()
        self.deck.fill_and_distribute(self.players)
        i = 0  # NOTE(review): incremented below but never read — candidate for removal
        while len(self.history_cards) < 52:
            print(f'\n********** BEGINNING OF ROUND {self.turn_count} **********\n')
            print('Turn count:', self.turn_count)
            i += 1
            # Every player plays exactly one card this round.
            for player in self.players:
                player.play()
                self.active_cards.append(player.active_card)
                print(player)
            print('\n')
            print(f'Active cards: {self.active_cards}')
            #print('\n\n')
            self.turn_count += 1
            # Archive the round's cards and reset for the next round.
            self.history_cards.extend(self.active_cards)
            self.active_cards = []
            print(f'\n************* END OF ROUND {self.turn_count-1} *************\n')

    def create_deck(self):
        """
        Function that creates a deck inside the board
        """
        self.deck = Deck()

    def input_players(self):
        """
        Asks for the users to input the players' names and puts them in a the players list.
        """
        print("Please input all players here one by one.\nWe need at least 2 players and max 52. \
        \nPress 'Enter' in a blank feeld to start the game.")
        i = 1
        while i <= 52:
            input_string = input(f'Player {i}: ')
            print()
            if input_string == "":
                # Blank entry starts the game — but only once we have 2+ players.
                if len(self.players) < 2:
                    print("Please enter a name.")
                    continue
                break
            self.players.append(Player(input_string))
            i += 1
| true |
870631772c54762d80469ac1004f93b5c652c647 | Python | Yonimdo/Python-Intro | /ListComprehension/42/42.py | UTF-8 | 589 | 3.671875 | 4 | [] | no_license | def word_lengths(s):
# ==== YOUR CODE HERE ===
# =======================
return [len(w) for w in s.split()]
def max_word_length(s):
    """Length of the longest whitespace-separated word of *s*."""
    return max(len(word) for word in s.split())
# Self-tests: exercise both helpers on a sample sentence.
result = word_lengths("Contrary to popular belief Lorem Ipsum is not simply random text")
print("Result:", result)
assert result == [8, 2, 7, 6, 5, 5, 2, 3, 6, 6, 4]
print("OK")
result = max_word_length("Contrary to popular belief Lorem Ipsum is not simply random text")
print("Result:", result)
assert result == 8
print("OK")
| true |
class Solution:
    def customSortString(self, S, T):
        """Reorder T so its characters follow the ordering given by S.

        Characters of T present in S appear first, in S's order, each
        repeated as often as it occurs in T; characters not in S keep
        their original relative order at the end.

        :type S: str
        :type T: str
        :rtype: str

        Fixes: stripped dataset residue ("| true |") fused onto the return
        statement, and replaced O(n^2) string concatenation with a join.
        """
        # Count occurrences of each character of T.
        counts = {}
        for ch in T:
            counts[ch] = counts.get(ch, 0) + 1
        in_S = set(S)
        parts = []
        # Emit characters in S's order, as many times as they occur in T.
        for ch in S:
            parts.append(ch * counts.get(ch, 0))
        # Append characters of T that S does not order, preserving T order.
        for ch in T:
            if ch not in in_S:
                parts.append(ch)
        return "".join(parts)
715577069ed1c3c3e2979ecee1f3fcefee2c6700 | Python | MacRayy/exam-trial-basics | /countas/count-as.py | UTF-8 | 742 | 3.90625 | 4 | [] | no_license | # Create a function that takes a filename as string parameter,
# counts the occurrences of the letter "a", and returns it as a number.
# If the file does not exist, the function should return 0 and not break.
# print(count_as("afile.txt")) # should print 28
# print(count_as("not-a-file")) # should print 0
def count_as(file_name):
try:
f = open(file_name, "r")
text = f.read()
a_counter = 0
for letter in text:
if letter == "a" or letter == "A":
a_counter += 1
return a_counter
except FileNotFoundError:
return 0
# Demo: count in a known file (machine-specific absolute path) and in a missing one.
print(count_as("/Users/MrFox/OneDrive/greenfox/exam-trial-basics/countas/afile.txt"))
print(count_as("no_such_file.txt"))
| true |
1ea04101db67ca546aa2ff63e6d3f713590b70b9 | Python | mdhiggins/ardsnet-calculator | /ardsnet.py | UTF-8 | 6,003 | 2.953125 | 3 | [
"MIT"
] | permissive | import enum
class Gender(enum.Enum):
    """Patient sex; selects the base constant of the predicted-body-weight formula."""
    Male = "male"
    Female = "female"
class Patient():
    """A patient described by gender and height.

    Height is in inches (the formula subtracts 60, i.e. 5 feet) — the pbw
    computation matches the Devine predicted-body-weight formula; confirm
    against the clinical reference.
    """

    # Base predicted body weight in kg, per gender.
    __PBW_BASE_VALUE__ = {
        Gender.Male: 50.0,
        Gender.Female: 45.5,
    }

    def __init__(self, gender, height):
        self.gender = gender  # a Gender enum member
        self.height = height  # height in inches

    @property
    def pbw(self):
        """Predicted body weight (kg): gender base + 2.3 per inch over 60."""
        return self.__PBW_BASE_VALUE__.get(self.gender) + (2.3 * (self.height - 60))
class Vent():
    """Ventilator settings: tidal volume (ml/kg predicted body weight),
    respiratory rate (breaths/min), FiO2 (fraction), PEEP (cmH2O)."""

    def __init__(self, vt, rr, fio2, peep):
        self.vt = vt    # tidal volume, ml per kg predicted body weight
        self.rr = rr    # respiratory rate, breaths/min
        if fio2 > 1:
            # Accept percentages (e.g. 40) and normalize to a fraction (0.40).
            fio2 = fio2 / 100
        self.fio2 = fio2
        self.peep = peep  # positive end-expiratory pressure, cmH2O

    def minuteVentilation(self, patient):
        """Minute ventilation (ml/min): vt (ml/kg) * PBW (kg) * rate."""
        return self.vt * patient.pbw * self.rr

    def getVtByWeight(self, patient):
        """Return vt divided by the patient's PBW.

        NOTE(review): vt already appears to be per-kg; confirm intended units.
        """
        return self.vt / patient.pbw

    def setVtByWeight(self, vt, patient):
        """Set tidal volume from an absolute volume (ml), normalized by PBW."""
        self.vt = vt / patient.pbw

    def __str__(self):
        return "%0.02fml/kg %d %0.0f%% +%d" % (self.vt, self.rr, self.fio2 * 100, self.peep)

    @property
    def fio2String(self):
        """FiO2 rendered as a whole-number percentage, e.g. '40%'."""
        return "%0.0f%%" % (self.fio2 * 100)

    def __eq__(self, other):
        """Settings-wise equality with another Vent.

        Fix: returns NotImplemented for non-Vent operands (instead of a bare
        False) so Python can try the reflected comparison; ``==`` against a
        foreign type still evaluates to False for callers.
        """
        if not isinstance(other, Vent):
            return NotImplemented
        return (self.vt == other.vt and self.rr == other.rr
                and self.fio2 == other.fio2 and self.peep == other.peep)
class ARDSNet():
    """ARDSNet low-tidal-volume ventilation protocol helper.

    Encodes the protocol's target ranges (plateau pressure, pH, tidal
    volume, oxygenation) and the FiO2/PEEP titration ladders, and adjusts a
    Vent's settings toward those targets. The constants mirror the ARDSNet
    protocol card — confirm against the published reference before clinical
    use.
    """

    # Plateau-pressure targets (cmH2O).
    __PPLAT_MAX__ = 30
    __PPLAT_MIN__ = 25
    # Respiratory-rate limits (breaths/min).
    __RR_MIN__ = 8
    __RR_MAX__ = 35
    # Arterial pH goal band and absolute floor.
    __PH_GOAL_MAX__ = 7.45
    __PH_GOAL_MIN__ = 7.30
    __PH_MIN__ = 7.15
    # Tidal-volume bounds and goal (ml/kg predicted body weight).
    __VT_MIN__ = 4
    __VT_MAX__ = 8
    __VT_GOAL__ = 6
    # Oxygenation target bands (PaO2 mmHg, SpO2 %).
    __PAO2_MIN__ = 55
    __PAO2_MAX__ = 80
    __SPO2_MIN__ = 89
    __SPO2_MAX__ = 95
    # Maximum single-step decreases when weaning (see changeLimiter).
    __PEEP_DELTA_MAX__ = 2
    __FIO2_DELTA_MAX__ = 0.2
    # Titration ladder of (FiO2 fraction, PEEP) pairs — lower-PEEP/higher-FiO2 arm.
    __LOWER_PEEP_HIGHER_FIO2__ = [
        (0.3, 5),
        (0.4, 5),
        (0.4, 8),
        (0.5, 8),
        (0.5, 10),
        (0.6, 10),
        (0.7, 10),
        (0.7, 12),
        (0.7, 14),
        (0.8, 14),
        (0.9, 14),
        (0.9, 16),
        (0.9, 18),
        (1.0, 18),
        (1.0, 20),
        (1.0, 22),
        (1.0, 24)
    ]
    # Titration ladder of (FiO2 fraction, PEEP) pairs — higher-PEEP/lower-FiO2 arm.
    __HIGHER_PEEP_LOWER_FIO2__ = [
        (0.3, 5),
        (0.3, 8),
        (0.3, 10),
        (0.3, 12),
        (0.3, 14),
        (0.4, 14),
        (0.4, 16),
        (0.5, 16),
        (0.5, 18),
        (0.5, 20),
        (0.6, 20),
        (0.7, 20),
        (0.8, 20),
        (0.8, 22),
        (0.9, 22),
        (1.0, 22),
        (1.0, 24)
    ]
    # SpO2 (%) -> estimated PaO2 (mmHg) lookup within the target band.
    __SPO2_TO_PAO2__ = {
        89: 56.0,
        90: 58.0,
        91: 60.0,
        92: 64.0,
        93: 68.0,
        94: 73.0,
        95: 80.0,
    }

    def __init__(self, vent):
        # The Vent whose settings this protocol instance manages.
        self.vent = vent

    @staticmethod
    def spo2ToPaO2(spo2):
        """Estimate PaO2 from an SpO2 percentage; None/falsy passes through as None.

        Values at/below the band floor (89%) or at/above the ceiling (95%)
        map just outside the PaO2 target range so callers titrate accordingly.
        """
        if not spo2:
            return None
        spo2 = int(spo2)
        if spo2 <= ARDSNet.__SPO2_MIN__:
            return ARDSNet.__PAO2_MIN__ - 1
        if spo2 >= ARDSNet.__SPO2_MAX__:
            return ARDSNet.__PAO2_MAX__ + 1
        return ARDSNet.__SPO2_TO_PAO2__.get(spo2)

    def adjustVent(self, ph=None, o2=None, pplat=None, hp=False):
        """Produce (and adopt) new vent settings from the given measurements.

        :param ph: arterial pH, adjusts rate/tidal volume
        :param o2: PaO2 (mmHg), adjusts FiO2/PEEP via the titration ladders
        :param pplat: plateau pressure (cmH2O), adjusts tidal volume
        :param hp: if True use the higher-PEEP/lower-FiO2 ladder
        """
        new = Vent(self.vent.vt, self.vent.rr, self.vent.fio2, self.vent.peep)
        if o2:
            lphf, hplf = self.adjustByPaO2(o2, self.vent.fio2, self.vent.peep)
            new.fio2, new.peep = hplf if hp else lphf
        if pplat:
            new.vt = self.adjustByPplat(pplat, self.vent.vt)
        if ph:
            new.vt, new.rr = self.adjustBypH(ph, new.vt, self.vent.rr)
        # Drift tidal volume down toward the 6 ml/kg goal when nothing else
        # changed it and pH is not critically low.
        if round(new.vt) > self.__VT_GOAL__ and self.vent.vt == new.vt and (not ph or ph > self.__PH_MIN__):
            new.vt = round(new.vt) - 1
        self.vent = new
        return new

    def adjustByPplat(self, pplat, vt):
        """Adjust tidal volume (ml/kg) toward the plateau-pressure window, clamped to [4, 8]."""
        if pplat > self.__PPLAT_MAX__:
            vt = round(vt) - 1
        elif pplat < self.__PPLAT_MIN__ and vt < 6:
            vt = round(vt) + 1
        if vt > self.__VT_MAX__:
            vt = self.__VT_MAX__
        elif vt < self.__VT_MIN__:
            vt = self.__VT_MIN__
        return vt

    def adjustBypH(self, pH, vt, rr):
        """Adjust (vt, rr) for acidosis/alkalosis per the protocol's pH rules."""
        if pH < self.__PH_MIN__:
            # Severe acidosis: max out the rate first, then raise tidal volume.
            if self.vent.rr < self.__RR_MAX__:
                rr = self.__RR_MAX__
            else:
                rr = self.__RR_MAX__
                vt = round(vt) + 1
                if vt > self.__VT_MAX__:
                    vt = self.__VT_MAX__
        elif pH < self.__PH_GOAL_MIN__:
            rr = rr + 2
            if rr > self.__RR_MAX__:
                rr = self.__RR_MAX__
        elif pH > self.__PH_GOAL_MAX__:
            rr = rr - 2
        return vt, rr

    def adjustBySpO2(self, spo2, fio2, peep):
        """Like adjustByPaO2, but starting from an SpO2 percentage."""
        pao2 = self.spo2ToPaO2(spo2)
        return self.adjustByPaO2(pao2, fio2, peep)

    def adjustByPaO2(self, pao2, fio2, peep):
        """Titrate (fio2, peep) along both ladders for the given PaO2.

        Returns two candidate pairs: (lower-PEEP arm, higher-PEEP arm).
        Below target: step up to the next ladder rung at-or-above the
        current settings. Above target: step down (rate-limited by
        changeLimiter). In range: return the current settings unchanged.
        """
        lphf = (fio2, peep)
        hplf = (fio2, peep)
        if pao2 < self.__PAO2_MIN__:
            ll = [x for x in self.__LOWER_PEEP_HIGHER_FIO2__ if (x[0] >= fio2 and x[1] > peep) or (x[0] > fio2 and x[1] >= peep)]
            lh = [x for x in self.__HIGHER_PEEP_LOWER_FIO2__ if (x[0] >= fio2 and x[1] > peep) or (x[0] > fio2 and x[1] >= peep)]
            if len(ll) > 0:
                lphf = ll[0]
            if len(lh) > 0:
                hplf = lh[0]
        elif pao2 > self.__PAO2_MAX__:
            hl = [x for x in self.__LOWER_PEEP_HIGHER_FIO2__[::-1] if (x[0] <= fio2 and x[1] < peep) or (x[0] < fio2 and x[1] <= peep)]
            hh = [x for x in self.__HIGHER_PEEP_LOWER_FIO2__[::-1] if (x[0] <= fio2 and x[1] < peep) or (x[0] < fio2 and x[1] <= peep)]
            if len(hl) > 0:
                lphf = self.changeLimiter(hl[0], fio2, peep)
            if len(hh) > 0:
                hplf = self.changeLimiter(hh[0], fio2, peep)
        return lphf, hplf

    def changeLimiter(self, pair, fio2, peep):
        """Cap a downward (fio2, peep) step to the per-step weaning limits.

        PEEP of 6 is snapped to 5 so settings stay on ladder values.
        """
        newfio2, newpeep = pair
        if fio2 - newfio2 > self.__FIO2_DELTA_MAX__:
            newfio2 = fio2 - self.__FIO2_DELTA_MAX__
        if peep - newpeep > self.__PEEP_DELTA_MAX__:
            newpeep = peep - self.__PEEP_DELTA_MAX__
        if newpeep == 6:
            newpeep = 5
        return newfio2, newpeep
| true |
daf62555433e2d4fcd82423f7585ec0abdb4a8fb | Python | yamlfullsan/cursopython | /condicional.py | UTF-8 | 226 | 3.609375 | 4 | [] | no_license | print("Programa de evaluación")
# Read the grade as text; converted to int at the call site below.
nota_alumno=input("Introduce la nota: ")
def evaluacion(nota):
    """Return 'aprobado' (pass) when nota >= 5, otherwise 'suspenso' (fail)."""
    return "suspenso" if nota < 5 else "aprobado"
# Evaluate the entered grade and print the verdict.
print(evaluacion(int(nota_alumno)))
| true |
68bafc365711610eb56fcd7122baaa2f8c6dcc7e | Python | vinayakushakola/Patterns | /2. NumberPatterns.py | UTF-8 | 2,314 | 3.8125 | 4 | [] | no_license | print("Pattern 1")
# Pattern 1: row i repeats its own row number i times.
for i in range(1, 5):
    for j in range(i):
        print(f'{i} ', end="")
    print()
print("Pattern 2")
# Pattern 2: row i counts 1..i.
for i in range(1, 5):
    for j in range(i):
        print(f'{j+1} ', end="")
    print()
print("Pattern 3")
# Pattern 3: diamond of row numbers — widening top half, then narrowing bottom.
for i in range(1,6):
    for j in range(5-i):
        print(end=" ")
    for k in range(i):
        print(f'{i} ', end="")
    print()
space = 1
for i in range(4,0,-1):
    for j in range(space):
        print(end=" ")
    for k in range(i):
        print(f'{i} ',end="")
    print()
    space += 1
print()
print("Pattern 4")
# Pattern 4: triangle of consecutive integers, one counter across all rows.
number = 1
for i in range(1,5):
    for j in range(i):
        print(f'{number} ', end="")
        number += 1
    print()
print("Pattern 5")
# Pattern 5: same triangle, then a mirrored bottom half driven by three
# per-row counters (number2/number3/number4).
number = 1
for i in range(1,5):
    for j in range(i):
        print(f'{number} ', end="")
        number += 1
    print()
number2 = 4
number3 = 2
number4 = 1
for i in range(1,4):
    for j in range(4-i):
        if i == 1:
            print(f'{number2} ', end="")
            number2 += 1
        elif i==2:
            print(f'{number3} ', end="")
            number3 += 1
        elif i == 3:
            print(f'{number4} ', end="")
    print()
print("Pattern 6")
# Pattern 6: centered version of Pattern 5 (leading spaces shift each row).
number = 1
for i in range(1,5):
    for k in range(4-i):
        print(end=" ")
    for j in range(i):
        print(f'{number} ', end="")
        number += 1
    print()
number2 = 4
number3 = 2
number4 = 1
for i in range(1,4):
    for k in range(i):
        print(end=" ")
    for j in range(4-i):
        if i == 1:
            print(f'{number2} ', end="")
            number2 += 1
        elif i==2:
            print(f'{number3} ', end="")
            number3 += 1
        elif i == 3:
            print(f'{number4} ', end="")
    print()
print("Pattern 7")
# Pattern 7: same as Pattern 6 but with wider (space-padded) cells.
number = 1
for i in range(1,5):
    for k in range(4-i):
        print(end=" ")
    for j in range(i):
        print(f' {number} ', end="")
        number += 1
    print()
number2 = 4
number3 = 2
number4 = 1
for i in range(1,4):
    for k in range(i):
        print(end=" ")
    for j in range(4-i):
        if i == 1:
            print(f' {number2} ', end="")
            number2 += 1
        elif i==2:
            print(f' {number3} ', end="")
            number3 += 1
        elif i == 3:
            print(f' {number4} ', end="")
    print()
print("Pattern 8")
| true |
92e89899ddaeeadb607798814185ff1872e80914 | Python | shahineb/archives1819 | /reinforcement-learning/HWK1/1_Dynamic_Programming/utils.py | UTF-8 | 627 | 3.078125 | 3 | [] | no_license | import numpy as np
def bellman_operator(r, P, V, gamma):
    """Apply the optimal Bellman operator to a state-value vector.

    Fixes: the docstring documented a nonexistent parameter ``x`` and
    described the return value as a scalar; the opaque local ``foo`` is
    renamed.

    Parameters
    ----------
    r : numpy.array
        reward matrix, shape (n_states, n_actions)
    P : numpy.array
        transition probability matrix, shape (n_states, n_actions, n_states)
    V : numpy.array
        state value function vector, shape (n_states,)
    gamma : float
        discount factor

    Returns
    -------
    numpy.array
        shape (n_states,): max_a [ r(s, a) + gamma * sum_s' P(s, a, s') V(s') ]
    """
    q_values = r + gamma * np.sum(P * V, axis=2)
    return np.max(q_values, axis=1)
| true |
88ed2453ff90332e0f1c68cb537a1031bacf11b5 | Python | mahdifarhang/DA_CAs | /ca2/q2.py | UTF-8 | 550 | 2.96875 | 3 | [] | no_license | n, m = [int(x) for x in raw_input().split()]
array = [[int(x), 0] for x in raw_input().split()]
temp = [int(x) for x in raw_input().split()]
for i in xrange(n):
array[i][1] = temp[i]
rooms = []
for i in xrange(m):
rooms.append([0, 0])
sorted_list = sorted(array, key=lambda x: x[0])
flag = True
for i in xrange(n):
for j in xrange(m):
if (sorted_list[i][0] >= rooms[j][1]):
rooms[j][0] = sorted_list[i][0]
rooms[j][1] = sorted_list[i][1]
break
elif (j == m - 1):
print(0)
flag = False
if not(flag):
break
if (flag):
print(1) | true |
71dd9b29b8044832face453ea3c46a0acd5f9922 | Python | Fredooooooo/incubator-iotdb | /importerCSV-py/src/utils/RowRecord.py | UTF-8 | 978 | 2.59375 | 3 | [
"Apache-2.0",
"EPL-1.0",
"MIT",
"BSD-3-Clause",
"CDDL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | from IoTDBConstants import TSDataType
from Field import Field
class RowRecord(object):
    """One timestamped row of values (a timestamp plus a list of Fields).

    Fixes: the class previously defined ``add_field`` twice, so the
    single-argument overload was silently dead code — both call shapes are
    now supported by one method. The ``field_list=None`` default also used
    to leave the internal list as None, crashing on the first append.
    """

    def __init__(self, timestamp, field_list=None):
        self.__timestamp = timestamp
        # Start with an empty list when no fields were supplied.
        self.__field_list = [] if field_list is None else field_list

    def add_field(self, value, data_type=None):
        """Append a field to the row.

        With one argument, *value* is appended as-is (a ready Field).
        With two arguments, a Field is built from *value* and *data_type*.
        """
        if data_type is None:
            self.__field_list.append(value)
        else:
            self.__field_list.append(Field.get_field(value, data_type))

    def __str__(self):
        # "<timestamp>\t\t<field>\t\t<field>..." — tab-separated for display.
        str_list = [str(self.__timestamp)]
        for field in self.__field_list:
            str_list.append("\t\t")
            str_list.append(str(field))
        return "".join(str_list)

    def get_timestamp(self):
        return self.__timestamp

    def set_timestamp(self, timestamp):
        self.__timestamp = timestamp

    def get_fields(self):
        return self.__field_list

    def set_fields(self, field_list):
        self.__field_list = field_list

    def set_field(self, index, field):
        self.__field_list[index] = field
| true |
ed2a5fa7b84ef49ec8b7121a664994a5f9aa4e5c | Python | bgants/vagrantProjects | /spark/testPythonContext.py | UTF-8 | 575 | 2.625 | 3 | [] | no_license | from __future__ import division
from pyspark import SparkConf, SparkContext
import sys
# Minimal local Spark job: count rows mentioning "diesel" in a CSV of autos.
conf = SparkConf().setMaster("local").setAppName("My App")
sc = SparkContext(conf = conf)
# Each element is one raw text line of the CSV.
autoData = sc.textFile("/vagrant/autos.csv")
autoCount = autoData.count()
# Substring match, not a column match - counts any line containing "diesel".
diesels = autoData.filter(lambda line: "diesel" in line)
dieselCount = diesels.count()
print("Total autos {} ".format(autoCount) )
print("Total diesels {} ".format(dieselCount) )
# True division is guaranteed by the __future__ import at the top of the file.
dieselPercentage = ((dieselCount/autoCount) * 100)
print("Percentage of diesels to autos is {} ".format(dieselPercentage) )
sys.exit()
| true |
745046ba178a80a3afd01a2a2cb608008f4013df | Python | gauravaror/programming | /calcEqn.py | UTF-8 | 1,107 | 2.96875 | 3 | [] | no_license | from collections import defaultdict
class Solution:
    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        """Evaluate division queries over a weighted graph of known ratios.
        Each equation a / b = v becomes two directed edges (a->b weight v,
        b->a weight 1/v); a query x / y is the product of edge weights along
        any path from x to y, or -1 when no path exists.
        """
        # graph[a][b] holds the value of a / b.
        graph = defaultdict(dict)
        for (num, den), ratio in zip(equations, values):
            graph[num][den] = ratio
            graph[den][num] = 1 / ratio

        def search(src, dst):
            # Iterative depth-first walk accumulating the product of weights.
            visited = {src}
            frontier = [(1, src)]
            while frontier:
                acc, node = frontier.pop()
                if node not in graph:
                    return -1
                if node == dst:
                    return acc
                for nxt, weight in graph[node].items():
                    if nxt not in visited:
                        visited.add(nxt)
                        frontier.append((acc * weight, nxt))
            return -1

        answers = []
        for src, dst in queries:
            if src not in graph or dst not in graph:
                # Unknown variable: the query is unanswerable.
                answers.append(-1)
            else:
                answers.append(search(src, dst))
        return answers
| true |
4015da9a11ffac7f2fafb3e70b52b19a7096259e | Python | BrandonKirklen/randomProjects | /AdventOfCode2018/day1/day1.py | UTF-8 | 1,370 | 3.640625 | 4 | [] | no_license | #!/usr/bin/python
import unittest
def freqCalc(changes):
    """Return the final frequency after applying every change once.
    Fix: the parameter was named ``input``, shadowing the builtin; all
    callers in this file pass it positionally, so the rename is safe.
    Args:
        changes: iterable of integer frequency deltas.
    Returns:
        int: the sum of all deltas, starting from 0.
    """
    return sum(changes)
def freqDouble(changes):
    """Return the first cumulative frequency that is reached twice, cycling
    through *changes* as many times as needed (Advent of Code 2018, day 1
    part 2).
    Fixes: removed leftover commented-out debug prints, replaced the
    ``found`` flag plus nested ``break`` with a direct return, and renamed
    the parameter away from the builtin ``input`` (all callers are
    positional).
    Args:
        changes: iterable of integer frequency deltas.
    Returns:
        int: the first running total seen a second time (0 counts as seen).
    Note:
        Loops forever if *changes* is empty or never repeats a total, same
        as the original implementation.
    """
    seen = {0}
    total = 0
    while True:
        for delta in changes:
            total += delta
            if total in seen:
                return total
            seen.add(total)
class MyTest(unittest.TestCase):
    """Unit tests for the two puzzle functions (examples from the problem)."""
    def test_calcs(self):
        # Part 1: plain sum of deltas.
        self.assertEqual(freqCalc([1,1,1]), 3)
        self.assertEqual(freqCalc([1,1,-2]), 0)
        self.assertEqual(freqCalc([-1,-2,-3]), -6)
    def test_doubles(self):
        # Part 2: first repeated running total, cycling the input.
        self.assertEqual(freqDouble([1, -1]), 0)
        self.assertEqual(freqDouble([3,3,4,-2,-4]), 10)
        self.assertEqual(freqDouble([-6,3,8,5,-6]), 5)
        self.assertEqual(freqDouble([7,7,-2,-7,-4]), 14)
def main():
    """Read the puzzle input (one integer per line) and print both answers."""
    # NOTE(review): hard-coded absolute, user-specific path - breaks on any
    # other machine; consider deriving it from __file__.
    with open("/Users/brandonkirklen/Code/Personal/randomProjects/AdventOfCode2018/day1/input.txt") as f:
        content = []
        for line in f:
            # Skip blank lines; int() tolerates the trailing newline and '+'.
            if line.strip():
                content.append(int(line))
    print("Freq: " + str(freqCalc(content)))
    print("Freq at First Double: " + str(freqDouble(content)))
if __name__ == '__main__':
# unittest.main()
main()
| true |
734226c0713e8c41a09405dacd398f0c6a0eb9b8 | Python | Mongoos/Hello-python-projects | /random_codes/Battleship.py | UTF-8 | 206 | 2.765625 | 3 | [] | no_license | import numpy as np
import random as rn
def generate_your_board(board):
    """Create and print an empty 10x10 board of zeros.
    NOTE(review): the original docstring promised interactive battleship
    placement, which is not implemented; ``board`` is currently unused and
    the function returns None - TODO confirm the intended signature and
    return value.
    """
    your_board = np.zeros(shape=(10,10))
    print(your_board)
| true |
3c7ee08ef83da00c0171b24e0ccc8a78a0220936 | Python | vandanparmar/SURFcode | /dct/sim/dctcont.py | UTF-8 | 18,022 | 3.15625 | 3 | [] | no_license | """
.. _continuous:
Continuous Simulation (:mod:`cont`)
=========================================
Solving setups of the form,
.. math::
\dot{x} = \mathbf{A}x + \mathbf{B}u \n
y = \mathbf{C}x + \mathbf{D}u
Relevant Examples
******************
* :ref:`continuous_eg`
* :ref:`network_eg`
Initialisation and Setting Matrices
************************************
.. autosummary::
:toctree:
cont.__init__
cont.setABC
cont.setA
cont.setB
cont.setC
cont.setx0
cont.set_plot_points
Getting Values
***************
.. autosummary::
:toctree:
cont.get_x
cont.get_y
cont.get_x_set
cont.get_y_set
Plotting and Saving
********************
.. autosummary::
:toctree:
cont.set_plot_points
cont.plot
cont.plot_state
cont.plot_output
cont.plot_impulse
cont.plot_step
cont.plot_comp
cont.save_state
cont.save_output
cont.lqr
cont.inf_lqr
Simulation Properties
**********************
.. autosummary::
:toctree:
cont.is_controllable
cont.is_observable
cont.is_stable
"""
import numpy as np
from scipy import linalg
from dct.tools import *
class cont:
"""Class to contain objects and functions for carrying out continuous simulations of the form,
Attributes:
A (ndarray): Drift matrix
B (ndarray): Input matrix
C (ndarray): Output matrix
plot_points (int): Number of points to plot when plotting, default is 100
x0 (ndarray): Initial conditions
"""
    def __init__(self,n=None,no=None,nu=None):
        """Generate a cont object.
        With ``n`` omitted, an empty not-ready object is created and the
        matrices must be supplied later via :meth:`setABC`.
        Args:
            n (int, optional): Dimension of the n x n drift matrix, A
            no (int, optional): Number of outputs; C is generated as no x n
            nu (int, optional): Number of inputs; B is generated as n x nu
        """
        if(n is None):
            print('Initilising with empty matrices, please specify using "setABC".')
            self.A = np.array([])
            self.B = None
            self.C = None
            # NOTE(review): x0 is not initialised on this branch; it is only
            # created later by setABC.
            self.__ready = False
        else:
            self.__ready = True
            # Random stable drift matrix plus random B/C (helpers from dct.tools).
            self.A = random_stable(n)
            if(no is None):
                self.C = None
            else:
                self.C = random_mat(no,n)
            if(nu is None):
                self.B = None
            else:
                self.B = random_mat(n,nu)
            self.x0 = np.random.rand(n,1)
        # Default number of samples used by the plotting helpers.
        self.plot_points = 100
    def setABC(self,A,B=None,C=None):
        """Set A, B, C matrices for a continuous simulation.
        Args:
            A (ndarray): Drift matrix (must be square)
            B (ndarray, optional): Input matrix, n x nu
            C (ndarray, optional): Output matrix, no x n
        Returns:
            cont: Updated cont object
        """
        shapeA = np.shape(A)
        if(shapeA[0] == shapeA[1]):
            self.A = np.array(A)
            n = shapeA[0]
            # Fresh random initial conditions sized to the new state dimension.
            self.x0 = np.random.rand(n,1)
            self.__ready = True
        else:
            # NOTE(review): if A is rejected, `n` is left unbound and the B/C
            # checks below will raise NameError - confirm intended behaviour.
            print('Please supply a square A matrix.')
        if(C is not None):
            # C must have n columns (one per state).
            if(np.shape(C)[1]==n):
                self.C = C
            elif(np.shape(C)[0]==n):
                # self.C = np.transpose(np.array(C))
                print('Dimensions ',np.shape(C),' are not acceptable. You may wish to transpose this matrix.')
            else:
                print('Dimensions ',np.shape(C),' are not acceptable, please reenter.')
        if(B is not None):
            # B must have n rows (one per state).
            if(np.shape(B)[0]==n):
                self.B = np.array(B)
            elif(np.shape(B)[1]==n):
                # self.B = np.transpose(np.array(B))
                print('Dimensions ',np.shape(B),' are not acceptable. You may wish to transpose this matrix.')
            else:
                print('Dimensions ',np.shape(B),' are not acceptable, please reenter.')
        return self
def ready(self):
if(self.__ready):
return True
else:
print('Please set A, B and C using setABC.')
return False
def setA(self,A):
"""Set drift matrix, A.
Args:
A (ndarray): n x n drift matrix, A
Returns:
cont: Updated cont object
"""
if(self.C is not None):
if(np.shape(A)[0]==np.shape(self.C)[0]):
self.A = np.array(A)
else:
print('Dimensions of A not compatible, please try again.')
else:
print('Please set A, B and C using setABC.')
return self
    def setB(self,B):
        """Set input matrix, B.
        Args:
            B (ndarray): n x nu input matrix, B
        Returns:
            cont: Updated cont object
        """
        n = np.shape(self.A)[0]
        # B must have one row per state.
        if(np.shape(B)[0]==n):
            self.B = np.array(B)
        elif(np.shape(B)[1]==n):
            # Deliberately not auto-transposing; the user is told instead.
            # self.B = np.transpose(np.array(B))
            print('Dimensions ',np.shape(B),' are not acceptable. You may wish to transpose this matrix.')
        else:
            print('Dimensions ',np.shape(B),' are not acceptable, please reenter.')
        return self
    def setC(self,C):
        """Set output matrix, C.
        Args:
            C (ndarray): no x n output matrix, C
        Returns:
            cont: Updated cont object
        """
        n = np.shape(self.A)[0]
        # C must have one column per state.
        if(np.shape(C)[1]==n):
            self.C = np.array(C)
        elif(np.shape(C)[0]==n):
            # Deliberately not auto-transposing; the user is told instead.
            # self.C = np.transpose(np.array(C))
            print('Dimensions ',np.shape(C),' are not acceptable. You may wish to transpose this matrix.')
        else:
            print('Dimensions ',np.shape(C),' are not acceptable, please reenter.')
        return self
def setx0(self,x0):
"""Set intital conditions, x0.
Args:
x0 (ndarray): n x 1 initial conditions, x0
Returns:
cont: Updated cont object
"""
if(np.shape(x0)==(np.shape(self.A)[0],1)):
self.x0 = x0
else:
print('x0 dimensions should be',(np.shape(self.A)[0],1),', please try again.')
return self
def set_plot_points(self,points):
"""Set number of points to use when plotting, plot_points.
Args:
points (int): The number of points to use
Returns:
cont: Updated cont object
"""
if(points<10000):
self.plot_points = points
return self
def get_x(self,t):
"""Calculate a state vector at a particular time.
Args:
t (int): Time at which to return state vector
Returns:
ndarray: n x 1 state vector at time t
"""
if(self.ready()):
x = np.matmul(linalg.expm(self.A*t),self.x0)
return x
def get_y(self,t):
"""Calculate an output vector at a particular time.
Args:
t (int): Time at which to return output vector
Returns:
ndarray: no x 1 output vector at time t
"""
if(self.ready()):
y = np.matmul(self.C,self.get_x(t))
return y
    def get_C_dim(self):
        """Return the number of outputs (rows of C); 1 when C is a 1-D vector."""
        if(self.ready()):
            dim = np.shape(self.C)
            if(len(dim)==1):
                # A 1-D C behaves as a single output row.
                toReturn = 1
            else:
                toReturn = dim[0]
            return toReturn
    def get_B_dim(self):
        """Return the number of inputs (columns of B); 1 when B is a 1-D vector."""
        if(self.ready()):
            dim = np.shape(self.B)
            if(len(dim)==1):
                # A 1-D B behaves as a single input column.
                toReturn = 1
            else:
                toReturn = dim[1]
            return toReturn
    def get_x_set(self,times):
        """Calculate a set of x values.
        Args:
            times (array): Array of times at which to return state vectors
        Returns:
            ndarray: n x len(times) set of state vectors
        """
        if(self.ready()):
            # Stack each n x 1 state vector as a new column.
            xs = self.get_x(times[0])
            for time in times[1:]:
                # NOTE: np.append re-allocates each iteration (quadratic), but
                # is fine for the ~100 samples used for plotting.
                xs = np.append(xs,self.get_x(time),axis=1)
            return xs
    def get_y_set(self,times,xs=None):
        """Calculate a set of y values.
        Args:
            times (array): Array of times at which to return output vectors
            xs (ndarray, optional): Existing array of state vectors
        Returns:
            ndarray: n0 x len(times) set of output vectors
        """
        if(self.ready()):
            if(xs is None):
                # Compute each output column individually.
                ys = self.get_y(times[0])
                for time in times[1:]:
                    ys = np.append(ys,self.get_y(time),axis=1)
            else:
                # With precomputed states, all outputs come from one product.
                ys = np.matmul(self.C,xs)
            return ys
    def is_controllable(self):
        """Tests if the cont object is controllable.
        Returns:
            bool: Boolean, true if the cont configuration is controllable
            ndarray: Controllability grammian from Lyapunov equation
        """
        if (self.ready()):
            if (self.B is not None):
                # Solve the controllability Lyapunov equation A X + X A^H = -B B^H.
                q = -np.matmul(self.B,self.B.conj().T)
                x_c = linalg.solve_lyapunov(self.A,q)
                # Controllable iff the Gramian is positive definite (every
                # eigenvalue strictly positive).
                controllable = (linalg.eigvals(x_c)>0).sum() == np.shape(self.A)[0]
                return [controllable,x_c]
            else:
                print("Please set B.")
    def is_observable(self):
        """Tests if the cont object is observable.
        Returns:
            bool: Boolean, true if the cont configuration is observable
            ndarray: Observability grammian from Lyapunov equation
        """
        if (self.ready()):
            if(self.C is not None):
                # Solve the observability Lyapunov equation A^H Y + Y A = -C^H C.
                q = -np.matmul(self.C.conj().T,self.C)
                y_o = linalg.solve_lyapunov(self.A.conj().T,q)
                y_o = y_o.conj().T
                # Observable iff the Gramian is positive definite.
                observable = (linalg.eigvals(y_o)>0).sum() == np.shape(self.A)[0]
                return [observable,y_o]
            else:
                print("Please set C.")
def is_stable(self):
"""Tests if the cont object is stable.
Returns:
bool: Boolean, true if the cont configuration is observable
array: The eigenvalues of the A matrix
"""
if (self.ready()):
eigs = linalg.eigvals(self.A)
toReturn = False
if ((np.real(eigs)<=0).sum()) == np.shape(self.A)[0]:
toReturn = True
return [toReturn,eigs]
def impulse(self,time):
"""
"""
if(self.ready()):
if(self.B is not None and self.C is not None):
h = np.matmul(np.matmul(self.C,np.expm(self.A*time)),self.B)
return h
else:
print("Please set A, B and C.")
def step(self,time):
"""
"""
if(self.ready()):
if(self.B is not None and self.C is not None):
a_inv = linalg.inv(self.A)
s = np.matmul(self.C,np.matmul(a_inv,np.matmul(linalg.expm(time*self.A)-np.identity(np.shape(self.A)[0]),self.B)))
return s
else:
print("Please set A,B and C first.")
def save_state(self,filename,times,plot_points=None,xs=None):
"""Save a set of state vectors.
Args:
filename (str): Name of file or filepath for save file
times (array): Array of times of state vectors to be saved
plot_points (int, optional): Number of points to save, defaults to self.plot_points
xs (ndarray, optional): Existing set of state vectors to save
Returns:
cont: To allow for chaining
"""
if(self.ready()):
if(plot_points is None):
plot_points = self.plot_points
eigvals = linalg.eigvals(self.A)
start,end = times
if(xs is None):
self.get_x_set(times)
if(len(xs)>10000):
print('Too many states to save.')
else:
comment = 'A eigenvalues: '+ str(eigvals)+'\nstart time: '+str(start)+'\nend time: '+str(end)
np.savetxt(filename,xs,header=comment)
return self
def save_output(self,filename,times,plot_points=None,ys=None):
"""Save a set of output vectors
Args:
filename (str): Name of file or filebath for save file
times (int): Array of times of output vectors to be saved
plot_points (int, optional): Number of points to save, defaults to self.plot_points
ys (ndarray, optional): Existing set of output vectors to save
Returns:
cont: To allow for chaining
"""
if(self.ready()):
if(plot_points is None):
plot_points = self.plot_points
eigvals = linalg.eigvals(self.A)
start,end = times
if(ys is None):
self.get_y_set(times)
if(len(ys)>10000):
print('Too many outputs to save.')
else:
comment = 'A eigenvalues: '+ str(eigvals)+'\nstart time: '+str(start)+'\nend time: '+str(end)
np.savetxt(filename,ys,header=comment)
return self
def plot(self,times,plot_points=None,filename=None,grid=False):
"""Plot both states and outputs (if C is given) of a cont object for a given amount of time.
Args:
times (array): An array for the form [start time, end time]
plot_points (int, optional): The number of points to use when plotting, default is the internal value, defaulted at 100
filename (str, optional): Filename to save output to, does not save if none provided
grid (bool, optional): Display grid, default is false
"""
if(self.ready()):
if(self.C is None):
self.plot_state(times,plot_points,filename,grid)
return
if(plot_points is None):
plot_points = self.plot_points
start,end = times
points = plot_points
t = np.linspace(start,end,points)
x = self.get_x_set(t)
print(np.shape(x))
y = self.get_y_set(t,x)
plot_sio(self,t,False,grid,x,y)
if(filename is not None):
filename_x = 'state_'+filename
filename_y = 'output_'+filename
self.save_state(filename_x,times,points,x)
self.save_output(filename_y,times,points,y)
    def plot_state(self,times,plot_points=None,filename=None,grid=False):
        """Plot states of a cont object for a given amount of time.
        Args:
            times (array): An array for the form [start time, end time]
            plot_points (int, optional): The number of points to use when plotting, default is the internal value, defaulted at 100
            filename (str, optional): Filename to save output to, does not save if none provided
            grid (bool, optional): Display grid, default is false
        """
        if(self.ready()):
            if(plot_points is None):
                plot_points=self.plot_points
            start,end = times
            points = plot_points
            # Sample the interval uniformly and compute the state trajectory.
            t = np.linspace(start,end,points)
            x = self.get_x_set(t)
            plot_sio(self,t,False,grid,x=x)
            if(filename is not None):
                self.save_state(filename,times,points,x)
    def plot_output(self,times,plot_points=None,filename=None,grid=False):
        """Plot outputs (if C is given) of a cont object for a given amount of time.
        Args:
            times (array): An array for the form [start time, end time]
            plot_points (int, optional): The number of points to use when plotting, default is the internal value, defaulted at 100
            filename (str, optional): Filename to save output to, does not save if none provided
            grid (bool, optional): Display grid, default is false
        """
        if(self.ready()):
            if(plot_points is None):
                plot_points=self.plot_points
            start,end = times
            points = plot_points
            # Sample the interval uniformly and compute the output trajectory.
            t = np.linspace(start,end,points)
            y = self.get_y_set(t)
            plot_sio(self,t,False,grid,y=y)
            if(filename is not None):
                self.save_output(filename,times,points,y)
    def plot_impulse(self,times,inputs=None, outputs=None,plot_points=None,filename=None,grid=False):
        """Plots output responses to input impulses grouped by input.
        Args:
            times (array): An array of the form [start time, end time]
            inputs (array, optional): The inputs to plot, defaults to all inputs
            outputs (array, optional): The outputs to plot, defaults to all outputs
            plot_points (int, optional): The number of points to use when plotting, default is the internal value, defaulted at 100
            filename (str, optional): Filename to save output to, does not save if none provided
            grid (bool, optional): Display grid, default is false
        """
        if(self.ready()):
            if(self.B is not None and self.C is not None):
                start,end = times
                t = np.linspace(start,end,self.plot_points)
                # Default to plotting every input/output channel (1-indexed).
                if(inputs is None):
                    inputs = np.arange(1,np.shape(self.B)[1]+1)
                if(outputs is None):
                    outputs = np.arange(1,np.shape(self.C)[0]+1)
                if(plot_points is None):
                    plot_points = self.plot_points
                # h(t) = C e^{At} B evaluated at each sample time.
                impulse = np.array([np.matmul(self.C,np.matmul(linalg.expm(self.A*t_i),self.B)) for t_i in t])
                #impulse[t,n_c,n_b]
                plot_resp(self,t,inputs,outputs,False,grid,impulse,"Impulse")
                # NOTE(review): saving to `filename` is not implemented yet.
                if(filename is not None):
                    return
            else:
                print("Please set A, B and C.")
    def plot_step(self,times,inputs=None, outputs=None,plot_points=None,filename=None,grid=False):
        """Plots output responses to step inputs grouped by input.
        Args:
            times (array): An array of the form [start time, end time]
            inputs (array, optional): The inputs to plot, defaults to all inputs
            outputs (array, optional): The outputs to plot, defaults to all outputs
            plot_points (int, optional): The number of points to use when plotting, default is the internal value, defaulted at 100
            filename (str, optional): Filename to save output to, does not save if none provided
            grid (bool, optional): Display grid, default is false
        """
        if(self.ready()):
            if(self.B is not None and self.C is not None):
                start,end = times
                t = np.linspace(start,end,self.plot_points)
                # Default to plotting every input/output channel (1-indexed).
                if(inputs is None):
                    inputs = np.arange(1,np.shape(self.B)[1]+1)
                if(outputs is None):
                    outputs = np.arange(1,np.shape(self.C)[0]+1)
                if(plot_points is None):
                    plot_points = self.plot_points
                # s(t) = C A^{-1} (e^{At} - I) B evaluated at each sample time.
                inv_a = linalg.inv(self.A)
                step = np.array([np.matmul(self.C,np.matmul(inv_a,np.matmul(linalg.expm(self.A*t_i)-np.identity(np.shape(self.A)[0]),self.B))) for t_i in t])
                #step[t,n_c,n_b]
                plot_resp(self,t,inputs,outputs,False,grid,step,"Step")
                # NOTE(review): saving to `filename` is not implemented yet.
                if(filename is not None):
                    return
            else:
                print("Please set A, B and C.")
    def lqr(self,R,Q,Q_f,times=None,grid=False,plot_points=None):
        """Finite-horizon linear quadratic regulator.
        NOTE(review): unimplemented stub - only the default weighting
        matrices are constructed and then discarded; the function always
        returns None.  See inf_lqr for the working infinite-horizon variant.
        """
        if(self.ready()):
            if(self.B is not None):
                # Default weights: small input cost, unit state cost.
                if(R is None):
                    R = 0.2*np.eye(np.shape(self.B)[1])+1e-6
                if(Q is None):
                    Q = np.eye(np.shape(self.A)[0])
        return
    def inf_lqr(self,R,Q,times=None,grid=False,plot_points=None):
        """Computes the infinite horizon linear quadratic regulator given weighting matrices, R and Q. Can plot inputs and state.
        Args:
            R (ndarray): Input weighting matrix
            Q (ndarray): State weighting matrix
            times (array, optional): An array of the form [start time, end time], does not plot if not specified
            plot_points (int, optional): The number of points to use when plotting, default is the internal value, defaulted at 100
            grid (bool, optional): Display grid, default is false
        Returns:
            (tuple): tuple containing:
                - P (ndarray): Solution to the continuous algebraic Ricatti equation
                - K (ndarray): The input matrix, u = Kx
        """
        if(self.ready()):
            if(self.B is not None):
                # Default weights: small input cost, unit state cost.
                if(R is None):
                    R = 0.2*np.eye(np.shape(self.B)[1])+1e-6
                if(Q is None):
                    Q = np.eye(np.shape(self.A)[0])
                # Solve the continuous algebraic Riccati equation, then form
                # the optimal state feedback K = -R^{-1} B^T P.
                P = linalg.solve_continuous_are(self.A,self.B,Q,R)
                K = -np.matmul(linalg.inv(R),np.matmul(self.B.T,P))
                if(times is not None):
                    if(plot_points is None):
                        plot_points = self.plot_points
                    start,end = times
                    t = np.linspace(start,end,plot_points)
                    # Closed-loop trajectories under A + BK from x0.
                    x = np.array([np.matmul(linalg.expm((self.A+np.matmul(self.B,K))*t_i),self.x0) for t_i in t])
                    u = np.matmul(K,x)
                    # Drop the trailing singleton axis and transpose to
                    # (channels, time) for plotting.
                    x = x[:,:,0].T
                    u = u[:,:,0].T
                    plot_sio(self,t,False,grid,x=x,u=u)
                return (P,K)
            else:
                print("Please set A, B and C using setABC.")
    def plot_comp(self,length=0):
        """Compass plot of the eigenvectors with the lowest damping ratios.
        Args:
            length (int, optional): number of modes to show (1, 2 or 4); any
                other value auto-selects up to 4 depending on system size.
        """
        vals,vecs = linalg.eig(self.A)
        # Damping ratio of each mode: -Re(lambda)/|lambda|.
        d_ratios = -np.real(vals)/np.abs(vals)
        # Sort modes from least to most damped.
        pairs = sorted(zip(d_ratios,vecs.T),key = lambda x: x[0])
        if(length ==2 or length ==4):
            # [::2] keeps one member of each conjugate pair.
            pairs = pairs[::2][0:length]
        elif(length==1):
            pairs = np.array([pairs[0]])
        else:
            # Auto-select as many modes as the system size allows.
            if (len(pairs)>=4):
                pairs = pairs[::2][0:4]
            elif(len(pairs)>=2):
                pairs = pairs[::2][0:2]
            else:
                pairs = np.array([pairs[0]])
        compass(pairs)
| true |
3569e26a85575f8de8663f4ac921e5237a8565a7 | Python | Amazon-Lab206-Python/Todd_Enders | /OOP/MathDojo.py | UTF-8 | 737 | 3.609375 | 4 | [] | no_license | class MathDojo(object):
    def __init__(self):
        # Running total updated by add/subtract.
        self.result = 0
def add(self, *nums):
for obj in nums:
if type(obj) is list or type(obj) is tuple:
for num in obj:
self.result += num
else:
self.result += obj
return self
def subtract(self, *nums):
for obj in nums:
if type(obj) is list or type(obj) is tuple:
for num in obj:
self.result -= num
else:
self.result -= obj
return self
md = MathDojo()
print md.add(2).add(2,5).subtract(3,2).result
print md.add([1], 3,4).add([3,5,7,8], [2,4.3,1.25]).subtract(2, [2,3], [1.1,2.3]).result
| true |
59a0701749f40d289f4e13a24cb185869929101d | Python | JaeDukSeo/Personal_Daily_NeuralNetwork_Practice | /3_tensorflow/archieve/b_dense_net_part_practice.py | UTF-8 | 1,351 | 2.8125 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
# 1. Make the Graph
# TensorFlow 1.x / Python 2 demo: batch normalisation plus max/avg pooling
# on tiny hand-written inputs.
graph = tf.Graph()
with graph.as_default():
    # 3x3 input for batch normalisation.
    input_1 = tf.placeholder('float',[3,3])
    batch_norm = tf.contrib.layers.batch_norm(input_1)
    # NHWC 1x4x4x1 input for the pooling ops (2x2 window, stride 2, SAME pad).
    input_2 = tf.placeholder('float',[1,4,4,1])
    max_pool = tf.nn.max_pool(input_2,ksize=[1,2, 2,1], strides=[1, 2, 2,1], padding='SAME')
    avg_pool = tf.nn.avg_pool(input_2,ksize=[1,2, 2,1], strides=[1, 2, 2,1], padding='SAME')
# 2. Make the Session
with tf.Session(graph = graph) as sess :
    sess.run(tf.global_variables_initializer())
    batch_norm_input = np.array([
        [3,3,3],
        [3,4,3],
        [3,3,3]
    ])
    batch_norm_data = sess.run(batch_norm, feed_dict={input_1:batch_norm_input})
    print batch_norm_data,'\n\n'
    max_pool_input = np.array([
        [3,3,0.4,2],
        [3,3,0.4,1],
        [3,3,3,4],
        [3,3,3,4],
    ])
    # Add batch and channel axes so the shape matches the NHWC placeholder.
    max_pool_input = np.expand_dims(max_pool_input,axis=0)
    max_pool_input = np.expand_dims(max_pool_input,axis=3)
    # Max Pool - choose the max element of each 2x2 window.
    max_pool_data = sess.run(max_pool, feed_dict={input_2:max_pool_input})
    print max_pool_data,'\n\n'
    # Avg Pool - average the values of each 2x2 window.
    avg_pool_data = sess.run(avg_pool, feed_dict={input_2:max_pool_input})
    print avg_pool_data
# ------ END OF THE CODE --- | true |
5d89c1a5987331b0966925288c47ca3b0e12bdb3 | Python | pro1zero/Machine-Learning-Loan-Prediction | /details.py | UTF-8 | 684 | 2.75 | 3 | [] | no_license | import sqlite3
# Dump every customer row from the local SQLite database, one labelled
# field per line.  Column order must match the SELECT list below.
conn = sqlite3.connect('customer.db')
cursor = conn.execute("SELECT NAME,GENDER,AGE,MARRIED,DEPENDENTS,EDUCATION,SELF_EMPLOYED,MONTHLY_INCOME,YEARLY_INCOME,LOAN_AMOUNT,LOAN_AMOUNT_TERM,PROPERTY_AREA from CUST_DATA")
for data in cursor:
    print("name=",data[0])
    print("gender=",data[1])
    print("age=",data[2])
    print("married=",data[3])
    print("dependents=",data[4])
    print("education=",data[5])
    print("self_employed=",data[6])
    print("monthly_income=",data[7])
    print("yearly_income=",data[8])
    print("loan_amount=",data[9])
    print("loan_amount_term=",data[10])
    print("property_area=",data[11])
conn.close()
| true |
b51878e6813a4bf01d7ca0a45ac275f080bfd1ef | Python | P-Swati/MayLeetCodeChallange | /Day29_CourseSchedule.py | UTF-8 | 4,552 | 3.421875 | 3 | [] | no_license | #approach 1: in dfs, if an edge to a node which is already visited **within cur recursion stack** is encountered, then there is a cycle.
class Solution:
    """Approach 1: DFS cycle detection with a binary visited marker that is
    cleared on backtrack, so 'visited' means 'on the current recursion
    stack'."""
    def detectCycle(self,start,adjList,visited):
        """Return True if a cycle is reachable from `start`."""
        visited[start]=1
        for i in adjList[start]:
            if(visited[i]==0):
                if(self.detectCycle(i,adjList,visited)==True): # dont use return self.detectCycle(...)
                    return True
            else: # neighbour already on the current recursion stack -> cycle
                return True
        visited[start]=0 #vvi
        #here we mark as unvisited because we dont want that this node be seen as visited when
        #we start rec call with other nodes, we only want this as visited within the cur recursion stack
        return False
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """All courses can be finished iff the prerequisite graph is acyclic."""
        # Edge prereq -> dependent: [a, b] means b must come before a.
        adjList=collections.defaultdict(list)
        for i in prerequisites:
            adjList[i[1]].append(i[0])
        # print(adjList)
        # Sized for the problem's maximum node count.
        visited=[0]*pow(10,5)
        ans=False
        for i in range(numCourses):
            # if(visited[i]==0):
            ans= ans or self.detectCycle(i,adjList,visited)
            if(ans==True):
                break
        return not ans
# approach 2 : coloring algo : this algo makes sure -
# a) the completely processed nodes are not subjected to be processed again
# b) we can track which nodes are currently under process, and only an edge to nodes being currently processed (visited=1) ( and not those nodes
# were processed in the past (visited=2)) marks the presence of cycle.
class Solution:
    """Approach 2: DFS with three colours - 0 unvisited, 1 on the current
    recursion stack, 2 fully processed - so finished nodes are never
    re-explored."""
    def detectCycle(self,start,adjList,visited):
        """Return True if a cycle is reachable from `start`."""
        visited[start]=1
        for i in adjList[start]:
            if(visited[i]==0):
                if(self.detectCycle(i,adjList,visited)==True): # dont use return self.detectCycle(...)
                    return True
            elif(visited[i]==1): # neighbour on the current recursion stack -> cycle
                return True
        visited[start]=2 #vvi
        #here we mark as 2 because we dont want that this node be seen as visited when
        #we start rec call with other nodes, we only want this as visited within the cur recursion stack
        # marking as 2 instead as 0 also makes sure that dfs wont be again started at this node
        # as well as this node is not considered as part of currently processing (rec stack) and is previously processed node
        return False
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """All courses can be finished iff the prerequisite graph is acyclic."""
        # Edge prereq -> dependent: [a, b] means b must come before a.
        adjList=collections.defaultdict(list)
        for i in prerequisites:
            adjList[i[1]].append(i[0])
        # print(adjList)
        # Sized for the problem's maximum node count.
        visited=[0]*pow(10,5)
        ans=False
        for i in range(numCourses):
            # print(visited)
            if(visited[i]==0): #only unprocessed nodes will be considered for dfs
                ans= ans or self.detectCycle(i,adjList,visited)
            if(ans==True):
                break
        return not ans
#approach 3: topological sort (kahn's algorithm)
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Approach 3: Kahn's algorithm (BFS topological sort).
        All courses can be finished iff every node is eventually dequeued,
        i.e. the prerequisite graph has no cycle.
        Fix: removed leftover debug prints that dumped the whole graph on
        every call; tidied the in-degree accumulation with dict.get.
        """
        # Build adjacency list (prereq -> dependents) and in-degree counts.
        indegree = {}
        adjList = collections.defaultdict(list)
        for prereq in prerequisites:
            indegree[prereq[0]] = indegree.get(prereq[0], 0) + 1
            adjList[prereq[1]].append(prereq[0])
        # BFS: topological sort.  Seed with courses having no prerequisites.
        queue = deque()
        for i in range(numCourses):
            if i not in indegree:
                queue.append(i)
        # Repeatedly take a ready course and release its dependents.
        visitedCount = 0
        while queue:
            cur = queue.popleft()
            visitedCount += 1
            for nxt in adjList[cur]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)
        # All nodes visited <=> no cycle.
        return visitedCount == numCourses
| true |
3ee2ad67fabef9374e124b210b5a36a785ec69f3 | Python | youngce/FightTheLandlordBot | /handleImg.py | UTF-8 | 878 | 2.59375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import cv2
# Crop a fixed region of interest from a screenshot and OCR it with
# Tesseract.  (Dead commented-out experimentation code removed.)
img = cv2.imread("./test.png")
# ROI as (x, y, width, height); originally picked interactively with
# cv2.selectROI and then hard-coded.
r = [216, 27, 259, 34]
imCrop = img[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
# Display cropped image
cv2.imshow("round.png", imCrop)
import pytesseract
from PIL import Image
# Convert the OpenCV ndarray to a PIL image for pytesseract.
roundImg = Image.fromarray(imCrop)
res = pytesseract.image_to_string(roundImg, lang="eng")
print("res: "+res)
cv2.waitKey(0)
# Bug fix: the OpenCV API is destroyAllWindows (plural); destroyAllWindow
# raised AttributeError after the key press.
cv2.destroyAllWindows()
| true |
824da89fe96748c616ac895a44a462cc5561e0fe | Python | reesezxf/pickleFilter | /pickleFilter.py | UTF-8 | 1,451 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
# coding:utf-8
# author 9ian1i
# created at 2017.03.24
# a demo for filter unsafe callable object
from pickle import Unpickler as Unpkler
from pickle import *
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Edit this whitelist to the callable objects you want to allow through.
allow_list = [str, int, float, bytes, unicode]
class FilterException(Exception):
    """Raised when unpickling encounters a callable that is not whitelisted."""
    def __init__(self, value):
        message = 'the callable object {value} is not allowed'.format(value=str(value))
        super(FilterException, self).__init__(message)
def _hook_call(func):
    """Decorator: intercept and check the pending callable before invoking
    the wrapped Unpickler dispatch entry.
    At REDUCE time, args[0] is the Unpickler instance and the callable about
    to be applied sits at stack[-2]."""
    def wrapper(*args, **kwargs):
        if args[0].stack[-2] not in allow_list:
            # Raise a dedicated error here; replace with whatever handling you need.
            raise FilterException(args[0].stack[-2])
        return func(*args, **kwargs)
    return wrapper
# Re-implemented the two deserialization entry points (load / loads) below.
def load(file):
    """Filtered replacement for pickle.load (Python 2): rejects any REDUCE
    of a callable that is not in ``allow_list``.
    """
    unpkler = Unpkler(file)
    # Hook the REDUCE opcode handler so callables are checked against allow_list.
    unpkler.dispatch[REDUCE] = _hook_call(unpkler.dispatch[REDUCE])
    # Security bug fix: the original returned `Unpkler(file).load()`, a brand
    # new un-hooked Unpickler, which bypassed the filter entirely.  Use the
    # instance whose dispatch table was patched.
    return unpkler.load()
def loads(str):
    """Filtered replacement for pickle.loads (Python 2).
    NOTE: the parameter ``str`` and local ``file`` shadow builtins; kept for
    drop-in API compatibility with the pickle module.
    """
    file = StringIO(str)
    unpkler = Unpkler(file)
    # Hook the REDUCE opcode handler so callables are checked against allow_list.
    unpkler.dispatch[REDUCE] = _hook_call(unpkler.dispatch[REDUCE])
    return unpkler.load()
def _filter_test():
    """Smoke test: this payload REDUCEs __builtin__.eval (running a shell
    command) and must be rejected by the filter with FilterException."""
    test_str = 'c__builtin__\neval\np0\n(S"os.system(\'net\')"\np1\ntp2\nRp3\n.'
    loads(test_str)
if __name__ == '__main__':
_filter_test()
| true |
bf4a14e7a15202dd98b0e7d072c07af43c19c3ed | Python | dannysvof/SUAEx | /select_aspects/select.py | UTF-8 | 2,386 | 2.890625 | 3 | [] | no_license | import codecs
def format_lines(attib_file):
    """Parse a similarity file laid out in 4-line records.
    Line 0 of each record holds space-separated words; line 3 holds their
    space-separated weights (lines 1-2 are ignored).
    Returns:
        tuple: (list of word lists, list of weight-string lists,
        list of per-record weight totals as floats).
    """
    letter_rows = []
    weight_rows = []
    totals = []
    with open(attib_file, 'r') as handle:
        for index, line in enumerate(handle.readlines()):
            position = index % 4
            if position == 0:
                letter_rows.append(line.strip().split(' '))
            elif position == 3:
                weights = line.strip().split(' ')
                weight_rows.append(weights)
                # Weights stay strings in the returned rows; only the total
                # is converted to float.
                totals.append(sum(float(w) for w in weights))
    return (letter_rows, weight_rows, totals)
def sort_list(lista):
    """Return the indices of *lista* ordered by element value, largest first."""
    ranked = sorted(enumerate(lista), key=lambda pair: pair[1], reverse=True)
    return [index for index, _ in ranked]
# For each test sentence, pick the single highest-weighted word from the
# similarity file matching its gold aspect label, and collect the words per
# aspect category.
f_attrib_weights_rf1 = '../word_simils/simils_rest/staff.txt'
f_attrib_weights_rf2 = '../word_simils/simils_rest/ambience.txt'
f_attrib_weights_rf3 = '../word_simils/simils_rest/food.txt'
(ar_letras_rf1, ar_pesos_rf1, totales_staff) = format_lines(f_attrib_weights_rf1)
(ar_letras_rf2, ar_pesos_rf2, totales_ambience) = format_lines(f_attrib_weights_rf2)
(ar_letras_rf3, ar_pesos_rf3, totales_food) = format_lines(f_attrib_weights_rf3)
#suaex_labels = codecs.open('test_labels_abae.txt','w')
suaex_labels = []
with open('../category_atribution/test_labels_abae.txt','r') as f:
    suaex_labels = f.readlines()
words_cat1 = set()
words_cat2 = set()
words_cat3 = set()
# NOTE(review): the word lists come only from the staff file, while the
# weights come from each aspect file; also, sort_list receives the STRING
# weights from format_lines, so ranking is lexicographic - verify intended.
for letras, valores1, valores2, valores3, label in zip(ar_letras_rf1, ar_pesos_rf1, ar_pesos_rf2, ar_pesos_rf3, suaex_labels):
    label = label.strip()
    if label == "Staff":#usar los valores1
        ordered_indexs = sort_list(valores1)
        # Keep only the top-ranked word for this sentence.
        selected_words = [letras[ordered_indexs[0]]]
        words_cat1 = words_cat1.union(set(selected_words))
    elif label == "Ambience":#usar los valores2
        ordered_indexs = sort_list(valores2)
        selected_words = [letras[ordered_indexs[0]]]
        words_cat2 = words_cat2.union(set(selected_words))
    elif label == "Food":#usar los valores3
        ordered_indexs = sort_list(valores3)
        selected_words = [letras[ordered_indexs[0]]]
        words_cat3 = words_cat3.union(set(selected_words))
    else:
        # Unexpected label marker (debug output).
        print("aq")
# Report up to 50 selected words per aspect.
print("group 1 - Staff")
print(list(words_cat1)[:50])
print("group 2 - Ambience")
print(list(words_cat2)[:50])
print("group 3 - Food")
print(list(words_cat3)[:50])
| true |
7874c29a0cc568942ffa79d7e9e1ff68b93c441d | Python | rcamilo1526/Data_Science_introduction | /Basico/randomgame.py | UTF-8 | 605 | 3.765625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 11:26:45 2019
@author: Estudiantes
"""
from random import randrange
# Number-guessing game with Spanish prompts: the player has 11 tries to
# guess a random number in [0, 100).
r=randrange(100)
# NOTE(review): printing the secret looks like leftover debugging - it
# spoils the game; confirm before removing.
print(r)
tries=0
for i in range(11):
    print('Intento {}:'.format(tries+1))
    a=int(input('Ingrese el numero: '))
    if a==r:
        print('Adivino el numero {} en {} intentos'.format(a,tries+1))
        break
    elif a>r:
        # Guess too high: the target is smaller.
        print('el numero a adivinar es menor\n')
        tries +=1
    elif a<r:
        # Guess too low: the target is bigger.
        print('el numero a adivinar es mayor\n')
        tries += 1
else:
    # for/else: runs only when the loop finished without a break (no win).
    print('\nNo adivino el numero, el numero era {}'.format(r))
| true |
c62391b29ed3cc1b496498ff1ee8584754f2bea1 | Python | blueskywalker/junkyard | /python/greedy/powerset.py | UTF-8 | 322 | 3.3125 | 3 | [] | no_license | def powerset(data):
    # Base case: the only subset of the empty sequence is the empty set.
    if len(data) == 0:
        return [[]]
    pivot = data[0]
    # Subsets of the tail; each subset either excludes or includes the pivot.
    results = powerset(data[1:])
    new_results= results.copy()
    for item in results:
        new_results.append([pivot] + item)
    return new_results
data = ['a', 'b', 'c']
print(list(filter(lambda x: len(x) == 2,powerset(data))))
| true |
1b4b66f45cf8ba43a52cef6cd894889dcc117a23 | Python | nikhilsampangi/CSES_Problem_Set | /1_IntroductoryProblems/10_TrailingZeros.py | UTF-8 | 147 | 3.59375 | 4 | [] | no_license | if __name__ == "__main__":
    # Count trailing zeros of n! with Legendre's formula: each factor 5
    # pairs with a 2 to make one zero, and factors of 2 are always plentiful.
    n = int(input())
    p = 5
    sol = 0
    while n >= p:
        # floor(n / 5^k) counts the multiples of 5^k up to n.
        sol += n//p
        p = p*5
    print(sol)
| true |
3f0eff6732ef4d2006f33b373f3b1dcd1a81a354 | Python | metadatacenter/cedar-util | /scripts/python/cedar/utils/storer.py | UTF-8 | 1,828 | 2.84375 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
utils.storer
~~~~~~~~~~~~~~
This module provides utility functions that are used to create a CEDAR
resource (template/element/instance) via a POST request.
"""
import requests
import json
from urllib.parse import quote
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def store_resource(api_key, request_url, resource):
    """POST *resource* to *request_url* and return the parsed JSON body.

    Raises requests.HTTPError (via raise_for_status) for 4xx/5xx responses.
    """
    response = send_post_request(api_key, request_url, resource)
    # Bug fix: the original only parsed the body when status_code == 200 and
    # called raise_for_status() otherwise; raise_for_status() does NOT raise
    # for success codes such as 201 Created, so those silently returned None.
    # Treat every successful status as success and parse the body.
    response.raise_for_status()
    return json.loads(response.text)
def store_template(server_address, api_key, template, folder_id):
    """Create *template* under *folder_id* and return the stored document."""
    request_url = server_address + "/templates?folder_id=" + escape(folder_id)
    return store_resource(api_key, request_url, template)
def store_element(server_address, api_key, element, folder_id):
    """Create *element* under *folder_id* and return the stored document."""
    request_url = server_address + "/template-elements?folder_id=" + escape(folder_id)
    return store_resource(api_key, request_url, element)
def store_field(server_address, api_key, field, folder_id):
    """Create *field* under *folder_id* and return the stored document."""
    request_url = server_address + "/template-fields?folder_id=" + escape(folder_id)
    return store_resource(api_key, request_url, field)
def store_instance(server_address, api_key, instance, folder_id):
    """Create *instance* under *folder_id* and return the stored document."""
    request_url = server_address + "/template-instances?folder_id=" + escape(folder_id)
    return store_resource(api_key, request_url, instance)
def send_post_request(api_key, request_url, resource):
    """POST *resource* as JSON with the API key in the Authorization header.

    NOTE(review): verify=False disables TLS certificate verification (the
    module also suppresses InsecureRequestWarning) -- confirm this is
    intentional for the target deployment.
    """
    headers = {
        'Content-Type': "application/json",
        'Authorization': api_key
    }
    response = requests.request("POST", request_url, json=resource, headers=headers, verify=False)
    return response
def escape(s):
    """Percent-encode *s* (stringified) with no exempt characters, so values
    such as folder IDs are safe inside a URL query string."""
    text = str(s)
    return quote(text, safe='')
0e165278c3c4335e7b78d97485e00880917d6066 | Python | Fr4nc3/code-hints | /python/Scientific/sample2/projectile.py | UTF-8 | 1,434 | 3.28125 | 3 | [] | no_license | # *************************************
# @Fr4nc3
# file: projectile.py
# implement methods
# g(h)
# s_next ( s_current, v_current, delta_t)
# v_next(s_next, v_current, delta_t)
# s_sim(t, v_init, s_init, delta_t)
# s_standard(t,v_init)
# *************************************
GRAVITATIONAL_CONSTANT = 6.6742e-11  # gravitational constant in N*(m/kg)^2
ME = 5.9736e24  # mass of the earth (kg)
RE = 6.371e6  # radius of the earth (m)

def g(h):
    """Gravitational acceleration (m/s^2) at altitude *h* (m) above the surface."""
    gm = GRAVITATIONAL_CONSTANT * ME
    distance = RE + h
    if distance == 0:
        # Only reachable when h == -RE; fall back to surface gravity.
        return gm / RE ** 2
    return gm / distance ** 2
def s_next(s_current, v_current, delta_t):
    """Euler position step: s(t + dt) = s(t) + v(t) * dt."""
    return s_current + delta_t * v_current
def v_next(s_next, v_current, delta_t):
    """Euler velocity step: v(t + dt) = v(t) - g(s(t + dt)) * dt.

    NOTE(review): the parameter *s_next* shadows the module-level function of
    the same name; it is the already-updated position computed by s_next().
    """
    return v_current - delta_t * g(s_next)
def s_sim(t, v_0, s_0, delta_t):
    ''' Implements simulated position.

    NOTE(review): this takes a single Euler step (s_next, v_next with
    delta_t) and then applies the closed-form ballistic formula for the
    whole interval t with the stepped values -- it does not iterate steps.
    Presumably a deliberate simplification; confirm against the intended
    simulation scheme.
    note: this was the only way that s_sim can uses methods g, v_next,
    and s_next at the same time '''
    s_n = s_next(s_0, v_0, delta_t) # get the s_next
    v_n = v_next(s_n, v_0, delta_t) # uses s_next to calculate v_next
    return -0.5 * g(s_n) * t ** 2 + v_n * t
def s_standard(t, v_0):
    """Closed-form position assuming constant surface gravity g(0) (~9.8 m/s^2)."""
    return v_0 * t - 0.5 * g(0) * t ** 2
| true |
4a18ae9b71b01cf91408fc8b3e66ded0fb99278c | Python | ashcrow/trello-card-maker | /trello-card-maker | UTF-8 | 4,691 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2016 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Simple tool to make trello cards via yaml.
"""
import argparse
import logging
import os
import yaml
from trello import TrelloClient
def setup_logging(level):
    """
    Sets up the logger.

    :param level: The level to use. This can be str or int.
    :type level: mixed
    :returns: The configured logger.
    :rtype: logging.Logger
    """
    # String levels are case-insensitive; logging expects them upper-cased.
    if isinstance(level, str):
        level = level.upper()
    logger = logging.getLogger('trello')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
    logger.addHandler(stream_handler)
    logger.setLevel(level)
    return logger
def main():
    """
    Main entrypoint: parse arguments, load the config and card YAML files,
    then create the described card on the matching Trello board/list.

    Exits 0 after creating the card; exits 2 when the board or list cannot
    be found (note: when the *board* is not found, no error is logged --
    the function falls through to the final SystemExit(2) silently).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--config',
        default=os.path.realpath(os.path.expanduser('~/.config/tcm.yaml')))
    parser.add_argument('-l', '--log-level', default='info')
    parser.add_argument('new_card', nargs=1)
    args = parser.parse_args()
    logger = setup_logging(args.log_level)
    logger.debug('Command line arguments: {}'.format(args))
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input (and requires Loader= in PyYAML >= 6); consider
    # yaml.safe_load for both files below.
    try:
        with open(args.config) as c:
            C = yaml.load(c.read())
    except Exception as error:
        parser.error('Could not parse configuration file: {}: {}'.format(
            type(error), error))
    try:
        with open(args.new_card[0], 'r') as card_yaml:
            d = yaml.load(card_yaml.read())
    except Exception as error:
        parser.error('Could not parse new card file: {}: {}'.format(
            type(error), error))
    client = TrelloClient(
        api_key=C['api_key'],
        api_secret=C['api_secret'],
        token=C['token']
    )
    # We have to list board to find the right one
    logger.debug('Listing boards')
    for board in client.list_boards():
        if board.name == d['board']:
            logger.info('Found board "{}"'.format(board.name))
            # We have to list lists to find the right one
            logger.debug('Listing lists')
            for tlist in board.all_lists():
                if tlist.name == d['list']:
                    logger.info('Found list "{}"'.format(tlist.name))
                    card = tlist.add_card(d['title'], d['description'])
                    logger.info('Created card "{}"'.format(card.name))
                    # Optional checklists: {checklist name: [task, ...]}.
                    for checklist, tasks in d.get('checklists', {}).items():
                        card.add_checklist(checklist, tasks)
                        logger.info('Added checklist "{}" to card "{}"'.format(
                            checklist, card.name))
                    if d.get('labels'):
                        labels = {}
                        # We have to list labels on the board to find the
                        # right one
                        logger.debug('Listing labels for board "{}"'.format(
                            board.name))
                        for label in board.get_labels():
                            labels[label.name] = label
                        # KeyError here would mean the card YAML named a
                        # label that does not exist on the board.
                        for label_name in d['labels']:
                            card.add_label(labels[label_name])
                            logger.info('Added label "{}" for card "{}"'.format(
                                label_name, card.name))
                    logger.info('Card link: {}'.format(card.short_url))
                    # Success: terminate the process immediately.
                    raise SystemExit(0)
            # Board matched but the requested list was not on it.
            logger.error(
                'Unable to locate list "{}" on board "{}"'.format(
                    d['list'], d['board']))
    raise SystemExit(2)
if __name__ == '__main__':
    main()
| true |
553c64c46410c442b340060e7d2ee70953c6d901 | Python | PrimeCodingSolutions/otree-core | /otree/extensions.py | UTF-8 | 2,327 | 2.578125 | 3 | [
"MIT"
] | permissive | from importlib import import_module
from django.conf import settings
import importlib.util
import sys
"""
(THIS IS CURRENTLY PRIVATE API, MAY CHANGE WITHOUT NOTICE)
To create an oTree extension, add a package called ``otree_extensions``
to your app, and add the app name in settings.py to EXTENSION_APPS.
It can contain any of the following submodules:
urls.py
-------
should contain a variable ``urlpatterns``, which will be appended to
oTree's built-in URL patterns.
routing.py
----------
Should contain a variable ``websocket_routes``,
with a list of channel routes, as described in the Django channels documentation.
admin.py
--------
This module allows you to define custom data exports that will be included
in oTree's data export page. Define a variable ``data_export_views``,
which is a list of Django class-based views (see Django docs).
Each view should define a ``get()`` method with the following signature::
def get(self, request, *args, **kwargs):
This method should return an HTTP response with
the exported data (e.g. CSV, XLSX, JSON, etc), using the appropriate MIME type
on the HTTP response.
Each view must also have the following attributes:
- ``url_pattern``: the URL pattern string, e.g. '^mychat_export/$'
- ``url_name``: see Django docs on reverse resolution of URLs, e.g. 'mychat_export'
- ``display_name``: The text of the download hyperlink on the data export page
(e.g. "Chat Data Export")
You don't need to worry about login_required and AUTH_LEVEL;
oTree will handle this automatically.
"""
from logging import getLogger
logger = getLogger(__name__)
def get_extensions_modules(submodule_name):
    """Import ``<app>.otree_extensions.<submodule_name>`` for every app listed
    in ``settings.EXTENSION_APPS`` and return the imported modules."""
    found = []
    for app_name in getattr(settings, 'EXTENSION_APPS', []):
        package_dotted = f'{app_name}.otree_extensions'
        submodule_dotted = f'{package_dotted}.{submodule_name}'
        # find_spec on a dotted path raises when the parent package is
        # absent, so probe the base package before probing the submodule.
        if importlib.util.find_spec(package_dotted) and importlib.util.find_spec(submodule_dotted):
            found.append(import_module(submodule_dotted))
    return found
def get_extensions_data_export_views():
    """Collect the ``data_export_views`` lists declared by every extension
    app's ``otree_extensions.admin`` module."""
    views = []
    for module in get_extensions_modules('admin'):
        views.extend(getattr(module, 'data_export_views', []))
    return views
| true |
23febe2d60eec809186243cf9481043fb6521217 | Python | AnXnA05/python_practic | /0817_triangle.py | UTF-8 | 290 | 3.6875 | 4 | [] | no_license | #coding=utf-8
a = float(input('a = '))
b = float(input('b = '))
c = float(input('c = '))
# Triangle inequality: each pair of sides must exceed the third.
if a+b>c and a+c>b and b+c>a:
    print('周长为 %.2f' % (a+b+c))
    p=(a+b+c)/2
    # Heron's formula: area = sqrt(p*(p-a)*(p-b)*(p-c)).
    # Bug fix: the original applied **0.5 to (p-c) only, because ** binds
    # tighter than *; the whole product must be under the square root.
    print('面积为 %.2f' % ((p*(p-a)*(p-b)*(p-c))**0.5))
else:
    print('您输入的数据无法组成三角形')
94a540d33c1fac6ccb19a78fcf621519eadd8f31 | Python | thongdong7/tb-api | /tb_api/utils/json_utils.py | UTF-8 | 352 | 2.53125 | 3 | [] | no_license | import json
from six import string_types
from tb_ioc.class_utils import get_class
class JsonDumper(object):
    """Serialize objects to pretty-printed JSON.

    *cls* may be a ``json.JSONEncoder`` subclass, its dotted import path
    (resolved via ``get_class``), or None for the default encoder.
    """
    def __init__(self, cls=None):
        if cls and isinstance(cls, string_types):
            cls = get_class(cls)
        # Bug fix: the original only assigned self.cls inside `if cls:`, so
        # JsonDumper() (no encoder) raised AttributeError in dumps().
        # Always assign; json.dumps accepts cls=None.
        self.cls = cls

    def dumps(self, data):
        """Return *data* serialized as an indented (2-space) JSON string."""
        return json.dumps(data, cls=self.cls, indent=2)
| true |
579dbfcf7e77d69070fe84bd750defb308832fbd | Python | lukeburpee/archived-legalease-code | /legalease/pst-master/ocr/spark-newman-human-receipt-detection/NeuroTools/analysis.py | UTF-8 | 17,710 | 2.921875 | 3 | [] | no_license | """
NeuroTools.analysis
===================
A collection of analysis functions that may be used by NeuroTools.signals or other packages.
.. currentmodule:: NeuroTools.analysis
Classes
-------
.. autosummary::
TuningCurve
Functions
---------
.. autosummary::
:nosignatures:
ccf
crosscorrelate
make_kernel
simple_frequency_spectrum
"""
import numpy as np
from NeuroTools import check_dependency
HAVE_MATPLOTLIB = check_dependency('matplotlib')
if HAVE_MATPLOTLIB:
import matplotlib
matplotlib.use('Agg')
else:
MATPLOTLIB_ERROR = "The matplotlib package was not detected"
HAVE_PYLAB = check_dependency('pylab')
if HAVE_PYLAB:
import pylab
else:
PYLAB_ERROR = "The pylab package was not detected"
def ccf(x, y, axis=None):
    """Fast cross correlation function based on fft.

    Computes the cross-correlation function of two series.
    Note that the computations are performed on anomalies (deviations from
    average).
    Returns the values of the cross-correlation at different lags, rotated
    so the zero-lag coefficient sits at the centre of the output.

    Parameters
    ----------
    x, y : 1D MaskedArrays
        The two input arrays.
    axis : integer, optional
        Axis along which to compute (0 for rows, 1 for cols).
        If `None`, the array is flattened first.

    Examples
    --------
    >>> z = arange(5)
    >>> ccf(z,z)
    array([ 3.90798505e-16, -4.00000000e-01, -4.00000000e-01,
        -1.00000000e-01, 4.00000000e-01, 1.00000000e+00,
        4.00000000e-01, -1.00000000e-01, -4.00000000e-01,
        -4.00000000e-01])
    """
    assert x.ndim == y.ndim, "Inconsistent shape !"
    # assert(x.shape == y.shape, "Inconsistent shape !")
    if axis is None:
        if x.ndim > 1:
            x = x.ravel()
            y = y.ravel()
        # Zero-pad to the combined length so the circular FFT correlation
        # yields the linear correlation.
        npad = x.size + y.size
        xanom = (x - x.mean(axis=None))
        yanom = (y - y.mean(axis=None))
        Fx = np.fft.fft(xanom, npad)
        Fy = np.fft.fft(yanom, npad)
        iFxy = np.fft.ifft(Fx.conj() * Fy).real
        varxy = np.sqrt(np.inner(xanom, xanom) * np.inner(yanom, yanom))
    else:
        npad = x.shape[axis] + y.shape[axis]
        if axis == 1:
            if x.shape[0] != y.shape[0]:
                raise ValueError("Arrays should have the same length!")
            xanom = (x - x.mean(axis=1)[:, None])
            yanom = (y - y.mean(axis=1)[:, None])
            varxy = np.sqrt((xanom * xanom).sum(1) *
                            (yanom * yanom).sum(1))[:, None]
        else:
            if x.shape[1] != y.shape[1]:
                raise ValueError("Arrays should have the same width!")
            xanom = (x - x.mean(axis=0))
            yanom = (y - y.mean(axis=0))
            varxy = np.sqrt((xanom * xanom).sum(0) * (yanom * yanom).sum(0))
        Fx = np.fft.fft(xanom, npad, axis=axis)
        Fy = np.fft.fft(yanom, npad, axis=axis)
        iFxy = np.fft.ifft(Fx.conj() * Fy, n=npad, axis=axis).real
    # We just turn the lags into correct positions.
    # Bug fix: the original used float division (len(iFxy) / 2) as a slice
    # index, which raises TypeError on Python 3; use integer division.
    half = len(iFxy) // 2
    iFxy = np.concatenate((iFxy[half:len(iFxy)], iFxy[0:half]))
    return iFxy / varxy
from NeuroTools.plotting import get_display, set_labels
HAVE_PYLAB = check_dependency('pylab')
def crosscorrelate(sua1, sua2, lag=None, n_pred=1, predictor=None,
                   display=False, kwargs=None):
    """Cross-correlation between two series of discrete events (e.g. spikes).

    Calculates the cross-correlation between two vectors containing event
    times and collects the relative event timings within ``+/- lag``.

    Parameters
    ----------
    sua1, sua2 : 1D row or column ndarray or SpikeTrain
        Event times. If sua2 == sua1, the result is the autocorrelogram.
    lag : float, optional
        Maximum relative event timing considered (+/- lag). Defaults to a
        multiple of the mean inter-event interval of the longer input.
    n_pred : int
        Number of surrogate compilations for the shuffle predictor.
    predictor : {None, 'shuffle'}
        Type of bootstrap predictor. 'shuffle' permutes the inter-event
        intervals of the longer input array, repeated n_pred times.
    display : bool
        If True, plot the correlogram (requires pylab); otherwise return
        the results.
    kwargs : dict, optional
        Extra arguments forwarded to np.histogram (plotting only).

    Returns
    -------
    differences : ndarray
        Accumulated differences of events in sua1 minus events in sua2;
        positive values relate to events of sua2 that lead events of sua1.
    pred : ndarray
        Accumulated differences based on the predictor
        (length n_pred * len(differences)).
    norm : float
        sqrt(len(sua1) * len(sua2)); differences/norm and pred/norm
        correspond to the linear correlation coefficient.
    """
    # Fixes relative to the original: xrange -> range (Python 3), string
    # identity comparisons (`is 'shuffle'`) -> equality, mutable default
    # kwargs={} -> None sentinel, missing space in the TypeError message.
    assert predictor == 'shuffle' or predictor is None, "predictor must be \
either None or 'shuffle'. Other predictors are not yet implemented."
    if kwargs is None:
        kwargs = {}

    # Accept SpikeTrain-like inputs (anything with .spike_times) or
    # 1-D / single-row / single-column arrays.
    sua = []
    for x in (sua1, sua2):
        if hasattr(x, 'spike_times'):
            sua.append(x.spike_times)
        elif x.ndim == 1:
            sua.append(x)
        elif x.ndim == 2 and (x.shape[0] == 1 or x.shape[1] == 1):
            sua.append(x.ravel())
        else:
            raise TypeError("sua1 and sua2 must be either instances of the "
                            "SpikeTrain class or column/row vectors")
    sua1, sua2 = sua

    # Iterate over the shorter train; remember whether the inputs were
    # swapped so the sign of the differences can be restored at the end.
    if sua1.size < sua2.size:
        if lag is None:
            lag = np.ceil(10 * np.mean(np.diff(sua1)))
        reverse = False
    else:
        if lag is None:
            lag = np.ceil(20 * np.mean(np.diff(sua2)))
        sua1, sua2 = sua2, sua1
        reverse = True

    # Construct the shuffle predictor: n_pred surrogate trains sharing
    # sua2's inter-event-interval distribution.
    if predictor == 'shuffle':
        isi = np.diff(sua2)
        sua2_ = np.array([])
        for ni in range(1, n_pred + 1):
            idx = np.random.permutation(isi.size - 1)
            sua2_ = np.append(sua2_, np.add(np.insert(
                (np.cumsum(isi[idx])), 0, 0), sua2.min() + (
                np.random.exponential(isi.mean()))))

    # Collect relative spike-time differences within +/- lag.
    differences = np.array([])
    pred = np.array([])
    for k in range(sua1.size):
        differences = np.append(differences, sua1[k] - sua2[np.nonzero(
            (sua2 > sua1[k] - lag) & (sua2 < sua1[k] + lag))])
    if predictor == 'shuffle':
        for k in range(sua1.size):
            pred = np.append(pred, sua1[k] - sua2_[np.nonzero(
                (sua2_ > sua1[k] - lag) & (sua2_ < sua1[k] + lag))])

    if reverse:
        differences = -differences
        pred = -pred

    norm = np.sqrt(sua1.size * sua2.size)

    # Plot the results if display=True; otherwise return them.
    if display:
        subplot = get_display(display)
        if not subplot or not HAVE_PYLAB:
            return differences, pred, norm
        # Plot the cross-correlogram (and the predictor, when requested).
        try:
            counts, bin_edges = np.histogram(differences, **kwargs)
            edge_distances = np.diff(bin_edges)
            bin_centers = bin_edges[1:] - edge_distances / 2
            counts = counts / norm
            xlabel = "Time"
            ylabel = "Cross-correlation coefficient"
            subplot.plot(bin_centers, counts, label='cross-correlation', color='b')
            if predictor is None:
                set_labels(subplot, xlabel, ylabel)
                pylab.draw()
            elif predictor == 'shuffle':
                # Predictor histogram, normalised for the n_pred repeats.
                norm_ = norm * n_pred
                counts_, bin_edges_ = np.histogram(pred, **kwargs)
                counts_ = counts_ / norm_
                subplot.plot(bin_edges_[1:], counts_, label='predictor')
                subplot.legend()
                pylab.draw()
        except ValueError:
            print("There are no correlated events within the selected lag"
                  " window of %s" % lag)
    else:
        return differences, pred, norm
def _dict_max(D):
"""For a dict containing numerical values, return the key for the
highest value. If there is more than one item with the same highest
value, return one of them (arbitrary - depends on the order produced
by the iterator).
"""
max_val = max(D.values())
for k in D:
if D[k] == max_val:
return k
def make_kernel(form, sigma, time_stamp_resolution, direction=1):
    """Creates kernel functions for convolution.

    Constructs a numeric linear convolution kernel of basic shape, used for
    data smoothing (linear low-pass filtering) and firing-rate estimation
    from single-trial or trial-averaged spike trains.

    Parameters
    ----------
    form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
        Kernel shape (boxcar, triangle, gaussian, epanechnikov,
        exponential, alpha function). 'EXP' and 'ALP' are asymmetric and
        honour `direction`.
    sigma : float
        Standard deviation (in ms) of the distribution associated with the
        kernel shape; defines the time resolution of the kernel estimate.
    time_stamp_resolution : float
        Temporal resolution of input and output in ms.
    direction : {-1, 1}
        Orientation of asymmetric kernels; 1 (default) means the kernel
        represents the impulse response function of the linear filter.

    Returns
    -------
    kernel : np.ndarray
        1-D kernel with an odd number of bins, normalised so that
        sum(kernel) == 1 (symmetric kernels are centred on the middle bin).
    norm : float
        Scale factor turning convolved 0/1 spike data into rate in
        spikes/s: rate = norm * scipy.signal.lfilter(kernel, 1, spike_data)
    m_idx : int
        Index of the numerically determined median (centre of gravity) of
        the kernel function.
    """
    assert form.upper() in ('BOX','TRI','GAU','EPA','EXP','ALP'), "form must \
be one of either 'BOX','TRI','GAU','EPA','EXP' or 'ALP'!"
    assert direction in (1, -1), "direction must be either 1 or -1"
    SI_sigma = sigma / 1000.  # convert to SI units (ms -> s)
    SI_dt = time_stamp_resolution / 1000.  # convert to SI units (ms -> s)
    norm = 1. / SI_dt
    # Bug fix throughout: np.floor returns a float, which is not a valid
    # array length / arange bound in Python 3 / modern NumPy -- cast to int.
    if form.upper() == 'BOX':
        w = 2.0 * SI_sigma * np.sqrt(3)
        width = 2 * int(np.floor(w / 2.0 / SI_dt)) + 1  # always odd number of bins
        height = 1. / width
        kernel = np.ones((1, width)) * height  # area = 1
    elif form.upper() == 'TRI':
        w = 2 * SI_sigma * np.sqrt(6)
        halfwidth = int(np.floor(w / 2.0 / SI_dt))
        trileft = np.arange(1, halfwidth + 2)
        triright = np.arange(halfwidth, 0, -1)  # odd number of bins
        triangle = np.append(trileft, triright)
        kernel = triangle / triangle.sum()  # area = 1
    elif form.upper() == 'EPA':
        w = 2.0 * SI_sigma * np.sqrt(5)
        halfwidth = int(np.floor(w / 2.0 / SI_dt))
        base = np.arange(-halfwidth, halfwidth + 1)
        parabula = base ** 2
        epanech = parabula.max() - parabula  # inverse parabula
        kernel = epanech / epanech.sum()  # area = 1
    elif form.upper() == 'GAU':
        w = 2.0 * SI_sigma * 2.7  # > 99% of distribution weight
        halfwidth = int(np.floor(w / 2.0 / SI_dt))  # always odd total width
        base = np.arange(-halfwidth, halfwidth + 1) * SI_dt
        g = np.exp(-(base ** 2) / 2.0 / SI_sigma ** 2) / SI_sigma / np.sqrt(2.0 * np.pi)
        kernel = g / g.sum()
    elif form.upper() == 'ALP':
        w = 5.0 * SI_sigma
        n_bins = 2 * int(np.floor(w / SI_dt / 2.0)) + 1
        alpha = np.arange(1, n_bins + 1) * SI_dt
        alpha = (2.0 / SI_sigma ** 2) * alpha * np.exp(-alpha * np.sqrt(2) / SI_sigma)
        kernel = alpha / alpha.sum()  # normalization
        if direction == -1:
            kernel = np.flipud(kernel)
    elif form.upper() == 'EXP':
        w = 5.0 * SI_sigma
        n_bins = 2 * int(np.floor(w / SI_dt / 2.0)) + 1
        expo = np.arange(1, n_bins + 1) * SI_dt
        expo = np.exp(-expo / SI_sigma)
        kernel = expo / expo.sum()
        if direction == -1:
            kernel = np.flipud(kernel)
    kernel = kernel.ravel()
    # Median index: first bin where the cumulative weight reaches 0.5.
    m_idx = np.nonzero(kernel.cumsum() >= 0.5)[0].min()
    return kernel, norm, m_idx
def simple_frequency_spectrum(x):
    """Simple frequency spectrum.

    Very simple calculation of frequency spectrum with no detrending,
    windowing, etc, just the first half (positive frequency components) of
    abs(fft(x)), scaled to sine-component amplitudes.

    Parameters
    ----------
    x : array_like
        The input array, in the time-domain.

    Returns
    -------
    spec : array_like
        The frequency spectrum of `x`.
    """
    spec = np.absolute(np.fft.fft(x))
    # Bug fix: len(x) / 2 is a float on Python 3 and cannot be used as a
    # slice index; use integer division.
    spec = spec[:len(x) // 2]  # take positive frequency components
    spec /= len(x)  # normalize
    spec *= 2.0  # to get amplitudes of sine components, need to multiply by 2
    spec[0] /= 2.0  # except for the dc component
    return spec
class TuningCurve(object):
    """Class to facilitate working with tuning curves.

    Each key maps to the list of values recorded across `n` repeats.
    """

    def __init__(self, D=None):
        """
        If `D` is a dict, it is used to give initial values to the tuning curve.
        """
        self._tuning_curves = {}
        self._counts = {}
        if D is not None:
            for k, v in D.items():
                self._tuning_curves[k] = [v]
                self._counts[k] = 1
            self.n = 1
        else:
            self.n = 0

    def add(self, D):
        """Append one more measurement per key (keys must already exist)."""
        for k, v in D.items():
            self._tuning_curves[k].append(v)
            self._counts[k] += 1
        self.n += 1

    def __getitem__(self, i):
        """Return repeat *i* of the tuning curve as a dict {key: value}."""
        # Bug fix: the original iterated `self._tuning_curves[k].items()`
        # with `k` unbound (NameError at runtime); iterate the dict itself.
        return {k: v[i] for k, v in self._tuning_curves.items()}

    def __repr__(self):
        return "TuningCurve: %s" % self._tuning_curves

    def stats(self):
        """Return the mean tuning curve with stderrs.

        NOTE(review): raises ZeroDivisionError for n < 2 (n-1 in the
        stderr denominator) -- confirm callers always add >= 2 repeats.
        """
        mean = {}
        stderr = {}
        n = self.n
        for k in self._tuning_curves.keys():
            arr = np.array(self._tuning_curves[k])
            mean[k] = arr.mean()
            stderr[k] = arr.std() * n / (n - 1) / np.sqrt(n)
        return mean, stderr

    def max(self):
        """Return the key of the max value and the max value.

        NOTE(review): the compared values are per-key *lists* of repeats,
        so the comparison is lexicographic over lists -- confirm intent.
        """
        k = _dict_max(self._tuning_curves)
        return k, self._tuning_curves[k]
| true |
fccac36a133fe485f4c6b80dfc85d9bb0d75f188 | Python | WustAnt/Python-Algorithm | /Chapter3/3.3/3.3.6/3-6baseConverter.py | UTF-8 | 1,098 | 3.921875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2020/8/1 11:52
# @Author : WuatAnt
# @File : 3-6baseConverter.py
# @Project : Python数据结构与算法分析
from stack import Stack
"""
十进制数转换任意进制数:
decNumber:接受任意非负整数
base:要转换进制数
使用‘除以N’算法,待处理整数大于0,循环不停地进行十进制除以N,并记录余数
对应的N进制数,为余数,第一个余数是最后一位
"""
def baseConverter(decNumber,base):
    """Convert a non-negative decimal integer to a string in *base* (2-16),
    using the 'divide by N' algorithm with a stack to reverse the remainders.

    NOTE(review): returns '' for decNumber == 0 -- confirm whether '0' is
    the expected output for that input.
    """
    digits = '0123456789ABCDEF' # up to base 16: digit value -> character
    remstack = Stack() # stack of remainders; popping reverses their order
    while decNumber>0:
        rem = decNumber % base # remainder = next digit (least significant first)
        remstack.push(rem)
        decNumber = decNumber//base
    newString = ''
    # Pop remainders so the most significant digit comes first.
    while not remstack.isEmpty():
        newString = newString + digits[remstack.pop()]
    return newString
if __name__ == '__main__':
    decNumber = 12
    # Demo: 12 in hexadecimal is 'C'.
    print(decNumber,'-->',baseConverter(decNumber,16))
5ee277f18cbfac9784e9029f3e68f1925cf426b2 | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4079/codes/1668_1396.py | UTF-8 | 160 | 2.953125 | 3 | [] | no_license | conta_restaurante=float(input("valor))
# NOTE(review): the original lines were syntactically broken (undefined
# `gorjeta`, unterminated string, unbalanced parentheses). Reconstructed
# intent: tip (gorjeta) is 10% for bills up to 300 and 6% otherwise; print
# the bill total with the tip applied, rounded to 2 decimals. Confirm the
# intended tip rule against the exercise statement.
if conta_restaurante <= 300:
    gorjeta = conta_restaurante * 0.10
else:
    gorjeta = conta_restaurante * 0.06
print(round(conta_restaurante + gorjeta, 2))
80434fcce27977f74e0c42f2e88aa9b7c4e4d9f3 | Python | jain-abhinav/news_clustering_nlp | /news_analysis.py | UTF-8 | 4,348 | 3 | 3 | [] | no_license | #text clustering LDA
#text processing
#visualizations
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import lda
import logging
logging.getLogger("lda").setLevel(logging.WARNING)
from sklearn.manifold import TSNE
import numpy as np
import bokeh.plotting as bp
from bokeh.io import output_notebook
from bokeh.resources import INLINE
from bokeh.models import HoverTool, BoxSelectTool
from bokeh.plotting import figure, show, output_file
# Load the articles and drop duplicate / empty / very short descriptions.
news = pd.read_csv("news.csv")
#print(news.head())
news = news.drop_duplicates("description")
news = news[~news["description"].isnull()]
news = news[~news["description"].apply(lambda x: len(x.split(" ")) < 10)] #Dropping articles with description less than 10 words
news.reset_index(inplace=True, drop=True) #Reset index
print(news.shape)
#PLotting distribution of news description lengths
plt.xlabel("Length")
plt.ylabel("Number of News Descriptions")
plt.title("Distribution of Description Lengths")
plt.show(news.description.map(len).hist(figsize = (15, 5), bins = 100))
#Removing stop words. Tokenizing. Calculating each token count, retaining those with count >= 5. Calculating TfIDF scores
count_vect = CountVectorizer(min_df=5, analyzer='word', stop_words = "english", ngram_range = (1, 2))
news_token_matrix = count_vect.fit_transform(news["description"])
tfidf_transformer = TfidfTransformer()
# NOTE(review): news_tfidf_matrix appears unused later in this script (the
# LDA fit below uses news_token_matrix) -- confirm before removing.
news_tfidf_matrix = tfidf_transformer.fit_transform(news_token_matrix)
#Plotting distribution of TfIdf scores
# NOTE(review): get_feature_names was removed in newer scikit-learn in
# favour of get_feature_names_out -- confirm the pinned sklearn version.
tfidf = dict(zip(count_vect.get_feature_names(), tfidf_transformer.idf_))
tfidf = pd.DataFrame(columns=['tfidf']).from_dict(dict(tfidf), orient='index')
tfidf.columns = ['tfidf']
plt.xlabel("TfIDF Scores")
plt.ylabel("Number of News Descriptions")
plt.title("Distribution of TfIDF Scores")
plt.show(tfidf.tfidf.hist(bins=25, figsize=(15,7)))
#Creating word cloud
def plot_word_cloud(terms, category):
    """Render a word cloud from the index of *terms* (a DataFrame whose index
    holds the words); *category* labels the figure title ('Lowest'/'Highest').
    """
    text = terms.index
    text = ' '.join(list(text))
    # lower max_font_size
    wordcloud = WordCloud(max_font_size=40).generate(text)
    plt.figure(figsize=(25, 25))
    plt.imshow(wordcloud, interpolation="bilinear")
    plt.axis("off")
    plt.title("Words with {} TfIdf Scores.".format(category))
    plt.show()
#Lowest TfIDF scores
plot_word_cloud(tfidf.sort_values(by=['tfidf'], ascending=True).head(40), "Lowest")
#Highest TfIDF scores
plot_word_cloud(tfidf.sort_values(by=['tfidf'], ascending=False).head(40), "Highest")
# Fit an LDA topic model on the raw token counts.
n_topics = 10
n_iter = 2000
lda_model = lda.LDA(n_topics=n_topics, n_iter=n_iter)
X_topics = lda_model.fit_transform(news_token_matrix)
# Print the top 20 words of each topic.
n_top_words = 20
topic_summaries = []
topic_word = lda_model.topic_word_ # get the topic words
vocab = count_vect.get_feature_names()
for i, topic_dist in enumerate(topic_word):
    topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]
    topic_summaries.append(' '.join(topic_words))
    print('Topic {}: {}'.format(i, ' '.join(topic_words)))
#dimensionality reduction
tsne_model = TSNE(n_components=2, verbose=1, random_state=0)
tsne_lda = tsne_model.fit_transform(X_topics)
# Dominant topic per document (index into colormap below).
doc_topic = lda_model.doc_topic_
lda_keys = []
for i, tweet in enumerate(news['description']):
    lda_keys += [doc_topic[i].argmax()]
colormap = np.array(["#6d8dca", "#69de53", "#723bca", "#c3e14c", "#c84dc9", "#68af4e", "#6e6cd5",
"#e3be38", "#4e2d7c", "#5fdfa8", "#d34690", "#3f6d31", "#d44427", "#7fcdd8", "#cb4053", "#5e9981",
"#803a62", "#9b9e39", "#c88cca", "#e1c37b", "#34223b", "#bdd8a3", "#6e3326", "#cfbdce", "#d07d3c",
"#52697d", "#7d6d33", "#d27c88", "#36422b", "#b68f79"])
# Interactive bokeh scatter of the 2-D t-SNE embedding, coloured by topic,
# with a hover tooltip showing description/topic/category.
plot_lda = bp.figure(plot_width=700, plot_height=600, title="LDA topic visualization",
    tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
    x_axis_type=None, y_axis_type=None, min_border=1)
lda_df = pd.DataFrame(tsne_lda, columns=['x','y'])
lda_df['description'] = news['description']
lda_df['category'] = news['category']
lda_df['topic'] = lda_keys
lda_df['topic'] = lda_df['topic'].map(int)
lda_df["colors"] = colormap[lda_keys]
plot_lda.scatter(source=lda_df, x='x', y='y', color= "colors")
hover = plot_lda.select(dict(type=HoverTool))
hover.tooltips={"description":"@description", "topic":"@topic", "category":"@category"}
show(plot_lda)
| true |
eca76103cafd3f03acadad15a397edc07e5b9322 | Python | pengyuhou/git_test1 | /leetcode/整数反转.py | UTF-8 | 564 | 3.28125 | 3 | [] | no_license | x=1534236469
# Scratch value just above the signed 32-bit maximum; printed for reference.
x1=2**31+1
print(x1)
class Solution(object):
    def reverse(self, x):
        """Reverse the decimal digits of *x*, keeping its sign.

        Returns 0 when either the input or the reversed value falls outside
        the signed 32-bit range [-2**31, 2**31 - 1] (LeetCode #7 contract).
        Bug fix: the original only range-checked the *input*, so e.g.
        reverse(1534236469) returned 9646324351 instead of 0.
        """
        sign = -1 if x < 0 else 1
        # Reverse the digit string of the absolute value, then restore sign.
        reversed_value = sign * int(str(abs(x))[::-1])
        if -2**31 <= x <= 2**31 - 1 and -2**31 <= reversed_value <= 2**31 - 1:
            return reversed_value
        return 0
s=Solution()
# `x` is defined at the top of the script; prints the reversed value of x.
print((s.reverse(x)))
| true |
7f61ee4d5e9039d3588138b16dc2cf83afa71c66 | Python | t77165330/260201017 | /lab6/example2.py | UTF-8 | 380 | 3.625 | 4 | [] | no_license | a = int(input("How many students : "))
# Weighted-average grades per student: grades 1 and 3 weigh 30%, grade 2
# weighs 40%. `a` (read above) is the number of students.
d = []
for i in range(a):
    print(i + 1, ". student")
    b = input("Enter the grades by splitting with (,): ").split(",")
    # Bug fix: the original reused `a` as the per-student accumulator,
    # clobbering the student count read above; use a dedicated variable.
    total = 0
    for idx in range(3):
        weight = 40 if idx == 1 else 30
        total += int(b[idx]) * weight / 100
    d.append(total)
print(d)
2b3c226d00328086567c1b86175a9a373b99ddc9 | Python | mihaiconstantin/tesc-publications | /lib/TescPerf/tescworkers.py | UTF-8 | 5,504 | 2.734375 | 3 | [] | no_license | from threading import Thread
from queue import Queue
from time import time
# from teschelpers import UrlFetcher, UrlBuilder
from lib.TescPerf.teschelpers import UrlFetcher, UrlBuilder
# Links.
class LinkWorker(Thread):
'''Extracts the links of a search query.'''
all_links = []
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
# While there are jobs in the queue.
while True:
# Get the work (i.e., the page URL) from the queue.
page_url = self.queue.get()
# Extract the links.
LinkWorker.extract_page_links(UrlFetcher(page_url).soup)
# Mark job as completed.
self.queue.task_done()
@classmethod
def extract_page_links(cls, soup):
"""Extracts the article links on a page.
Args:
soup (BeautifulSoup): BeautifulSoup object from the request content.
"""
papers = soup.find_all('li', {'class': 'portal_list_item'})
for paper in papers:
cls.all_links.append(paper.find('h2', class_='title').find('a')['href'])
print('\t- ' + paper.find('h2', class_='title').find('a')['href'][-43:])
@staticmethod
def extract_all_links(start, end, search):
"""Extracts the article links on all pages for a URL query in a multi-threaded fashion.
Args:
start (int): Year to start searching from.
end (int): Year to end searching at.
search (string): Search query.
"""
time_start = time()
url = UrlBuilder(start, end, search, 0).url
metadata = UrlFetcher(url).metadata
queue = Queue()
print('\nFrom year: %s' % str(start))
print('To year: %s' % str(end))
print('Searching for: %s' % str(search))
print('\nQuery executed: %s' % url)
print('\nIdentified %s candidate links distributed across %s page(s).' % (str(metadata[0]), str(metadata[1])))
print('\nStarting the extraction...')
for page in range(metadata[1]):
worker = LinkWorker(queue)
worker.daemon = True
worker.start()
for page in range(metadata[1]):
queue.put(UrlBuilder(start, end, search, page).url)
queue.join()
print('Extraction completed...')
print('\nFound: %s links.' % len(LinkWorker.all_links))
print('\nTook: %s seconds.\n' % str(time() - time_start))
# Paper data.
class PaperDataWorker(Thread):
'''Extracts the paper data.'''
all_papers = []
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the work (i.e., the paper URL) from the queue.
paper_url = self.queue.get()
# Extract the paper data.
PaperDataWorker.extract_paper_data(UrlFetcher(paper_url))
# Mark job as completed.
self.queue.task_done()
@classmethod
def extract_paper_data(cls, url_fetcher):
"""Extracts data for a paper of type article.
Args:
url_fetcher (UrlFetcher): UrlFetcher object.
"""
# Get the HTML soup.
soup = url_fetcher.soup
# Determine if the paper is a scientific article.
article = cls.is_article(soup.find('div', class_='view_title').find('p', class_='type').find('span', class_='type_classification').text)
if article:
ctx_title = soup.find('div', class_='view_title')
ctx_body = soup.find('div', class_='view_body')
paper_data = {'tesc_authors': [], 'external_authors': []}
# URL.
paper_data['url'] = url_fetcher.url
# Title.
try:
paper_data['title'] = ctx_title.find('h2', class_='title').text
except:
paper_data['title'] = 'Error: title.'
# Abstract.
try:
paper_data['abstract'] = ctx_body.find('div', class_='abstract').text
except:
paper_data['abstract'] = 'Error: abstract.'
# DOI.
try:
paper_data['doi'] = ctx_body.find('div', class_='rendering_contributiontojournal_versioneddocumentandlinkextensionanddoiportal').find('ul', class_='digital_object_identifiers').find('li', class_='available').find('a').text.strip()
except:
paper_data['doi'] = 'Error: DOI.'
# Authors.
try:
authors = ctx_body.find('div', class_='rendering_associatesauthorsclassifiedlistportal').find('ul', class_='persons').find_all('li')
for author in authors:
if author.find('a') is not None:
paper_data['tesc_authors'].append({
'name' : author.find('a').text,
'link' : author.find('a')['href']
})
else:
paper_data['external_authors'].append(author.text)
except:
paper_data['tesc_authors'] = 'Error: TESC authors.'
paper_data['external_authors'] = 'Error: external authors.'
# Append the paper data if it was an article.
cls.all_papers.append(paper_data)
print('\t- For paper: %s' % str(paper_data['title']))
@staticmethod
def extract_all_paper_data(all_links):
"""Extract the data for all papers in the list in a multi-threaded fashion.
Args:
all_links (list): A list of URLs.
"""
time_start = time()
queue = Queue()
print('\nStarting extracting the paper data...')
for thread in range(20):
worker = PaperDataWorker(queue)
worker.daemon = True
worker.start()
for paper_link in all_links:
queue.put(paper_link)
queue.join()
print('Extraction completed...')
print('\nFound: %s papers of type scientific article.' % len(PaperDataWorker.all_papers))
print('\nTook: %s seconds.' % str(time() - time_start))
print('\nDone with all.')
@staticmethod
def is_article(category):
"""Checks if a paper is an article.
Args:
category (string): The paper category.
Returns:
bool: True if the paper is an article, false otherwise.
"""
if category == 'Article':
return True
return False
| true |
a8e72b594eb8697ce0a40bd12444e3d89a76cdd8 | Python | KaimingWan/Python_Learning | /os_homework.py | UTF-8 | 3,761 | 3.453125 | 3 | [] | no_license | __author__ = 'Kaiming'
import os
import pdb
class IO_dir(object):
flag = False # 类变量,用于在类全局内保存是否找到相应的文件
def dir_l(self):
'用于显示当前目录下所有文件和目录'
list_all = os.listdir() # listdir包括所有文件和目录,不加任何参数默认是当前目录下
print('当前目录下的所有目录如下:')
# 这里os.path.isdir不需要join,因为当前目录本来就有x
list_dirs = [x for x in list_all if os.path.isdir(x)]
print(list_dirs)
print('当前目录下的所有文件如下:')
list_files = [x for x in list_all if os.path.isfile(x)]
print(list_files)
def search_file(self, file_name, path):
'用于在当前目录以及子目录下搜索相关的文件,并打印出它的路径,如果当前目录找到了,则不再进子目录寻找'
file_list = [] # 如果是文件就直接放入文件list
dir_list = [] # 如果是目录就直接放入目录list
for x in os.listdir(path):
# !!!这一句非常重要,因为isfile的判断需要完整的路径名,如果不加这句,isfile的参数只是单纯的一个名字,就会全部返回False
# pdb.set_trace() #调试
fullpath = os.path.join(path, x) # path方法和x的名字连接在一起称为一个完整的路径
if os.path.isfile(fullpath):
file_list.append(x)
else:
dir_list.append(x)
if file_name not in file_list:
if len(dir_list) == 0:
pass # 如果当前目录找不到,并且也没子目录了,就可以到函数末尾了,不需要修改flag的值
else: # 当前目录没找到,子目录中寻找
for child_dir in dir_list: #在每个dir_list中寻找
if child_dir == '__pycache__': # __pycache__是代码产生的二进制文件信息,因此不考虑对其进行搜索
return False
# 更新最新的路径,将要查找的子目录更新到child_path,切勿join到path,否则path目录下其他目录就无法被遍历。因为for循环每次都执行path.join。
child_path = os.path.join(path, child_dir)
self.search_file(file_name, child_path)
else: # 如果找到文件
print('[' + file_name + ']已经找到!')
print('[' + file_name + ']的相对路径是:' + os.path.join(path, file_name))
self.flag = True
return self.flag
def input_str(self):
'用于接收用户的输入'
print('请输入操作命令:')
ops = str(input())
return ops
t = IO_dir() # 创建类实例
print('欢迎使用简易目录文件查看系统,退出系统请输入:exit')
print('------------------------------------------帮助说明------------------------------------------------')
print('(1)dir -l:查看当前目录下(执行该代码处)所有文件和目录')
print('(2)输入名字,会直接在当前目录下以及所有子目录下查找文件名包含指定字符串的文件(只搜索一个),并打印出相对路径,不支持搜索目录!')
print('------------------------------------------END----------------------------------------------------')
ops = t.input_str()
while ops != 'exit':
if ops == 'dir -l':
t.dir_l()
else:
flag = t.search_file(ops, '.') # 其他输入内容均看成是查找
if flag is False:
print('很遗憾,没有找到相应的文件!')
ops = t.input_str()
print('感谢使用,再见!')
| true |
08940d86eb8b09534defeae14c1b946c5d15d90c | Python | swansong/labgeeksrpg | /pythia/test/page_test.py | UTF-8 | 3,135 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | """
Tests creation and editing of pages
"""
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from pythia.models import *
import datetime
import pdb
class PageTestCase(TestCase):
def setUp(self):
'''
PREP!
'''
self.dawg = User.objects.create_user('Dawg', 'dawg@test.com', 'pass')
self.dawg.save()
self.writer = User.objects.create_user('Writer', 'writer@test.com', 'pass')
self.editor = User.objects.create_user('Editor', 'editor@test.com', 'pass')
page = ContentType.objects.get_for_model(Page)
add_page = Permission.objects.get(content_type=page, codename='add_page')
edit_page = Permission.objects.get(content_type=page, codename='change_page')
self.writer.user_permissions.add(add_page)
self.writer.save()
self.editor.user_permissions.add(add_page, edit_page)
self.editor.save()
hello = Page.objects.create(name='Hello', slug='hello', content='empty', date=datetime.date.today(), author=self.writer)
RevisionHistory.objects.create(after='empty', user=self.writer, date=datetime.date.today(), page=hello, notes='initial')
def testPageCreation(self):
client = Client()
client.login(username='Dawg', password='pass')
resp = client.get('/pythia/create_page/')
self.assertContains(resp, 'Without your space helmet')
client.logout()
client.login(username='Writer', password='pass')
resp = client.get('/pythia/create_page/')
self.assertContains(resp, 'Create Page')
resp = client.post('/pythia/None/edit/', {'content': 'I am a wee babby wiki page', 'notes': 'inintial page creation', 'page_name': "I'm a page!"})
self.assertEqual(resp.status_code, 302) # will be a redirect if successful
resp = client.get('/pythia/im-a-page/') # testing slugification along with page creation
self.assertEqual(resp.status_code, 200)
client.logout()
def testPageEditing(self):
client = Client()
client.login(username='Dawg', password='pass')
resp = client.get('/pythia/hello/')
self.assertContains(resp, 'empty')
resp = client.get('/pythia/hello/edit/')
self.assertContains(resp, 'Without your space helmet')
client.logout()
client.login(username='Writer', password='pass')
resp = client.get('/pythia/hello/edit/')
self.assertContains(resp, 'Without your space helmet')
client.logout()
client.login(username='Editor', password='pass')
resp = client.get('/pythia/hello/edit/')
self.assertContains(resp, 'Edit Page')
resp = client.post('/pythia/hello/edit/', {'content': 'This is NOT an empty page. I swear', 'notes': 'not empty', 'page_name': 'hello'})
self.assertEqual(resp.status_code, 302) # will be a 'found' redirect
resp = client.get('/pythia/hello/')
self.assertContains(resp, 'This is NOT an empty page.')
| true |
7e6f893ad8a9095ce7d1374dfebd234e3ce0820b | Python | frdrkandersson/AdventOfCode2020 | /Day04/solution.py | UTF-8 | 1,499 | 2.765625 | 3 | [] | no_license | from os.path import abspath, dirname, join
import re
with open(abspath(join(dirname(__file__), 'input.txt')), 'r') as f:
data = f.read().split("\n\n")
data = [row.replace("\n", " ") for row in data]
data = [row.split() for row in data]
passports = [dict(pair.split(":") for pair in row) for row in data]
def fieldValidation(passport):
fields = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
return all(items in passport for items in fields)
def baseValidation(passports):
return [p for p in passports if fieldValidation(p)]
def isValidExtended(passport):
validations = {
"byr": lambda x: 1920 <= int(x) <= 2002,
"iyr": lambda x: 2010 <= int(x) <= 2020,
"eyr": lambda x: 2020 <= int(x) <= 2030,
"hgt": lambda x: int(x[:-2]) and (
(x[-2:] == "cm" and 150 <= int(x[:-2]) <= 193) or
(x[-2:] == "in" and 59 <= int(x[:-2]) <= 76)
),
"hcl": lambda x: re.fullmatch("#[0-9a-f]{6}", x),
"ecl": lambda x: re.fullmatch("amb|blu|brn|gry|grn|hzl|oth", x),
"pid": lambda x: int(x) and len(x) == 9,
}
for field, func in validations.items():
if field not in passport.keys():
continue
try:
if not func(passport[field]):
return 0
except:
return 0
return 1
def part1(passports):
return len(baseValidation(passports))
def part2(passports):
return sum(isValidExtended(p) for p in baseValidation(passports))
print(part1(passports))
print(part2(passports)) | true |
28bf362737a0b40e53c4bb35e99fd2179cd1af47 | Python | hlim1/delphi | /delphi/analysis/sensitivity/variance_methods.py | UTF-8 | 1,891 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | from abc import ABCMeta, abstractmethod
import inspect
from SALib.sample import saltelli
from SALib.analyze import sobol
import numpy as np
class VarianceAnalyzer(metaclass=ABCMeta):
"""
Meta-class for all variance based sensitivity analysis methods
"""
def __init__(self, model, prob_def=None):
self.has_samples = False
self.has_outputs = False
self.model = model
if prob_def is None:
sig = inspect.signature(self.model)
args = list(sig.parameters)
self.problem_definition = {
'num_vars': len(args),
'names': args,
'bounds': [[-100, 100] for arg in args]
}
else:
self.problem_definition = prob_def
def sample(self, num_samples=1000, second_order=True):
print("Sampling over parameter bounds")
self.samples = saltelli.sample(self.problem_definition,
num_samples,
calc_second_order=second_order)
self.has_samples = True
def evaluate(self):
if not self.has_samples:
raise RuntimeError("Attempted to evaluate model without samples")
print("Evaluating samples")
res = [self.model(*tuple([[a] for a in args])) for args in self.samples]
self.outputs = np.array(res)
self.has_outputs = True
@abstractmethod
def analyze(self):
if not self.has_outputs:
raise RuntimeError("Attempting analysis without outputs")
print("Collecting sensitivity indices")
class SobolAnalyzer(VarianceAnalyzer):
def __init__(self, model, prob_def=None):
super().__init__(model, prob_def=prob_def)
def analyze(self, **kwargs):
super().analyze()
return sobol.analyze(self.problem_definition, self.outputs, **kwargs)
| true |
2038006dbde7623062fa291f60ff22d7e2fa569b | Python | CarsonScott/Evolutionary-Logic-Learning | /src/neural_network.py | UTF-8 | 4,943 | 2.5625 | 3 | [] | no_license | from lib.relations import *
from lib.util import *
from pattern import *
import math
def multiply(X, Y):
return [X[i] * Y[i] for i in range(len(X))]
def subtract(X, Y):
return [X[i] - Y[i] for i in range(len(X))]
class NeuralNetwork(list):
def __init__(self, shape):
shape = shape
self.weights = []
self.biases = []
self.deltas = []
self.drives = []
self.lrate = 0.001
self.init(shape)
def init(self, shape):
i = 0
for i in range(len(shape)):
s = shape[i]
x = [0 for i in range(s)]
self.append(x)
self.deltas.append(x)
self.drives.append(x)
self.biases.append(x)
self.weights = []
for i in range(len(self)):
self.weights.append([])
for j in range(len(self[i])):
self.weights[i].append([])
for k in range(len(self[i-1])):
self.weights[i][j].append(rr(100)/100)
def compute_outputs(self, level, X):
B = self.biases[level]
Y = []
for i in range(len(self[level])):
b = self.biases[level][i]
y = 0
for j in range(len(self[level-1])):
w = self.weights[level][i][j]
y += self[level-1][j] * w
Y.append(self.activation(y + b))
return Y
def compute_biases(self):
B = [[-1 for j in range(len(self[i]))] for i in range(len(self))]
for level in range(len(self.weights)-1):
for i in range(len(self[level+1])):
d = self.drives[level+1][i]
y = self[level+1][i]
W = self.weights[level+1][i]
for j in range(len(self[level])):
w = self.weights[level+1][i][j]
if level == 0:
x = 1
else:
x = self[level][j]
g = self.drives[level+1][i]
if y != 0:
# if g*w != 0:
derivative = d*y*w#(g * w / y) * d
B[level][j] += derivative
for i in range(len(B)):
for j in range(len(B[i])):
B[i][j] = math.tanh(B[i][j])
self.biases = B
return B
def train(self, reward):
for level in range(1, len(self)):
total_output = sum(self[level])
W = []
for i in range(len(self.weights[level])):
W.append([])
for j in range(len(self.weights[level][i])):
gi = self.drives[level][i]
gj = self.drives[level-1][j]
yi = self[level][i]
yj = self[level-1][j]
wij = self.weights[level][i][j]
xj = yj * wij
if gj != 0:
yj *= gj
dy = yi / total_output
if reward != 0:
dg = dy / reward * self.lrate
W[i].append(gi + dg)
self.drives = W
def compute_drives(self, level):
for i in range(len(self.weights[level])):
gi = 0
for j in range(len(self.weights[level][i])):
gi += self.drives[level-1][j] * self.weights[level][i][j]
self.drives[level][i] = math.tanh(gi)# += #math.tanh(gi)
def compute_weights(self):
W = self.weights
for level in range(len(self.weights)):
for i in range(len(self[level])):
Y = []
gi = 0
for j in range(len(self[level-1])):
yi = self[level][i]
yj = self[level-1][j]
dyi = self.deltas[level][i]
dyj = self.deltas[level-1][j]
gj = self.drives[level-1][j]
dw = dyi * dyj
if gj != 0: dw *= gj
W[level][i][j] += dw * self.lrate
self.weights = W
def activation(self, x):
return math.tanh(x)
def compute_deltas(self, level, V):
X = self[level]
D = []
for i in range(len(V)):
v = V[i]
x = X[i]
d = v-x
D.append(d)
return D
def compute(self, X):
self.compute_biases()
self.compute_weights()
Y = []
B = self.biases[0]
for i in range(len(B)):
self[0][i] = X[i] + B[i]
for i in range(1, len(self)):
X = self[i-1]
y = self.compute_outputs(i, X)
d = self.compute_deltas(i, y)
self[i] = x
self.deltas[i] = d
Y = y
return Y
# network = NeuralNetwork([3, 3, 3])
# [0, 0, 0, 1]
# [0, 0, 0, 2]
# [0, 0, 0,-1]
# [0, 0, 0, 1]
nn=NeuralNetwork([6, 5, 4])
#
X = [[1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 1],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]
Y = []
log = open('log.txt', 'w')
c = -1
rewards = [[1, -1, -1, -1, -1, -1,],
[-1, 1, -1, -1, -1, -1,] ,
[-1, -1, 1, -1, -1, -1,] ,
[-1, -1, 1, -1, -1, -1,] ,
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, 1, -1, -1,] ,
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, 1, -1, -1,] ,
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1]]
for i in range(10000):
for j in range(len(X)):
x = X[j]
c += 1
nn.drives[0] = rewards[j]
Y = nn.compute(x)
Y = reverse(sort(Y))
# index = round(len(Y) * j/len(X))
# y=sort(Y)
# index = y.index(index) / len(y)
# reward = index -0.5
# nn.train(reward)
# R = rewards[j]
# for l in range(len(R)):
# R[l] *= x[l]
# r = sum(R)
# nn.train(r)
# if j == 0:
# if sort(Y)[0] == 0:
# nn.train(1)
# else:
# nn.train(-1)
s = ''
# if j == 1:
# Y = sort(Y)
for k in range(len(Y)):
y = Y[k]
s += str(y) + ' '
print(s)
log.write(str(c) + ' ' + s + '\n')
print() | true |
c166d67d162e22d5171c25d88219664f17a0be6f | Python | hildebrando001/Finance | /RealTimeStock/hb_platform.py | UTF-8 | 2,931 | 2.890625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.gridspec import GridSpec # split screen into grids
import matplotlib.ticker as mticker
import datetime
import math
fig = plt.figure()
fig.patch.set_facecolor('#121416')
gs = fig.add_gridspec(6,6) # Screen divided into 6x6 frames
ax1 = fig.add_subplot(gs[0:4, 0:4])
ax2 = fig.add_subplot(gs[0, 4:6])
ax3 = fig.add_subplot(gs[1, 4:6])
ax4 = fig.add_subplot(gs[2, 4:6])
ax5 = fig.add_subplot(gs[3, 4:6])
ax6 = fig.add_subplot(gs[4, 4:6])
ax7 = fig.add_subplot(gs[5, 4:6])
ax8 = fig.add_subplot(gs[4, 0:4])
ax9 = fig.add_subplot(gs[5, 0:4])
Stock = ['BRK-B', 'PYPL', 'TWTR', 'AAPL', 'AMZN', 'MSFT', 'FB']
# Make nice plot
def figure_design(ax):
ax.set_facecolor('#091217')
ax.tick_params(axis='both', labelsize=14, colors='white')
ax.ticklabel_format(useOffset=False)
ax.spines['bootom'].set_color('#808080')
ax.spines['top'].set_color('#808080')
ax.spines['left'].set_color('#808080')
ax.spines['right'].set_color('#808080')
# Convert strings to numbers
def string_to_number(df, column):
if isinstance(df.iloc[0, df.columns.get_loc(column)], str):
df[column] = df[column].str.replace(',', ' ')
df[column] = df[column].astype(float)
return df
# Read data (Open, High, Low, Cost) function
def read_data_ohlc(filename, stock_code, usecols):
df = pd.read_csv(filename, header=None, usecols=usecols,
names=['time', stock_code, 'change', 'volume', 'pattern','target'],
index_col = 'time', parse_dates['time'])
index_with_nan = df.index[df.isnull().any(axis=1)]
df.drop(index_with_nan, 0, inplace=True) # The 'zero' here means the rows that is going to drop
df.index = pd.DatetimeIndex(df.index)
# Convert these three columns into a floating number type
df = string_to_number(df, stock_code)
df = string_to_number(df, 'volume')
df = string_to_number(df, 'target')
latest_info = df.iloc[-1, :] # last line, all columns
latest_price = str(latest_info.iloc[0])
latest_change = str(latest_info.iloc[1])
df_vol = df['volume'].resample('1Min').mean() # resampling the data
# Move from generic df to Open, High, Low, Cost df
data = df[stock_code].resample('1Min').ohlc() #
data['time'] = data.index
data['time'] = pd.to_datetime(data['time'], format='%Y-%m-%d %H:%M:%S')
data['MA5'] = data['close'].rolling(5).mean()
data['MA10'] = data['close'].rolling(10).mean()
data['MA20'] = data['close'].rolling(20).mean()
data['volume_diff'] = df_vol.diff()
data[data['volume_diff']<0]=None
index_with_nan = data.index[data.isnull().any(axis=1)]
data.drop(index_with_nan, 0, inplace=True)
data.reset_index(drop=True, inplace=True)
reteurn data, latest_price, latest_change, df['pattern'][-1], df['target'][-1], df['volume'][-1]
| true |
2586811b137ecd17cab6f79ce1ff5774e85f9407 | Python | ZhangjlGIT/test_android_for_diamond | /public/Adb_devices.py | UTF-8 | 773 | 3.078125 | 3 | [] | no_license | # -*- coding:utf-8 _*-
"""
@author:zhangjianlang
@file: test.py
@time: 2019/9/16 20:20
"""
import os
def lookforDevices():
# popen返回文件对象,跟open操作一样
f = os.popen(r"adb devices", "r")
out = f.read()
f.close()
# print(out) # cmd输出结果
# 输出结果字符串处理
s = out.split("\n") # 切割换行
new = [x for x in s if x != ''] # 去掉空''
# print(new)
# 可能有多个手机设备
devices = [] # 获取设备名称
for i in new:
dev = i.split('\tdevice')
if len(dev) >= 2:
devices.append(dev[0])
if not devices:
print('{:#^20}'.format('没有手机连接'))
else:
print('{:#^20}'.format("已连接的手机:%s" % str(devices)))
| true |
206a256cb5c78763e3c19a0c0dc8d0b8d8b66fc7 | Python | yolo-forks/YOLOv3 | /prepare_data.py | UTF-8 | 3,485 | 3.421875 | 3 | [] | no_license | import os
import pandas as pd
from copy import copy
import numpy as np
import shutil
import argparse
def parse_my_csv(path_to_csv_file):
"""
This function reads all the annotations from the csv file and then create a dictionary
that stores these annotations.
The dictionary will have as a key the name of the image and as a value a list of detections.
params:
path_to_csv_file : the path to where your pascal annotations (should be a csv file).
"""
df = pd.read_csv(path_to_csv_file)
data = {}
for i in range(len(df.index)):
xmin = int(df.iloc[i]['xmin'])
ymin = int(df.iloc[i]['ymin'])
xmax = int(df.iloc[i]['xmax'])
ymax = int(df.iloc[i]['ymax'])
if df.iloc[i]['class'] == 'mask':
object_class = 0
else:
object_class = 1
if df.iloc[i]['filename'] in data.keys():
data[df.iloc[i]['filename']].append([xmin, ymin, xmax, ymax, object_class])
else:
data.update({df.iloc[i]['filename'] : [[xmin, ymin, xmax, ymax, object_class]]})
# data = {'image_name' : [[det1], [det2], ..]}
# det1 = xmin, ymin, xmax, ymax, object_class
return data
def modify_data(data, path_to_images, path_to_output):
"""
This function transforms annotations from pascal format to yolo format.
It also removes any spaces in the images names and create new images that
have names without spaces. This is done to make things consistent with
the way the training and evaluation code handle reading data.
params:
data: a dictionary that has as a key the name of the image and as value a list of
detections (xmin, ymin, xmax, ymax, class).
path_to_images: the path to where your images are stored.
path_to_output : the path to where the generated yolo annotations and also the images
without any spaces in their names.
"""
path_to_save_annotations = os.path.join(path_to_output, 'annotations.txt')
with open(path_to_save_annotations, 'a+') as f:
for img_name, detections in data.items():
# Copy and rename image
path_to_input_img = os.path.join(path_to_images, img_name)
name_without_spaces = img_name.replace(' ', '')
path_to_output_img = os.path.join(path_to_output, name_without_spaces)
shutil.copy(path_to_input_img, path_to_output_img)
# save detections in the new annotations file
f.write(f'{path_to_output_img} ')
for detection in detections:
xmin, ymin, xmax, ymax, c = detection
f.write(f'{xmin},{ymin},{xmax},{ymax},{c} ')
f.write('\n')
print('Done saving annotations!')
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path_to_images', help="path to where your images are stored")
parser.add_argument('--path_to_csv_annotations', help='full path to where your csv annotations file is.')
parser.add_argument('--path_to_save_output', help='path to where the output images and annotation file will be saved')
args = parser.parse_args()
path_to_images = args.path_to_images
path_csv_file = args.path_to_csv_annotations
data = parse_my_csv(path_csv_file)
output_path = args.path_to_save_output
if not os.path.isdir(output_path):
os.makedirs(output_path)
modify_data(data, path_to_images, output_path)
| true |
5f9a13c0c9fb4e0bbaf9c143449fc839be78f4c4 | Python | AmrKhalifa/Solutions-to-MIT-6.0002-Introduction-to-Computational-Thinking-and-Data-Science-assignments | /PS2/ps2.py | UTF-8 | 8,810 | 3.6875 | 4 | [] | no_license | # 6.0002 Problem Set 5
# Graph optimization
# Name:
# Collaborators:
# Time:
#
# Finding shortest paths through MIT buildings
#
import unittest
from graph import Digraph, Node, WeightedEdge
#
# Problem 2: Building up the Campus Map
#
# Problem 2a: Designing your graph
#
# What do the graph's nodes represent in this problem? What
# do the graph's edges represent? Where are the distances
# represented?
#
# Answer:
#
# Problem 2b: Implementing load_map
def load_map(map_filename):
"""
Parses the map file and constructs a directed graph
Parameters:
map_filename : name of the map file
Assumes:
Each entry in the map file consists of the following four positive
integers, separated by a blank space:
From To TotalDistance DistanceOutdoors
e.g.
32 76 54 23
This entry would become an edge from 32 to 76.
Returns:
a Digraph representing the map
"""
# TODO
print("Loading map from file...")
digrahp = Digraph()
with open (map_filename, 'r') as f:
for line in f.readlines():
line = line.split("\n")
edge_info = line[0].split(" ")
source_node = Node(edge_info[0])
destin_node = Node(edge_info[1])
edge = WeightedEdge(source_node, destin_node, int(edge_info[2]), int(edge_info[3]))
if not digrahp.has_node(source_node):
digrahp.add_node(source_node)
if not digrahp.has_node(destin_node):
digrahp.add_node(destin_node)
digrahp.add_edge(edge)
return digrahp
# Problem 2c: Testing load_map
# Include the lines used to test load_map below, but comment them out
#print(load_map('test_load_map.txt'))
#
# Problem 3: Finding the Shorest Path using Optimized Search Method
#
# Problem 3a: Objective function
#
# What is the objective function for this problem? What are the constraints?
#
# Answer:
#
# Problem 3b: Implement get_best_path
def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,
best_path):
pass
# Problem 3c: Implement directed_dfs
def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):
"""
Finds the shortest path from start to end using a directed depth-first
search. The total distance traveled on the path must not
exceed max_total_dist, and the distance spent outdoors on this path must
not exceed max_dist_outdoors.
Parameters:
digraph: Digraph instance
The graph on which to carry out the search
start: string
Building number at which to start
end: string
Building number at which to end
max_total_dist: int
Maximum total distance on a path
max_dist_outdoors: int
Maximum distance spent outdoors on a path
Returns:
The shortest-path from start to end, represented by
a list of building numbers (in strings), [n_1, n_2, ..., n_k],
where there exists an edge from n_i to n_(i+1) in digraph,
for all 1 <= i < k
If there exists no path that satisfies max_total_dist and
max_dist_outdoors constraints, then raises a ValueError.
"""
# TODO
def printAllPathsUtil(graph, u, d, visited, path, paths):
# Mark the current node as visited and store in path
#visited[u]= True
visited.add(u[0])
path.append(u)
# If current vertex is same as destination, then print
# current path[]
if u[0] == d :
paths.append(list(path))
else:
# If current vertex is not destination
#Recur for all the vertices adjacent to this vertex
#print("node not found, recuring ...")
for i in graph.edges[u[0]]:
if not i.dest in visited:
printAllPathsUtil(graph, (i.dest, i), d, visited, path, paths)
# Remove current vertex from path[] and mark it as unvisited
path.pop()
visited.remove(u[0])
def printAllPaths(graph, s, d):
# Mark all the vertices as not visited
visited = set([])
# Create an array to store paths
paths = []
path = []
x = 0
# Call the recursive helper function to print all paths
printAllPathsUtil(graph, (Node(s), x), Node(d), visited, path, paths)
return paths
def calcualte_path_total_dist(path):
total_dist = 0
for edge in path[1:]:
total_dist += edge[1].get_total_distance()
return total_dist
def calculate_path_outdoor_dist(path):
total_out_dist = 0
for edge in path[1:]:
total_out_dist += edge[1].get_outdoor_distance()
return total_out_dist
paths = printAllPaths(digraph, start, end)
full_path_info = []
for path in paths:
full_path_info.append((path, calcualte_path_total_dist(path), calculate_path_outdoor_dist(path)))
def get_best_path_from_sorted(paths, max_dist, max_outdoor_dist):
paths = sorted(paths, key = lambda x: x[1])
for path in paths:
if path[1] <= max_dist and path[2] <= max_outdoor_dist:
return list([str(x[0]) for x in path[0]])
else:
continue
raise ValueError
return (get_best_path_from_sorted(full_path_info, max_total_dist, max_dist_outdoors))
# ================================================================
# Begin tests -- you do not need to modify anything below this line
# ================================================================
class Ps2Test(unittest.TestCase):
LARGE_DIST = 99999
def setUp(self):
self.graph = load_map("mit_map.txt")
def test_load_map_basic(self):
self.assertTrue(isinstance(self.graph, Digraph))
self.assertEqual(len(self.graph.nodes), 37)
all_edges = []
for _, edges in self.graph.edges.items():
all_edges += edges # edges must be dict of node -> list of edges
all_edges = set(all_edges)
self.assertEqual(len(all_edges), 129)
def _print_path_description(self, start, end, total_dist, outdoor_dist):
constraint = ""
if outdoor_dist != Ps2Test.LARGE_DIST:
constraint = "without walking more than {}m outdoors".format(
outdoor_dist)
if total_dist != Ps2Test.LARGE_DIST:
if constraint:
constraint += ' or {}m total'.format(total_dist)
else:
constraint = "without walking more than {}m total".format(
total_dist)
print("------------------------")
print("Shortest path from Building {} to {} {}".format(
start, end, constraint))
def _test_path(self,
expectedPath,
total_dist=LARGE_DIST,
outdoor_dist=LARGE_DIST):
start, end = expectedPath[0], expectedPath[-1]
self._print_path_description(start, end, total_dist, outdoor_dist)
dfsPath = directed_dfs(self.graph, start, end, total_dist, outdoor_dist)
print("Expected: ", expectedPath)
print("DFS: ", dfsPath)
self.assertEqual(expectedPath, dfsPath)
def _test_impossible_path(self,
start,
end,
total_dist=LARGE_DIST,
outdoor_dist=LARGE_DIST):
self._print_path_description(start, end, total_dist, outdoor_dist)
with self.assertRaises(ValueError):
directed_dfs(self.graph, start, end, total_dist, outdoor_dist)
def test_path_one_step(self):
self._test_path(expectedPath=['32', '56'])
def test_path_no_outdoors(self):
self._test_path(
expectedPath=['32', '36', '26', '16', '56'], outdoor_dist=0)
def test_path_multi_step(self):
self._test_path(expectedPath=['2', '3', '7', '9'])
def test_path_multi_step_no_outdoors(self):
self._test_path(
expectedPath=['2', '4', '10', '13', '9'], outdoor_dist=0)
def test_path_multi_step2(self):
self._test_path(expectedPath=['1', '4', '12', '32'])
def test_path_multi_step_no_outdoors2(self):
self._test_path(
expectedPath=['1', '3', '10', '4', '12', '24', '34', '36', '32'],
outdoor_dist=0)
def test_impossible_path1(self):
self._test_impossible_path('8', '50', outdoor_dist=0)
def test_impossible_path2(self):
self._test_impossible_path('10', '32', total_dist=100)
if __name__ == "__main__":
unittest.main()
pass | true |
70ba4039c494a2b954302587ca0555276ef8e0de | Python | amankumarsinha/amankumarsinha.github.io | /dict.py | UTF-8 | 418 | 4.03125 | 4 | [] | no_license | #to represent real life data
# A dict stores data as key: value pairs, modelling real-life records.

# One way to build a dict: the dict() constructor with keyword arguments.
user = dict(name='aman', age=20)
print(user)
print(type(user))

# Another way: a dict literal.
user1 = {'name': 'aman', 'age': 20}
print(user1)
print(user1['name'])

# Values may be of any type: numbers, strings, lists, nested dicts, ...
userinfo = dict(name='aman', age=34, fav=['3 ididot', 'lig', 'ololo'])
print(userinfo['fav'])
f49e8afce7b6a89d82da36a8397f59c50e86f0e5 | Python | asfiowilma/rakbookoo-api | /author/models.py | UTF-8 | 762 | 2.78125 | 3 | [] | no_license | from django.db import models
from rest_framework import serializers
class Author(models.Model):
    """A book author stored as first / middle / last name parts."""
    first_name = models.CharField(max_length=20)
    middle_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    @property
    def full_name(self):
        """Returns the person's full name."""
        # Bug fix: the name parts must be read from the instance; the bare
        # names first_name/middle_name/last_name raised NameError at runtime.
        return f"{self.first_name} {self.middle_name} {self.last_name}"

    @property
    def cited_name(self):
        """Returns the person's name in citation format (Last, First Middle)."""
        return f"{self.last_name}, {self.first_name} {self.middle_name}"

    def __str__(self):
        # Bug fix: full_name is a property, not a method, so it must not be
        # called; the original f"{self.full_name()}" raised TypeError.
        return self.full_name
class AuthorSerializer(serializers.HyperlinkedModelSerializer):
    # Exposes the three name parts of an Author through the REST API.
    class Meta:
        model = Author
        fields = ['first_name', 'middle_name', 'last_name']
fe3f75adbb651275870aa222e98903bcbf52188f | Python | Arkady-G/Practice-15-02 | /API response check list - 04-05.py | UTF-8 | 6,207 | 2.609375 | 3 | [] | no_license | import json
# Validate each API response template against the expected schema and write
# a per-test report (in Russian) to output_tests.txt.
with open('json_example_QAP.json', encoding='utf8') as fh:  # Открываем файл
    strfile = fh.read()
templates_resp = json.loads(strfile)
# Список полей в шаблоне
fields_list = ['timestamp',
               'referer',
               'location',
               'remoteHost',
               'partyId',
               'sessionId',
               'pageViewId',
               'eventType',
               'item_id',
               'item_price',
               'item_url',
               'basket_price',
               'detectedDuplicate',
               'detectedCorruption',
               'firstInSession',
               'userAgentName']
m = 1  # Создаем счетчик тестов
file_03 = open('output_tests.txt', 'w', encoding='UTF8')  # запись результатов в новый файл
for templates in templates_resp:
    k = 0  # Создаем счетчик ошибок в тестах
    file_03.write(f'\nТест {m}\n')
    for field in templates:  # определяем лишние поля в ответе
        if field not in fields_list:
            file_03.write(f'False - В ответе присутствует лишнее поле - {field}\n')
            k = k + 1
    for missing_field in fields_list:  # опредеяем отсутствующие поля в ответе
        if missing_field not in templates:
            file_03.write(f'False - В ответе отсутствуют поля - {missing_field}\n')
            # Bug fix: missing fields were reported but not counted as errors.
            k = k + 1
    if 'timestamp' in templates and type(
            templates['timestamp']) != int:  # опредеяем соответствие ответа заданным условиям
        file_03.write(f'False - Поле timestamp не является int\n')
        k = k + 1
    if 'referer' in templates and type(templates['referer']) != str:
        file_03.write(f'False - Поле referer не является string\n')
        k = k + 1
    try:
        if 'referer' in templates and not (
                templates['referer'].startswith('https://') or templates['referer'].startswith('http://')):
            file_03.write(f'False - Поле referer не является url\n')
            k = k + 1
    except Exception:  # non-string referer raises AttributeError above
        pass
    if 'location' in templates and type(templates['location']) != str:
        file_03.write(f'False - Поле location не является string\n')
        k = k + 1
    try:
        if 'location' in templates and not (
                templates['location'].startswith('https://') or templates['location'].startswith('http://')):
            file_03.write(f'False - Поле location не является url\n')
            k = k + 1
    except Exception:
        pass
    if 'remoteHost' in templates and type(templates['remoteHost']) != str:
        file_03.write(f'False - Поле remoteHost не является string\n')
        k = k + 1
    if 'partyId' in templates and type(templates['partyId']) != str:
        file_03.write(f'False - Поле partyId не является string\n')
        k = k + 1
    if 'sessionId' in templates and type(templates['sessionId']) != str:
        file_03.write(f'False - Поле sessionId не является string\n')
        k = k + 1
    if 'pageViewId' in templates and type(templates['pageViewId']) != str:
        file_03.write(f'False - Поле pageViewId не является string\n')
        k = k + 1
    if 'eventType' in templates and type(templates['eventType']) != str:
        file_03.write(f'False - Поле eventType не является string\n')
        k = k + 1
    if 'eventType' in templates and (
            (templates['eventType']) != 'itemBuyEvent' and (templates['eventType']) != 'itemViewEvent'):
        file_03.write(f'False - Поле eventType не является itemBuyEvent или itemViewEvent\n')
        k = k + 1
    if 'item_id' in templates and type(templates['item_id']) != str:
        file_03.write(f'False - Поле item_id не является string\n')
        k = k + 1
    if 'item_price' in templates and type(templates['item_price']) != int:
        file_03.write(f'False - Поле item_price не является int\n')
        k = k + 1
    if 'item_url' in templates and type(templates['item_url']) != str:
        file_03.write(f'False - Поле item_url не является string\n')
        k = k + 1
    try:
        # Bug fix: the second startswith() checked templates['location']
        # instead of templates['item_url'] (copy-paste error).
        if 'item_url' in templates and not (
                templates['item_url'].startswith('https://') or templates['item_url'].startswith('http://')):
            file_03.write(f'False - Поле item_url не является url\n')
            k = k + 1
    except Exception:
        pass
    if 'basket_price' in templates and type(templates['basket_price']) != str:
        file_03.write(f'False - Поле basket_price не является string\n')
        k = k + 1
    if 'detectedDuplicate' in templates and type(templates['detectedDuplicate']) != bool:
        file_03.write(f'False - Поле detectedDuplicate не является bool\n')
        k = k + 1
    if 'detectedCorruption' in templates and type(templates['detectedCorruption']) != bool:
        file_03.write(f'False - Поле detectedCorruption не является bool\n')
        k = k + 1
    if 'firstInSession' in templates and type(templates['firstInSession']) != bool:
        file_03.write(f'False - Поле firstInSession не является bool\n')
        k = k + 1
    # Bug fix: the original tested `'userAgentName' not in templates` and then
    # indexed templates['userAgentName'], raising KeyError whenever the field
    # really was absent; the check must run only when the field is present.
    if 'userAgentName' in templates and type(templates['userAgentName']) != str:
        file_03.write(f'False - Поле userAgentName не является string\n')
        k = k + 1
    if k != 0:  # подсчитываем количество ошибок
        file_03.write(f'Найдено {k} ошибок\n')
    else:
        file_03.write('PASS - Тест пройден!\n')
    m = m + 1
file_03.close()  # закрытие файла
| true |
e4b1897dbd0bad3aa46c8e885dfd875c870e2781 | Python | scottmries/eulerproject | /112.py | UTF-8 | 546 | 3.53125 | 4 | [] | no_license | ratio = 0.0
# Search state: i is the current candidate, bouncies counts bouncy numbers seen.
i = 100.0
bouncies = 0.0
def is_bouncy(n):
    """Return True if n's digits both increase and decrease somewhere.

    A "bouncy" number (Project Euler 112) is neither monotonically
    increasing nor monotonically decreasing, e.g. 155349.
    """
    digits = str(n)
    rises = False
    falls = False
    for left, right in zip(digits, digits[1:]):
        # Digit characters compare in the same order as their int values.
        if left < right:
            rises = True
        elif left > right:
            falls = True
        if rises and falls:
            return True
    return False
# Sanity check on a known bouncy number (Python 2 print statement).
print is_bouncy(155349)
# Project Euler 112: scan upward from 100 until the proportion of bouncy
# numbers reaches 99%, then print the candidate and stop.
while True:
    if ratio < 0.99:
        print int(i)
        if is_bouncy(int(i)):
            bouncies += 1.0
        print i, ratio
        # NOTE(review): bouncies only counts numbers from 100 upward but the
        # denominator is i itself - confirm this matches the intended
        # definition of the proportion.
        ratio = bouncies/i
        i += 1.0
    else:
        print i
        quit()
| true |
3c7be769c5db141f4c8b0121093a3ef661a50273 | Python | awaddell77/Math-Python-Projects | /B_tree.py | UTF-8 | 3,044 | 3.34375 | 3 | [] | no_license | #bin tree
#technically it is a binary search tree
class B_tree:
    """A binary search tree of `Node` objects.

    Empty child slots are falsy (the `Node` class uses '' as its sentinel),
    so all child checks are done by truthiness. Changes from the original:
    the stray debug prints in add/find were removed, and the broken
    remove()/_remove_help() (the author's own "this is wrong" comment) was
    replaced with a standard BST deletion.
    """

    def __init__(self, head):
        self.head = head

    def add(self, data):
        """Insert `data`; duplicates are silently ignored."""
        if not self.head:
            self.head = Node(data)
            return
        return self._add_help(data, self.head)

    def _add_help(self, data, node):
        # Recursively descend and attach a new leaf where the search ends.
        if not node:
            return Node(data)
        if data < node.data:
            node.left = self._add_help(data, node.left)
        elif data > node.data:
            node.right = self._add_help(data, node.right)
        # Equal data falls through: the existing node is returned unchanged.
        return node

    def _travel(self, data, node):
        # Walk toward `data`; stop at the match or at the last node visited.
        if not node:
            return node
        if data == node.data:
            return node
        child = node.left if data < node.data else node.right
        if not child:
            return node
        return self._travel(data, child)

    def find(self, data):
        """Return the node holding `data`, or the last node traversed
        when no node with that data exists (original contract kept)."""
        return self._travel(data, self.head)

    def remove(self, data):
        """Remove the node holding `data`, if present."""
        self.head = self._remove_help(data, self.head)

    def _remove_help(self, data, node):
        # Standard BST deletion; returns the new root of this subtree.
        if not node:
            return node
        if data < node.data:
            node.left = self._remove_help(data, node.left)
        elif data > node.data:
            node.right = self._remove_help(data, node.right)
        else:
            # Zero or one child: splice the child (possibly empty) upward.
            if not node.left:
                return node.right
            if not node.right:
                return node.left
            # Two children: copy the in-order successor's value here, then
            # delete the successor from the right subtree.
            successor = node.right
            while successor.left:
                successor = successor.left
            node.data = successor.data
            node.right = self._remove_help(successor.data, node.right)
        return node

    def print_tree(self):
        """Print node values in sorted (in-order) sequence."""
        self._traverse(self.head, 1)

    def _traverse(self, node, depth):
        # In-order traversal (left, root, right). Values are printed with no
        # separator, matching the original output format.
        if not node:
            return
        self._traverse(node.left, depth + 5)
        print(str(node.data), end='')
        self._traverse(node.right, depth + 5)


class Node:
    """A single binary-tree node; '' marks an empty child slot (falsy)."""
    def __init__(self, data):
        self.data = data
        self.left = ''
        self.right = ''

    def __str__(self):
        # Recursive textual dump: D: <data>[L: <left> R: <right>]
        return "D: " + str(self.data) + "[" + "L: " + str(self.left) + " R: " + str(self.right) + "]"

    def __repr__(self):
        return self.__str__()
# Build a small demo tree rooted at 10 and print its values in order.
n1 = Node(10)
'''n2 = Node(1)
n3 = Node(3)
n4 = Node(4)
n1.right = n4
n1.left = n3
n3.left = n2'''
tst_tree = B_tree(n1)
tst_tree.add(20)
tst_tree.add(8)
tst_tree.add(4)
tst_tree.add(25)
tst_tree.add(13)
tst_tree.add(6)
tst_tree.add(3)
tst_tree.print_tree()
5ebb6c67d7b864653421def66d0c4094f25d114f | Python | mugenZebra/MangaStyle | /manga_model.py | UTF-8 | 9,834 | 2.6875 | 3 | [] | no_license | import tensorflow as tf
def _conv_relu(bottom, scope_name, in_channels, out_channels, relu_name):
    """3x3 'SAME' convolution + bias + ReLU.

    Variables are created as <scope_name>/weights and <scope_name>/biases.
    """
    with tf.variable_scope(scope_name):
        weights = tf.get_variable('weights',
                                  shape=[3, 3, in_channels, out_channels],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[out_channels],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(bottom, weights, strides=[1, 1, 1, 1], padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(conv, biases), name=relu_name)


def _max_pool_2x2(bottom, scope_name, pool_name):
    """2x2 max pooling with stride 2 and 'SAME' padding."""
    with tf.variable_scope(scope_name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=pool_name)


def _fc_relu(bottom, scope_name, in_dim, out_dim, name):
    """Fully connected layer + ReLU (0.005-stddev weight init)."""
    with tf.variable_scope(scope_name):
        weights = tf.get_variable('weights',
                                  shape=[in_dim, out_dim],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[out_dim],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        return tf.nn.relu(tf.matmul(bottom, weights) + biases, name=name)


def model(images, batch_size, classes, dropout):
    """Build the model.

    Args:
        images: tensor with image batch [batch_size, height, width, 3].
        batch_size: number of images in one batch.
        classes: number of output classes.
        dropout: dropout probability; accepted for signature compatibility
            but unused - all dropout calls were commented out in the
            original model.
    Returns:
        softmax_linear: tensor with the computed logits [batch_size, classes].
    """
    # Variable scope names match the original layer-by-layer construction so
    # existing code addressing these scopes keeps working. One fix: the conv1
    # bias variable was accidentally named 'biasrq12ges2' and is now 'biases'
    # like every other layer (checkpoints saved under the typo'd name need a
    # one-off rename).
    conv1 = _conv_relu(images, 'convolution_layer1', 3, 32, 'conv1')
    pool1 = _max_pool_2x2(conv1, 'maxpooling1_layer1', 'maxpooling1')

    conv2 = _conv_relu(pool1, 'convolution_layer2', 32, 64, 'conv2')
    # The original opened this pooling scope with reuse=True; pooling creates
    # no variables, so the flag had no effect and is dropped.
    pool2 = _max_pool_2x2(conv2, 'maxpooling1_layer2', 'maxpooling2')

    conv3 = _conv_relu(pool2, 'convolution_layer3', 64, 128, 'conv3')
    conv4 = _conv_relu(conv3, 'convolution_layer4', 128, 128, 'conv4')
    conv5 = _conv_relu(conv4, 'convolution_layer5', 128, 256, 'conv5')
    conv6 = _conv_relu(conv5, 'convolution_layer6', 256, 256, 'conv6')
    pool6 = _max_pool_2x2(conv6, 'maxpooling1_layer6', 'pooling6')

    # Flatten the feature maps and apply two 256-wide fully connected layers.
    with tf.variable_scope('fullconnected_layer7'):
        flat = tf.reshape(pool6, shape=[batch_size, -1])
        dim = flat.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(flat, weights) + biases, name='full7')

    fc8 = _fc_relu(fc7, 'fullconnected_layer8', 256, 256, 'full8')

    # Final linear (pre-softmax) projection onto the class logits.
    with tf.variable_scope('softmax_linear'):
        weights = tf.get_variable('softmax_linear',
                                  shape=[256, classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(fc8, weights), biases, name='softmax_linear')
    return softmax_linear
def losses(logits, labels):
    """Compute the mean softmax cross-entropy loss.

    Args:
        logits: logits tensor [batch_size, num_classes].
        labels: integer class-label tensor [batch_size].
    Returns:
        loss: scalar loss tensor.
    """
    with tf.variable_scope('loss') as scope:
        per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='cross_entropy_per_example')
        loss = tf.reduce_mean(per_example, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss
def trainning(loss, learning_rate):
    """Create the Adam training op.

    Args:
        loss: loss tensor from losses().
        learning_rate: learning rate for the optimizer.
    Returns:
        train_op: the op to run one training step.
    """
    with tf.name_scope('optimizer'):
        adam = tf.train.AdamOptimizer(learning_rate=learning_rate)
        # Non-trainable step counter, incremented by minimize().
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = adam.minimize(loss, global_step=global_step)
    return train_op
def evaluation(logits, labels):
    """Compute top-1 prediction accuracy.

    Args:
        logits: logits tensor [batch_size, num_classes].
        labels: integer class-label tensor [batch_size].
    Returns:
        accuracy: scalar tensor with the fraction of correct predictions.
    """
    with tf.variable_scope('accuracy') as scope:
        top1_hits = tf.nn.in_top_k(logits, labels, 1)
        accuracy = tf.reduce_mean(tf.cast(top1_hits, tf.float16))
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
| true |
f1e2e1fc212a722d2c8f568ed99a43e3e6b5be43 | Python | brainerazer/SustainRefer | /SRC/approximateBcalc.py | WINDOWS-1251 | 1,077 | 2.671875 | 3 | [] | no_license | #!/usr/local/bin/python3
import scipy.optimize
import numpy as np
from math import ceil, log, exp
import matplotlib.pyplot as plt
import sys
from matplotlib2tikz import save as tikz_save
# Load the measurement table from the CSV file given on the command line.
data = np.genfromtxt(sys.argv[1], delimiter=',')
def B_func(p, c):
    """Model curve ceil(c * exp(0.5 * p^0.5 * ln(p)^0.5)).

    Works element-wise on scalars or numpy arrays.
    """
    logs = np.log(p)
    growth = np.power(p, 0.5) * np.power(logs, 1 - 0.5)
    return np.ceil(c * np.exp(0.5 * growth))
def R_func(p, c):
    """Companion curve ceil(c * exp(2 * p^0.5 * ln(p)^0.5)).

    Same shape as B_func but with a growth exponent of 2 instead of 0.5.
    """
    logs = np.log(p)
    growth = np.power(p, 0.5) * np.power(logs, 1 - 0.5)
    return np.ceil(c * np.exp(2 * growth))
# Column 1 is the raw size measure (fitted against its logarithm), column 2
# the observed B value.
x = data[:, 1]
log_x = np.log(x)
y = data[:, 2]
print(log_x)
print(y)
# Fit the constant c in B_func to the measured (log x, y) pairs; curve_fit
# returns (optimal_params, covariance).
b = scipy.optimize.curve_fit(B_func, log_x, y, bounds=(1,8), diff_step=0.01)
print(b)
points = plt.plot(data[:, 0], y, 'ro', label=' ')
plt.xlabel(", ")
plt.ylabel(" B")
plt.plot(data[:, 0], B_func(log_x, b[0]), label=' ')
plt.legend(loc='lower right')
# NOTE(review): the label/axis strings above appear mojibake-damaged (the
# file is WINDOWS-1251 encoded) - restore the original Cyrillic text.
tikz_save('figure.tex')
c8427d92a682caaa90b2bf6ad1a67bbf5d03d9f3 | Python | suhasghorp/QuantFinanceBook | /PythonCodes/Chapter 14/Fig14_05.py | UTF-8 | 1,373 | 3.03125 | 3 | [] | no_license | #%%
"""
Created on Feb 10 2019
Ploting of the rates in positive and negative rate environment
@author: Lech A. Grzelak
"""
import numpy as np
import matplotlib.pyplot as plt
def mainCalculation():
    """Plot the EUR 1M yield curve for 2008 (all-positive rates) and for
    2017 (negative short-end rates), one figure per year."""
    time = np.linspace(0.1, 30, 50)

    # Market yields, one value per pillar in `time`.
    Rates2008 = [4.4420,4.4470,4.3310,4.2520,4.2200,4.2180,4.2990,4.3560,4.4000,\
    4.4340,4.4620,4.4840,4.5030,4.5190,4.5330,4.5450,4.5550,4.5640,4.5720,\
    4.5800,4.5860,4.5920,4.5980,4.6030,4.6070,4.6110,4.6150,4.6190,4.6220,\
    4.6250,4.6280,4.6310,4.6340,4.6360,4.6380,4.6400,4.6420,4.6440,4.6460,4.6480,\
    4.6490,4.6510,4.6520,4.6540,4.6550,4.6560,4.6580,4.6590,4.6600,4.6610]

    Rates2017 = [-0.726,-0.754,-0.747,-0.712,-0.609,-0.495,-0.437,-0.374,-0.308,\
    -0.242,-0.177,-0.113,-0.0510,0.00900,0.0640,0.115,0.163,0.208,0.250,0.288,\
    0.323,0.356,0.386,0.414,0.439,0.461,0.482,0.501,0.519,0.535,0.550,0.564,\
    0.577,0.588,0.598,0.608,0.617,0.625,0.632,0.640,0.646,0.652,0.658,0.663,\
    0.668,0.673,0.678,0.682,0.686,0.690]

    # The two figures differ only in data and year label, so draw them in a
    # loop instead of duplicating the plotting code.
    for fig_no, (rates, year) in enumerate([(Rates2008, '2008'),
                                            (Rates2017, '2017')], start=1):
        plt.figure(fig_no)
        plt.plot(time, rates)
        plt.grid()
        plt.title('Interest Rates, EUR1M, ' + year)
        plt.xlabel('time in years')
        plt.ylabel('yield')

mainCalculation()
01df016790f7b89565368e46246513d5bacab933 | Python | applecrumble123/ComputerVision | /Colour conversion and geometric transformations/Task1.2P.py | UTF-8 | 4,318 | 3.34375 | 3 | [] | no_license | import numpy as np
import cv2 as cv
# All inputs and outputs live in this resources directory.
RES_DIR = '/Users/johnathontoh/Desktop/SIT789 - Applications of Computer Vision and Speech Processing/Week 1/Task 1.2P/Resources_1.2/'


def show_and_save(window_name, image, filename):
    """Display `image` until a key is pressed, save it under RES_DIR,
    then close all OpenCV windows (same sequence the original repeated
    eight times inline)."""
    cv.imshow(window_name, image)
    cv.waitKey(0)
    cv.imwrite(RES_DIR + filename, image)
    cv.destroyAllWindows()


img = cv.imread(RES_DIR + 'img1.jpg')

# ----------------------------- colour conversion ------------------------------------
# Images load as BGR by default. HSV = Hue (colour), Saturation (greyness,
# near 0 means dull/grey), Value (brightness).
img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
show_and_save('image in HSV', img_hsv, 'imgHSV.jpg')

# Convert to a single-channel grey image.
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
show_and_save('image in gray', img_gray, 'imgGRAY.jpg')

# ----------------------------- scaling ------------------------------------
height, width = img.shape[:2]
# Horizontal scale 0.5, vertical scale 0.4; the new dimensions must be ints.
h_scale = 0.5
v_scale = 0.4
new_height = int(height * v_scale)
new_width = int(width * h_scale)
img_resize = cv.resize(img, (new_width, new_height), interpolation=cv.INTER_LINEAR)
show_and_save('resize', img_resize, 'imgSCALE.jpg')

# ----------------------------- translation ------------------------------------
# Shift the image by the translation vector (t_x, t_y).
t_x = 100
t_y = 200
M = np.float32([[1, 0, t_x], [0, 1, t_y]])
img_translation = cv.warpAffine(img, M, (width, height))
show_and_save('translation', img_translation, 'imgTRANSLATION.jpg')

# ----------------------------- rotation ------------------------------------
theta = 45  # degrees anti-clockwise (negative would rotate clockwise)
c_x = (width - 1) / 2.0   # column index varies in [0, width-1]
c_y = (height - 1) / 2.0  # row index varies in [0, height-1]
c = (c_x, c_y)            # rotation centre
print(c)
s = 1  # scale factor; 1 means no scaling
M = cv.getRotationMatrix2D(c, theta, s)
img_rotation = cv.warpAffine(img, M, (width, height))
show_and_save('rotation', img_rotation, 'imgROTATION.jpg')

# ----------------------------- Affine ------------------------------------
# Arbitrary 2x3 affine transformation matrix.
m00, m01, m02 = 0.38, 0.27, -47.18
m10, m11, m12 = -0.14, 0.75, 564.32
M = np.float32([[m00, m01, m02], [m10, m11, m12]])
img_affine = cv.warpAffine(img, M, (width, height))
show_and_save('affine', img_affine, 'imgAFFINE.jpg')

# cv.warpAffine can reproduce cv.resize with a pure scaling matrix
# (h_scale along x, v_scale along y).
M = np.float32([[h_scale, 0, 0], [0, v_scale, 0]])
img_replace_resize_with_affine = cv.warpAffine(img, M, (width, height))
show_and_save('img_replace_resize_with_affine', img_replace_resize_with_affine,
              'img_replace_resize_with_affine.jpg')
635fbb48899aa4178ada327a86b09f1524d710aa | Python | capaulson/pyKriging | /examples/3d_Simple_Train.py | UTF-8 | 1,377 | 3.078125 | 3 | [
"MIT"
] | permissive | from __future__ import print_function
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
from pyKriging.testfunctions import testfunctions
# The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here
sp = samplingplan(3)
X = sp.optimallhc(30)
# Next, we define the problem we would like to solve
testfun = testfunctions().squared
y = testfun(X)
# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, testPoints=300)
# The model is then trained
k.train()
k.snapshot()
# It's typically beneficial to add additional points based on the results of the initial training
# The infill method can be used for this
# In this example, we will add nine points in three batches. The model gets trained after each stage
for i in range(10):
print(k.history['rsquared'][-1])
print('Infill iteration {0}'.format(i + 1))
infillPoints = k.infill(10)
# Evaluate the infill points and add them back to the Kriging model
for point in infillPoints:
print('Adding point {}'.format(point))
k.addPoint(point, testfun(point)[0])
# Retrain the model with the new points added in to the model
k.train()
k.snapshot()
# Once the training of the model is complete, we can plot the results
k.plot()
| true |
b961ddf6023da7670c1aa19d5f205988cb5ff922 | Python | surzioarmani/python_for_codingTest | /2020_Coding_Test/programmers_10_1.py | UTF-8 | 345 | 3.125 | 3 | [] | no_license | def solution(n):
def solution(n):
    """Return n with its base-3 digits reversed, read back as a decimal int.

    (programmers.co.kr "ternary reversal": e.g. 45 -> base-3 "1200" ->
    reversed "0021" -> 7.)
    """
    # Bug fix: the original loop condition `while first > 3` left a literal
    # digit 3 in the representation (e.g. solution(9) returned 3 instead
    # of 1); collect proper base-3 digits until n is exhausted instead.
    digits = []
    while n > 0:
        digits.append(n % 3)
        n //= 3
    # `digits` holds the least-significant digit first, which is exactly the
    # reversed number read most-significant first - fold it back to an int.
    answer = 0
    for d in digits:
        answer = answer * 3 + d
    return answer
| true |
c12a3c61535bf9a0ca438801662ceb2ee49df227 | Python | kokokong/ssu-tensorflow-lecture | /ML-lecture/2일차/Matplot/py file/Matplot02.py | UTF-8 | 510 | 2.78125 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
filename = "도깨비.jpg"
image = mpimg.imread(filename)
print("Original size:" ,image.shape)
x = tf.Variable(image,name = 'x')
init = tf.global_variables_initializer()
with tf.Session() as sess:
x = tf.transpose(x,perm=[1,0,2])
sess.run(init)
result = sess.run(x)
print("changed size: ", result.shape)
plt.imshow(result)
plt.xticks([]),plt.yticks([])
plt.show()
# In[ ]:
| true |
268c895cec7413aaf5a722056c13ef4ac6fda8a6 | Python | adahya/APIServer | /API/modules.py | UTF-8 | 1,006 | 2.5625 | 3 | [] | no_license | from flask import jsonify
from configuration import Username_Policies
import re
class User(object):
    """An API user with a username and an optional session."""

    # Class-level defaults; __init__ overwrites username per instance.
    username = None
    password = None
    session_id = None

    def __init__(self, username):
        self.username = username

    @staticmethod
    def validate_username(username):
        """Return None if `username` passes policy, else a jsonify'd reason."""
        # Length must be 5..15 characters.
        if len(username) <= 4 or len(username) > 15:
            return jsonify(Username=username, Reason=Username_Policies[0])
        # Must not start with a digit.
        if re.search('^[0-9].*', username):
            return jsonify(Username=username, Reason=Username_Policies[3])
        # Must not contain special characters.
        if re.search('[!@#$%^&*\(\)\[\]\{\}\"\,]', username):
            return jsonify(Username=username, Reason=[Username_Policies[1], Username_Policies[2]])
        return None

    # Bug fix: this was declared @staticmethod while taking and mutating
    # `self`; it is an instance method. Existing call styles keep working:
    # both user.generate_session_id() and User.generate_session_id(user).
    def generate_session_id(self):
        self.session_id = "DUMMY_SESSION_ID"

    def get_session_id(self):
        return self.session_id
| true |
230703c680e44ab8d8fe6c9ace647b87f075cdf3 | Python | AlekosIosifidis/detaviz | /Source/visualisation/visualisation_utils.py | UTF-8 | 6,664 | 2.625 | 3 | [] | no_license | import os
import json
import numpy as np
import pandas as pd
from pathlib import Path
def check_flag_value(file, flag):
    """
    Look up the value recorded for `flag` in a flags file.

    Each line is expected to look like "<flag> <value>"; the first line
    containing `flag` wins. 'true'/'false' map to 1/0, anything else is
    parsed as an int. Returns None when no line mentions the flag.
    :param file: path of the flags file
    :param flag: flag name to look for
    :return: int value, or None if absent
    """
    with open(file) as handle:
        datafile = handle.readlines()
    for line in datafile:
        if flag not in line:
            continue
        raw = line.split(sep=' ')[1].strip()
        if raw == 'true':
            return 1
        if raw == 'false':
            return 0
        return int(raw)
    return None
def get_file_list(dirName):
    """
    Recursively collect the paths of all files below a directory.
    :param dirName: root directory to scan
    :return: list of file paths (directories themselves are not listed)
    """
    collected = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            # Descend into sub-directories and merge their file lists.
            collected.extend(get_file_list(full_path))
        else:
            collected.append(full_path)
    return collected
def model_search(model_window=500, model_dimensionality=60, cycles=50000, model_checkbox=['binarize']):
    """
    Search for the best performing model for the given window size in the model Zoo
    :param model_window: int,
        model window
    :param model_dimensionality: int,
        model dimensionality
    :param cycles: int,
        the length of the simulation to read
    :param model_checkbox: list of strings,
        additional model parameters
    :return: df,
        simulation data
    """
    # NOTE(review): the mutable default list is never mutated here, so it is
    # harmless, but a None default would be the idiomatic form.
    # NOTE(review): the escaped backslashes make these paths Windows-only.
    model_path = os.path.join(Path(__file__).parents[2], 'Zoo\\Results\\runs\\')
    results_path = os.path.join(Path(__file__).parents[2], 'Results\\')
    # Get all files in the runs
    file_list = get_file_list(model_path)
    # Get the run flags
    flags_list = [f for f in file_list if 'flags' in f and 'user_' not in f]
    selected_flags = []
    # Read each flags file and select the ones with appropriate window size
    for f in flags_list:
        if 'binarize' in model_checkbox:
            binarize = 1
        else:
            binarize = 0
        if 'screwdriver_only' in model_checkbox:
            screwdriver_only = 1
        else:
            screwdriver_only = 0
        window_size_flag = check_flag_value(f, 'window')
        dimensionality_flag = check_flag_value(f, 'dimensionality')
        binarize_flag = check_flag_value(f, 'binarize')
        screwdriver_only_flag = check_flag_value(f, 'screwdriver_only')
        # A missing screwdriver_only flag (None) is accepted as a match.
        if window_size_flag == model_window and dimensionality_flag == model_dimensionality and binarize_flag == binarize and (screwdriver_only_flag == screwdriver_only or screwdriver_only_flag is None):
            selected_flags.append(f)
    if len(selected_flags) > 0:
        # Get the selected directories
        selected_dirs = [os.path.abspath(f) for f in selected_flags]
        for i, path in enumerate(selected_dirs):
            split_dir = path.split(os.sep)
            s = os.sep
            # Drop the last three path components to get the run's root dir.
            selected_dirs[i] = s.join(split_dir[:-3])
        # Read all test_metrics in those directories
        test_metrics = []
        for path in selected_dirs:
            with open(path + '\\test_metrics.json') as json_file:
                metrics = json.load(json_file)
                f1 = metrics['f1_avg']
                acc = metrics['accuracy']
                name = path.split(os.sep)
                name = name[-1:]  # keep the run folder name as a 1-element list
                metric_dict = {'f1': f1,
                               'accuracy': acc,
                               'name': name}
                test_metrics.append(metric_dict)
        # Select the file with highest average F1 score and get its directory
        max_f1 = max(test_metrics, key=lambda x: x['f1'])
        load_list = get_file_list(results_path)
        load_list = [f for f in load_list if '.csv' in f]
        # Match result files on the first 4 characters of the best run's name.
        load_list = [d for d in load_list if max_f1['name'][0][:4] in d]
        # Select the simulation run with selected number of cycles
        load_dir = [f for f in load_list if ('_cycles-' + str(cycles)) in f]
        if len(load_dir) > 0:
            # Load the results file
            data = pd.read_csv(load_dir[0])
            return data, max_f1['name'], np.round(max_f1['accuracy'], decimals=3)
        else:
            return "", 'Simulation not found', 0
    else:
        return "", 'Model not found', 0
def prepare_data(data, rolling_window=1000, window_type='hamming', threshold=0.5):
    """
    Prepare the simulation data for visualisation
    :param data: DataFrame,
        the simulation data
    :param rolling_window: int,
        size of the rolling window
    :param window_type: string,
        type of the window -> https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows
    :param threshold: float,
        the decision value for classyfing point as anomalous or normal
    :return: df,
        augmented simulation data (long format, melted on 'Cycle')
    """
    # Add description of accuracy
    data.loc[data['Predicted_labels'] == data['True_labels'], 'Prediction_result'] = 'Correct'
    data.loc[(data['Predicted_labels'] == 1) & (data['True_labels'] == 0), 'Prediction_result'] = 'False positive'
    data.loc[(data['Predicted_labels'] == 0) & (data['True_labels'] == 1), 'Prediction_result'] = 'False negative'
    # Add system response: predictions smoothed with a weighted rolling mean.
    if window_type == 'gaussian':
        # Gaussian windows need an explicit std parameter.
        data['Rolling_mean'] = data['Predicted_labels'].rolling(rolling_window, win_type=window_type).mean(std=3)
    else:
        data['Rolling_mean'] = data['Predicted_labels'].rolling(rolling_window, win_type=window_type).mean()
    # Threshold the smoothed signal into a binary response.
    data['Response'] = np.where(data['Rolling_mean'] < threshold, 0, 1)
    # Add description of system response
    data.loc[data['Response'] == data['True_labels'], 'Response_result'] = 'Correct'
    data.loc[(data['Response'] == 1) & (data['True_labels'] == 0), 'Response_result'] = 'False positive'
    data.loc[(data['Response'] == 0) & (data['True_labels'] == 1), 'Response_result'] = 'False negative'
    # Add system response accuracy
    data['Response_accuracy'] = np.where(data['Response'] == data['True_labels'], 1, 0)
    # Add Cumulative Moving Average of accuracy
    data['Predicted_CMA'] = data['Accuracy'].expanding(min_periods=1).mean()
    data['Response_CMA'] = data['Response_accuracy'].expanding(min_periods=1).mean()
    # Drop rows containing NaNs (e.g. before the rolling window fills),
    # then reshape to long format for plotting.
    data = data.dropna()
    augmented_data = pd.melt(data, id_vars=['Cycle'])
    return augmented_data
| true |
4ec19945ef8d1566d04a8f98d473646bdf526ab5 | Python | secreter/QA | /online/serverRelT.py | UTF-8 | 466 | 2.53125 | 3 | [] | no_license |
from flask import Flask
from flask import request
import json
# Load the relation lookup table once at import time. The context manager
# closes the handle promptly instead of leaking it for the process lifetime.
with open(r"./txt/dist/my/relT.txt", 'r') as f:
    dic = json.load(f)
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
    # Trivial landing page; accepts GET and POST but ignores the request body.
    return '<h1>Home</h1>'
@app.route('/relT', methods=['GET'])
def relT():
    """Return the stored list for the ?rel=<name> query as a JSON object.

    Unknown (or absent) relation names map to an empty list, so the
    endpoint always answers with valid JSON instead of raising.
    """
    print(request.args)  # debug trace of the incoming query string
    rel = request.args.get('rel')
    # Single lookup with a default replaces the check-then-index pair.
    return json.dumps({rel: dic.get(rel, [])})
if __name__ == '__main__':
    # Development server; port 5002 keeps it clear of sibling services.
    app.run(port=5002)
9134ab2808f8053a6dbe75e92175ecf324f73f75 | Python | Otetz/any-comment | /app/blueprints/comments/__init__.py | UTF-8 | 8,863 | 2.578125 | 3 | [] | no_license | """Комментарии."""
import datetime
from typing import Dict, Any, List, Optional
import dateutil.parser
import flask
from dateutil.tz import tzlocal
from flask import Blueprint, stream_with_context, Response, redirect, url_for
from app.blueprints.doc import auto
from app.comments import get_comments, get_comment, remove_comment, new_comment, update_comment, first_level_comments, \
descendants
from app.common import db_conn, resp, affected_num_to_code, pagination, DatabaseException, to_json_stream, \
AttachmentManager, date_filter
from app.types import Comment
comments = Blueprint('comments', __name__)
def comment_validate(data: Optional[Dict[str, Any]] = None) -> (Dict[str, Any], List[str]):
    """
    Validate a Comment payload.

    :param dict data: (Optional) a ready-made dict to validate; when
        falsy, the JSON body of the current request is used instead
    :return: the payload, and the list of validation errors found
    :rtype: tuple
    """
    if not data:
        data = flask.request.get_json()
    if data is None:
        return None, ["Ожидался JSON. Возможно Вы забыли установить заголовок 'Content-Type' в 'application/json'?"]
    errors = []
    string_fields = ('text',)
    integer_fields = ('userid', 'parentid')
    for name in Comment.data_fields:
        value = data.get(name)
        if value is None:
            # Missing field: report and move on to the next one.
            errors.append("Отсутствует поле '%s'" % name)
        else:
            if name in string_fields and not isinstance(value, str):
                errors.append("Поле '%s' не является строкой" % name)
            if name in integer_fields and not isinstance(value, int):
                errors.append("Поле '%s' не является числом" % name)
    return data, errors
@comments.route('/comments/', methods=['GET'])
@auto.doc(groups=['comments'])
def comments_list():
    """
    List all comments.

    Pagination is supported via :func:`app.common.pagination`.

    :return: All comments plus the total count and the page count
    """
    offset, per_page = pagination()
    total, records = get_comments(db_conn(), offset=offset, limit=per_page)
    # NOTE(review): adds one page even when total is an exact multiple of
    # per_page — confirm this off-by-one is intended.
    return resp(200, {'response': records, 'total': total, 'pages': int(total / per_page) + 1})
@comments.route('/comments/', methods=['POST'])
@auto.doc(groups=['comments'])
def post_comment():
    """
    Create a new Comment.

    A missing 'deleted' flag defaults to False; a missing 'datetime'
    defaults to the current local time, otherwise the supplied value is
    parsed.

    :return: Redirect (302) to the created Comment, or the errors found
    """
    data = flask.request.get_json()
    if 'deleted' not in data:
        data['deleted'] = False
    if 'datetime' not in data:
        data['datetime'] = datetime.datetime.now(tz=tzlocal())
    else:
        data['datetime'] = dateutil.parser.parse(data['datetime'])
    (data, errors) = comment_validate(data)
    if errors:
        return resp(400, {"errors": errors})
    try:
        record = new_comment(db_conn(), data)
    except DatabaseException as e:
        return resp(400, {"errors": str(e)})
    # record[0] is presumably the id of the freshly created comment —
    # verify against app.comments.new_comment.
    return redirect(url_for('comments.comment', comment_id=record[0]), code=302)
@comments.route('/comments/<int:comment_id>', methods=['GET'])
@auto.doc(groups=['comments'])
def comment(comment_id: int):
    """
    Get the information about one Comment.

    :param int comment_id: Comment identifier
    :return: The requested Comment record, or a 404 error message
    """
    record = get_comment(db_conn(), comment_id)
    if record is None:
        errors = [{'error': 'Комментарий не найден', 'comment_id': comment_id}]
        return resp(404, {'errors': errors})
    return resp(200, {'response': record})
@comments.route('/comments/<int:comment_id>', methods=['PUT'])
@auto.doc(groups=['comments'])
def put_comment(comment_id: int):
    """
    Update a Comment; fields absent from the body keep their stored values.

    :param int comment_id: Comment identifier
    :return: Empty dict {} on success, otherwise the encountered errors
    """
    record = get_comment(db_conn(), comment_id)
    if record is None:
        return resp(404, {"errors": [{"error": "Комментарий не найден", "comment_id": comment_id}]})
    # Flatten the nested author structure back to the flat 'userid' field
    # that comment_validate / update_comment expect.
    record['userid'] = record['author']['userid']
    data = flask.request.get_json()
    for x in Comment.data_fields:
        if x not in data:
            data[x] = record[x]
    (data, errors) = comment_validate(data)
    if errors:
        return resp(400, {"errors": errors})
    try:
        num_updated = update_comment(db_conn(), comment_id, data)
    except DatabaseException as e:
        return resp(400, {"errors": str(e)})
    return resp(affected_num_to_code(num_updated), {})
@comments.route('/comments/<int:comment_id>', methods=['DELETE'])
@auto.doc(groups=['comments'])
def delete_comment(comment_id: int):
    """
    Delete a Comment (it is flagged as deleted, not physically removed).

    :param int comment_id: Comment identifier
    :return: Empty dict {} on success, otherwise the errors; attempting
        to delete a whole branch yields status 400.
    """
    try:
        num_deleted = remove_comment(db_conn(), comment_id)
    except DatabaseException as e:
        return resp(400, {"errors": str(e)})
    return resp(affected_num_to_code(num_deleted, 400), {})
@comments.route('/comments/<int:comment_id>/first_level', methods=['GET'])
@auto.doc(groups=['comments'])
def get_first_level_comments(comment_id: int):
    """
    List first-level replies to a comment, ordered by creation date.

    Pagination is supported via :func:`app.common.pagination`.

    :param int comment_id: Identifier of the parent comment
    :return: First-level replies with the total count and page count
    """
    record = get_comment(db_conn(), comment_id)
    if record is None:
        errors = [{'error': 'Родительский комментарий не найден', 'comment_id': comment_id}]
        return resp(404, {'errors': errors})
    offset, per_page = pagination()
    total, records = first_level_comments(db_conn(), comment_id, offset=offset, limit=per_page)
    return resp(200, {'response': records, 'total': total, 'pages': int(total / per_page) + 1})
@comments.route('/comments/<int:comment_id>/descendants', methods=['GET'], defaults={'fmt': None})
@comments.route('/comments/<int:comment_id>/descendants.<string:fmt>', methods=['GET'])
@auto.doc(groups=['comments'])
def get_descendants(comment_id: int, fmt: str):
    """
    Stream every descendant of a comment.

    Filtering by creation date is supported via :func:`app.common.date_filter`.

    :param comment_id: Identifier of the parent comment
    :param fmt: Output format given as a file "extension". When omitted, a
        JSON stream is sent in the response body. Possible values: *json*,
        *csv*, *xml*
    :return: All descendants as a JSON stream, or as a downloadable file
        in the requested format
    """
    after, before, errors = date_filter()
    if errors:
        return resp(404, {'errors': errors})
    if not fmt:
        return Response(stream_with_context(to_json_stream(descendants(db_conn(), comment_id, after, before))),
                        mimetype='application/json; charset="utf-8"')
    try:
        formatter = AttachmentManager(fmt.lower())
    # Bug fix: the original caught ``NotImplemented`` — a singleton, not an
    # exception class — which itself raises TypeError when an error reaches
    # this handler. ``NotImplementedError`` is the standard exception for an
    # unsupported format; confirm against AttachmentManager's implementation.
    except NotImplementedError:
        return resp(400, {'error': 'Указан не поддерживаемый формат файла', 'fmt': fmt})
    return Response(stream_with_context(formatter.iterate(descendants(db_conn(), comment_id, after, before))),
                    mimetype=formatter.content_type,
                    headers={"Content-Disposition": "attachment; filename=comment%d_descendants.%s" % (
                        comment_id, fmt.lower())})
| true |
ab8df62c765a8a2bf67e88b2d4b1ae14960262e4 | Python | xadupre/xadupre.github.io | /draft/mlprodict/_downloads/b352f437bf7c07763e099b765029f9c0/numpy_api_onnx_ccl.py | UTF-8 | 9,135 | 3.421875 | 3 | [
"Python-2.0",
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# # Introduction to a numpy API for ONNX: CustomClassifier
#
# This notebook shows how to write python classifier using similar functions as numpy offers and get a class which can be inserted into a pipeline and still be converted into ONNX.
# In[1]:
from jyquickhelper import add_notebook_menu
# Notebook artifact: renders a table of contents when run in Jupyter.
add_notebook_menu()
# In[2]:
# IPython magic (%load_ext mlprodict); only works inside IPython/Jupyter.
get_ipython().run_line_magic('load_ext', 'mlprodict')
# ## A custom binary classifier
#
# Let's imagine a classifier not that simple about simple but not that complex about predictions. It does the following:
# * compute the barycenters of both classes,
# * determine an hyperplan containing the two barycenters of the clusters,
# * train a logistic regression on both sides.
#
# Some data first...
# In[3]:
from sklearn.datasets import make_classification
from pandas import DataFrame
# Synthetic 2-class, 2-feature dataset with two clusters per class.
X, y = make_classification(200, n_classes=2, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=2, hypercube=False)
df = DataFrame(X)
df.columns = ['X1', 'X2']
df['y'] = y
# Scatter both classes on a single axis for a first look at the data.
ax = df[df.y == 0].plot.scatter(x="X1", y="X2", color="blue", label="y=0")
df[df.y == 1].plot.scatter(x="X1", y="X2", color="red", label="y=1", ax=ax);
# Split into train and test as usual.
# In[4]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# The model...
# In[5]:
import numpy
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.linear_model import LogisticRegression
class TwoLogisticRegression(ClassifierMixin, BaseEstimator):
    """Binary classifier that splits the plane along a hyperplane
    containing both class barycenters and fits one logistic regression
    on each side."""
    def __init__(self):
        ClassifierMixin.__init__(self)
        BaseEstimator.__init__(self)
    def fit(self, X, y, sample_weights=None):
        """Compute the barycenters, the splitting hyperplane and the two
        side-specific logistic regressions; returns self."""
        if sample_weights is not None:
            raise NotImplementedError("weighted sample not implemented in this example.")
        # Barycenters: mean point of each class.
        self.weights_ = numpy.array([(y==0).sum(), (y==1).sum()])
        p1 = X[y==0].sum(axis=0) / self.weights_[0]
        p2 = X[y==1].sum(axis=0) / self.weights_[1]
        self.centers_ = numpy.vstack([p1, p2])
        # A vector orthogonal to (p2 - p1): a random vector with its
        # component along v removed (one Gram-Schmidt step), normalized.
        v = p2 - p1
        v /= numpy.linalg.norm(v)
        x = numpy.random.randn(X.shape[1])
        x -= x.dot(v) * v
        x /= numpy.linalg.norm(x)
        self.hyperplan_ = x.reshape((-1, 1))
        # sign: 0/1 side of the hyperplane through p1 for every sample.
        sign = ((X - p1) @ self.hyperplan_ >= 0).astype(numpy.int64).ravel()
        # Trains models, one per side of the hyperplane.
        self.lr0_ = LogisticRegression().fit(X[sign == 0], y[sign == 0])
        self.lr1_ = LogisticRegression().fit(X[sign == 1], y[sign == 1])
        return self
    def predict_proba(self, X):
        """Class probabilities from the model of the side each sample
        falls on: sign selects lr1_, (1 - sign) selects lr0_."""
        sign = self.predict_side(X).reshape((-1, 1))
        prob0 = self.lr0_.predict_proba(X)
        prob1 = self.lr1_.predict_proba(X)
        prob = prob1 * sign - prob0 * (sign - 1)
        return prob
    def predict(self, X):
        """Predicted class = argmax over the per-side probabilities."""
        prob = self.predict_proba(X)
        return prob.argmax(axis=1)
    def predict_side(self, X):
        """0/1 side of the splitting hyperplane for every sample of X."""
        return ((X - self.centers_[0]) @ self.hyperplan_ >= 0).astype(numpy.int64).ravel()
# Fit the custom classifier; the bare expressions below are notebook cell
# outputs (they only display values when run interactively).
model = TwoLogisticRegression()
model.fit(X_train, y_train)
model.predict(X_test)
# Let's compare the model to a single logistic regression. It should be better: the same logistic regression applied on both sides is equivalent to a single logistic regression, and each half logistic regression is better on its own side.
# In[6]:
from sklearn.metrics import accuracy_score
lr = LogisticRegression().fit(X_train, y_train)
accuracy_score(y_test, lr.predict(X_test)), accuracy_score(y_test, model.predict(X_test))
# However, this is true in average but not necessarily true for one particular dataset. But that's not the point of this notebook.
# In[7]:
model.centers_
# In[8]:
model.hyperplan_
# In[9]:
model.lr0_.coef_, model.lr1_.coef_
# Let's draw the model predictions. Colored zones indicate the predicted class, green line indicates the hyperplan splitting the features into two. A different logistic regression is applied on each side.
# In[10]:
import matplotlib.pyplot as plt
def draw_line(ax, v, p0, rect, N=50, label=None, color="black"):
    """Plot a dotted line through ``p0`` along direction ``v``, clipped
    to ``rect`` = (x_min, x_max, y_min, y_max); ``N`` sets dot density."""
    x_min, x_max, y_min, y_max = rect
    step = v / numpy.linalg.norm(v) * (x_max - x_min)
    # Parameter shift so the sampled range starts near x_min.
    shift = (x_min - p0[0]) / step[0]
    samples = numpy.vstack([p0 + step * ((i * 2. / N - 2) + shift)
                            for i in range(0, N * 4 + 1)])
    inside = ((samples[:, 0] >= x_min) & (samples[:, 0] <= x_max) &
              (samples[:, 1] >= y_min) & (samples[:, 1] <= y_max))
    kept = samples[inside]
    ax.plot(kept[:, 0], kept[:, 1], '.', label=label, color=color)
def zones(ax, model, X):
    """Shade the plane with the class the model predicts at each point
    of a fine grid covering the bounding box of X."""
    step = .02  # step size in the mesh
    x_lo, x_hi = X[:, 0].min(), X[:, 0].max()
    y_lo, y_hi = X[:, 1].min(), X[:, 1].max()
    grid_x, grid_y = numpy.meshgrid(numpy.arange(x_lo, x_hi, step),
                                    numpy.arange(y_lo, y_hi, step))
    flat = numpy.c_[grid_x.ravel(), grid_y.ravel()]
    labels = model.predict(flat).reshape(grid_x.shape)
    return ax.pcolormesh(grid_x, grid_y, labels, cmap=plt.cm.Paired)
# Plot the predicted-class zones, the raw points, and the dotted line
# through both barycenters (labelled as the hyperplane).
fig, ax = plt.subplots(1, 1)
zones(ax, model, X)
df[df.y == 0].plot.scatter(x="X1", y="X2", color="blue", label="y=0", ax=ax)
df[df.y == 1].plot.scatter(x="X1", y="X2", color="red", label="y=1", ax=ax);
rect = (df.X1.min(), df.X1.max(), df.X2.min(), df.X2.max())
draw_line(ax, model.centers_[1] - model.centers_[0], model.centers_[0],
          rect, N=100, label="hyperplan", color="green")
ax.legend();
# ## Conversion to ONNX = second implementation
#
# The conversion fails as expected because there is no registered converter for this new model.
# In[11]:
from skl2onnx import to_onnx
one_row = X_train[:1].astype(numpy.float32)
# Expected to fail: no skl2onnx converter is registered for the custom
# model; the exception class and message are printed for inspection.
try:
    to_onnx(model, one_row)
except Exception as e:
    print(e.__class__.__name__)
    print("---")
    print(e)
# Writing a converter means implementing the prediction methods with ONNX operators. That's very similar to learning a new mathematical language even if this language is very close to *numpy*. Instead of having a second implementation of the predictions, why not having a single one based on ONNX? That way the conversion to ONNX would be obvious. Well do you know ONNX operators? Not really... Why not using then numpy functions implemented with ONNX operators? Ok! But how?
# ## A single implementation with ONNX operators
#
# A classifier needs two pethods, `predict` and `predict_proba` and one graph is going to produce both of them. The user need to implement the function producing this graph, a decorator adds the two methods based on this graph.
# In[12]:
from mlprodict.npy import onnxsklearn_class
from mlprodict.npy.onnx_variable import MultiOnnxVar
import mlprodict.npy.numpy_onnx_impl as nxnp
import mlprodict.npy.numpy_onnx_impl_skl as nxnpskl
@onnxsklearn_class('onnx_graph')
class TwoLogisticRegressionOnnx(ClassifierMixin, BaseEstimator):
    """Same estimator as TwoLogisticRegression, but the prediction logic
    is written once as an ONNX graph: the decorator derives predict and
    predict_proba from :meth:`onnx_graph`."""
    def __init__(self):
        ClassifierMixin.__init__(self)
        BaseEstimator.__init__(self)
    def fit(self, X, y, sample_weights=None):
        """Identical training step to TwoLogisticRegression.fit."""
        if sample_weights is not None:
            raise NotImplementedError("weighted sample not implemented in this example.")
        # Barycenters
        self.weights_ = numpy.array([(y==0).sum(), (y==1).sum()])
        p1 = X[y==0].sum(axis=0) / self.weights_[0]
        p2 = X[y==1].sum(axis=0) / self.weights_[1]
        self.centers_ = numpy.vstack([p1, p2])
        # A vector orthogonal
        v = p2 - p1
        v /= numpy.linalg.norm(v)
        x = numpy.random.randn(X.shape[1])
        x -= x.dot(v) * v
        x /= numpy.linalg.norm(x)
        self.hyperplan_ = x.reshape((-1, 1))
        # sign
        sign = ((X - p1) @ self.hyperplan_ >= 0).astype(numpy.int64).ravel()
        # Trains models
        self.lr0_ = LogisticRegression().fit(X[sign == 0], y[sign == 0])
        self.lr1_ = LogisticRegression().fit(X[sign == 1], y[sign == 1])
        return self
    def onnx_graph(self, X):
        """Build the ONNX prediction graph: pick the side of the
        hyperplane, blend both logistic regressions' probabilities with
        it, and return (label, probabilities)."""
        h = self.hyperplan_.astype(X.dtype)
        c = self.centers_.astype(X.dtype)
        sign = ((X - c[0]) @ h) >= numpy.array([0], dtype=X.dtype)
        cast = sign.astype(X.dtype).reshape((-1, 1))
        prob0 = nxnpskl.logistic_regression(  # pylint: disable=E1136
            X, model=self.lr0_)[1]
        prob1 = nxnpskl.logistic_regression(  # pylint: disable=E1136
            X, model=self.lr1_)[1]
        prob = prob1 * cast - prob0 * (cast - numpy.array([1], dtype=X.dtype))
        label = nxnp.argmax(prob, axis=1)
        return MultiOnnxVar(label, prob)
# In[13]:
# Same training code as before; predictions now run through the ONNX graph.
model = TwoLogisticRegressionOnnx()
model.fit(X_train, y_train)
# In[14]:
model.predict(X_test.astype(numpy.float32))
# In[15]:
model.predict_proba(X_test.astype(numpy.float32))[:5]
# It works with double too.
# In[16]:
model.predict_proba(X_test.astype(numpy.float64))[:5]
# And now the conversion to ONNX.
# In[17]:
onx = to_onnx(model, X_test[:1].astype(numpy.float32),
              options={id(model): {'zipmap': False}})
# Let's check the output by running the converted graph directly.
# In[18]:
from mlprodict.onnxrt import OnnxInference
oinf = OnnxInference(onx)
oinf.run({'X': X_test[:5].astype(numpy.float32)})
# In[19]: | true |
1458b91a857795aa2cffd7c356ecf8634746b319 | Python | bazhenov4job/client_server | /to_send/client.py | UTF-8 | 3,601 | 2.921875 | 3 | [] | no_license | """
Реализовать простое клиент-серверное взаимодействие по протоколу JIM (JSON instant messaging):
клиент отправляет запрос серверу;
сервер отвечает соответствующим кодом результата. Клиент и сервер должны быть реализованы в виде отдельных скриптов,
содержащих соответствующие функции. Функции клиента: сформировать presence-сообщение; отправить сообщение серверу;
получить ответ сервера; разобрать сообщение сервера;
параметры командной строки скрипта client.py <addr> [<port>]: addr — ip-адрес сервера; port — tcp-порт на сервере,
по умолчанию 7777. Функции сервера: принимает сообщение клиента; формирует ответ клиенту; отправляет ответ клиенту;
имеет параметры командной строки: -p <port> — TCP-порт для работы (по умолчанию использует 7777)
; -a <addr> — IP-адрес для прослушивания (по умолчанию слушает все доступные адреса).
"""
from socket import *
import argparse
from common import utils
from common import variables
import sys
import os
sys.path.insert(0, os.getcwd())
import logging
import log.client_log_config
client_logger = logging.getLogger('client')
def main_client():
    """JIM chat client entry point.

    Command-line flags: -a <server address>, -p <tcp port>, -m <mode>;
    mode 'r' (default) reads incoming messages, anything else writes.
    """
    USER = 'guest'
    PASSWORD = 'password'
    BYTES_TO_READ = variables.BYTES_TO_READ
    parser = argparse.ArgumentParser()
    parser.add_argument('-a')
    # Bug fix: argparse yields strings, but socket.connect() requires an
    # integer port number, so parse -p as int right away.
    parser.add_argument('-p', type=int)
    parser.add_argument('-m')
    args = vars(parser.parse_args())
    if args['a'] is not None:
        HOST = args['a']
        client_logger.info("Получен агрумент адреса хоста")
    else:
        HOST = variables.HOST
        client_logger.info("Адреса хоста выбран по умолчанию")
    if args['p'] is not None:
        PORT = args['p']
        client_logger.info("Получен агрумент порта хоста")
    else:
        PORT = variables.PORT
        client_logger.info("Порт хоста выбран по умолчанию")
    if args['m'] is None or args['m'] == 'r':
        MODE = 'r'
    else:
        MODE = 'w'
    # The context manager guarantees the socket is closed on exit
    # (the original never closed it).
    with socket(AF_INET, SOCK_STREAM) as sock:
        sock.connect((HOST, PORT))
        while True:
            # message = utils.create_presence(USER, PASSWORD)
            if MODE == 'w':
                text = input("Введите сообщение для отправки:\n")
                if text == 'quit':
                    break
                message = utils.create_message('w_client', text)
                utils.send_message(sock, message)
            elif MODE == 'r':
                response = utils.get_response(sock, BYTES_TO_READ)
                handled_response = utils.handle_response(response)
                try:
                    # The .items() probe raises AttributeError when the
                    # handler returned something that is not a mapping.
                    handled_response.items()
                    for key, value in handled_response.items():
                        client_logger.info(f"Получено сообщение{key}, {value}")
                    print(handled_response['message'])
                except AttributeError:
                    client_logger.info("Невозможно разобрать сообщение")
if __name__ == '__main__':
    # Start the client only when executed as a script.
    main_client()
| true |
a824e7b7c280d80aca7885fe8c1074b80da62886 | Python | umiundlake/links-api | /app.py | UTF-8 | 3,726 | 2.78125 | 3 | [] | no_license | from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from marshmallow import Schema, fields
import os
# This method to get an absolute path of a file works with all the operative systems.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
#DB_URI = "sqlite:///" + os.path.join(BASE_DIR, "database.db")
# MySQL connection string; credentials and host are intentionally blank
# here and must be filled in before deployment.
DB_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
    username="",
    password="",
    hostname="",
    databasename="")
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = DB_URI
# Disable the modification-tracking signal machinery (saves memory).
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
class Framework(db.Model):
    # ORM model for the "frameworks" table: auto-increment primary key
    # plus a short display name.
    __tablename__ = "frameworks"
    # The id will be unique, cannot be null, and auto-increase.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
class Link(db.Model):
    # ORM model for the "links" table: auto-increment primary key and
    # the stored URL (up to 100 characters).
    __tablename__ = "links"
    id = db.Column(db.Integer, primary_key=True)
    url = db.Column(db.String(100))
class FrameworkSchema(Schema):
    # marshmallow serialization schema mirroring Framework's columns.
    id = fields.Int()
    name = fields.Str()
class LinkSchema(Schema):
    # marshmallow serialization schema mirroring Link's columns.
    # Bug fix: the field must be named "url" to match Link.url; the
    # original "link" field matched no model attribute, so serialized
    # links carried no URL at all.
    id = fields.Int()
    url = fields.Str()
@app.route("/")
def index():
    # Minimal landing / health-check endpoint.
    return "Hello World!"
# GET METHOD
@app.route("/api/frameworks/", methods=["GET"])
def get_frameworks():
    # List every framework serialized through FrameworkSchema.
    frameworks = Framework.query.all()
    frameworks_schema = FrameworkSchema(many=True)
    # NOTE(review): the (data, errors) tuple unpack is marshmallow 2.x
    # behaviour; marshmallow 3 returns the data alone — confirm which
    # version is pinned.
    result, errors = frameworks_schema.dump(frameworks)
    return jsonify(result)
@app.route("/api/frameworks/<string:name>")
def get_framework_by_name(name):
    """Fetch a single framework by name; 404 instead of a crash when absent."""
    framework = Framework.query.filter_by(name=name).first()
    if framework is None:
        # .first() returns None for unknown names; previously this fell
        # through to an AttributeError and a 500 response.
        return jsonify({"message": "not found"}), 404
    framework_dict = dict(id=framework.id, name=framework.name)
    return jsonify(framework_dict)
# POST METHOD
@app.route("/api/frameworks/", methods=["POST"])
def add_framework():
    """Create a framework from the JSON body and echo the stored row."""
    created = Framework(name=request.json["name"])
    db.session.add(created)
    db.session.commit()
    return jsonify({"id": created.id, "name": created.name})
# PUT METHOD
@app.route("/api/frameworks/<int:id>", methods=["PUT"])
def edit_framework(id):
    """Rename a framework; 404 (instead of a crash) when the id is unknown."""
    framework = Framework.query.get(id)
    if framework is None:
        # Query.get() returns None for missing ids; previously this fell
        # through to an AttributeError and a 500 response.
        return jsonify({"message": "not found"}), 404
    framework.name = request.json["name"]
    db.session.commit()
    framework_dict = dict(id=framework.id, name=framework.name)
    return jsonify(framework_dict)
# DELETE METHOD
@app.route("/api/frameworks/<int:id>", methods=["DELETE"])
def delete_framework(id):
    """Delete a framework by id; 404 (instead of a crash) when unknown."""
    framework = Framework.query.get(id)
    if framework is None:
        # Deleting None would raise inside SQLAlchemy and surface as 500.
        return jsonify({"message": "not found"}), 404
    db.session.delete(framework)
    db.session.commit()
    return jsonify({"message": "ok"})
#LINKS
@app.route("/api/links/", methods=["GET"])
def get_links():
    # List every link serialized through LinkSchema.
    links = Link.query.all()
    links_schema = LinkSchema(many=True)
    # NOTE(review): the (data, errors) tuple unpack is marshmallow 2.x
    # behaviour; marshmallow 3 returns the data alone — confirm which
    # version is pinned.
    result, errors = links_schema.dump(links)
    return jsonify(result)
@app.route("/api/links/<string:url>")
def get_link_by_url(url):
    """Fetch a single link by its URL; 404 instead of a crash when absent."""
    link = Link.query.filter_by(url=url).first()
    if link is None:
        # .first() returns None for unknown URLs; previously this raised
        # AttributeError and produced a 500 response.
        return jsonify({"message": "not found"}), 404
    link_dict = dict(id=link.id, url=link.url)
    return jsonify(link_dict)
# POST METHOD
@app.route("/api/links/", methods=["POST"])
def add_link():
    """Create a link from the JSON body and echo the stored row."""
    created = Link(url=request.json["url"])
    db.session.add(created)
    db.session.commit()
    return jsonify({"id": created.id, "url": created.url})
# PUT METHOD
@app.route("/api/links/<int:id>", methods=["PUT"])
def edit_link(id):
    """Update a link's URL; 404 (instead of a crash) when the id is unknown."""
    link = Link.query.get(id)
    if link is None:
        # Query.get() returns None for missing ids; previously this fell
        # through to an AttributeError and a 500 response.
        return jsonify({"message": "not found"}), 404
    link.url = request.json["url"]
    db.session.commit()
    link_dict = dict(id=link.id, url=link.url)
    return jsonify(link_dict)
# DELETE METHOD
@app.route("/api/links/<int:id>", methods=["DELETE"])
def delete_link(id):
    """Delete a link by id; 404 (instead of a crash) when unknown."""
    link = Link.query.get(id)
    if link is None:
        # Deleting None would raise inside SQLAlchemy and surface as 500.
        return jsonify({"message": "not found"}), 404
    db.session.delete(link)
    db.session.commit()
    return jsonify({"message": "ok"})
| true |
fe14849dca7379077c4835c53f6fdcd6f35e5ec9 | Python | YoungcsGitHub/PythonHouse | /pyqt5/chapter3/menu_toolbar/Toolbar.py | UTF-8 | 1,911 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# Name: Toolbar
# Description:
# Author: Dell
# Date: 2019/10/6
#-------------------------------------------------------------------------------
'''
创建和使用工具栏
工具栏默认按钮:只显示图标,将文本作为悬停提示
工具栏按钮的3种显示状态
1. 只显示图标
2. 只显示文本
3. 同时显示文本和图标
'''
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class ToolbarDemo(QMainWindow):
    """Main window demonstrating two toolbars whose buttons show their
    text beside the icon (tb1) and under the icon (tb2)."""
    def __init__(self):
        super(ToolbarDemo, self).__init__()
        self.initUI()
    def initUI(self):
        """Build the window: two toolbars with new/open/save actions.

        NOTE(review): the locals 'new', 'open' and 'save' shadow Python
        builtins inside this method.
        """
        self.setWindowTitle('工具栏实例')
        self.resize(300, 200)
        # First toolbar: English captions.
        tb1 = self.addToolBar('File')
        new = QAction(QIcon('Knob Add.ico'),"new",self)
        tb1.addAction(new)
        open = QAction(QIcon('Knob Play Green.ico'),"open",self)
        tb1.addAction(open)
        save = QAction(QIcon('Knob Blue.ico'), "save", self)
        tb1.addAction(save)
        # Second toolbar: Chinese captions.
        tb2 = self.addToolBar('File1')
        new = QAction(QIcon('Knob Add.ico'), "新建", self)
        tb2.addAction(new)
        open = QAction(QIcon('Knob Play Green.ico'), "打开", self)
        tb2.addAction(open)
        save = QAction(QIcon('Knob Blue.ico'), "保存", self)
        tb2.addAction(save)
        tb1.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        # Button display style: text beside the icon (tb1) vs under it (tb2).
        tb2.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        # Route every toolbar click to the shared handler.
        tb1.actionTriggered.connect(self.toolbtnpressed)
        tb2.actionTriggered.connect(self.toolbtnpressed)
    def toolbtnpressed(self,a):
        """Print which toolbar button was pressed."""
        print("按下的按钮是:",a.text())
if __name__ == '__main__':
    # Standard Qt bootstrap: one QApplication, show the window, hand
    # control to the event loop and exit with its status code.
    app = QApplication(sys.argv)
    mainWin = ToolbarDemo()
    mainWin.show()
    sys.exit(app.exec_())
c5bd0e0534ad73caff3a607af087af2cbc6e7a08 | Python | vlaguillo/M03 | /Ejercicios_python/Ejercico_Hoja_calculo.py/hoja_calculo.py | UTF-8 | 614 | 3.359375 | 3 | [] | no_license | def my_range(inici, fi, increment):
    # Inclusive range: keep yielding while the running value is <= fi.
    while inici <= fi:
        # Yield the current element of the range (list)
        yield inici
        inici = inici + increment
# Python 2 script: prints a 5x4 spreadsheet-like grid. Row 1 holds the
# column headers A/B/C, column 1 the row numbers 1-4, '*' marks the two
# filled cells and '-' every empty cell.
for fil in my_range(1,5,1):
    for col in my_range(1,4,1):
        if (fil==1 and col==2):
            print "A",
        elif (fil==1 and col==3):
            print "B",
        elif (fil==1 and col==4):
            print "C",
        elif (fil==2 and col==1):
            print "1",
        elif (fil==3 and col==1):
            print "2",
        elif (fil==4 and col==1):
            print "3",
        elif (fil==5 and col==1):
            print "4",
        elif (fil==3 and col==2):
            print "*",
        elif (fil==2 and col==3):
            print "*",
        else:
            print "-",
    print ""
| true |
785b54164cc535c4d001eaeb6ec0921cb682c734 | Python | MahdiZizou/Hamun-Lake-NLP-project | /NLPproj_task2.py | UTF-8 | 1,410 | 2.875 | 3 | [] | no_license | #region Description: task2
# NOTE(review): this script depends on names defined earlier in an
# interactive session (``tweets_data``, ``query``, ``keyword``); run it
# line by line after the collection step, as the first print explains.
print('you should execute line by line because run does not work on seperate py files')
print('input is: tweets_data')
print('Your querry was:', query)
print('The length of tweet data set is:', len(tweets_data))
####################################################################################################
import tweepy
import pandas as pd
import googletrans
from googletrans import Translator
translator = Translator()
# here we clean and translate tweets:
translated_tweet = []
for tweet in tweets_data:
    # Strip Latin letters, digits and punctuation before translating.
    clean_tweet = "".join([char for char in tweet if char not in 'qwertyuiopasdfghjkl\:zxcvbnm/ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890@#$%^&*_=-_,:.;?!']
                       ).strip()
    clean_tweet_trans = translator.translate(clean_tweet, src='fa', dest='en').text
    # Drop any Persian characters left over after the translation.
    clean_tweet_trans_clean = "".join([char for char in clean_tweet_trans if char not in ',:;،ضصثقفغعهخحجچپگکمنتالبیسشظطزرذدئوې']
                       ).strip()
    # print(clean_tweet_trans_clean)
    translated_tweet.append(clean_tweet_trans_clean)
# here we change into pandas dataframe to save as csv:
df = pd.DataFrame(translated_tweet)
df.head()
query_en=translator.translate(keyword, dest='en').text
name= query_en + '_tranCln_' + str(len(tweets_data)) + '.csv'
df.to_csv(name, sep='\t', header=False, encoding='utf-8-sig')
print('DF is saved :)')
#endregion
08201c81fc49686a34012d0d0aa4fd5b16d2a967 | Python | niminjie/iptv | /sim.py | UTF-8 | 8,021 | 2.5625 | 3 | [] | no_license | import cPickle as pickle
import codecs
from math import sqrt
from dfs import find_connection
# Process-lifetime debug log; written to via the (Python 2)
# ``print >> log`` statements below whenever DEBUG is switched on.
log = open('sim.log', 'w')
DEBUG = False
def across(interval, time):
    """Return True if hour ``time`` falls in the half-open ``interval``.

    The closing hour 23 is treated as belonging to an interval that ends
    at 23, so late-night plays are not dropped.
    """
    # Return the boolean expression directly instead of if/return True/False.
    return interval[0] <= time < interval[1] or (time == 23 and interval[1] == 23)
def convert_to_hour(time):
    """Hour-of-day (int) from a 'YYYY-MM-DD HH:MM:SS' timestamp string."""
    clock = time.split(' ')[1]
    return int(clock.split(':')[0])
def file_to_dict(train):
    """Parse the play-log CSV ``train`` into {user_id: [play dicts]}.

    Each line must hold exactly seven comma-separated fields:
    id,content_id,class_name,start,end,timespan,user_id.
    The file handle is now closed deterministically (it used to leak),
    and the local ``rec_id`` no longer shadows the ``id`` builtin.
    """
    user_dict = {}
    with open(train, 'r') as source:
        for line in source:
            rec_id, content_id, class_name, start, end, timespan, user_id = line.split(',')
            user_id = user_id.strip()
            user_dict.setdefault(user_id, []).append(
                {'id': rec_id, 'content_id': content_id, 'start': start,
                 'end': end, 'timespan': timespan, 'class': class_name})
    return user_dict
def tag(user_dict, user_time):
    """Assign each user's time slots a tag name ('tag1', 'tag2', ...).

    ``user_time`` maps user_id -> list of (start_hour, end_hour) slots;
    every play whose start hour falls inside a slot is rewritten IN PLACE
    inside ``user_dict`` with that slot's tag added.
    Returns {user_id: {slot_interval: tag_name}}.
    (Python 2 module: the DEBUG traces use ``print >> log``.)
    """
    tag_dict = {}
    for user_id, play_list in user_dict.items():
        # if user_id != '1':
        #     continue
        if DEBUG:
            print >> log, 'Now tagging user: ', user_id
        tag_list = {}
        tag = 1
        for idx, play in enumerate(play_list):
            id = play['id']
            content_id = play['content_id']
            start = play['start']
            end = play['end']
            timespan = play['timespan']
            class_name = play['class']
            for t in user_time[user_id]:
                # First time this slot is seen for the user: name it.
                if t not in tag_list.keys():
                    tag_list.setdefault(t, 'tag' + str(tag))
                    tag += 1
                # Play starts inside this slot: rewrite it with the tag.
                if across(t, convert_to_hour(start)):
                    if DEBUG:
                        print >> log, '*' * 100
                        print >> log, 'Across: ', t
                        print >> log, '*' * 100
                    # play_list[idx] = {'start':start, 'end':end, 'class':class_name, 'timespan':timespan, 'tag':tag_list[t]}
                    play_list[idx] = {'id':id, 'content_id':content_id, 'start':start, 'end':end, 'timespan':timespan, 'class':class_name, 'tag':tag_list[t]}
                    if DEBUG:
                        print >> log, 'Play_list', play_list[idx]
                        print >> log, 'idx', idx
                        print >> log, '*' * 100
        # print tag_list
        tag_dict[user_id] = tag_list
    return tag_dict
def rate(tag):
    """Fraction of plays per class name among the (class, span) pairs."""
    counts = {}
    for pair in tag:
        counts[pair[0]] = counts.get(pair[0], 0) + 1
    total = float(len(tag))
    return dict((name, hits / total) for name, hits in counts.items())
def rate_span(tag, tag_span):
    """Per-class timespan total scaled by ``tag_span / len(tag)``.

    ``tag`` is a list of (class_name, timespan) pairs; ``tag_span`` is
    the slot length in seconds (see ``reverse``).
    """
    # print tag_span
    rate_list = {}
    for key in tag:
        rate_list.setdefault(key[0], 0)
        rate_list[key[0]] += float(key[1])
    for key,value in rate_list.items():
        # NOTE(review): "/ 1.0 * tag_span" divides by 1.0 and then
        # MULTIPLIES by tag_span. If division by tag_span was intended it
        # needs parentheses: "/ (1.0 * tag_span)". Confirm before changing
        # — similarity() depends on the current behaviour.
        rate_list[key] = rate_list[key] / 1.0 * tag_span / len(tag)
    return rate_list
def similarity(tags, tag_list, key1, key2):
    """Cosine similarity between the span-weighted class-rate vectors of
    two tags.

    Returns 0 when either tag is missing from ``tags``.
    Raises ZeroDivisionError when a rate vector has zero norm (unchanged
    from the original behaviour).
    """
    # Explicit membership test replaces the original bare ``except`` that
    # silently swallowed *any* error while probing the two keys (and the
    # unused len_tag1/len_tag2 locals that only existed to trigger it).
    if key1 not in tags or key2 not in tags:
        return 0
    rate1 = rate_span(tags[key1], tag_list[key1][0])
    rate2 = rate_span(tags[key2], tag_list[key2][0])
    # Dot product over the class names the two vectors share.
    metrix = sum(value * rate2[name] for name, value in rate1.items() if name in rate2)
    mod1 = sqrt(sum(value * value for value in rate1.values()))
    mod2 = sqrt(sum(value * value for value in rate2.values()))
    return metrix / (mod1 * mod2)
def extract_class(user_dict):
    """Group every user's plays by tag into (class_name, timespan) pairs.

    Users with an empty play list still get an (empty) entry.
    """
    grouped = {}
    for viewer, plays in user_dict.items():
        per_tag = grouped.setdefault(viewer, {})
        for play in plays:
            per_tag.setdefault(play['tag'], []).append((play['class'], play['timespan']))
    return grouped
def reverse(tag_dict):
    """Invert {user: {interval: tag}} into {user: {tag: (seconds, interval)}}.

    e.g. {(0, 13): 'tag1'} becomes {'tag1': (13 * 3600, (0, 13))}; users
    whose interval map is empty are omitted from the result.
    """
    inverted = {}
    for viewer, interval_tags in tag_dict.items():
        for interval, tag_name in interval_tags.items():
            slot_seconds = (interval[1] - interval[0]) * 3600
            inverted.setdefault(viewer, {})[tag_name] = (slot_seconds, interval)
    return inverted
def main():
    """Entry point (Python 2): cluster each user's time-of-day tags by
    similarity and write tagged play records to test_tag_result.csv.

    NOTE(review): indentation was reconstructed from the mangled source —
    verify the placement of the per-user summary print against the original.
    """
    fo_tag = open('test_tag_result.csv', 'w')
    user_pickle = file('test_user_time_all.pkl', 'rb')
    # Expected pickle shape: {'1': [(0,7), (7,12), (12,18), (18,23)]}
    # {'1': [(0,7), (7,12), (12,18), (18,23)]}
    user_time = pickle.load(user_pickle)
    if DEBUG:
        print >> log, 'Successfully read pickle!'
    # user_dict = file_to_dict('train.csv')
    user_dict = file_to_dict('cf/Test/test_rand1.csv')
    # print user_dict['00264C2C9333'][0][2]
    tag_dict = tag(user_dict, user_time)
    # {'tag1':25200, 'tag2':14400, 'tag3':43200}
    tag_dict = reverse(tag_dict)
    # print tag_dict
    # {'tag1':[('59', '2051'), ('59', '2033'), ...]}
    user_tag = extract_class(user_dict)
    one = 0
    multi = 0
    for user_id, tags in user_tag.items():
        # if user_id != '1':
        #     continue
        tag_list = tag_dict[user_id]
        keys = sorted(tag_list.keys())
        # print keys
        # Pairwise similarity matrix (1-based indices; row/col 0 unused).
        matrix = [[0 for col in range(len(keys) + 1)] for row in range(len(keys) + 1)]
        for i in range(len(keys)):
            for j in range(len(keys)):
                if i != j:
                    matrix[i + 1][j + 1] = similarity(tags, tag_list, keys[i], keys[j])
                    # print i + 1, j + 1
                    # print keys[i], keys[j]
        # print '-' * 100
        # print 'Userid: ', user_id
        # print '*' * 100
        # print tag_dict[user_id]
        # print user_tag[user_id]
        # for m in matrix:
        #     print m
        # print '*' * 100
        # print tag_dict[user_id]
        # Group mutually-similar tags into connected components.
        result = find_connection(matrix)
        result2tag = {}
        # print '-' * 100
        # print 'Userid:', user_id
        # print result
        # for idx in result:
        #     for i in idx:
        #         print tag_dict[user_id][keys[i - 1]][1],
        #     print ''
        # print '-' * 100, '\n'
        # print user_dict[user_id]
        # print user_dict['00264C2C9333']
        for play in user_dict[user_id]:
            # NOTE(review): `id` and `sum` shadow Python builtins here.
            id = play['id']
            content_id = play['content_id']
            start = play['start']
            end = play['end']
            timespan = play['timespan']
            class_name = play['class']
            print result
            for idx in result:
                # if len(idx) > 1:
                #     print idx
                for i in idx:
                    tag_s = tag_dict[user_id][keys[i - 1]][1]
                    # print tag_s, start
                    # Write a record when the play's start hour falls inside
                    # this tag's [start, end) span.
                    if tag_dict[user_id][keys[i - 1]][1][0] <= convert_to_hour(start) < tag_dict[user_id][keys[i - 1]][1][1]:
                        # print tag_dict[user_id]
                        t = [tag_dict[user_id][keys[j - 1]][1] for j in idx]
                        sum = 0
                        for inter in t:
                            span = inter[1] - inter[0]
                            sum += span
                        # print sum * 60
                        s = str(t).lstrip('[').rstrip(']')
                        fo_tag.write('%s|%s|%s|%s|%s|%s|%s|%s|%s\n' % (id, content_id, start, end, timespan, class_name, user_id, s, str(sum * 60)))
                        fo_tag.flush()
        print '-' * 100, '\n\n'
        if len(result) == 1:
            one += 1
        else:
            multi += len(result)
    print one, multi
    fo_tag.close()
if __name__ == '__main__':
main()
| true |
2bff24e452881474533b290f156d3c76d7c4aa5f | Python | bagaki/Python | /ginko/my/view.py | UTF-8 | 2,816 | 3.3125 | 3 | [] | no_license | # coding:utf-8
'''
管理员界面
类名: View
属性:账号、密码
行为:管理员初始化界面 管理员登录 系统功能界面 管理员注销
系统功能:开户、查询、取款、存款、转账、改密、销户、退出
'''
from check import Check
import time
class View(object):
    """Administrator console view: splash, login/logout screens and menus.

    Attributes:
        admin: administrator account name.
        pwd: administrator password.
    """
    def __init__(self, admin, pwd):
        self.admin = admin
        self.pwd = pwd

    def initface(self):
        # Splash screen shown while the system starts up.
        print("--------------------------------------")
        print(" ")
        print(" loading...... ")
        print(" ")
        print("--------------------------------------")
        time.sleep(1)
        return

    # Login screen
    def login(self):
        print("--------------------------------------")
        print(" ")
        print(" Admin login..... ")
        print(" ")
        print("--------------------------------------")
        check = Check()
        # Delegate credential verification to Check.userName.
        check.userName(self.admin, self.pwd)
        print("--------------Login success-----------")
        print(" Please wait a moment... ")
        del check
        time.sleep(1)
        return

    # Logout screen; returns False when the user cancels, True on success.
    def logout(self):
        print("--------------------------------------")
        print(" ")
        print(" Admin logout.... ")
        print(" ")
        print("--------------------------------------")
        # Ask the user to confirm they really want to quit.
        check = Check()
        if not check.isSure("退出"):
            return False
        check.userName(self.admin, self.pwd)
        print("-------------Logout success-----------")
        print(" It is closing...Please wait a moment ")
        del check
        time.sleep(1)
        return True

    # System feature menus (pre-login and post-login)
    '''
    系统功能:开户、查询、取款、存储、转账、销户、挂失、解锁、改密、退出
    '''
    def sysInit(self):
        # Menu shown before login: open account / login / recover password /
        # report loss / quit.
        print("---------Welcome to My Bank-----------")
        print("* 开户(1) 登录(2) *")
        print("* 找回密码(3) 挂失(4) *")
        print("* 退出(q) *")
        print("--------------------------------------")

    def sysInterface(self):
        # Menu shown after login: query / withdraw / deposit / transfer /
        # change password / unlock / close account / quit.
        print("---------Welcome to My Bank-----------")
        print("* 查询(1) 取款(2) *")
        print("* 存款(3) 转账(4) *")
        print("* 改密(4) 解锁(6) *")
        print("* 销户(7) 退出(q) *")
        print("--------------------------------------")
673aec45ac2032a0faf6b2653e01f1a88d03318a | Python | ottohahn/data | /gender_model/gender_io_nokey.py | UTF-8 | 850 | 2.953125 | 3 | [] | no_license | # encoding: utf-8
"""
gender_io_nokey.py
"""
import requests
import json
def get_genders(names):
    """Look up gender guesses for up to 10 names via the genderize.io API.

    Accepts a single name or a list of names. Returns a list of
    (gender, probability, count) tuples; unknown names yield
    (u'None', u'0.0', 0.0).
    """
    if not isinstance(names, list):
        names = [names, ]
    # Build the same "name[0]=a&name[1]=b" query string as before.
    query = "&".join("name[%d]=%s" % (i, n) for i, n in enumerate(names))
    req = requests.get("http://api.genderize.io?" + query)
    results = json.loads(req.text)
    # A single-name query returns one object rather than a list.
    if len(names) == 1:
        results = [results, ]
    parsed = []
    for result in results:
        if result["gender"] is not None:
            parsed.append((result["gender"], result["probability"],
                           result["count"]))
        else:
            parsed.append((u'None', u'0.0', 0.0))
    return parsed
| true |
def solution(n):
    """Return the number of primes less than or equal to *n*.

    Uses a sieve of Eratosthenes instead of the original O(n^2) trial
    division over every odd candidate.
    """
    if n < 2:
        return 0
    sieve = [True] * (n + 1)
    sieve[0] = sieve[1] = False
    # Cross off multiples starting at p*p; larger p need no work.
    for p in range(2, int(n ** 0.5) + 1):
        if sieve[p]:
            for multiple in range(p * p, n + 1, p):
                sieve[multiple] = False
    # True entries count as 1, so summing the sieve counts the primes.
    return sum(sieve)
n = 5
print(solution(n)) | true |
a5eaa755f7408f30999c36693c8abf599533dc5e | Python | daqingyi770923/SDCFun | /pathPlanClass.py | UTF-8 | 6,857 | 2.953125 | 3 | [
"MIT"
] | permissive |
import math
from enum import Enum
import matplotlib.pyplot as plt
import numpy as np
class RobotType(Enum):
    """Robot footprint shape used for collision checking."""
    circle = 0
    rectangle = 1
class PPClass:
    """Dynamic-window-style local path planner.

    Samples (velocity, yaw-rate) commands, rolls each one out with a simple
    unicycle motion model, and scores the resulting trajectories by obstacle,
    goal-distance and speed costs (plotting candidates with matplotlib).
    """
    def __init__(self,
                 max_accel = 2.0,
                 min_accel = -2.0,
                 yawRange = 0.5,
                 max_speed = 2.0,
                 predictTime = 6.0,
                 dt = 0.1,
                 robotType = RobotType.rectangle,
                 robotLength = 1.2,
                 robotWidth = 0.5,
                 robotRadius = 1.0,
                 velocityRate = 0.5,
                 yawRate = math.pi / 180.0,
                 maxYawRate = 5 * math.pi / 180.0,
                 obCostWeight = 6.0,
                 distCostWeight = 1.0,
                 speedCostWeight = 2.0
                 ):
        self.max_accel = max_accel  # maximum acceleration [m/ss]
        self.min_accel = min_accel  # minimum acceleration (reversing allowed) [m/ss]
        self.yawRange = yawRange  # yaw-rate sampling range
        self.max_speed = max_speed  # maximum speed
        self.predictTime = predictTime  # prediction horizon [s]
        self.dt = dt  # integration time step [s]
        self.robotType = robotType  # robot footprint shape
        self.robotLength = robotLength  # robot length
        self.robotWidth = robotWidth  # robot width
        self.robotRadius = robotRadius  # robot radius (circle footprint)
        self.velocityRate = velocityRate  # velocity sampling resolution
        self.yawRate = yawRate  # yaw-rate sampling resolution
        self.maxYawRate = maxYawRate  # maximum yaw-rate range
        self.obCostWeight = obCostWeight  # obstacle cost weight
        self.distCostWeight = distCostWeight  # distance cost weight
        self.speedCostWeight = speedCostWeight  # speed cost weight

    # Advance the motion-model state by one time step.
    def motion(self, x, u):
        """Update state x = [x, y, yaw, v, omega] in place under command u = [v, omega]."""
        x[0] += u[0] * math.cos(x[2]) * self.dt
        x[1] += u[0] * math.sin(x[2]) * self.dt
        x[2] += u[1] * self.dt
        x[3] = u[0]
        x[4] = u[1]
        return x

    # Draw a heading arrow at (x, y).
    def plot_arrow(self, x, y, yaw, length=0.5, width=0.1): # pragma: no cover
        plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
                  head_length=width, head_width=width)
        plt.plot(x, y)

    # Roll the motion model forward under a constant command.
    def predict_trajectory(self, x, v, y):
        """Return the stacked states over predictTime for constant (v, y)."""
        x = np.array(x)
        traj = np.array(x)
        time = 0
        while time <= self.predictTime:
            x = self.motion(x, [v, y])
            traj = np.vstack((traj, x))
            time += self.dt
        return traj

    # Cost: distance from the trajectory's final point to the goal.
    def calc_to_goal_cost(self, trajectory, goal):
        """Euclidean distance between the last trajectory point and *goal*."""
        dx = goal[0] - trajectory[-1, 0]
        dy = goal[1] - trajectory[-1, 1]
        dist=np.hypot(dx, dy)
        cost=dist
        return cost

    # Obstacle cost: infinite on collision, otherwise 1 / closest distance.
    def calc_obstacle_cost(self, trajectory, ob):
        """Return inf when the footprint hits any obstacle, else 1/min_distance."""
        ox = ob[:, 0]
        oy = ob[:, 1]
        dx = trajectory[:, 0] - ox[:, None]
        dy = trajectory[:, 1] - oy[:, None]
        r = np.hypot(dx, dy)
        if self.robotType == RobotType.rectangle:
            # Rotate obstacles into each pose's robot frame and test the
            # rectangular footprint box.
            yaw = trajectory[:, 2]
            rot = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])
            rot = np.transpose(rot, [2, 0, 1])
            local_ob = ob[:, None] - trajectory[:, 0:2]
            local_ob = local_ob.reshape(-1, local_ob.shape[-1])
            local_ob = np.array([local_ob @ x for x in rot])
            local_ob = local_ob.reshape(-1, local_ob.shape[-1])
            upper_check = local_ob[:, 0] <= self.robotLength / 2
            right_check = local_ob[:, 1] <= self.robotWidth / 2
            bottom_check = local_ob[:, 0] >= -self.robotLength / 2
            left_check = local_ob[:, 1] >= -self.robotWidth / 2
            if (np.logical_and(np.logical_and(upper_check, right_check),
                               np.logical_and(bottom_check, left_check))).any():
                return float("inf")
        elif self.robotType == RobotType.circle:
            if (r <= self.robotRadius).any():
                return float("inf")
        min_r = np.min(r)
        return 1.0 / min_r  # closer obstacles cost more

    # Draw the robot footprint.
    def plot_robot(self, x, y, yaw): # pragma: no cover
        if self.robotType == RobotType.rectangle:
            outline = np.array([[-self.robotLength / 2, self.robotLength / 2,
                                 (self.robotLength / 2), -self.robotLength / 2,
                                 -self.robotLength / 2],
                                [self.robotWidth / 2, self.robotWidth / 2,
                                 - self.robotWidth / 2, -self.robotWidth / 2,
                                 self.robotWidth / 2]])
            Rot1 = np.array([[math.cos(yaw), math.sin(yaw)],
                             [-math.sin(yaw), math.cos(yaw)]])
            outline = (outline.T.dot(Rot1)).T
            outline[0, :] += x
            outline[1, :] += y
            plt.plot(np.array(outline[0, :]).flatten(),
                     np.array(outline[1, :]).flatten(), "-k")
        elif self.robotType == RobotType.circle:
            circle = plt.Circle((x, y), self.robotRadius, color="b")
            plt.gcf().gca().add_artist(circle)
            out_x, out_y = (np.array([x, y]) +
                            np.array([np.cos(yaw), np.sin(yaw)]) * self.robotRadius)
            plt.plot([x, out_x], [y, out_y], "-k")

    # Enumerate all candidate trajectories and return the lowest-cost one.
    def searchBestTrajectory(self, x, ob, goal):
        """Return (best_u, best_trajectory) for the cheapest sampled command.

        NOTE(review): if every candidate collides, best_u/best_trajectory are
        never bound and this raises UnboundLocalError — confirm callers
        guarantee at least one collision-free sample.
        """
        # Start with an infinite best cost.
        min_cost = float("inf")
        # Sweep the sampled velocity / yaw-rate command space.
        for v in np.arange(self.min_accel, self.max_accel, self.velocityRate):
            for y in np.arange(-self.yawRange, self.yawRange, 4*self.yawRate):
                trajectory = self.predict_trajectory(x, v, y)
                # Collision-risk cost.
                ob_cost = self.calc_obstacle_cost(trajectory, ob)
                # Only score collision-free trajectories further.
                if (ob_cost != float("inf")):
                    # Distance-to-goal cost.
                    to_goal_cost = self.calc_to_goal_cost(trajectory, goal)
                    # Speed cost: prefer faster terminal velocities.
                    speed_cost = (self.max_speed - abs(trajectory[-1, 3]))
                    # Plot the candidate trajectory.
                    plt.plot(trajectory[:, 0], trajectory[:, 1], ":r", alpha=0.3, linewidth=0.9)
                    # Weighted sum of the cost terms.
                    final_cost= self.obCostWeight * ob_cost + self.distCostWeight * to_goal_cost + self.speedCostWeight * speed_cost
                else:
                    final_cost=float("inf")
                # search minimum trajectory
                if min_cost >= final_cost:
                    min_cost = final_cost
                    best_u = [v, y]
                    best_trajectory = trajectory
        return best_u, best_trajectory
def is_leap_year(year):
    """Return True for leap years: divisible by 4, except centuries that are
    not divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
| true |
40144f7644169908a4cfc5f658bc9787da05d600 | Python | Pookie-Cookie/pongproject2017 | /BetterMovement.py | UTF-8 | 1,147 | 3.328125 | 3 | [] | no_license | from tkinter import *
x = 10
y = 10
width = 100
height = 100
x_vel = 0
y_vel = 0
def move():
    """Move the rectangle by the current velocity and reschedule itself
    every 16 ms (~60 fps) via the tkinter event loop."""
    global x_vel
    global y_vel
    if abs(x_vel) + abs(y_vel) > 0:
        canvas1.move(rect, x_vel, y_vel)
    window.after(16, move)
def on_keypress(event):
    """Arrow-key press handler: set the matching velocity component to ±5."""
    print(event.keysym)
    global x_vel
    global y_vel
    if event.keysym == "Left":
        x_vel = -5
    if event.keysym == "Right":
        x_vel = 5
    if event.keysym == "Down":
        y_vel = 5
    if event.keysym == "Up":
        y_vel = -5
def on_keyrelease(event):
    """Arrow-key release handler: zero the matching velocity component.

    NOTE(review): any non-horizontal key release zeroes y_vel — confirm only
    arrow keys reach this handler.
    """
    global x_vel
    global y_vel
    print("release", event.keysym, "Xvel", x_vel, "Yvel", y_vel)
    if event.keysym in ["Left", "Right"]:
        x_vel = 0
    else:
        y_vel = 0
window = Tk()
window.geometry("600x600")
#canvas and drawing
canvas1 = Canvas(window, height=600, width=600)
canvas1.grid(row=0, column=0, sticky=W)
coord = [x, y, width, height]
rect = canvas1.create_rectangle(*coord, outline="#fb0", fill="#fb0")
#capturing keyboard inputs and assigning to function
window.bind_all('<KeyPress>', on_keypress)
window.bind_all('<KeyRelease>', on_keyrelease)
move()
window.mainloop()
| true |
d24232d27b92fb894aa3c56a329018c386acdd16 | Python | julianofhernandez/ctf | /column_text_format/column.py | UTF-8 | 3,264 | 2.9375 | 3 | [] | no_license | import os
import boto3
from .metadata_conversion_funcs import metadata_types
from .file_management import open_iterator
class Column:
    '''
    The column object is returned as an iterable for each column that needs to be accessed.
    For multiple columns a list of Column objects should be returned.
    Attributes:
        file_name(str): The full path to the column file
        datatype(dict): The type of data in the column as specified in metadata.json
        column_file(_io.TextIOWrapper): Refers to the opened file
    '''
    def __init__(self, file_name, datatype = None, bucket_name=None):
        '''Sets up the column name that will be accessed'''
        self.datatype = datatype
        self.file_name = file_name
        self.bucket_name = bucket_name
        # Bare file name without directory or extension, used as the index name.
        self.index_name = os.path.splitext(os.path.split(file_name)[1])[0]
        # file_name_only, extension = os.path.splitext(file_name)
        # if (extension == ''):
        #     self.file_name = file_name + ".txt"
        # else:
        #     self.file_name = file_name
        # if (not os.path.exists(self.file_name)):
        #     raise FileNotFoundError(f'{self.file_name} does not exist')
        # NOTE(review): datatype is assigned twice; this second assignment is redundant.
        self.datatype = datatype

    def __iter__(self):
        '''Sets up the object for iteration'''
        self.iterator = open_iterator(self.file_name, bucket_name=self.bucket_name)
        return self

    def __next__(self):
        '''Returns the next item in the column converted to the proper data type'''
        try:
            # Local files yield str lines; S3 iterators yield bytes that must
            # be decoded. Either way the trailing newline is stripped ([:-1]).
            if (self.bucket_name == None):
                row = next(self.iterator)[:-1]
            else:
                row = next(self.iterator).decode('utf8')[:-1]
        except StopIteration:
            self.iterator.close()
            raise StopIteration()
        return self.parse_data(row)

    def __len__(self):
        '''Returns the length of the column without loading the data into memory'''
        opened_file = open_iterator(self.file_name, bucket_name=self.bucket_name)
        counter = 0
        for value in opened_file:
            counter+=1
        self.length = counter
        opened_file.close()
        return self.length

    def __del__(self):
        '''Runs when self is destroyed, it closes the open file'''
        pass
        # self.iterator.close()

    # def open_iterator(self, file_name):
    #     '''Returns an iterator either from the file object or from the s3 object
    #     Both have tne \n at the end, which must be handled elsewhere in this class'''
    #     if (not self.bucket_name):
    #         self.iterator = open(file_name)
    #     else:
    #         session = boto3.Session().resource('s3')
    #         s3_obj = session.Object(self.bucket_name, self.key)
    #         body = s3_obj.get()['Body']
    #         self.iterator = body.iter_lines(chunk_size=1024, keepends=True)
    #     return self.iterator

    def parse_data(self, value):
        '''Convert a raw string value using the converter registered for this
        column's datatype; pass the value through unchanged when datatype is None.'''
        if (self.datatype == None):
            return value
        else:
            try:
                conversion_func = metadata_types[self.datatype]
            except KeyError as err:
                raise NotImplementedError(self.datatype+" is not currently a valid datatype") from None
            return conversion_func(value)
| true |
138daf81911811306ea85035857ce9e7b5d932b0 | Python | CatPhillips103/Recipe-Search | /Recipe.py | UTF-8 | 1,532 | 3.390625 | 3 | [] | no_license | import requests
import hiddenkeys
id = hiddenkeys.app_id
key = hiddenkeys.app_key
def recipe_database(ingredient, health_labels, diet_labels):
    """Query the Edamam recipe-search API and return the raw list of hits."""
    url = (f'https://api.edamam.com/search?q={ingredient}&app_id={id}'
           f'&app_key={key}&Health={health_labels}&Diet={diet_labels}')
    return requests.get(url).json()["hits"]
def recipe_search():
    """Prompt for search criteria, query Edamam, and append the results to
    recipe-inventory.txt."""
    ingredient_criteria = input('What ingredient would you like to include in the recipe? ')
    health_criteria = input('Do you have any specific dietary and/or allergy-free requests? ')
    diet_criteria = input('Any nutrition requests? ')
    answers = recipe_database(ingredient_criteria, health_criteria, diet_criteria)
    with open('recipe-inventory.txt', 'a') as text_file:
        # First pass: one summary block per recipe.
        for answer in answers:
            recipe = answer["recipe"]
            text_file.write(f'{recipe["label"].upper()}\n')
            text_file.write(f'See Recipe Prep Here: {recipe["url"]}\n')
            kcal = recipe["calories"]
            formatted_calories = f'{kcal:1.0f}'
            text_file.write(f'Calories: {formatted_calories}kcal\n')
            weight = recipe["totalWeight"]
            formatted_weight = f'{weight:1.2f}'
            text_file.write(f'Total weight of this meal: {formatted_weight}g\n\n')
        # NOTE(review): this second pass writes only the FIRST ingredient line
        # of each recipe — confirm whether all ingredientLines were intended.
        for food_supplies in answers:
            food = food_supplies["recipe"]["ingredientLines"]
            text_file.write(f'{food[0]}\n\n')
    print(f'Feeling Peckish? Check Your Inventory!')
recipe_search()
| true |
# Helper for processing the graphs supplied with the Algorithms textbook.
import sys

# Read the vertex count V and the edge count E.
v_cnt = sys.stdin.readline()
e_cnt = sys.stdin.readline()
# Read the remaining edge lines.
raw_edges = [line.split() for line in sys.stdin]
# Convert every element from string to int.
for pair in raw_edges:
    for i in range(len(pair)):
        pair[i] = int(pair[i])
# Print the counts followed by each edge as "u → v".
print(v_cnt)
print(e_cnt)
for edge in raw_edges:
    s = str(edge[0]) + " → " + str(edge[1])
    print(s)
| true |
class Solution(object):
    def generate(self, numRows):
        """
        Build the first numRows rows of Pascal's triangle.

        :type numRows: int
        :rtype: List[List[int]]
        """
        rows = []
        for _ in range(numRows):
            if not rows:
                rows.append([1])
            else:
                prev = rows[-1]
                # Each interior entry is the sum of the two entries above it.
                rows.append([1] + [a + b for a, b in zip(prev, prev[1:])] + [1])
        return rows
| true |
4b9d80840d593ef8f6ceeacaa87ce086b66fd51e | Python | devourer3/algorithm_py | /algo_str/algo_3.py | UTF-8 | 1,514 | 3.890625 | 4 | [] | no_license | # 로그파일 재정렬
# 로그를 재정렬하라. 기준은 다음과 같다.
# 1. 로그의 가장 앞 부분은 식별자다
# 2. 문자로 구성된 로그가 숫자 로그보다 앞에 온다.
# 3. 식별자는 순서에 영향을 끼치지 않지만, 문자가 동일할 경우 식별자 순으로 한다.
# 4. 숫자 로그는 입력 순서대로 한다.
# https://leetcode.com/problems/reorder-data-in-log-files
from typing import List
logs = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig", "let3 art zero"]
for log in logs:
print(log.split())
def func(x: str):
    """Sort key for a log line: (words after the identifier, identifier).

    BUG FIX: the annotation claimed List[str], but *x* is a single log string
    (str.split() would fail on a list).
    """
    tokens = x.split()
    return tokens[1:], tokens[0]
def reOrderLogFiles(elements: List[str]) -> List[str]:
    """Reorder logs: letter-logs first, digit-logs after in original order.

    Letter-logs are sorted by their content words; ties are broken by the
    identifier (the first token).

    BUG FIX: the original unconditionally printed letters[0..2] for debugging,
    raising IndexError for any input with fewer than three letter-logs.
    """
    letters, digits = [], []
    for log in elements:
        # The token after the identifier decides the log kind.
        if log.split()[1].isdigit():
            digits.append(log)
        else:
            letters.append(log)
    # Key: content words first, identifier as the tie-breaker.
    letters.sort(key=lambda x: (x.split()[1:], x.split()[0]))
    return letters + digits
print(reOrderLogFiles(logs))
| true |
87d06cdcb59e41f9aa6d9cf0ed0e74d8ae175fcd | Python | ofl/design-patterns-for-humans-python | /Creational/factory_method.py | UTF-8 | 1,048 | 3.421875 | 3 | [
"CC-BY-4.0"
] | permissive | # Factory Method Pattern
from abc import ABCMeta, abstractmethod
class Interviewer(metaclass=ABCMeta):
    """Product interface: something that can conduct an interview."""
    @abstractmethod
    def ask_questions(self) -> None:
        pass
class Developer(Interviewer):
    """Concrete product: a developer interviewer."""
    def ask_questions(self) -> None:
        print('Asking about design patterns!')
class CommunityExecutive(Interviewer):
    """Concrete product: a community-executive interviewer."""
    def ask_questions(self) -> None:
        print('Asking about community building')
class HiringManager(metaclass=ABCMeta):
    """Creator: runs an interview with whatever interviewer the subclass makes."""
    def take_interview(self) -> None:
        interviewer = self._make_interviewer()
        interviewer.ask_questions()

    # Factory method implemented by concrete managers.
    @abstractmethod
    def _make_interviewer(self) -> Interviewer:
        pass
class DevelopmentManager(HiringManager):
    """Concrete creator: supplies Developer interviewers."""
    def _make_interviewer(self) -> Interviewer:
        return Developer()
class MarketingManager(HiringManager):
    """Concrete creator: supplies CommunityExecutive interviewers."""
    def _make_interviewer(self) -> Interviewer:
        return CommunityExecutive()
dev_manager = DevelopmentManager()
dev_manager.take_interview()
marketing_manager = MarketingManager()
marketing_manager.take_interview()
| true |
# Find the runner-up (second-highest) score from space-separated input (Python 2).
if __name__ == '__main__':
    n = int(raw_input())
    arr = map(int, raw_input().split())
    # NOTE(review): rebinding `max` shadows the builtin; safe here only
    # because max() is never called again afterwards.
    max = max(arr)
    arr.sort()
    # After sorting ascending, the last value below the maximum is the runner-up.
    for i in arr:
        if i < max:
            second_max = i
    print second_max
| true |
ebbb31c49c6015c50bfc10b6207a7e82c48c3dd5 | Python | printdoc2020/disinfo | /app.py | UTF-8 | 3,418 | 2.90625 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import json
def get_num_words(text, key_words, return_keys):
    """Count which keywords occur in *text* (whitespace-tokenized).

    Returns the matching keywords joined with ", " when *return_keys* is
    truthy, otherwise the number of matches.

    BUG FIX: for empty text/keywords the original returned the int 0 even
    when return_keys was set, which crashed callers that concatenate the
    result onto a string; now the empty case returns "" in that mode.
    """
    if not text or not key_words:
        return "" if return_keys else 0
    tokens = set(text.split(" "))
    # Preserve the caller's keyword order in the joined output.
    matched = [key for key in key_words if key in tokens]
    if return_keys:
        return ", ".join(matched)
    return len(matched)
@st.cache
def read_data(n_tops):
    """Load the topic-score table, tweet texts, and keyword dictionary.

    Keeps only the top/score columns for the first *n_tops* topics plus the
    tweetid, and derives the posting account from the tweet URL.
    Returns (df, df_tweet, topics_dict); cached by streamlit.
    """
    df = pd.read_csv("data/df_res_2.csv")
    df = df[ [f"top{i+1}" for i in range(n_tops)] + [f"score{i+1}" for i in range(n_tops)] + ["tweetid"] ]
    # Account name is the path segment before "/status/" in the tweet URL.
    df["tweet_account"] = df.tweetid.map(lambda x: x.split("/status/")[0].split("/")[-1])
    df_tweet = pd.read_csv("data/tweet_parse_all_text_cols_and_processed_2cols.csv")
    with open('data/keywords.json') as json_file:
        topics_dict = json.load(json_file)
    return df, df_tweet, topics_dict
# Streamlit page setup and main UI flow.
st.set_page_config(
    page_title="Topics Dictionary",
    page_icon="random",
    layout="wide",
    initial_sidebar_state="expanded",
)
st.title('Topics Using Dictionary')
st.write("Last Updated: May 7, 2021")
# Sentinel option strings for the select boxes.
ALL="--- ALL ---"
NO_SORT= "--- not selected ---"
n_tweets = 1000
# link is the column with hyperlinks
# df['tweetid'] = df['tweetid'].apply(make_clickable,1)
# st.write(df.to_html(escape=False, index=False, show_dimensions=True), unsafe_allow_html=True)
n_tops = st.sidebar.selectbox('Get top...',(1,2,3,4,5), 2)
df, df_tweet, topics_dict = read_data(n_tops)
st.sidebar.title('Show...')
topic = st.sidebar.selectbox('Select by Topic',(*df["top1"].unique(), ALL))
account = st.sidebar.selectbox('Select by Account',(*df["tweet_account"].unique(), ALL))
# Filter the table by the chosen topic / account.
if topic != ALL:
    df = df[df["top1"] == topic]
if account != ALL:
    df = df[df["tweet_account"] == account]
cols_to_sort_1= [NO_SORT] + [col for col in df.columns if col != "tweetid"]
first_sort = st.sidebar.selectbox("First, sort by", cols_to_sort_1)
cols_to_sort_2 = [NO_SORT] + [col for col in df.columns if col != "tweetid"]
if first_sort != NO_SORT:
    cols_to_sort_2.remove(first_sort)
second_sort = st.sidebar.selectbox("Then, sort by", cols_to_sort_2)
ascending = st.sidebar.checkbox("ascending order")
# Apply the requested sort order(s) and render the table.
if first_sort != NO_SORT and second_sort != NO_SORT:
    st.write(df.sort_values([first_sort, second_sort], ascending=ascending))
elif first_sort != NO_SORT:
    st.write(df.sort_values([first_sort], ascending=ascending))
else:
    st.write(df)
st.text(f"Show {df.shape[0]} tweets")
st.markdown("Double click on a _**tweetid**_ in the table above, then copy and paste here to see more detail of the tweet.")
tweetid = st.text_input('Ex: https://twitter.com/thetech/status/1299806383303516160', "")
target_tweet = df_tweet[df_tweet["tweetid"]==tweetid]
processed_text = target_tweet["all_text_processed"].values[0] if target_tweet["all_text_processed"].values else ""
# Detail view for a pasted tweetid: show processed text and per-topic keywords.
if tweetid and df[df["tweetid"]==tweetid].shape[0]>0:
    st.write('tweetid:', tweetid)
    if len(target_tweet)>0:
        st.markdown("**All texts (Tweet content, article content,...) after processing:** " + processed_text)
    for i in range(n_tops):
        st.write("-------")
        st.markdown(f"**Topic {i+1}:** "+ str(df[df["tweetid"]==tweetid][f"top{i+1}"].values[0]))
        all_keywords_in_the_topic = topics_dict[df[df["tweetid"]==tweetid][f"top{i+1}"].values[0]]
        st.markdown("**Keywords appearing:** "+ get_num_words(processed_text, all_keywords_in_the_topic,return_keys=True))
        st.write("-------")
| true |
be44c08833afd8a3739e984b978c4a6a7b3c6d58 | Python | ltrabas/X-Serv-15.8-CmsUsersPut | /cms_users_put/views.py | UTF-8 | 2,251 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | from django.shortcuts import render
from django.http import HttpResponse
from models import Pages
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import get_template
from django.template import Context
# Create your views here.
def mostrar(request):
    """List every stored page with a link, plus a login/logout header.

    NOTE(review): page names and contents are interpolated into the HTML
    unescaped — stored XSS risk if pages contain markup; confirm inputs are
    trusted.
    """
    if request.user.is_authenticated():
        logged = ("Logged in as " + request.user.username +
                  ". <a href='/logout/'>Logout</a><br/><br/>")
    else:
        logged = "Not logged in. <a href='/login/'>Login</a><br/><br/>"
    respuesta = "Pages Found: "
    lista_pages = Pages.objects.all()
    for page in lista_pages:
        respuesta += ("<br>-<a href='/" + page.name + "'>" + page.name +
                      "</a> --> " + page.page)
    plantilla = get_template("plantilla.html")
    contexto = Context({'title': logged, 'content': respuesta})
    return HttpResponse(plantilla.render(contexto))
@csrf_exempt
def mostrar_pagina(request, resource):
    """Serve the page named *resource*; on POST (authenticated) create it.

    GET: return the stored page, or a creation form when it does not exist.
    POST: save the submitted page if the user is logged in.
    """
    if request.user.is_authenticated():
        login = ("Logged in as " + request.user.username +
                 ". <a href='/logout/'>Logout</a><br/><br/>")
    else:
        login = "Not logged in. <a href='/login/'>Login</a><br/><br/>"
    if request.method == "GET":
        try:
            page = Pages.objects.get(name=resource)
            return HttpResponse(page.page)
        except Pages.DoesNotExist:
            # Page missing: show a creation form instead.
            respuesta = "Page not found, add: "
            respuesta += '<form action="" method="POST">'
            respuesta += "Nombre: <input type='text' name='nombre'>"
            respuesta += "<br>Página: <input type='text' name='page'>"
            respuesta += "<input type='submit' value='Enviar'></form>"
    elif request.method == "POST":
        if request.user.is_authenticated():
            nombre = request.POST['nombre']
            page = request.POST['page']
            pagina = Pages(name=nombre, page=page)
            pagina.save()
            respuesta = "Saved page: /" + nombre + " --> " + page
        else:
            respuesta = "Necesitas hacer <a href='/login/'>Login</a>"
    # NOTE(review): for methods other than GET/POST, `respuesta` is never
    # bound and the render below raises UnboundLocalError — confirm the
    # intended method set (repo name suggests PUT support was planned).
    plantilla = get_template("plantilla.html")
    contexto = Context({'title': login, 'content': respuesta})
    return HttpResponse(plantilla.render(contexto))
| true |
fb8014f3411c6ecb5bd3fda6d40dde12bb55738b | Python | rnsdoodi/Programming-CookBook | /Back-End/Python/Basics/Part -4- OOP/07 - Metaprogramming/Attribute-Read-write-Accessor/01_attributeread_accessor.py | UTF-8 | 4,643 | 3.5 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"MIT"
class Person:
    """Demo: __getattr__ fallback that retries a missing attribute as '_name'."""
    def __getattr__(self, name):
        alt_name = '_' + name
        print(f'Could not find {name}, trying {alt_name}...')
        try:
            return super().__getattribute__(alt_name)
        except AttributeError:
            raise AttributeError(f'Could not find {name} or {alt_name}')
p = Person()
try:
p.age
except AttributeError as ex:
print(type(ex).__name__, ex)
# Could not find age, trying _age...
# AttributeError Could not find age or _age
class Person:
    """Demo: the _age backing attribute is reached via the __getattr__ fallback."""
    def __init__(self, age):
        self._age = age

    def __getattr__(self, name):
        print(f'Could not find {name}')
        alt_name = '_' + name
        try:
            return super().__getattribute__(alt_name)
        except AttributeError:
            raise AttributeError(f'Could not find {name} or {alt_name}')
p = Person(100)
p.age
# Could not find age
# 100
# Example 1
class DefaultClass:
    """Demo: missing attributes are auto-created with a per-instance default."""
    def __init__(self, attribute_default=None):
        self._attribute_default = attribute_default

    def __getattr__(self, name):
        print(f'{name} not found. creating it and setting it to default...')
        # Materialize the attribute so subsequent reads skip __getattr__.
        setattr(self, name, self._attribute_default)
        return self._attribute_default
d = DefaultClass('NotAvailable')
d.test
# test not found. creating it and setting it to default...
# 'NotAvailable'
d.__dict__
# {'_attribute_default': 'NotAvailable', 'test': 'NotAvailable'}
d.test
# 'NotAvailable'
d.test = 'hello'
d.test
# 'hello'
d.__dict__
# {'_attribute_default': 'NotAvailable', 'test': 'hello'}
class Person(DefaultClass):
    """Demo: inherits the auto-default behaviour with 'Unavailable' as default."""
    def __init__(self, name):
        super().__init__('Unavailable')
        self.name = name
p = Person('Raymond')
p.name
# 'Raymond'
p.age
# age not found. creating it and setting it to default...
# Example 2
class AttributeNotFoundLogger:
    """Demo: log every failed attribute lookup, then re-raise AttributeError."""
    def __getattr__(self, name):
        err_msg = f"'{type(self).__name__}' object has no attribute '{name}'"
        print(f'Log: {err_msg}')
        raise AttributeError(err_msg)
class Person(AttributeNotFoundLogger):
    """Demo subject for AttributeNotFoundLogger."""
    def __init__(self, name):
        self.name = name
p = Person('Raymond')
p.name
# 'Raymond'
try:
p.age
except AttributeError as ex:
print(f'AttributeError raised: {ex}')
# Log: 'Person' object has no attribute 'age'
# AttributeError raised: 'Person' object has no attribute 'age'
# Example 3: Overriding __getattribute__
class DefaultClass:
    """Demo: default-creating __getattr__ that reads its default through
    super().__getattribute__ (avoiding re-entry into custom lookup)."""
    def __init__(self, attribute_default=None):
        self._attribute_default = attribute_default

    def __getattr__(self, name):
        print(f'{name} not found. creating it and setting it to default...')
        default_value = super().__getattribute__('_attribute_default')
        setattr(self, name, default_value)
        return default_value
class Person(DefaultClass):
    """Demo: block outside access to _-prefixed names; expose them via properties."""
    def __init__(self, name=None, age=None):
        super().__init__('Not Available')
        if name is not None:
            self._name = name
        if age is not None:
            self._age = age

    def __getattribute__(self, name):
        # Forbid single-underscore (but not dunder) attribute access.
        if name.startswith('_') and not name.startswith('__'):
            raise AttributeError(f'Forbidden access to {name}')
        return super().__getattribute__(name)

    @property
    def name(self):
        # Bypass our own __getattribute__ to reach the backing field.
        return super().__getattribute__('_name')

    @property
    def age(self):
        return super().__getattribute__('_age')
p = Person('Python', 42)
p.name, p.age
# ('Python', 42)
p.language
# language not found. creating it and setting it to default...
# 'Not Available'
p.__dict__
# {'_attribute_default': 'Not Available',
# '_name': 'Python',
# '_age': 42,
# 'language': 'Not Available'}
# Overriding Class Attribute Accessors
class MetaLogger(type):
    """Demo metaclass: logs class-level attribute lookups and their fallback."""
    def __getattribute__(self, name):
        print('class __getattribute__ called...')
        return super().__getattribute__(name)

    def __getattr__(self, name):
        print('class __getattr__ called...')
        return 'Not Found'
class Account(metaclass=MetaLogger):
    """Demo class whose class-attribute lookups go through MetaLogger."""
    apr = 10
Account.apr
# class __getattribute__ called...
# 10
Account.apy
# class __getattribute__ called...
# class __getattr__ called...
# # 'Not Found'
# Gets called for Method access
class MyClass:
    """Demo: __getattribute__ also fires for method access (say_hello)."""
    def __getattribute__(self, name):
        print(f'__getattribute__ called... for {name}')
        return super().__getattribute__(name)

    def __getattr__(self, name):
        print(f'__getattr__ called... for {name}')
        raise AttributeError(f'{name} not found')

    def say_hello(self):
        return 'hello'
m = MyClass()
m.say_hello()
# __getattribute__ called... for say_hello
# 'hello' | true |
183a9931d98b484d49e4c4f3621ac17a9d452452 | Python | signorcampana/Viking-Saga-Quest | /enemys.py | UTF-8 | 1,661 | 3.875 | 4 | [] | no_license | import random
ENEMY_NAMES = (
"Minotaur",
"Hydra",
"Griffin",
)
class Enemy:
    """A randomly named enemy whose attack/defense/HP scale with level."""
    # Class-level defaults; instances overwrite these in __init__/fixStats.
    name = None
    hp = 100
    maxHp = 100
    stat_defense = 0
    stat_attack = 0
    level = 1
    isPlayer = False
    weapon = None

    def __init__(self, level, weapon):
        self.name = random.choice(ENEMY_NAMES)
        self.weapon = weapon
        self.level = level
        self.fixStats()
        self.hp = self.maxHp

    def fixStats(self):
        """fixes the stats (attack/defense) after the level
        has been changed"""
        level = self.level
        # Players get a one-level stat bonus.
        if self.isPlayer:
            level += 1
        self.stat_attack = 40 + (level * 5)
        self.stat_defense = 30 + (level * 5)
        self.maxHp = 100 + (10 * (level + 1))

    def hurt(self, amount):
        """hurts the object, removes damaged based on the object's
        defense rating; returns the remaining HP."""
        # NOTE(review): when defense/2 exceeds `amount` the net damage goes
        # negative and actually heals the target — confirm this is intended.
        amount -= (self.stat_defense / 2)
        self.hp -= amount
        return self.hp

    def attack(self, target):
        """attacks a target, and removes damaged based on the object
        attack rating; returns the raw damage dealt (before defense)."""
        damage = ((self.weapon.attack + self.stat_attack) / 2)
        target.hurt(damage)
        return damage

    def heal(self, value):
        """heals the current object; returns the HP actually recovered
        (capped at maxHp)."""
        oldHP = self.hp
        self.hp += value
        if self.hp > self.maxHp:
            self.hp = self.maxHp
        return self.hp - oldHP

    def think(self, target):
        """AI for the enemy: below 40% HP it heals half the time,
        otherwise it attacks *target*."""
        if self.hp < 0:
            return # we can't think if we're dead
        if ((self.hp/self.maxHp < 0.4) and (random.choice([True, False]))):
            amount = self.heal(self.level * 10)
            print("Enemy used a healing potion, recovered ", amount, " HP!")
        else:
            damage = self.attack(target)
            #print(damage, target.name, self.name)
            print("Enemy ", self.name, " attacks,", target.name, " loses ", damage, " HP")
| true |
b48c133014e4e6a7f28faee7495763088d20f4ad | Python | sarvaaurimas/part_1A_floodwarning_system | /Task2B.py | UTF-8 | 567 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 10 19:40:44 2017
@author: samue
"""
from floodsystem.stationdata import build_station_list ,update_water_levels
from floodsystem.flood import stations_level_over_threshold
def run():
    """ Requirement for Task 2B"""
    # Build the station list, refresh the latest water levels, then report
    # every station whose relative level exceeds the 0.8 threshold.
    stations = build_station_list()
    update_water_levels(stations)
    tuplist = stations_level_over_threshold(stations, 0.8)
    for i in tuplist:
        print(i[0].name, " :", i[1])
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
| true |
89cb63d9581a90dc7cd27a431e7d1ac8d00d5fc5 | Python | asherif844/100DaysOfCode | /searchApp/program.py | UTF-8 | 917 | 3.53125 | 4 | [] | no_license | import os
def main():
    """Entry point: collect a folder and search text, then run the search.

    BUG FIX: the original printed an error for a missing folder or empty
    search text but then fell through and called search_folders anyway
    (os.listdir(None) would list the CWD, or crash); now it returns early.
    """
    print_header()
    folder = get_folder_from_user()
    if not folder:
        print("Sorry, we can't search this folder")
        return
    text = get_search_text_from_user()
    if not text:
        print("Sorry, can't search for nothing")
        return
    search_folders(folder, text)
def print_header():
    """Print the application banner."""
    border = '---------------------------------'
    print(border)
    print(' Search App ')
    print(border)
def get_folder_from_user():
    """Prompt for a folder path.

    Returns its absolute path, or None when the input is blank/whitespace
    or not an existing directory.
    """
    folder = input('What folder do you want to search?')
    if not folder or not folder.strip() or not os.path.isdir(folder):
        return None
    return os.path.abspath(folder)
def get_search_text_from_user():
    """Prompt the user for a single search phrase and return it verbatim."""
    return input('What are you searching for [Single phrase only]')
def search_folders(folder, text):
    """List the entries of *folder* in preparation for searching.

    NOTE(review): implementation is incomplete — `items` is collected but the
    matching against *text* has not been written yet.
    """
    items = os.listdir(folder)
if __name__ == "__main__":
main()
| true |