blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
def shuffleArray():
    '''Randomly shuffle a hard-coded array in place and print it.

    For every position in the array, a random index is drawn and that
    element is swapped with the element at index 0, then the result is
    printed.  NOTE(review): this swap-with-slot-0 scheme is not a uniform
    Fisher-Yates shuffle; some permutations are more likely than others.
    '''
    array = [1,2,3,4,5,6,7,8]
    print("The original array: ",array)
    import random  # local import kept as in the original coursework file
    for i in array:
        randInt = random.randrange(0, len(array)) #randomly selects element in array
        array[0], array[randInt] = array[randInt], array[0] #swap element at index 0 with the randomly selected element.
    print ("The randomly shuffled array: ",array)
# Run the demonstration once at import time.
shuffleArray()
| true |
1e0e8861defeac70ccc1461834e398c11ea0cf3b | Python | Pratyaksh7/Algorithmic-Toolbox | /week 2/fibonacci.py | UTF-8 | 227 | 3.609375 | 4 | [] | no_license | # Uses python3
def calc_fib(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Iterative two-variable form: O(n) time, O(1) memory.  The original kept
    two parallel lists (`arr` and `result`) holding the whole sequence just
    to index the final element; only the last two values are ever needed.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Read n from stdin and print the n-th Fibonacci number.
n = int(input())
print(calc_fib(n))
| true |
9726f2bd13758c79542664f542cd98986c07dc78 | Python | Mpolozov/GeographyScraper | /CoordBot.py | UTF-8 | 592 | 3.0625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
def CoordBot(url):
    """Drive Chrome to `url`, type a search query, submit it, then quit.

    Pure side effects (browser automation); returns nothing.  Note the
    hard-coded query "Alabama" ignores whatever place is baked into `url`.
    """
    PATH = "/Users/mitchellpolozov/Downloads/chromedriver"  # hard-coded local chromedriver binary
    driver = webdriver.Chrome(PATH)
    driver.get(url)
    # NOTE(review): find_element_by_name was removed in Selenium 4 --
    # this code assumes Selenium 3.x; confirm the pinned version.
    search_bar = driver.find_element_by_name('q')
    search_bar.clear()
    time.sleep(5)  # fixed sleeps; WebDriverWait would be more robust
    search_bar.send_keys("Alabama")
    time.sleep(5)
    search_bar.send_keys(Keys.RETURN)
    time.sleep(10)
    driver.quit()
#"https://www.geonames.org/search.html?q=New+York&country="
CoordBot("https://www.geonames.org/search.html?q=New+York&country=") | true |
4508c69d67159530fb0b8fe5f3e761c3e272e585 | Python | benk691/HomeProjects | /Budget/MoneyManager.py | UTF-8 | 4,897 | 3.15625 | 3 | [] | no_license | from decimal import Decimal
from AllocationManager import AllocationManager
from General.Common import WarningMsg, InfoMsg, DebugMsg, TWOPLACES, setContext, DEBT_KEY, EXTRA_KEY
class MoneyManager:
    """Coordinates an AllocationManager with money/savings CSV state files.

    Interactive budgeting front-end: amounts are read from the terminal.
    NOTE(review): this is Python 2 code (raw_input, xrange throughout) --
    confirm the target interpreter before reuse.
    """

    def __init__(self, moneyPath, allocationPath, savingsPath):
        # moneyPath/savingsPath: CSV files persisting per-category money and
        # per-product savings; allocationPath configures the AllocationManager.
        setContext()
        self._moneyPath = moneyPath
        self._savingsPath = savingsPath
        # Allocation manager
        self.allocationManager = AllocationManager(allocationPath)
        self._readMoney()
        self._readSavings()

    def deposit(self, amount=None):
        '''
        Deposits money according to the percentages of all the allocations.
        If `amount` is None the user is prompted for it interactively.
        '''
        if amount == None:
            amount = Decimal(raw_input("How much money to deposit? "))
        self.allocationManager.deposit(amount)

    def withdraw(self, amount=None):
        '''
        Withdraws money out of an allocation and checks if withdrawal is over budget.
        NOTE(review): `amount` is accepted but never used -- the allocation
        manager does its own prompting.
        '''
        self.allocationManager.withdraw()

    def status(self):
        # Delegate: print/report current allocation status.
        self.allocationManager.status()

    def update(self):
        # Delegate: refresh allocation state.
        self.allocationManager.update()

    def calculateSavings(self):
        '''
        Adds the amount in each allocation and compares it with bank holdings and credit debt.
        The functions takes the difference and puts it into the extra money category if positive,
        otherwise adds the amount to your existing debt.
        '''
        bankAccountMoney = Decimal("0.00")
        creditAccountMoney = Decimal("0.00")
        savings = Decimal("0.00")  # NOTE(review): assigned but never used
        bankAccounts = int(raw_input("How many bank accounts do you have? "))
        creditCards = int(raw_input("How many credit cards do you have? "))
        for ba in xrange(bankAccounts):
            bankAccountMoney += Decimal(raw_input("How much money is in your bank account #{0}? ".format(ba)))
        for cc in xrange(creditCards):
            creditCap = Decimal(raw_input("What is your credit line for credit card account #{0}? ".format(cc)))
            availCredit = Decimal(raw_input("What is your available credit for credit card account #{0}? ".format(cc)))
            # Outstanding balance = credit line minus available credit, stored negative.
            creditAccountMoney += (-1 * abs(creditCap - availCredit))
        if bankAccounts > 0 or creditCards > 0:
            # Calculate your debt before continuing
            self.allocationManager.calculateDebt()
            actualSavings = bankAccountMoney + creditAccountMoney
            budgetSavings = self.allocationManager.calculateSavings()
            # This works for all values of actualSavings and budgetSavings.
            # For more description why look in the wiki
            diff = actualSavings - budgetSavings
            InfoMsg("Your actual savings are ${0}.".format(actualSavings.quantize(TWOPLACES)))
            InfoMsg("Your budget savings are ${0}.".format(budgetSavings.quantize(TWOPLACES)))
            if diff >= Decimal("0.00"):
                InfoMsg("Adding ${0} to your extra money.".format(diff.quantize(TWOPLACES)))
                self.allocationManager.allocationMap[EXTRA_KEY].extraMoney += diff
            else:
                InfoMsg("Adding ${0} to your debt.".format(diff.quantize(TWOPLACES)))
                self.allocationManager.allocationMap[DEBT_KEY].debtReg += diff
                self.allocationManager.calculateDebt()
                WarningMsg("You have accumulated a debt of ${0}!".format(self.allocationManager.allocationMap[DEBT_KEY].debt.quantize(TWOPLACES)))

    def finalize(self):
        # Persist state back to the CSV files and let the manager clean up.
        self._writeMoney()
        self._writeSavings()
        self.allocationManager.finalize()

    def _readMoney(self):
        '''
        Get current money out of CSV file
        '''
        with open(self._moneyPath, 'r') as mFile:
            mlines = mFile.readlines()
        for i in xrange(len(mlines)):
            if i == 0:
                continue  # skip the "Category,Money" header row
            if mlines[i].strip():
                category, money = mlines[i].strip().split(',')
                if category != DEBT_KEY:
                    self.allocationManager.allocationMap[category].extraMoney = Decimal(money)
                else:
                    # Debt is tracked twice: current value and the register.
                    self.allocationManager.allocationMap[category].debt = Decimal(money)
                    self.allocationManager.allocationMap[category].debtReg = Decimal(money)

    def _readSavings(self):
        '''
        Get current savings out of CSV file
        '''
        with open(self._savingsPath, 'r') as sFile:
            slines = sFile.readlines()
        for i in xrange(len(slines)):
            if i == 0:
                continue  # skip the header row
            if slines[i].strip():
                category, product, percent, priority, savings, totalCost = slines[i].strip().split(',')
                self.allocationManager.addSubAllocation(cat=category, product=product, percent=percent, priority=priority, savings=savings, totalCost=totalCost)

    def _writeMoney(self):
        # Mirror image of _readMoney: one "category,amount" row per allocation.
        with open(self._moneyPath, 'w') as mFile:
            mFile.write("Category,Money\n")
            for cat in self.allocationManager.allocationMap:
                if cat != DEBT_KEY:
                    mFile.write("{0},{1}\n".format(cat, self.allocationManager.allocationMap[cat].extraMoney.quantize(TWOPLACES)))
                else:
                    mFile.write("{0},{1}\n".format(cat, self.allocationManager.allocationMap[cat].debt.quantize(TWOPLACES)))

    def _writeSavings(self):
        # Each sub-allocation serializes itself via str(subAlloc).
        with open(self._savingsPath, 'w') as sFile:
            sFile.write("Category,Product,Percent,Priority,Savings,Total Cost\n")
            for cat in self.allocationManager.allocationMap:
                for subAlloc in self.allocationManager.allocationMap[cat].subAllocs:
                    sFile.write(str(subAlloc))
| true |
be82fc2a2b228552ba73955b02788c0267997daf | Python | zybine/NeutronRadiation | /python files/Water Activation.py | UTF-8 | 837 | 2.703125 | 3 | [] | no_license | # reads in a g4beamline output TEXT file, and plots the number of protons at each z vs distance
import pandas as pd
import os

# Particle Data Group ids of the species we count.
# Fixed the misspelled NEURTRON_PDGid (constant is internal to this script).
NEUTRON_PDGid = 2112
PROTON_PDGid = 2212
GAMMA_PDGid = 22

directory = "./data/WaterActivation/"
# Column layout of a g4beamline detector output text file.
f_header = ["x", "y", "z", "Px", "Py", "Pz", "t", "PDGid", "EventID", "TrackID", "ParentID", "Weight"]

# Load every .txt detector file into its own DataFrame.
detector_dfs = []
for filename in os.listdir(directory):
    if filename.endswith(".txt"):
        detector_dfs.append(pd.read_csv(directory + filename, sep=' ', comment='#', header=None, names=f_header))

# Tally particle counts across all detectors.
# (Removed `n_input = 100000`, which was never used.)
n_neutrons = 0
n_protons = 0
n_gamma = 0
for df in detector_dfs:
    n_neutrons += len(df.loc[df["PDGid"] == NEUTRON_PDGid])
    n_protons += len(df.loc[df["PDGid"] == PROTON_PDGid])
    n_gamma += len(df.loc[df["PDGid"] == GAMMA_PDGid])

print(n_neutrons)
print(n_protons)
print(n_gamma)
| true |
309420fa7f4de5a2aef34fc504f29944b123fe95 | Python | grehujt/SmallPythonProjects | /CdfDrawing/cdf.py | UTF-8 | 300 | 2.921875 | 3 | [
"MIT"
] | permissive |
import numpy as np
import matplotlib.pyplot as plt
def draw_cdf():
    """Plot the empirical CDF of the values in err.txt and save it as cdf.png."""
    values = np.sort(np.loadtxt('err.txt'))
    # Fraction of samples at or below each sorted value.
    fractions = np.arange(len(values)) / float(len(values) - 1)
    plt.plot(values, fractions, label='some text')
    plt.grid()
    plt.legend()
    plt.xlabel('estimated error')
    plt.savefig('cdf.png')
| true |
5def4a0a09ea516fd46d317b9d285fbfc3dcfd6e | Python | Dualve/Simple-Tasks | /number_e.py | UTF-8 | 318 | 2.984375 | 3 | [] | no_license | number_e_list = list("2.7182818284590452353602875")
exp = int(input())
if exp == 25:
print("".join(number_e_list))
elif exp == 0:
print(3)
else:
if int(number_e_list[exp+2]) >= 5:
number_e_list[exp+1] = str(int(number_e_list[exp+1])+1)
print("".join(number_e_list[:exp+2]))
| true |
34670824db2e5c45f7aa31288fe013c7fab6d384 | Python | ojwills/MIT-6.00.1x-Intro-to-CS-and-Python | /Final_Exam/Problem_3.py | UTF-8 | 1,491 | 4.65625 | 5 | [] | no_license | # Problem 3
# 10/10 points (graded)
# Numbers in Mandarin follow 3 simple rules.
# There are words for each of the digits from 0 to 10.
# For numbers 11-19, the number is pronounced as "ten digit", so for example, 16 would be pronounced (using Mandarin) as "ten six".
# For numbers between 20 and 99, the number is pronounced as “digit ten digit”, so for example, 37 would be pronounced (using Mandarin) as
# "three ten seven". If the digit is a zero, it is not included.
# Here is a simple Python dictionary that captures the numbers between 0 and 10.
# Mandarin words for the digits 0-9 plus the word for 10 ("shi").
trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si', '5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}


def convert_to_mandarin(us_num):
    '''
    us_num, a string representing a US number 0 to 99
    returns the string mandarin representation of us_num
    '''
    value = int(us_num)
    # 0-10 map directly to a single word.
    if value <= 10:
        return trans[us_num]
    tens, units = us_num[0], us_num[1]
    # 11-19: "ten digit".
    if value <= 19:
        return 'shi ' + trans[units]
    # 20-99: "digit ten [digit]", dropping a trailing zero.
    if units == '0':
        return trans[tens] + ' shi'
    return trans[tens] + ' shi ' + trans[units]
# Correct | true |
edaa04cb74074cd698ba118865401e26de212cbe | Python | kristelsamoy/minichiello.py | /20gen.py | UTF-8 | 1,723 | 3.296875 | 3 | [] | no_license | from tkinter import *
from tkinter import filedialog
def browseFiles():
    """Open a file picker, remember the chosen path, and show it in the label.

    Bug fix: `filename` must be declared global -- crea_grafico() reads it at
    module scope, but the original assignment created a function-local name,
    so generating the chart always raised NameError.
    """
    global filename
    filename = filedialog.askopenfilename(initialdir = "/", title = "Select a File", filetypes = (("Text files", ".txt"), ("all files", ".")))
    label_file_explorer.configure(text="File aperto: "+filename)
def crea_grafico():
    """Read comma-separated integer pairs from the chosen file and scatter-plot them.

    NOTE(review): expects `filename` to exist at module scope when called;
    in this file browseFiles() assigns it only locally, so this raises
    NameError until that is fixed.
    """
    import string  # NOTE(review): imported but never used
    import numpy as np  # NOTE(review): imported but never used
    import matplotlib.pyplot as plt
    f = open(filename, 'r')
    coordX = []
    coordY = []
    for riga in f:
        valori = str(riga)
        Nval = len(valori)  # NOTE(review): computed but never used
        valori = valori.strip('\n')
        valori = valori.split(',')
        valori = list(valori)
        print(valori)
        coordX.append(int(valori[0]))
        coordY.append(int(valori[1]))
    f.close()
    print ("X: ",coordX)
    print ("Y: ",coordY)
    # NOTE(review): sorting each axis independently breaks the original
    # (x, y) pairing of the points -- confirm that is intended.
    coordX.sort()
    coordY.sort()
    print("liste ordinate:")
    print ("X: ",coordX)
    print ("Y: ",coordY)
    print(type(coordX))
    print(type(coordY))
    plt.scatter(coordX,coordY)
    plt.ylabel('some numbers')
    plt.show()
# --- Tkinter UI wiring (runs at import time) ---
window = Tk()
window.title('Interfaccia Grafica')
window.geometry("800x600")
window.config(background = "green")
label_file_explorer = Label(window, text = "Inserisci un file di testo e genera un grafico", width = 116, height = 3, fg = "black")
button_explore = Button(window, text = "Inserisci file di testo", command = browseFiles)
button_exit = Button(window, text = "Esci", command = exit)
button_genera_grafico = Button(window, text = "Crea grafico", command = crea_grafico)
# Single-column grid layout: label, file picker, chart button, exit button.
label_file_explorer.grid(column = 1, row = 1)
button_explore.grid(column = 1, row = 2)
button_exit.grid(column = 1,row = 4)
button_genera_grafico.grid(column = 1, row = 3)
window.mainloop()
| true |
e8477d4467ac387e710ab56caa28556010e456dc | Python | art-vasilyev/instachatbot | /tests/test_bot.py | UTF-8 | 7,774 | 2.515625 | 3 | [
"MIT"
] | permissive | from instachatbot.bot import InstagramChatBot
from instachatbot.nodes import (
MenuNode, MenuItem, MessageNode, QuestionnaireNode, DummyNode,
NotifyAdminNode)
class FakeBot(InstagramChatBot):
    """Test double for InstagramChatBot: records outgoing direct messages in
    memory instead of sending them, and maps usernames to themselves."""

    def __init__(self, menu, storage=None, trigger=None):
        super(FakeBot, self).__init__(menu, storage=storage, trigger=trigger)
        # user_id -> list of message texts "sent" to that user
        self.messages = {}

    def send_direct_message(self, user_id, text):
        # Record instead of hitting the Instagram API.
        self.messages.setdefault(user_id, []).append(text)

    def get_user_id_from_username(self, username):
        # Identity mapping keeps tests independent of the real API.
        return username
class TestBot:
    """Tests for InstagramChatBot menu navigation, exercised through the
    FakeBot double (messages are captured in memory, never sent)."""

    # Fixed identities reused by every test case.
    user_id = 9912873321
    bot_id = 19501769420
    chat_id = '340282366841710300949128307236372348100'
    username = 'user'

    def build_message(self, text):
        # Minimal direct-message payload in the shape handle_message expects.
        return {
            'text': text,
            'chat': {'id': self.chat_id},
            'from': {'id': self.user_id, 'username': self.username}
        }

    def send_message(self, bot, text):
        # Simulate the user sending `text` to the bot.
        message = self.build_message(text)
        bot.handle_message(message, {'bot': bot})

    def test_parse_messages(self):
        # One private thread with one text item newer than the cutoff
        # timestamp should yield exactly one parsed message.
        timestamp = 1559826742121822
        text = 'Test'
        message_body = {
            'inbox': {
                'threads': [
                    {
                        'is_group': False,
                        'items': [
                            {
                                'item_id': (
                                    '28773724711249397008013560068964352'),
                                'item_type': 'text',
                                'text': text,
                                'timestamp': timestamp,
                                'user_id': self.user_id
                            }
                        ],
                        'thread_id': self.chat_id,
                        'thread_title': 'user',
                        'thread_type': 'private',
                        'thread_v2_id': '18037398148086980',
                        'users': [
                            {
                                'full_name': 'User',
                                'pk': self.user_id,
                                'username': 'user'
                            }
                        ],
                    }
                ],
            },
            'viewer': {
                'full_name': 'Bot Name',
                'pk': self.bot_id,
                'username': 'botname'
            }
        }
        bot = FakeBot(menu=MenuNode('test', []))
        bot.user_id = self.bot_id
        messages = list(bot.parse_messages(message_body, timestamp-1))
        assert len(messages) == 1
        assert messages[0]['text'] == text

    def test_handle_message(self):
        # Any first message shows the menu; choosing item "1" sends the
        # message node's text and then re-shows the menu.
        msg_node = MessageNode('test message')
        menu = MenuNode('💡Menu', [MenuItem('message', msg_node)])
        bot = FakeBot(menu=menu)
        bot.user_id = self.bot_id
        state = bot.conversation.get_state(self.chat_id)
        assert state is None
        self.send_message(bot, 'test')
        state = bot.conversation.get_state(self.chat_id)
        assert state['node'] is menu
        self.send_message(bot, '1')
        state = bot.conversation.get_state(self.chat_id)
        assert state['node'] is menu
        assert 'test message' in bot.messages[self.user_id][-2]
        assert 'Menu' in bot.messages[self.user_id][-1]

    def test_questionnaire(self):
        # The questionnaire asks each question in turn and forwards the
        # collected answers to the admin user.
        question_node = QuestionnaireNode(
            ['question1', 'question2'],
            admin_username='admin')
        menu = MenuNode('💡Menu', [MenuItem('questionnaire', question_node)])
        bot = FakeBot(menu=menu)
        bot.user_id = self.bot_id
        state = bot.conversation.get_state(self.chat_id)
        assert state is None
        self.send_message(bot, 'test')
        state = bot.conversation.get_state(self.chat_id)
        assert state['node'] is menu
        self.send_message(bot, '1')
        state = bot.conversation.get_state(self.chat_id)
        assert state['node'] is question_node
        assert bot.messages[self.user_id][-1] == 'question1'
        self.send_message(bot, 'answer1')
        state = bot.conversation.get_state(self.chat_id)
        assert state['node'] is question_node
        assert bot.messages[self.user_id][-1] == 'question2'
        self.send_message(bot, 'answer2')
        state = bot.conversation.get_state(self.chat_id)
        assert state['node'] is menu
        assert 'question1' in bot.messages['admin'][-1]
        assert 'answer1' in bot.messages['admin'][-1]
        assert 'question2' in bot.messages['admin'][-1]
        assert 'answer2' in bot.messages['admin'][-1]

    def test_dummy_node(self):
        # A dummy node produces no message of its own: only the menu repeats.
        node = DummyNode()
        menu = MenuNode('💡Menu', [MenuItem('dummy', node)])
        bot = FakeBot(menu=menu)
        bot.user_id = self.bot_id
        self.send_message(bot, 'test')
        assert len(bot.messages[self.user_id]) == 1
        assert 'Menu' in bot.messages[self.user_id][-1]
        self.send_message(bot, '1')
        assert len(bot.messages[self.user_id]) == 2
        assert 'Menu' in bot.messages[self.user_id][-1]

    def test_message_node(self):
        node = MessageNode(text='test message')
        menu = MenuNode('💡Menu', [MenuItem('message', node)])
        bot = FakeBot(menu=menu)
        bot.user_id = self.bot_id
        self.send_message(bot, 'test')
        assert len(bot.messages[self.user_id]) == 1
        self.send_message(bot, '1')
        assert len(bot.messages[self.user_id]) == 3
        assert 'test message' in bot.messages[self.user_id][-2]
        assert 'Menu' in bot.messages[self.user_id][-1]

    def test_notify_node(self):
        # The notify node confirms to the user and pings the admin with
        # "notification\n@<username>".
        admin_username = 'test_admin'
        node = NotifyAdminNode(text='notification is sent',
                               notification='notification',
                               admin_username=admin_username)
        menu = MenuNode('💡Menu', [MenuItem('notify', node)])
        bot = FakeBot(menu=menu)
        bot.user_id = self.bot_id
        self.send_message(bot, 'test')
        assert len(bot.messages[self.user_id]) == 1
        self.send_message(bot, '1')
        assert 'notification is sent' in bot.messages[self.user_id][-2]
        assert 'Menu' in bot.messages[self.user_id][-1]
        assert bot.messages[admin_username][-1] == (
            'notification\n@{}'.format(self.username))

    def test_menu_trigger(self):
        # With a trigger configured, the bot stays silent until the trigger
        # text is received.
        node = MessageNode(text='test message')
        menu = MenuNode('💡Menu', [MenuItem('message', node)])
        bot = FakeBot(menu=menu, trigger='/menu')
        bot.user_id = self.bot_id
        self.send_message(bot, 'test')
        assert not bot.messages
        self.send_message(bot, '/menu')
        assert 'Menu' in bot.messages[self.user_id][-1]
        self.send_message(bot, '1')
        assert 'message' in bot.messages[self.user_id][-1]
        message_count = len(bot.messages[self.user_id])
        self.send_message(bot, 'test')
        assert len(bot.messages[self.user_id]) == message_count
        self.send_message(bot, 'test')
        assert len(bot.messages[self.user_id]) == message_count
        self.send_message(bot, '/menu')
        assert len(bot.messages[self.user_id]) == message_count + 1

    def test_dummy_node_with_trigger(self):
        node = DummyNode()
        menu = MenuNode('💡Menu', [MenuItem('dummy', node)])
        bot = FakeBot(menu=menu, trigger='/menu')
        bot.user_id = self.bot_id
        self.send_message(bot, '/menu')
        assert len(bot.messages[self.user_id]) == 1
        assert 'Menu' in bot.messages[self.user_id][-1]
        self.send_message(bot, '1')
        assert len(bot.messages[self.user_id]) == 1
| true |
e9ef333962a5dc4f2d0988bd05f4230dc7fca2f7 | Python | kylemsguy/creepy-octo-lamp | /linkfixer.py | UTF-8 | 1,083 | 2.84375 | 3 | [] | no_license | from html.parser import HTMLParser
class LinkHTMLParser(HTMLParser):
    """HTML parser that locates tags whose attributes carry URLs (src/href/
    tag-specific) so the referenced paths can later be analyzed and rewritten."""

    def __init__(self):
        HTMLParser.__init__(self)
        # Tags whose link lives in a "src" attribute.
        self.tags_src = ("img", "bgsound", "embed", "iframe", "script", "input")
        # Tags whose link lives in an "href" attribute.
        self.tags_href = ("a", "link", "area")
        # Tags with a tag-specific URL attribute.  NOTE(review): the
        # parenthesized values are plain strings, not one-element tuples; the
        # lookup below relies on that.
        self.tags_misc = {"body":("background"), "form":("action"), "object":("data"),
                          "blockquote":("cite")}

    def handle_starttag(self, tag, attrs):
        # Decide which attribute to hunt for, announcing the tag as we go.
        if tag in self.tags_src:
            wanted = "src"
            print("Found tag", tag, "looking for attribute src")
        elif tag in self.tags_href:
            wanted = "href"
            print("Found tag", tag, "looking for attribute href")
        elif tag in self.tags_misc.keys():
            wanted = self.tags_misc[tag]
            print("Found tag", tag, "looking for attribute", wanted)
        else:
            return
        # Scan the attribute list for the first match (case-insensitive).
        for name, value in attrs:
            if name.lower() == wanted.lower():
                # analyze the path given and rename accordingly
                break
| true |
57ae95d062e9d75dbbc5d14109859e0af50428dd | Python | AuFeld/cs-module-project-algorithms | /moving_zeroes/moving_zeroes.py | UTF-8 | 577 | 4.1875 | 4 | [] | no_license | '''
Input: a List of integers
Returns: a List of integers
'''
def moving_zeroes(arr):
    """Return a copy of `arr` with every zero moved to the end,
    preserving the relative order of the non-zero elements.

    Input: a List of integers.  Returns: a List of integers of equal length.
    """
    shifted = [value for value in arr if value != 0]
    # Pad with zeroes so the result has the same length as the input.
    shifted.extend([0] * (len(arr) - len(shifted)))
    return shifted
if __name__ == '__main__':
    # Use the main function here to test out your implementation
    arr = [0, 3, 1, 0, -2]
    print(f"The resulting of moving_zeroes is: {moving_zeroes(arr)}")
bdea3adbc4562843c7a6229d02c1cc0961a0e27e | Python | chyidl/leetcode | /0022-generate-parentheses/generate-parentheses.py | UTF-8 | 946 | 3.96875 | 4 | [
"MIT"
] | permissive | # Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
#
#
# Example 1:
# Input: n = 3
# Output: ["((()))","(()())","(())()","()(())","()()()"]
# Example 2:
# Input: n = 1
# Output: ["()"]
#
#
# Constraints:
#
#
# 1 <= n <= 8
#
#
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        """Return every well-formed combination of n pairs of parentheses,
        in depth-first order (open bracket explored before close)."""
        self.list = []
        self._gen(0, 0, n, "")
        return self.list

    def _gen(self, left, right, n, partial):
        # Backtracking: `left`/`right` count brackets already placed in
        # `partial`.  O(Catalan(n)) results; each valid string has length 2n.
        if left == n and right == n:
            self.list.append(partial)
            return
        # An opening bracket is always legal while fewer than n are placed.
        if left < n:
            self._gen(left + 1, right, n, partial + "(")
        # A closing bracket is legal only when it has an unmatched opener.
        if right < left and right < n:
            self._gen(left, right + 1, n, partial + ")")
| true |
7ed8c3d2a8474d146b6ebcc4f1005d12e9cd0518 | Python | jainamshroff/SnakeWaterGunGame---Python-Exercise | /main.py | UTF-8 | 2,706 | 4.34375 | 4 | [] | no_license | # Snake Water Gun Game
import random

# Interactive Snake-Water-Gun game: 10 rounds against the computer, or exit
# early by entering any number other than 1/2/3.
print("Starting Snake Water Gun, 10 Rounds Per Game, One with Max Score Wins")
print("At Any Point Press 9 To exit")

computerScore = 0  # Score Handler For Computer Player
humanScore = 0  # Score Handler For Human Player
# Renamed from `bool` and `round`: those names shadowed Python builtins.
playing = True
rounds_left = 10

while playing:
    if rounds_left == 1:
        # Last round: fall out of the loop after playing it.
        playing = False
    print(f"Round Number: {rounds_left}")
    rounds_left = rounds_left - 1
    choiceList = ["Snake", "Water", "Gun"]
    computerchoice = random.choice(choiceList)
    # print(computerchoice)
    print("Press 1 For Snake\n"
          "Press 2 For Water\n"
          "Press 3 For Gun\n"
          "Press Any other number to exit before 10 rounds\n"
          "Your Choice:")
    userchoice = int(input())
    if userchoice == 1:
        print(f"You Selected Snake and Computer Selected {computerchoice}")
        if computerchoice == "Gun":
            # Gun beats snake: computer scores.
            computerScore = computerScore + 1
            print(f"You Lost!, Your Score:{humanScore} Computer Score:{computerScore} ")
        elif computerchoice == "Snake":
            print(f"Tie!, Your Score:{humanScore} Computer Score:{computerScore} ")
        else:
            # Snake beats water: player scores.
            humanScore = humanScore + 1
            print(f"You Won!, Your Score:{humanScore} Computer Score:{computerScore}")
    elif userchoice == 2:
        print(f"You Selected Water and Computer Selected {computerchoice}")
        if computerchoice == "Gun":
            # Water beats gun: player scores.
            humanScore = humanScore + 1
            print(f"You Won!, Your Score:{humanScore} Computer Score:{computerScore}")
        elif computerchoice == "Snake":
            # Snake beats water: computer scores.
            computerScore = computerScore + 1
            print(f"You Lost!, Your Score:{humanScore} Computer Score:{computerScore} ")
        else:
            print(f"Tie!, Your Score:{humanScore} Computer Score:{computerScore} ")
    elif userchoice == 3:
        print(f"You Selected Gun and Computer Selected {computerchoice}")
        if computerchoice == "Gun":
            print(f"Tie!, Your Score:{humanScore} Computer Score:{computerScore} ")
        elif computerchoice == "Snake":
            # Gun beats snake: player scores.
            humanScore = humanScore + 1
            print(f"You Won!, Your Score:{humanScore} Computer Score:{computerScore}")
        else:
            # Water beats gun: computer scores.
            computerScore = computerScore + 1
            print(f"You Lost!, Your Score:{humanScore} Computer Score:{computerScore} ")
    else:
        # Any other number exits early.
        playing = False
        print("Game Ends, Results are being displayed")

# Final scoreboard, placed after the loop so that a completed 10-round game
# is reported as well as an early exit.
if humanScore > computerScore:
    print("Hurray, YOU WON THE GAME!")
elif humanScore == computerScore:
    print("Oh Snap, Tie !")
else:
    print("Oh No, YOU LOST THE GAME!")
1abefd0c761380f35411f2a0f45972df1ff3c79d | Python | Aasthaengg/IBMdataset | /Python_codes/p03048/s219553898.py | UTF-8 | 320 | 2.734375 | 3 | [] | no_license | R,G,B,N = (int(x) for x in input().split())
# Count pairs (i, j) with i packs of R and j packs of G such that the
# remaining N - (i*R + j*G) cans can be covered exactly by packs of B.
maxr = N // R  # largest feasible count of R-packs
maxg = N // G  # largest feasible count of G-packs
count = 0
for i in range(maxr + 1):
    for j in range(maxg + 1):
        remainder = N - (i * R + j * G)
        # remainder // B >= 0 <=> remainder >= 0 for positive B.
        # (Removed the original `elif remainder == 0` branch: it was
        # unreachable, since remainder == 0 already satisfies this test.)
        if remainder // B >= 0 and remainder % B == 0:
            count += 1
print(count)
ab9f7326541ccfb3999adbf3b7898d66a56d535f | Python | kitkat-24/AdventOfCode2020 | /day09/script.py | UTF-8 | 2,984 | 4.15625 | 4 | [] | no_license | import itertools
def read_data(file):
    """Read one integer per line from day09/<file> and return them as a list."""
    with open(f'day09/{file}') as f:
        return [int(line) for line in f]
def validate(nums, n):
    """For each number after the length-n preamble, check the XMAS rule:
    it must be the sum of two distinct-valued numbers among the previous n.

    Returns a list of booleans, one per number after the preamble.
    """
    valid = [False] * (len(nums) - n)
    for i in range(n, len(nums)):
        window = nums[i - n:i]
        for a, b in itertools.combinations(window, 2):
            if a != b and a + b == nums[i]:
                valid[i - n] = True
    return valid
def part1():
    """Print the first number in day09/input that violates the preamble rule."""
    prefix = 25
    nums = read_data('input')
    valid = validate(nums, prefix)
    # List.index(elem) gives the index of the first occurence of elem
    print(f'Part 1 answer: {nums[prefix + valid.index(False)]}')
# Now we gotta do some dynamic programming; exciting! (weird to put an
# exclamation right after a semicolon :D)
# Once again, we're gonna see how well I can pull this out of my ass based on
# what I remember of my algorithms course from two years ago.
def find_run(nums, index):
# Now we need to find a continuous run that sums to the key value (nums[index]).
# Memory array (O(n))
cum_sums = [0] * index
# Approach: We iterate through the list from index i = 0 to n-, storing the
# cumulative sum of a run of length k starting at index i in cum_sums[i]. As
# we increase k by 1, we traverse now i = 0 to n-2, updating the sums, and
# so on, until we reach a sum equal to key.
# Looks like I was right in my guess that actually we should only iterate
# over the subset of the list before the key.
for k in range(1, index):
for i in range(0, index - k + 1): # Explicit lower bound to help my brain
cum_sums[i] = cum_sums[i] + nums[i + k - 1]
if cum_sums[i] == nums[index]:
return i, k
# Pretty sure this should be doable in O(n^2) time.
def part2():
# First, we need our answer from part 1
filename = 'input'
prefix = 25
nums = read_data(filename)
valid = validate(nums, prefix)
i, k = find_run(nums, prefix + valid.index(False))
# Now we need the sum of the largest and smallest elements of the run
ans = min(nums[i:i + k - 1]) + max(nums[i:i + k - 1])
print(f'Part 2 answer: {ans}')
# Damn, I actually puzzled out how to do this DP program right on the first
# try, the only thing that fucked me up was that just putting one break
# didn't exit the whole loop, so it's python's fault, and woulda been no
# issue if i just made it a function in the first place 😔
#
# Well, testing on the the little baby example was helpful for both of
# these, but getting right answer on the big question first time made me
# very happy.
if __name__ == '__main__':
    # Script entry point: solve both puzzle parts in order.
    part1()
    part2()
| true |
404e8c42ec354ec0e88ad6f9657ca9c0618d415b | Python | nsbgit/IIT-S21-CS-484 | /Old Materials/Additional Github/dvtate/cs484/in-class/Week 2 Nearest Neighbors Unsupervised.py | UTF-8 | 2,087 | 3.171875 | 3 | [
"MIT"
] | permissive | # Load the necessary libraries
import numpy
import pandas
from sklearn.neighbors import NearestNeighbors as kNN

# Load the car data and give every row a unique "Make_<row>" case id.
cars = pandas.read_csv('cars.csv', delimiter=',')
cars["CaseID"] = cars["Make"] + "_" + cars.index.values.astype(str)
cars_wIndex = cars.set_index("CaseID")

# Specify the kNN
kNNSpec = kNN(n_neighbors = 4, algorithm = 'brute', metric = 'euclidean')

# Specify the training data
trainData = cars_wIndex[['Invoice', 'Horsepower', 'Weight']]
trainData.describe()  # NOTE(review): summary is computed but never printed/stored

# Build nearest neighbors
nbrs = kNNSpec.fit(trainData)
distances, indices = nbrs.kneighbors(trainData)

# Find the nearest neighbors of these focal observations
focal = [[173560, 477, 3131], # Porsche_335
         [119600, 493, 4473], # Mercedes-Benz_263
         [117854, 493, 4429], # Mercedes-Benz_272
         [113388, 493, 4235]] # Mercedes-Benz_271
myNeighbors = nbrs.kneighbors(focal, return_distance = False)
print("My Neighbors = \n", myNeighbors)

# Orthonormalized the training data
# NOTE(review): numpy.matrix is deprecated in favor of plain arrays with @.
x = numpy.matrix(trainData.values)
xtx = x.transpose() * x
print("t(x) * x = \n", xtx)

# Eigenvalue decomposition
evals, evecs = numpy.linalg.eigh(xtx)
print("Eigenvalues of x = \n", evals)
print("Eigenvectors of x = \n",evecs)

# Here is the transformation matrix
transf = evecs * numpy.linalg.inv(numpy.sqrt(numpy.diagflat(evals)));
print("Transformation Matrix = \n", transf)

# Here is the transformed X
transf_x = x * transf;
print("The Transformed x = \n", transf_x)

# Check columns of transformed X
xtx = transf_x.transpose() * transf_x;
print("Expect an Identity Matrix = \n", xtx)

# Rebuild the neighbor index in the orthonormalized space.
nbrs = kNNSpec.fit(transf_x)
distances, indices = nbrs.kneighbors(transf_x)

# Find the nearest neighbors of these focal observations
focal = [[173560, 477, 3131], # Porsche_335
         [119600, 493, 4473], # Mercedes-Benz_263
         [117854, 493, 4429], # Mercedes-Benz_272
         [113388, 493, 4235]] # Mercedes-Benz_271
transf_focal = focal * transf;
myNeighbors_t = nbrs.kneighbors(transf_focal, return_distance = False)
print("My Neighbors = \n", myNeighbors_t)
| true |
248ef64ee2f9817df2b182c1599333af1aa0157e | Python | PasaLab/forestlayer | /forestlayer/layers/factory.py | UTF-8 | 2,605 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
"""
Factory methods to Layers.
"""
# Copyright 2017 Authors NJU PASA BigData Laboratory.
# Authors: Qiu Hu <huqiu00#163.com>
# License: Apache-2.0
from .layer import PoolingLayer
from ..estimators.estimator_configs import ExtraRandomForestConfig, RandomForestConfig
from .window import Window, Pooling
def MGSWindow(wins=(7, 7), strides=(1, 1), pads=(0, 0)):
    """
    Multi-grain Scan window.
    :param wins: (win_x, win_y) window size; must have at least 2 entries
    :param strides: (stride_x, stride_y) scan strides; same length as wins
    :param pads: (pad_x, pad_y) zero-padding sizes; same length as wins
    :return: a Window built from the first two entries of each tuple
    """
    assert len(wins) == len(strides) == len(pads), 'wins({}), strides({}), pads({}) SHAPE in-conform'.format(
        len(wins), len(strides), len(pads))
    assert len(wins) >= 2, 'len(wins) = {}, should >= 2'.format(len(wins))
    return Window(win_x=wins[0], win_y=wins[1], stride_x=strides[0], stride_y=strides[1], pad_x=pads[0], pad_y=pads[1])
def MaxPooling(win_x=2, win_y=2):
    """
    Max Pooling.
    :param win_x: pooling window width, must be a number >= 1
    :param win_y: pooling window height, must be a number >= 1
    :return: a Pooling configured with the "max" strategy
    """
    assert win_x is not None and win_x >= 1, 'win_x = {}, invalid!'.format(win_x)
    assert win_y is not None and win_y >= 1, 'win_y = {}, invalid!'.format(win_y)
    return Pooling(win_x=win_x, win_y=win_y, pool_strategy="max")
def MeanPooling(win_x=2, win_y=2):
    """
    Mean Pooling.
    :param win_x: pooling window width, must be a number >= 1
    :param win_y: pooling window height, must be a number >= 1
    :return: a Pooling configured with the "mean" strategy
    """
    assert win_x is not None and win_x >= 1, 'win_x = {}, invalid!'.format(win_x)
    assert win_y is not None and win_y >= 1, 'win_y = {}, invalid!'.format(win_y)
    return Pooling(win_x=win_x, win_y=win_y, pool_strategy="mean")
def MaxPooling2x2Layer(win_x=2, win_y=2):
    """
    PoolingLayer holding a 2x2 grid of independent MaxPooling(win_x, win_y)
    instances.
    :param win_x: pooling window width
    :param win_y: pooling window height
    :return: the configured PoolingLayer
    """
    grid = [[MaxPooling(win_x, win_y) for _ in range(2)] for _ in range(2)]
    return PoolingLayer(pools=grid)
def MeanPooling2x2Layer(win_x=2, win_y=2):
    """
    PoolingLayer holding a 2x2 grid of independent MeanPooling(win_x, win_y)
    instances.
    :param win_x: pooling window width
    :param win_y: pooling window height
    :return: the configured PoolingLayer
    """
    grid = [[MeanPooling(win_x, win_y) for _ in range(2)] for _ in range(2)]
    return PoolingLayer(pools=grid)
def EstForWin2x2(**kwargs):
    """
    2x2 grid of estimator configs for windows; each row pairs one
    ExtraRandomForestConfig with one RandomForestConfig.  Both rows share
    the same two config objects (as in the original).
    :param kwargs: forwarded to both estimator config constructors
    :return: nested 2x2 list of configs
    """
    extra_cfg = ExtraRandomForestConfig(**kwargs)
    rf_cfg = RandomForestConfig(**kwargs)
    return [[extra_cfg, rf_cfg], [extra_cfg, rf_cfg]]
| true |
e336303c212c02b28486a2017ea231a516d1a917 | Python | sinemelifhaseki/ROS-Kinetic-Robotics | /shape_color_detection/roboroach/src/camera_test.py | UTF-8 | 5,224 | 2.609375 | 3 | [
"MIT"
] | permissive | import rospy
import sys
from sensor_msgs.msg import Image, LaserScan
import matplotlib.pyplot as plt
import time
import numpy as np
import cv2
import base64
from geometry_msgs.msg import PoseStamped
# Interactive setup: ask the user for a "<color> <shape>" target to detect.
print("**************************HELLO I AM ROBOROACH!**************************")
print("********************I WILL FIND THE COLOR AND SHAPE**********************")
print("What do you desire?")
# NOTE(review): raw_input is Python 2, while the f-strings further down are
# Python 3 -- confirm which interpreter this file actually targets.
user = raw_input()
colorname, shapename = user.split(" ")
print("Hmm, let's see if there is a " + colorname + " " + shapename)
font = cv2.FONT_HERSHEY_COMPLEX  # font for the shape labels drawn on frames
prnt = 0  # global flag, set to 1 once the requested color+shape is seen
def displayImage(image):
    """Show a single image in a blocking matplotlib window."""
    plt.imshow(image)
    plt.show()
def illcallyouback(data):
global prnt
h, w = data.height, data.width
flag = 0
frame = np.fromstring(data.data, dtype='>u1').reshape(h,w,3)[..., ::-1]
frame = np.array(frame)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
if colorname == "blue":
l_h = 10
l_s = 179
l_v = 160
u_h = 50
u_s = 255
u_v = 170
elif colorname == "yellow":
l_h = 0
l_s = 130
l_v = 227
u_h = 180
u_s = 255
u_v = 255
elif colorname == "green":
l_h = 30
l_s = 136
l_v = 18
u_h = 90
u_s = 255
u_v = 243
elif colorname == "red":
l_h = 93
l_s = 13
l_v = 164
u_h = 180
u_s = 255
u_v = 243
lower_red = np.array([l_h, l_s, l_v])
upper_red = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_red, upper_red)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.erode(mask, kernel)
# Contours detection
if int(cv2.__version__[0]) > 3:
# Opencv 4.x.x
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
# Opencv 3.x.x
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt)
approx = cv2.approxPolyDP(cnt, 0.02*cv2.arcLength(cnt, True), True)
x = approx.ravel()[0]
y = approx.ravel()[1]
if area > 400:
cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
if len(approx) == 3:
if shapename == "triangle":
flag = 1
string = 'Eureka! I have seen a '+ colorname+ ' triangle!'
prnt = 1
cv2.putText(frame, "Triangle", (x, y), font, 1, (0, 0, 0))
break
elif len(approx) == 4:
if shapename == "rectangle":
flag = 1
string = 'Eureka! I have seen a '+ colorname+ ' rectangle!'
prnt = 1
cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))
break
elif 5 < len(approx):
if shapename == "circle":
flag = 1
string = 'Eureka! I have seen a '+ colorname+ ' circle!'
prnt = 1
cv2.putText(frame, "Circle", (x, y), font, 1, (0, 0, 0))
break
#h, w = data.height, data.width
cv2.imshow('Frame',frame[..., ::-1])
if cv2.waitKey(25) & 0xFF == ord('q'):
return
def laser_callback(data):
print(data)
def node():
rospy.init_node('amble')
rospy.Subscriber("/camera/rgb/image_raw", Image, illcallyouback, queue_size = 1000)
pub = rospy.Publisher("/move_base_simple/goal", PoseStamped, queue_size = 1000)
rospy.sleep(5)
goal1 = PoseStamped()
goal1.header.frame_id = "map"
goal1.header.stamp = rospy.Time.now()
goal1.pose.position.x = 10.299451828
goal1.pose.position.y = 6.46806716919
goal1.pose.position.z = 0.0
goal1.pose.orientation.w =0.718696551932
goal1.pose.orientation.z = -0.695323857092
pub.publish(goal1)
rospy.sleep(23)
goal = PoseStamped()
goal.header.frame_id = "map"
goal.header.stamp = rospy.Time.now()
goal.pose.position.x = 9.75285053253
goal.pose.position.y = 1.99568319321
goal.pose.position.z = 0.0
goal.pose.orientation.w = 1.0
goal.pose.orientation.z = 0.0
pub.publish(goal)
rospy.sleep(20)
goal2 = PoseStamped()
goal2.header.frame_id = "map"
goal2.header.stamp = rospy.Time.now()
goal2.pose.position.x = 3.01921746254
goal2.pose.position.y = 6.91606079102
goal2.pose.position.z = 0.0
goal2.pose.orientation.w = 0.640244253399
goal2.pose.orientation.z = 0.768142859432
pub.publish(goal2)
rospy.sleep(17)
goal5 = PoseStamped()
goal5.header.frame_id = "map"
goal5.header.stamp = rospy.Time.now()
goal5.pose.position.x = 3.27724266052
goal5.pose.position.y = 8.27506637573
goal5.pose.position.z = 0.0
goal5.pose.orientation.w = 0.814810943173
goal5.pose.orientation.z = 0.579726769164
pub.publish(goal5)
rospy.sleep(4)
if prnt:
print('Eureka! I have seen a '+ colorname+ " "+shapename)
else:
print("Roboroach couldn't find any " + colorname +" "+ shapename+"s")
rospy.spin()
if __name__ == '__main__':
node() | true |
07e4329aa3e3b054fa4863226abf6e4df4377448 | Python | alunfes/bybit-bot2 | /AccountConverter.py | UTF-8 | 802 | 2.53125 | 3 | [] | no_license |
from Bot import Bot
from SimAccount import SimAccount
from BotAccount import BotAccount
class AccountConverter:
'''
'''
@classmethod
def convert_bot_account(cls):
sim_ac = SimAccount()
hd = BotAccount.get_holding_data()
if hd['side'] != '':
sim_ac.holding_side = hd['side']
sim_ac.holding_price = hd['price']
sim_ac.holding_size = hd['size']
sim_ac.holding_dt = hd['dt']
sim_ac.holding_period = hd['period']
sim_ac.holding_ut = hd['dt'].timestamp()
oids = BotAccount.get_order_ids()
if len(oids) > 0:
od = BotAccount.get_order_data()
if od['side']
sim_ac.order_side
pd = BotAccount.get_performance_data()
return sim_ac
| true |
cbb35a9d2f06e4629b131a9079e434702fc3cd0b | Python | blairg23/Particle-Simulator | /examples/bouncingBall.py | UTF-8 | 3,263 | 3.4375 | 3 | [] | no_license | # This script animates a bouncing ball using OpenGL.
# Written by Glen Granzow on November 11, 2011.
# Modified by Glen Granzow on November 18, 2011.
from OpenGL.GL import *
from OpenGL.GLUT import *
#### Reshape Call-back Function ####
def reshape(width, height):
glViewport(0,0,width,height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-1-radius,1+radius, 0-radius,2+radius, -1,1)
glMatrixMode(GL_MODELVIEW)
glLight(GL_LIGHT0, GL_POSITION, [0.7, 1.0, 2.0, 0.0]);
##### Idle Call-back Function ####
def idle():
# Update the ball's position and velocity (using Euler's method)
global x, y, vx, vy, theta
vy = vy - gravity
x = x + vx
y = y + vy
if x < -1:
x = -2 - x
vx = - vx * elasticity
if x > 1:
x = 2 - x
vx = -vx * elasticity
if y < 0:
y = - y
vy = - vy * elasticity
# theta += 0.01
glutPostRedisplay()
##### Keyboard Call-back Functions ####
def keyboard(key, x, y):
global color, elasticity, gravity, theta, phi
if key == 'c':
color = not color
if key == 'w':
color = False
glColor(1.0,1.0,1.0) # white
if key == 'e':
elasticity -= 0.1
print "'e' was pressed: elasticity =", elasticity
if key == 'E':
elasticity += 0.1
print "'E' was pressed: elasticity =", elasticity
if key == 'g':
gravity *= 2
print "'g' was pressed: gravity =", gravity
if key == 'G':
gravity /= 2
print "'G' was pressed: gravity =", gravity
if key == 'r':
theta += 1
if key == 'R':
phi += 1
if key == 'q':
print "'q' was pressed"
exit(0)
def special(key, x, y):
global vx, vy
if key == GLUT_KEY_UP:
vy += 100*gravity
print 'UP key was pressed: vy =', vy
if key == GLUT_KEY_LEFT:
vx -= 100*gravity
print 'LEFT key was pressed: vx =', vx
if key == GLUT_KEY_RIGHT:
vx += 100*gravity
print 'RIGHT key was pressed: vx =', vx
##### Display Call-back Function ####
def display():
if color:
v = max(0.001,(vx*vx + vy*vy)**0.5)
glColor(abs(vx)/v,1.0-abs(vy)/v,abs(vy)/v);
else:
glColor(1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glTranslate(x,y,0)
glRotate(theta, 0, 0, 1)
glRotate(phi, 1, 0, 0)
glutSolidSphere(radius, 20, 10)
glColor(0.5, 0.5, 0.5)
glutWireSphere(radius, 20, 10)
glutSwapBuffers()
#### Main Program ####
# Ball parameters
color = False
radius = 0.2
x, y = (0, 0) # position
vx, vy = (0, 0) # velocity
gravity = 0.00001 # acceleration
elasticity = 1 # damping
theta, phi = 0, 0
# Initialization
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE)
glutInitWindowPosition(100,0)
glutInitWindowSize(500,500)
glutCreateWindow("Bouncing Ball")
# Register Call-back Functions
glutReshapeFunc(reshape)
glutIdleFunc(idle)
glutKeyboardFunc(keyboard)
glutSpecialFunc(special)
glutDisplayFunc(display)
# Lighting Parameters
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE)
glEnable(GL_POLYGON_OFFSET_FILL) # Prevents some hidden line problems when drawing
glPolygonOffset(1.0, 1.0); # a wireframe on top of filled polygons.
glEnable(GL_DEPTH_TEST)
# Start the simulation
glutMainLoop()
| true |
a96187e2aa25b50d4dc89ff3c7e3ce81945f7131 | Python | khrogos/pelican-gui | /main.py | UTF-8 | 11,343 | 2.875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# coding: utf-8
# TODO :
# delete draft if saved as published
# use pelicanconf to more flexible paramters
# config file with default pelican blog
#
import Tkinter as tk
import ScrolledText
import tkFileDialog
import os
import getpass
import subprocess
import ttk
class MainApplication(tk.Frame):
path = None
draft_directory = None
publish_directory = None
def choose_pelican_path(self):
"""
menu option to set the pelican blog folder
check if this is a pelican folder by trying to find the pelicanconf.py
file.
create a draft folder if not existe
TODO :
use a config file for persistent saving
"""
self.path = tkFileDialog.askdirectory()
self.publish_directory = "/".join([self.path, "content"])
self.draft_directory = "/".join([self.path, "draft"])
if os.path.isdir(self.path):
if not os.path.exists("/".join([self.path, "pelicanconf.py"])):
# TODO : try to find a ... cleaner way to do that
print "Not a pelican folder ! "
exit(1)
if not os.path.isdir(self.draft_directory):
# create draft folder
os.makedirs(self.draft_directory)
def open_dir(self, path):
"""
Open a file in path and parse the file to fill
all fields of the interface
"""
if path:
self.draft = tkFileDialog.askopenfilename(initialdir=path)
if self.draft:
self.new_article() # flush the fields before adding values
with open(self.draft, 'r') as d:
for line in d:
if "Title:" in line:
to_insert = " ".join(line.split(":")[1:]).strip()
self.title_entry.insert(0, to_insert)
elif "Date:" in line:
to_insert = " ".join(line.split(":")[1:]).strip()
self.date_entry.insert(0, to_insert)
elif "Category:" in line:
to_insert = " ".join(line.split(":")[1:]).strip()
self.category_entry.delete(0, tk.END)
self.category_entry.insert(0, to_insert)
elif "Tags:" in line:
to_insert = " ".join(line.split(":")[1:]).strip()
self.tags_entry.insert(0, to_insert)
elif "Summary:" in line:
to_insert = " ".join(line.split(":")[1:]).strip()
self.summary_entry.insert(0, to_insert)
elif "Slug:" in line or "Author:" in line:
continue
else:
self.body_entry.insert(tk.END, line)
print self.draft
else:
print "set pelican home first"
def open_draft(self):
"""
open a file in $PELICAN_HOME/draft/ folder
"""
self.open_dir(self.draft_directory)
def open_published(self):
"""
open a file in $PELICAN_HOME/content/ folder
"""
self.open_dir(self.publish_directory)
def new_article(self):
"""
flush all fields in interface
"""
self.title_entry.delete(0, tk.END)
self.date_entry.delete(0, tk.END)
self.category_entry.delete(0, tk.END)
self.tags_entry.delete(0, tk.END)
self.summary_entry.delete(0, tk.END)
self.body_entry.delete('0.0', tk.END)
def clean_title(self, title):
"""
Prepare title for slug
"""
title = title.strip()
title = title.replace(" ", "-")
title = title.replace("'", "-")
title = title.replace(":", "-")
return title.lower()
def save_file(self, path):
"""
retrieve informations from the GUI to generate the markdown document
and save it
"""
title = self.title_entry.get()
date = self.date_entry.get()
categories = self.category_entry.get()
tags = self.tags_entry.get()
summary = self.summary_entry.get()
body = self.body_entry.get(1.0, tk.END).strip()
author = getpass.getuser()
slug = self.clean_title(title)
f = tkFileDialog.asksaveasfile(mode='w',
defaultextension=".md",
initialdir=path)
if f is None:
return
text_to_save = u"""Title: {0}
Date: {1}
Category: {2}
Tags: {3}
Author: {4}
Slug: {5}
Summary: {6}
{7}
""".format(title, date, categories, tags, author, slug, summary, body)
f.write(text_to_save)
name = f.name
f.close()
return name
def save_draft(self):
f = self.save_file(self.draft_directory)
draft_file = f.name
if os.path.exists(draft_file):
pub_file = draft_file.replace('draft', 'content')
os.rename(pub_file, draft_file)
def save_published(self):
f = self.save_file(self.publish_directory)
pub_file = f.name
# delete draft if needs to be published
if os.path.exists(pub_file):
draft_file = pub_file.replace('content', 'draft')
os.rename(draft_file, pub_file)
def go_live(self):
os.chdir(self.path)
subprocess.call(["make", "publish", "ssh_upload"])
def __init__(self, parent, *args, **kwargs):
"""
Creation of the GUI is done here
"""
s = ttk.Style()
s.theme_use('classic')
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
parent.resizable(width=False, height=True)
tk.Grid.rowconfigure(self.parent, 0, weight=1)
tk.Grid.columnconfigure(self.parent, 0, weight=1)
# Use grid
self.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
# menu configuration
self.menu_bar = tk.Menu(self)
self.menu_bar.add_command(label="set pelican home",
command=self.choose_pelican_path)
self.menu_bar.add_command(label="Quit",
command=self.quit)
self.parent.config(menu=self.menu_bar)
# declare elements
# buttons
self.new_article_button = tk.Button(self,
text="new article",
command=self.new_article)
self.open_draft_button = tk.Button(self,
text="open Draft",
command=self.open_draft)
self.open_published_button = tk.Button(self,
text="Open published article",
command=self.open_published)
self.save_draft_button = tk.Button(self,
text="Save to draft",
command=self.save_draft)
self.save_published_button = tk.Button(self,
text="Ready to publish",
command=self.save_published)
self.upload_button = tk.Button(self,
text="Go online",
command=self.go_live)
# labels and entrys
self.title_label = tk.Label(self, text="Title")
self.date_label = tk.Label(self, text="Date")
self.category_label = tk.Label(self, text="Category")
self.tags_label = tk.Label(self, text="Tags")
self.summary_label = tk.Label(self, text="summary")
self.body_label = tk.Label(self, text="Body")
self.title_entry = tk.Entry(self)
self.date_entry = tk.Entry(self)
self.category_entry = tk.Entry(self)
self.tags_entry = tk.Entry(self)
self.summary_entry = tk.Entry(self)
self.body_entry = ScrolledText.ScrolledText(self, wrap=tk.WORD)
# position elements on the grid
self.new_article_button.grid(column=0,
row=0,
sticky='W',
pady=(20, 20),
padx=(20, 0))
self.open_draft_button.grid(column=1,
row=0,
pady=(20, 20))
self.open_published_button.grid(column=2,
row=0,
sticky='E',
pady=(20, 20),
padx=(0, 20))
self.title_label.grid(column=0,
row=1,
sticky='W')
self.date_label.grid(column=0,
row=2,
sticky='W')
self.category_label.grid(column=0,
row=3,
sticky='W')
self.tags_label.grid(column=0,
row=4,
sticky='W')
self.summary_label.grid(column=0,
row=5,
sticky='W')
self.body_label.grid(column=0,
row=6,
sticky='W')
self.title_entry.grid(column=1,
row=1,
sticky='WE')
self.date_entry.grid(column=1,
row=2,
sticky='WE')
self.category_entry.grid(column=1,
row=3,
sticky='WE')
self.tags_entry.grid(column=1,
row=4,
sticky='WE')
self.summary_entry.grid(column=1,
row=5,
sticky='WE')
self.body_entry.grid(column=1,
row=6,
columnspan=2,
sticky='NSWE',
padx=(0, 50),
pady=(0, 50))
self.save_draft_button.grid(column=0,
row=7,
sticky=tk.W,
pady=(20, 20),
padx=(20, 0))
self.save_published_button.grid(column=1,
row=7,
pady=(20, 20))
self.upload_button.grid(column=2,
row=7,
pady=(20, 20),
padx=(0, 20))
# manage horizontal and vertical resizing
self.columnconfigure(1, weight=3)
self.rowconfigure(6, weight=3)
self.update()
if __name__ == "__main__":
root = tk.Tk()
MainApplication(root).pack(side="top", fill="both", expand=True)
root.mainloop()
| true |
e6a49bff09a81bf8fe2c26bf78eb1ba8176b18ea | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_2/kngtho005/question2.py | UTF-8 | 2,810 | 4.0625 | 4 | [] | no_license | # question2
# a program to decide whether to eat a cupcake that has fallen on the floor
# Thomas Konigkramer
# 8 March 2014
# introduction to what the program does
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
witness = input("Did anyone see you? (yes/no)\n")
if witness == "yes":
who_witnessed = input("Was it a boss/lover/parent? (yes/no)\n")
if who_witnessed == "no":
print("Decision: Eat it.")
elif who_witnessed == "yes":
price = input("Was it expensive? (yes/no)\n")
if price == "yes":
cut_off = input("Can you cut off the part that touched the floor? (yes/no) \n")
if cut_off == "yes":
print("Decision: Eat it.")
elif cut_off == "no":
print("Decision: Your call.")
elif price == "no":
chocolate = input("Is it chocolate? (yes/no)\n")
if chocolate == "yes":
print("Decision: Eat it.")
elif chocolate == "no":
print("Decision: Don't eat it.")
elif witness == "no":
sticky = input("Was it sticky? (yes/no)\n")
if sticky == "yes":
steak = input("Is it a raw steak? (yes/no)\n")
if steak == "yes":
puma = input("Are you a puma? (yes/no)\n")
if puma == "no":
print("Decision: Don't eat it.")
elif puma == "yes":
print("Decision: Eat it.")
elif steak == "no":
cat = input("Did the cat lick it? (yes/no)\n")
if cat == "no":
print("Decision: Eat it.")
elif cat == "yes":
healthy = input("Is your cat healthy? (yes/no)\n")
if healthy == "no":
print("Decision: Your call.")
elif healthy == "yes":
print("Decision: Eat it.")
elif sticky == "no":
emausaurus = input("Is it an Emausaurus? (yes/no)\n")
if emausaurus == "no":
cat = input("Did the cat lick it? (yes/no)\n")
if cat == "no":
print("Decision: Eat it.")
elif cat == "yes":
healthy = input("Is your cat healthy? (yes/no)\n")
if healthy == "no":
print("Decision: Your call.")
elif healthy == "yes":
print("Decision: Eat it.")
elif emausaurus == "yes":
megalosaurus = input("Are you a Megalosaurus? (yes/no)\n")
if megalosaurus == "no":
print("Decision: Don't eat it.")
elif megalosaurus == "yes":
print("Decision: Eat it.") | true |
8f2553d34ca2ca5d8c3ed55906dcd90dd10df64a | Python | jaisanant0/face-recognition | /encode-faces.py | UTF-8 | 2,191 | 2.859375 | 3 | [
"MIT"
] | permissive | # network architecture for face recognition is based on ResNet-34.
# the network is trained by Davis King on LFW having 99.38% accuracy.
import face_recognition
import os
import glob
import argparse
import cv2
import csv
import pandas as pd
import numpy as np
# command line argument
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--method', type=str, choices = ['hog', 'cnn'], default = 'hog',
help = 'method to use to detect faces in image')
args = parser.parse_args()
print("[+] Using method : " + args.method)
# path of person images
images_path = os.getcwd() + '/images/'
persons = os.listdir(images_path)
# list to save name and encodings
encodings = []
names = []
for person in persons :
for person_img in glob.glob(images_path + person + '/' + '*.jpg') :
img_name = os.path.basename(person_img).split('.')[0]
print("[+] Creating face encoding for " + person + ' and image : ' + img_name)
img_read = cv2.imread(person_img)
img_rgb = cv2.cvtColor(img_read, cv2.COLOR_BGR2RGB)
# find the dimensions of box cintaining faces
faces = face_recognition.face_locations(img_rgb,2,model = args.method)
# check if face found
if len(faces) != 0 :
# create face encodings
encoding = face_recognition.face_encodings(img_rgb,faces,2)
encodings.append(np.array(*encoding).tolist())
names.append(person)
# if faces are not found
else :
print("[-] Could not find face for " + person + 'and image : ' + img_name)
# if method is HOG
if args.method == 'hog' :
with open(os.getcwd() + '/encodings-hog.csv',mode = 'a') as encode_db :
writer = csv.writer(encode_db)
writer.writerow(['Name', 'Encodings'])
for n,e in zip(names,encodings) :
writer.writerow([n,e])
# if method is cnn
if args.method == 'cnn' :
with open(os.getcwd() + '/encodings-cnn.csv',mode = 'a') as encode_db :
writer = csv.writer(encode_db)
writer.writerow(['Name', 'Encodings'])
for n,e in zip(names,encodings) :
writer.writerow([n,e])
| true |
262ab6787492a2da14fc5ad66ece10617b7eb7ea | Python | vbakhteev/segmentation_pipeline | /models/utils.py | UTF-8 | 4,198 | 2.5625 | 3 | [] | no_license | import torch
from torch import nn
def get_layers_by_dim(n_dim: int) -> dict:
assert n_dim in (1, 2, 3)
layers = {
"batch_norm": (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d),
"conv": (nn.Conv1d, nn.Conv2d, nn.Conv3d),
"conv_transpose": (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d),
"pad": (nn.ReflectionPad1d, nn.ReflectionPad2d, nn.ReplicationPad3d),
"max_pool": (nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d),
"avg_pool": (nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d),
"adaptive_max_pool": (
nn.AdaptiveMaxPool1d,
nn.AdaptiveMaxPool2d,
nn.AdaptiveMaxPool3d,
),
"adaptive_avg_pool": (
nn.AdaptiveAvgPool1d,
nn.AdaptiveAvgPool2d,
nn.AdaptiveAvgPool3d,
),
}
result = dict()
for layer_name, choices in layers.items():
if len(choices) >= n_dim:
layer = choices[n_dim - 1]
else:
layer = None
result[layer_name] = layer
return result
def change_layers_dim(
from_dim: int,
to_dim: int,
layer_names=("Conv", "BatchNorm", "MaxPool"),
):
def decorator(function):
def wrapper(*args, **kwargs):
# Replace nn.Layer{from_dim}d to nn.Layer{to_dim}d
memory = dict()
for name in layer_names:
name_to_d, name_from_d = name + f"{from_dim}d", name + f"{to_dim}d"
memory[name_to_d] = getattr(nn, name_to_d)
setattr(nn, name_to_d, getattr(nn, name_from_d))
result = function(*args, **kwargs)
# Return nn.Layer{from_dim}d backward
for name_to_d, v in memory.items():
setattr(nn, name_to_d, v)
return result
return wrapper
return decorator
def initialize_decoder(module):
for m in module.modules():
if isinstance(m, nn.modules.conv._ConvNd):
nn.init.kaiming_uniform_(m.weight, mode="fan_in", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.modules.batchnorm._BatchNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def initialize_head(module):
for m in module.modules():
if isinstance(m, (nn.Linear, nn.modules.conv._ConvNd)):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def freeze(model):
for param in model.parameters():
param.requires_grad = False
def unfreeze(model):
for param in model.parameters():
param.requires_grad = True
def load_state_dict(model, path, soft=False):
checkpoint = torch.load(path, map_location="cpu")
if "pytorch-lightning_version" in checkpoint:
checkpoint = filter_lightning_stuff(checkpoint)
if soft:
soft_load_state_dict(model, checkpoint)
else:
model.load_state_dict(checkpoint)
def filter_lightning_stuff(checkpoint):
checkpoint = checkpoint["state_dict"]
checkpoint = {k.replace("model.", "", 1): v for k, v in checkpoint.items()}
return checkpoint
def soft_load_state_dict(model, state_dict):
model_state = model.state_dict()
not_loaded_params = []
for name, param in state_dict.items():
if name.startswith("module."):
name = name[7:]
if name not in model_state or model_state[name].shape != param.shape:
not_loaded_params += [name]
continue
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
model_state[name].copy_(param)
if len(not_loaded_params):
print(
"WARNING: following params couldn't be loaded into model:",
not_loaded_params,
)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group["lr"]
| true |
8e508bac431ae10afe67d37641f7f94ac65a9c30 | Python | OpenReader/LeetCode | /Python3/304_Range_Sum_Query_2D_-_Immutable.py | UTF-8 | 1,221 | 3.421875 | 3 | [] | no_license | class NumMatrix:
def __init__(self, matrix: List[List[int]]):
m = len(matrix)
if m == 0:
return
n = len(matrix[0])
if n == 0:
return
self.acc = [[0] * n for _ in range(m)]
self.acc[0][0] = matrix[0][0]
# init first row
for j in range(1, n):
self.acc[0][j] = self.acc[0][j-1] + matrix[0][j]
# init first col
for i in range(1, m):
self.acc[i][0] = self.acc[i-1][0] + matrix[i][0]
for i in range(1, m):
for j in range(1, n):
self.acc[i][j] = matrix[i][j] + self.acc[i-1][j] + self.acc[i][j-1] - self.acc[i-1][j-1]
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
# print(self.acc[row2][col2])
ret = self.acc[row2][col2]
if row1 - 1 >= 0:
ret -= self.acc[row1-1][col2]
if col1 - 1 >= 0:
ret -= self.acc[row2][col1-1]
if row1 - 1 >= 0 and col1 - 1 >= 0:
ret += self.acc[row1-1][col1-1]
return ret
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2) | true |
35c0966fc9b30fffa4f3dab40528d240aaba3358 | Python | jasonfangmagic/Python_Basics | /basics.py | UTF-8 | 3,502 | 4.03125 | 4 | [] | no_license | #Input
print("Hello what's your name")
name = input()
print("Hello,", name)
#operator
num1 = 34
num2 = 3
#only have integer
print(num1 // num2)
#only residual
print(num1 % num2)
#expotion
print(num1 ** num2)
#convert numbers
print("pick a number")
num1 = input()
print("pick another number")
num2 = input()
sum = int(num1) + int(num2)
print("Your Number is", sum)
print(2 == 4)
age = input("Input your age; ")
if int(age) == 15:
print("Hey, your age is 15")
else: print("You're not 15")
#Chained Conditional and Nested if statement
x =3
y = 9
if not (y == x or x +y == 6):
print("run")
else:
print(":(")
if x == 3:
if y == 4:
print("x = 3, y = 4")
else:
print("x = 2, y != 4")
else:
print("x != 3")
#for loop
for x in range(0, 10): #start, stop, step
print(x)
loop= True
while loop:
name = input("insert sth: ")
if name == "stop":
break
#Lists and Tuples
fruits = ["apple", "pear", 3]
print(fruits[1])
fruits.append("orange")
print(fruits)
fruits[1] = "blueberry"
print(fruits)
position = (2, 3, 4)
# For Loop iteration
for i in fruits:
if i == "apple":
print(i)
else:
print("not apple")
#string methods
text = input("input sth: ")
print(text.strip())
print(len(text))
print(text.lower())
text = input("input sth: ")
print(text.split("."))
#Slice Operator
fruits = ["apple", "pear", 3]
text = "Hello I am Jason"
print(text[6:])
#steps
print(text[1:1])
fruits[1:1] = ["Blueberries"]
print(fruits)
#insert
#functions
def addtwo(x):
return x+2
x=5
y=addtwo(x)
print(y)
def writestring(x):
return print(x)
writestring("hello")
file = open("basics.txt", "r")
f = file.readlines()
print(f)
#using .count and .find
string1 = 'hello'
print(string1.find('o'))
print(string1.count('z'))
#optional parameters
def func(x, text='2'):
print(x)
if text == '1':
print('text is 1')
else:
print('text is not 1')
func('jason')
#try and except
#global and local variables
newVar = 9
loop = True
def func(x):
global newVar
newVar = 7
if x == 6:
return newVar
print(newVar)
func(6)
#objects and classes
def func(x):
print("hello")
func("a")
x = 'string'
print(type(x))
class number():
def __init__(self, num):
self.var = num
def display (self, x):
print(x)
num = number(23)
#
class employee:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last +'@company.com'
def fullname(self):
return '{} {}'.format(self.first, self.last)
emp1_1 = employee('corey','Schafer', 50000)
emp1_2 = employee('test','user', 60000)
print(emp1_1.email)
print('{} {}'.format(emp1_1.first, emp1_1.last))
print(emp1_2.fullname())
employee.fullname(emp1_1)
#optional parameters
def func(x = 1):
return x**2
call = func(5)
print(call)
def func(word, add=1, freq=1):
print(word*freq)
call = func('hello', 5, 3)
print(call)
#static and class
class person(object):
population=50
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def getpopulation(cls):
return cls.population()
@staticmethod
def isAdult(age):
return age >=18
def display(self):
print(self.name, 'is', self.age, 'years old')
newperson = person('tim', 18)
#object orientated programming
print(type(True))
| true |
9c889a43972bb4b37157a1cd5f1386787f489e84 | Python | indrajitbarve/riptide | /riptide/running_median.py | UTF-8 | 2,746 | 3.4375 | 3 | [
"MIT"
] | permissive | import numpy
from bisect import insort, bisect_left
def running_median(data, width):
if not width % 2:
raise ValueError("width must be an odd number")
l = width * [data[0]]
mididx = (width - 1) // 2
result = numpy.zeros_like(data)
for idx, new_elem in enumerate(data):
old_elem = data[max(0, idx - width)]
del l[bisect_left(l, old_elem)]
insort(l, new_elem)
result[idx] = l[mididx]
return result[2*mididx:]
def fast_running_median(data, width, min_points):
""" Compute an approximate running median of data over large window sizes.
Parameters
----------
data : ndarray
Input data
width : int
Required width of the running median window in number of samples
min_points : int
The running median is calculated of a time scrunched version of the
input data to save time: minpts is the minimum number of
scrunched samples that must fit in the running median window.
Lower values make the running median calculation less accurate but
faster, due to allowing a higher scrunching factor.
"""
if width < 3:
raise ValueError('width must be > 3')
if min_points < 3:
raise ValueError('min_points must be > 3')
width = int(width)
min_points = int(min_points)
# num_points = effective width of running median on downsampled data
dsfactor = max(int(width / float(min_points)), 1)
num_points = int(numpy.ceil(width / float(dsfactor)))
# Make sure num_points is an odd number, makes life easier when it comes
# to padding the edges of the data
num_points += int(not num_points % 2)
### Edge case: no downsampling needed
if dsfactor <= 1:
# num_points is our final window width even in this case
y = numpy.pad(data, num_points // 2, 'median', stat_length=width)
return running_median(y, num_points)
### Add 'width' elements to data on both edges.
# Fill value at the beginning is the median of the first 'width' samples of data
# Fill value at the end is the median of the last 'width' samples of data
y = numpy.pad(data, width, 'median', stat_length=width)
# x-coordinates of padded data in original referential
x = numpy.arange(-width, data.size + width, 1, dtype=int)
### Downsample
n = y.size - y.size % dsfactor
yds = y[:n].reshape(-1, dsfactor).mean(axis=1)
xds = x[:n].reshape(-1, dsfactor).mean(axis=1)
### Running Median
rmed_ds = running_median(yds, num_points)
x_rmed_ds = xds[numpy.arange(rmed_ds.size) + num_points//2]
### Upsample
rmed = numpy.interp(numpy.arange(data.size), x_rmed_ds, rmed_ds)
return rmed
| true |
ad60d463d92c5c4331bf7704bc512ba855b03d1c | Python | SupakornNetsuwan/Prepro64 | /test14.py | UTF-8 | 147 | 3.296875 | 3 | [] | no_license | """Func"""
def func():
"""Modulo func"""
base = int(input())
modulo = int(input())
print(base - (modulo * (base//modulo)))
func()
| true |
1c70a5ebb62420b9cb48e909561d293c1d681df8 | Python | zh1047592355/ApiAutoTest | /day05/test_001.py | UTF-8 | 2,004 | 2.953125 | 3 | [] | no_license | '''
mock
1.接口测试的测试场景比较难模拟,需要大量的工作才能做好
2.该接口的测试,依赖其他模块的接口,依赖的接口尚未开发完成
测试条件不充分,怎么开展接口测试
使用mock模拟接口的返回值
'''
import requests
from unittest import mock
'''
支付接口:http://www.zhifu.com/
方法:post
参数:{"订单号":“12345”,"支付金额":20.56,"支付方式":"支付宝/微信/余额宝/银行卡"}
返回值:{"code":200,"msg":"支付成功"}、{"code":201,"msg":"支付失败"}
接口尚未实现
'''
class Pay:
def zhifu(self,data):
r=requests.post("http://www.zhifu.com/",data=data)
return r.json()
def test_001():
pay=Pay()
#通过mock模拟接口的返回值
pay.zhifu=mock.Mock(return_value={"code":200,"msg":"支付成功"})
canshu={"订单号":"12345","支付金额":20.56,"支付方式":"支付宝"}
r=pay.zhifu(canshu)
print(r)
assert r['msg']=="支付成功"
def test_002():
pay=Pay()
#通过mock模拟接口的返回值
pay.zhifu=mock.Mock(return_value={"code":201,"msg":"支付失败"})
canshu={"订单号":"12345","支付金额":-20.56,"支付方式":"支付宝"}
r=pay.zhifu(canshu)
print(r)
assert r['msg']=="支付失败"
# 模块名,类名,方法名
@mock.patch("test_001.Pay.zhifu",return_value={"code":200,"msg":"支付成功"})
def test_003(mock_pay):
pay=Pay()
canshu={"订单号":"12345","支付金额":201.56,"支付方式":"微信"}
r=pay.zhifu(canshu)
print(r)
assert r['msg']=="支付成功"
class Quxian:
def quxian(self,data):
url="http://www.zhifu.com/member/withdraw"
r=requests.post(url,data=data)
return r.json()
def test_004():
qx=Quxian()
qx.quxian=mock.Mock(return_value={"status":1,"code":"10001","msg":"取现成功"})
canshu={"mobilephone":"13213577531","amount":223}
r=qx.quxian(canshu)
print(r)
assert r["msg"]=="取现成功" | true |
925b760986df857a6881421bb112cb27bc94920f | Python | scande3/Tic-Tac-Toe | /tictactoe/game/tests.py | UTF-8 | 10,902 | 2.609375 | 3 | [] | no_license | import logging
from django.test import TestCase
from django.http import HttpRequest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import Client
from tictactoe.game.models import TicTacToeModel
class GameViewsTest(TestCase):
    """Integration tests for the game views (index page and game creation)."""

    def _assert_status(self, response, expected, url):
        """Assert that *response* carries the *expected* HTTP status for *url*."""
        code = response.status_code
        self.assertEqual(code, expected, 'Expected %s but returned %s for %s'
                         % (expected, code, url))

    def test_index(self):
        """
        Test the index view of the application.
        """
        game_index = reverse('site-index')
        self._assert_status(self.client.get(game_index), 200, game_index)

    def test_createGame(self):
        """
        Test the creation of the game board.
        """
        game_creation = reverse('game:createGame')

        # GET requests are not allowed and should redirect (303).
        self._assert_status(self.client.get(game_creation), 303, game_creation)

        # Fully invalid form: unknown field names.
        form_data = {'blah': 'blah',
                     'size': 3}
        self._assert_status(self.client.post(game_creation, data=form_data),
                            303, game_creation)

        # Valid field names but out-of-range board size / unknown character.
        form_data = {'boardSize': 10,
                     'playerCharacter': 'C'}
        self._assert_status(self.client.post(game_creation, data=form_data),
                            303, game_creation)

        # Valid field names but a non-integer board size.
        form_data = {'boardSize': 'y',
                     'playerCharacter': 'X'}
        self._assert_status(self.client.post(game_creation, data=form_data),
                            303, game_creation)

        # Correct form: size 3 with the player as 'X'.
        form_data = {'boardSize': '3',
                     'playerCharacter': 'X'}
        self._assert_status(self.client.post(game_creation, data=form_data),
                            200, game_creation)

        # Correct form: size 4 with the player as 'O'; the CPU is then 'X'.
        form_data = {'boardSize': '4',
                     'playerCharacter': 'O'}
        response = self.client.post(game_creation, data=form_data)
        self._assert_status(response, 200, game_creation)
        self.assertContains(response, 'tic_X.png', msg_prefix='Expected the CPU player to have an X on the board')
class GameModelsTest(TestCase):
    """Unit tests for TicTacToeModel: game-over detection and the CPU AI."""

    def setUp(self):
        # Fresh, empty 3x3 game: the human plays 'X' against a CPU 'O'.
        self.gameObj = TicTacToeModel.objects.create(
            boardSize = 3,
            gameBoard = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']],
            playerCharacter = 'X',
            cpuCharacter = 'O',
            sessionID = 'testing'
        )

    def _check_game_over(self, board, expected_return, expected_winner):
        """Set the board, run checkGameOver(), and assert its result and winner."""
        self.gameObj.gameBoard = board
        result = self.gameObj.checkGameOver()
        self.assertEqual(result, expected_return, 'Expected %s but returned %s for %s'
                         % (expected_return, result, 'game models checkGameOver()'))
        self.assertEqual(self.gameObj.winner, expected_winner, 'Expected %s but returned %s for %s'
                         % (expected_winner, self.gameObj.winner, 'game models checkGameOver() winner'))

    def test_checkGameOver(self):
        # Blank board: game not over, no winner.
        self._check_game_over([[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']], False, ' ')
        # A few scattered moves, no complete line yet.
        self._check_game_over([['O', 'O', 'X'], ['O', 'X', ' '], [' ', 'X', ' ']], False, ' ')
        # Horizontal win for the CPU (top row).
        self._check_game_over([['O', 'O', 'O'], ['O', 'X', ' X'], [' ', 'X', ' ']],
                              True, self.gameObj.cpuCharacter)
        # Vertical win for the player (right column).
        self._check_game_over([['O', 'O', 'X'], ['O', 'X', ' X'], [' ', 'O', 'X']],
                              True, self.gameObj.playerCharacter)
        # Diagonal win for the CPU (top left to bottom right).
        self._check_game_over([['O', 'O', 'X'], ['X', 'O', ' X'], [' ', 'X', 'O']],
                              True, self.gameObj.cpuCharacter)
        # Diagonal win for the player (top right to bottom left).
        self._check_game_over([['O', 'O', 'X'], ['O', 'X', ' '], ['X', 'X', 'O']],
                              True, self.gameObj.playerCharacter)
        # Full board, no line: a draw.
        self._check_game_over([['X', 'O', 'X'], ['O', 'X', 'X '], ['O', 'X', 'O']], True, ' ')

        # Switch to a size-4 game with the characters reversed.
        self.gameObj = TicTacToeModel.objects.create(
            boardSize = 4,
            gameBoard = [[' ', ' ', ' ', ' '], [' ', ' ', ' ', ' '], [' ', ' ', ' ', ' ']],
            playerCharacter = 'O',
            cpuCharacter = 'X',
            sessionID = 'testing'
        )
        # Draw for both players on a 4x4 board.
        self._check_game_over([['X', 'O', 'X', 'O'], ['O', 'O', 'X', 'O'],
                               ['X', 'X', 'O', 'X'], ['X', 'O', 'O', 'X']], True, ' ')
        # Horizontal win for the CPU on a 4x4 board.
        self._check_game_over([['O', 'O', 'X', 'X'], ['O', 'O', 'X ', 'O'],
                               ['X', 'X', 'X', 'X'], ['X', 'O', 'O', 'O']],
                              True, self.gameObj.cpuCharacter)
        # Vertical win for the player on a 4x4 board.
        self._check_game_over([['O', ' ', 'X', ' '], ['O', 'O', ' ', ' '],
                               ['O', 'X', 'X', 'X'], ['O', 'X', ' ', ' ']],
                              True, self.gameObj.playerCharacter)
        #TODO: Checks for some other sizes and variations

    def test_calculateCPUMove(self):
        # The CPU must block X's imminent win on the bottom row.
        self.gameObj.gameBoard = [[' ', ' ', ' '], ['O', ' ', ' '], [' ', 'X', 'X']]
        self.gameObj.calculateCPUMove()
        self.assertEqual(self.gameObj.gameBoard[2][0], 'O', 'Expected CPU move to block X winning')

        # The CPU must take its own winning move when available.
        self.gameObj.gameBoard = [[' ', 'X', ' '], ['O', 'O', ' '], [' ', 'X', 'X']]
        self.gameObj.calculateCPUMove()
        self.assertEqual(self.gameObj.gameBoard[1][2], 'O', 'Expected CPU to pick winning')

        # The CPU should counter a corner play by going for three in a row.
        self.gameObj.gameBoard = [['X', 'O', ' '], [' ', ' ', ' O'], [' ', ' ', 'X']]
        self.gameObj.calculateCPUMove()
        self.assertEqual(self.gameObj.gameBoard[1][1], 'O', 'Expected CPU to stop corner play and go for 3 in a row')
        #TODO: More AI checks using theories from: http://en.wikipedia.org/wiki/Tic-tac-toe#Strategy
| true |
5456a633cba7d6d42ec507dbb32356e1b558af07 | Python | Bapan0814036/2nd-repository | /staticvar1.py | UTF-8 | 223 | 2.75 | 3 | [] | no_license | class A:
var="hello"
def __init__(self,a):
self.a=a
if __name__=="__main__":
a=A(12)
print(a.__dict__)
print(a.var)
print(a.__dict__)
a.var=24
print(a.__dict__)
print(A.var) | true |
4b878664cb8abc9967c4ebfbb0b01156ff5ffe22 | Python | michealtianlan/cloudtask | /cmd_destroy_workers.py | UTF-8 | 2,643 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
#
# Copyright (c) 2010-2012 Liraz Siri <liraz@turnkeylinux.org>
#
# This file is part of CloudTask.
#
# CloudTask is open source software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
"""
Destroy + unregister cloud workers and remove their addresses from file listing
Options:
--hub-apikey Hub APIKEY
Environment: HUB_APIKEY
Return codes:
0 destroyed all workers
1 fatal error
2 couldn't destroy some workers
Usage example:
cloudtask-destroy-workers worker-ips.txt
cat workers-ips | cloudtask-destroy-workers -
"""
import os
import sys
import getopt
from cloudtask import Hub
def usage(e=None):
    # Print an optional error, the usage line, and the module docstring to
    # stderr, then exit with status 1. (This file is Python 2: `print >>`.)
    if e:
        print >> sys.stderr, "error: " + str(e)
    print >> sys.stderr, "Usage: %s [ -opts ] ( path/to/list-of-ips | - )" % sys.argv[0]
    print >> sys.stderr, __doc__.strip()
    sys.exit(1)
def fatal(e):
    # Print an error to stderr and abort with exit status 1 (fatal error).
    print >> sys.stderr, "error: " + str(e)
    sys.exit(1)
def main():
    # The API key may come from either environment variable; the
    # --hub-apikey command-line option below overrides both.
    hub_apikey = os.environ.get('HUB_APIKEY', os.environ.get('CLOUDTASK_HUB_APIKEY'))
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'h', [ 'help',
                                          'hub-apikey=' ])
    except getopt.GetoptError, e:
        usage(e)
    for opt, val in opts:
        if opt in ('-h', '--help'):
            usage()
        if opt == '--hub-apikey':
            hub_apikey = val
    # Exactly one positional argument: the worker IP list file (or '-').
    if not len(args) == 1:
        usage()
    if not hub_apikey:
        fatal("missing required HUB_APIKEY")
    input = args[0]
    # '-' means read the list of worker addresses from stdin.
    if input == '-':
        fh = sys.stdin
    else:
        fh = file(input)
    ip_addresses = fh.read().splitlines()
    if not ip_addresses:
        print "no workers to destroy"
        return
    # Ask the Hub to destroy the workers; it returns (ip, instanceid)
    # pairs for the ones it actually destroyed.
    destroyed = Hub(hub_apikey).destroy(*ip_addresses)
    if not destroyed:
        fatal("couldn't destroy any workers")
    # Addresses the Hub could not destroy remain in the list.
    ip_addresses_left = list(set(ip_addresses) - set([ ip_address for ip_address, instanceid in destroyed ]))
    if ip_addresses_left:
        print >> sys.stderr, "warning: can't destroy " + " ".join(ip_addresses_left)
        ip_addresses_left.sort()
        # Rewrite the input file with only the surviving addresses
        # (unless the list came from stdin).
        if input != '-':
            fh = file(input, "w")
            for ip_address in ip_addresses_left:
                print >> fh, ip_address
            fh.close()
        # Exit code 2: couldn't destroy some workers (see module docstring).
        sys.exit(2)
    if not ip_addresses_left:
        # Every worker destroyed: the address file is no longer needed.
        if input != '-':
            os.remove(input)
        sys.exit(0)
if __name__ == "__main__":
main()
| true |
2b971d7a5b6a058da77074d7b0c58feccd6e9bc9 | Python | callmepr/Discord-chatbot | /bot.py | UTF-8 | 1,564 | 2.8125 | 3 | [] | no_license | import discord
token="NwijinnNIOMiiomdwfoihSDFwqwfohFw"#---enter your token id here--#
client=discord.Client()
@client.event
async def on_member_join(member):
    """Greet a newly joined member in the server's #general channel."""
    for channel in member.server.channels:
        if str(channel)=='general':
            # Bug fix: the old discord.py send_message API requires the
            # destination as the first argument; the original omitted it,
            # so the greeting was never delivered anywhere.
            await client.send_message(channel, f"""welcome to the server {member.mention}""")
@client.event
async def on_member_leave(member):
    """Announce a departing member in the server's #general channel."""
    # NOTE(review): discord.py's built-in leave event is named
    # on_member_remove; confirm this handler is actually dispatched.
    for channel in member.server.channels:
        # Bug fix: the original compared with '!=', which targeted every
        # channel *except* general (the join handler uses '=='), and it
        # omitted the destination argument required by send_message.
        if str(channel)=='general':
            await client.send_message(channel, f"""{member.mention}has left the server""")
@client.event
async def on_message(message):
    """Answer chat commands; the first matching trigger wins."""
    # Ordered trigger -> reply-lines table; insertion order mirrors the
    # original if/elif chain so the first substring match is answered.
    replies = {
        "-hii": ["Hello"],
        "-how are you": [" I am fine what about you"],
        "-do you eat?": ["No,I am a Machine"],
        "-who has made you?": ["Rupesh Prajapati ",
                               "Follow on Insta @call.me_pr"],
        "-really": ["yehh.Don't you think so..",
                    "Follow on Insta @call.me_pr"],
        "-tell me your friend name": ["I dont have any friend.Would you be my friend."],
        "-bye": ["bbyyee,See you soon.."],
    }
    for trigger, lines in replies.items():
        if message.content.find(trigger) != -1:
            for line in lines:
                await message.channel.send(line)
            break
client.run(token)  # Blocking call: connects to Discord and runs the event loop.
daa0f622cc999a11f9b6e78ef2e84eea8f6d5445 | Python | alder711/linux-dotfiles | /GENTOO/dotfiles/bin/conky_rss.py | UTF-8 | 364 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
# This script fetches an RSS feed and outputs
# the results (the newest entry's title, for a conky display).
# imports
import feedparser
# VARIABLES
RSS_SITE = "https://security.gentoo.org/glsa/feed.rss" #'.rss' URL to get feed from (Gentoo security advisories)
# parse feed (feedparser downloads and parses in a single call)
feed = feedparser.parse(RSS_SITE)
# print feed title
#print(feed['feed']['title'])
# print first entry (entries[0] is the most recent item in the feed)
print(feed['entries'][0]['title'])
| true |
class Raid:
    """A raid roster: players grouped by spec, with running role counts."""

    def __init__(self):
        # Maps spec name -> list of player objects with that spec.
        self.team = {}
        # Running counts of [tanks, healers, dps] in the roster.
        self.roles = [0, 0, 0]

    def _adjust_roles(self, player, delta):
        """Add *delta* to the role counter matching player.role."""
        if player.role == 'Tank':
            self.roles[0] += delta
        elif player.role == 'Healer':
            self.roles[1] += delta
        elif player.role == 'DPS':
            self.roles[2] += delta

    def add(self, player):
        """Add a player (or a list of players) to the roster."""
        if isinstance(player, list):
            for p in player:
                self.add(p)
            return
        self.team.setdefault(player.spec, []).append(player)
        self._adjust_roles(player, 1)

    def remove(self, player):
        """Remove *player* from the roster; raise ValueError if absent."""
        speclist = self.team.get(player.spec)
        if not speclist or player not in speclist:
            # Bug fix: the original concatenated the player *object* into
            # the message, which raised TypeError instead of ValueError.
            raise ValueError('Cannot find: ' + player.name + '.')
        speclist.remove(player)
        # Bug fix: keep the role counters in sync on removal (add()
        # incremented them, but the original never decremented).
        self._adjust_roles(player, -1)

    def get(self, speclist):
        """Return the list of players for spec *speclist*."""
        if speclist in self.team:
            return self.team.get(speclist)
        else:
            raise ValueError('Raid team does not contain: ' + speclist + '.')

    def getAll(self):
        """Return every player object in the roster."""
        return [player for players in self.team.values() for player in players]

    def getAllNames(self):
        """Return the name of every player in the roster."""
        return [player.name for players in self.team.values() for player in players]

    def getCds(self):
        """Return a mapping of cooldown name -> how many players bring it."""
        counts = {}
        for players in self.team.values():
            for player in players:
                for cd in player.cds:
                    counts[cd] = counts.get(cd, 0) + 1
        return counts
class Havoc:
    """Havoc Demon Hunter raid member and the raid cooldowns it brings."""

    def __init__(self, name, Netherwalk=False, staticTimer=True, dynamicTimer=True,
                 nameTime=False, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Havoc'
        self.role = 'DPS'
        self.classes = 'Demon Hunter'
        # Cooldown name -> cooldown length in seconds.
        self.cds = {'Darkness': 180}
        if Netherwalk == True:
            self.cds['Netherwalk'] = 120
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Mage:
    """Mage raid member (Frost, or Fire/Arcane) and its cooldowns."""

    def __init__(self, name, Frost=False, staticTimer=True, dynamicTimer=True,
                 nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Frost' if Frost == True else 'Fire/Arcane'
        self.role = 'DPS'
        self.classes = 'Mage'
        # Personal immunity; Frost can also reset it with Cold Snap.
        self.cds = {'Ice Block': 240}
        if Frost == True:
            self.cds['Cold Snap'] = 300
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Rogue:
    """Rogue raid member (any spec) and its cooldowns."""

    def __init__(self, name, staticTimer=True, dynamicTimer=True, nameTime=True,
                 abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Asn/Out/Sub'
        self.role = 'DPS'
        self.classes = 'Rogue'
        # Cooldown name -> cooldown length in seconds.
        self.cds = {'Cloak of Shadows': 120}
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Hunter:
    """Hunter raid member (any spec) and its cooldowns."""

    def __init__(self, name, staticTimer=True, dynamicTimer=True, nameTime=True,
                 abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'BM/MM/Surv'
        self.role = 'DPS'
        self.classes = 'Hunter'
        # Cooldown name -> cooldown length in seconds.
        self.cds = {'Aspect of the Turtle': 180}
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Balance:
    """Balance Druid raid member and its cooldowns."""

    def __init__(self, name, staticTimer=True, dynamicTimer=True, nameTime=True,
                 abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Balance'
        self.role = 'DPS'
        self.classes = 'Druid'
        # Cooldown name -> cooldown length in seconds.
        self.cds = {'Innervate': 180}
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class RestoDruid:
    """Restoration Druid raid member; talents toggle optional cooldowns."""

    def __init__(self, name, TranqTalent=False, Flourish=True, Tree=True,
                 staticTimer=True, dynamicTimer=True, nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'RestoDruid'
        self.role = 'Healer'
        self.classes = 'Druid'
        # Baseline cooldowns (name -> seconds).
        self.cds = {'Innervate': 180, 'Tranquility': 180, 'Ironbark': 60}
        # NOTE(review): TranqTalent shortens Innervate here -- confirm intent.
        if TranqTalent == True:
            self.cds['Innervate'] = 120
        if Flourish == True:
            self.cds['Flourish'] = 90
        if Tree == True:
            self.cds['Tree of Life'] = 180
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Mistweaver:
    """Mistweaver Monk raid member and its cooldowns."""

    def __init__(self, name, staticTimer=True, dynamicTimer=True, nameTime=True,
                 abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Mistweaver'
        self.role = 'Healer'
        self.classes = 'Monk'
        # Cooldown name -> cooldown length in seconds.
        self.cds = {'Revival': 180, 'Life Cocoon': 120}
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class HolyPally:
    """Holy Paladin raid member; talents adjust/add cooldowns."""

    def __init__(self, name, Visions=True, HolyAvenger=True, Unbreakable=False,
                 staticTimer=True, dynamicTimer=True, nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'HolyPally'
        self.role = 'Healer'
        self.classes = 'Paladin'
        # Baseline cooldowns (name -> seconds).
        self.cds = {'Avenging Wrath': 120, 'Aura Mastery': 180,
                    'Blessing of Sacrifice': 120, 'Divine Shield': 300,
                    'Blessing of Protection': 300}
        if Visions == True:
            self.cds['Avenging Wrath'] = 100
        if HolyAvenger == True:
            self.cds['Holy Avenger'] = 90
        if Unbreakable == True:
            self.cds['Divine Shield'] = 210
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Pally:
    """Retribution or Protection Paladin raid member."""

    def __init__(self, name, Tank=False, Unbreakable=False, staticTimer=True,
                 dynamicTimer=True, nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Ret Pally'
        self.role = 'DPS'
        if Tank == True:
            self.spec = 'Prot Pally'
            self.role = 'Tank'
        self.classes = 'Paladin'
        # Baseline cooldowns (name -> seconds); tanks bring two more.
        self.cds = {'Divine Shield': 300, 'Blessing of Protection': 300}
        if Tank == True:
            self.cds['Blessing of Sacrifice'] = 120
            self.cds['Ardent Defender'] = 120
        if Unbreakable == True:
            self.cds['Divine Shield'] = 210
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Discipline:
    """Discipline Priest raid member; talents swap or add cooldowns."""

    def __init__(self, name, Evang=True, Luminous=False, staticTimer=True,
                 dynamicTimer=True, nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'Discipline'
        self.role = 'Healer'
        self.classes = 'Priest'
        # Baseline cooldowns (name -> seconds).
        self.cds = {'Rapture': 90, 'Shadowfiend': 180, 'Power Word: Barrier': 180,
                    'Pain Suppression': 180, 'Leap of Faith': 90}
        if Luminous == True:
            # The talent replaces Power Word: Barrier with Luminous Barrier.
            del self.cds['Power Word: Barrier']
            self.cds['Luminous Barrier'] = 180
        if Evang == True:
            self.cds['Evangelism'] = 90
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class HolyPriest:
    """Holy Priest raid member; the Salvation talent adds a cooldown."""

    def __init__(self, name, Salv=True, staticTimer=True, dynamicTimer=True,
                 nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'HolyPriest'
        self.role = 'Healer'
        self.classes = 'Priest'
        # Baseline cooldowns (name -> seconds).
        self.cds = {'Divine Hymn': 180, 'Symbol of Hope': 300,
                    'Guardian Spirit': 180, 'Leap of Faith': 90}
        if Salv == True:
            self.cds['Holy Word: Salvation'] = 300
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class ShadowPriest:
    """Shadow Priest raid member; San'layn shortens Vampiric Embrace."""

    def __init__(self, name, Sanlayn=False, staticTimer=True, dynamicTimer=True,
                 nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'ShadowPriest'
        self.role = 'DPS'
        self.classes = 'Priest'
        # Baseline cooldowns (name -> seconds).
        self.cds = {'Vampiric Embrace': 120, 'Leap of Faith': 90}
        if Sanlayn == True:
            self.cds['Vampiric Embrace'] = 75
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class RestoSham:
    """Restoration Shaman raid member; talents add optional cooldowns."""

    def __init__(self, name, Ascendance=False, WindRush=False, staticTimer=True,
                 dynamicTimer=True, nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'RestoSham'
        self.role = 'Healer'
        self.classes = 'Shaman'
        # Baseline cooldowns (name -> seconds).
        self.cds = {'Spirit Link Totem': 180, 'Healing Tide Totem': 180}
        if Ascendance == True:
            self.cds['Ascendance'] = 180
        if WindRush == True:
            self.cds['Wind Rush'] = 120
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class DPSSham:
    """DPS Shaman raid member; only Wind Rush is tracked (if talented)."""

    def __init__(self, name, WindRush=False, staticTimer=True, dynamicTimer=True,
                 nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        self.spec = 'DPSSham'
        self.role = 'DPS'
        self.classes = 'Shaman'
        # No baseline tracked cooldowns for DPS shamans.
        self.cds = {}
        if WindRush == True:
            self.cds['Wind Rush'] = 120
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
class Warrior:
    """Warrior raid member (Fury/Arms DPS, or Protection tank)."""

    def __init__(self, name, Tank=False, staticTimer=True, dynamicTimer=True,
                 nameTime=True, abbr=True):
        if not isinstance(name, str):
            raise TypeError('Name must be String.')
        self.name = name
        if Tank == False:
            self.spec = 'Fury/Arms'
            self.role = 'DPS'
        else:
            self.spec = 'ProtWarr'
            self.role = 'Tank'
        self.classes = 'Warrior'
        # Cooldown name -> cooldown length in seconds.
        self.cds = {'Rallying Cry': 180}
        # Per-player timer/display option flags.
        self.staticTimer = staticTimer
        self.dynamicTimer = dynamicTimer
        self.nameTime = nameTime
        self.abbr = abbr
| true |
ddb780a5457e03654a530f299225a991a2112ca4 | Python | dalaAM/month-01 | /day04_all/day04/exercise03.py | UTF-8 | 554 | 4 | 4 | [] | no_license | """
累加0 1 2 3 4 5 6 7 8
累加3 4 5 6 7 8 9 10
累加2 4 6 8 10 12
累加8 7 6 5 4 3
累加-1 -2 -3 -4 -5 -6
"""
# Each line replaces a create/accumulate/print loop with builtin sum()
# over the same range, so the printed totals are identical.
print(sum(range(9)))           # 0+1+2+3+4+5+6+7+8
print(sum(range(3, 11)))       # 3+4+5+6+7+8+9+10
print(sum(range(2, 13, 2)))    # 2+4+6+8+10+12
print(sum(range(8, 2, -1)))    # 8+7+6+5+4+3
print(sum(range(-1, -7, -1)))  # -1-2-3-4-5-6
| true |
2498febd9107d5f40c528a1f1ec5eea755c74e00 | Python | anumoshsad/Algorithmic_Toolbox_Coursera_UCSD | /Assignment_2/fractional_knapsack/fractional_knapsack.py | UTF-8 | 970 | 3.359375 | 3 | [] | no_license | # Uses python3
import sys
def get_optimal_value(capacity, weights, values):
    """Greedy fractional knapsack: return the maximum total value of
    (fractions of) items that fit into a knapsack of *capacity*.

    Fix: the original sorted `weights` and `values` *independently* with
    parallel `sorted(zip(value_per_weight, ...))` calls, relying on an
    implicit invariant to keep each (value, weight) pair matched.  Sorting
    item indices once keeps the pairs intact by construction and avoids
    three separate sorts.
    """
    # Item indices ordered by value-per-weight, best first.
    order = sorted(range(len(weights)),
                   key=lambda i: values[i] / weights[i],
                   reverse=True)
    value = 0.0
    remaining = capacity
    for i in order:
        if remaining == 0:
            break
        # Take as much of item i as still fits (possibly a fraction).
        take = min(weights[i], remaining)
        value += take * (values[i] / weights[i])
        remaining -= take
    return value
if __name__ == "__main__":
    # Input format on stdin: "n capacity" followed by n "value weight" pairs.
    data = list(map(int, sys.stdin.read().split()))
    n, capacity = data[0:2]
    # Values sit at even offsets, weights at odd offsets of the pair list.
    values = data[2:(2 * n + 2):2]
    weights = data[3:(2 * n + 2):2]
    opt_value = get_optimal_value(capacity, weights, values)
    # Output with exactly four decimal places, per the problem statement.
    print("{:.4f}".format(opt_value))
| true |
b6a8a0a600101c4287ff9f42606db8b9cda00159 | Python | RiddhiRex/Leetcode | /Add One Row to Tree.py | UTF-8 | 1,554 | 3.34375 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def traverse(self, node, v, d, curd):
        """Walk the tree; at depth d-1 splice a value-v node above each child.

        (TreeNode.__init__ initializes left/right to None -- see the
        commented-out definition at the top of the file.)
        """
        if node is None:
            return
        if curd == d - 1:
            # Insert a new node between `node` and its left child.
            left_insert = TreeNode(v)
            left_insert.left = node.left
            left_insert.right = None
            node.left = left_insert
            # Insert a new node between `node` and its right child.
            right_insert = TreeNode(v)
            right_insert.right = node.right
            right_insert.left = None
            node.right = right_insert
        else:
            self.traverse(node.left, v, d, curd + 1)
            self.traverse(node.right, v, d, curd + 1)

    def addOneRow(self, root, v, d):
        """
        :type root: TreeNode
        :type v: int
        :type d: int
        :rtype: TreeNode
        """
        # Depth 1: a brand-new root whose left subtree is the whole old tree.
        if d == 1:
            new_root = TreeNode(v)
            new_root.left = root
            return new_root
        self.traverse(root, v, d, 1)
        return root
| true |
class Solution(object):
    def longestPalindrome(self, s):
        """
        :type s: str
        :rtype: int
        """
        # Every complete pair of a character can sit symmetrically; one
        # leftover odd character (if any exists) may occupy the middle.
        counts = collections.Counter(s)
        length = 0
        has_odd = False
        for occurrences in counts.values():
            length += occurrences - (occurrences % 2)
            if occurrences % 2 == 1:
                has_odd = True
        if has_odd:
            length += 1
        return length
def soletrar(string):
    """Return the characters of *string* as a list ("spell it out")."""
    # list() iterates the string directly, replacing the manual append loop.
    return list(string)
6d0ab2969b3c8a94bd2f0b040c9517cfd5e8688f | Python | Suvrojyoti/APS-2020 | /Codeforces_Submissions/1113B.py | UTF-8 | 1,099 | 3.328125 | 3 | [] | no_license | import math
def printDivisors(n):
    """Return all divisors of n (unordered list).

    Scans candidates only up to sqrt(n); each divisor i found below the
    root also contributes its cofactor. Fix: the original appended the
    cofactor as `n / i`, a float under Python 3, making the list
    int/float-mixed and inexact for large n; integer division keeps it
    exact. The `i * i <= n` bound also avoids float sqrt entirely.
    """
    divisors = []
    i = 1
    while i * i <= n:
        if n % i == 0:
            divisors.append(i)
            # A perfect-square root is recorded only once.
            if i != n // i:
                divisors.append(n // i)
        i += 1
    return divisors
# Codeforces 1113B: pick one divisor u of some a[i], divide a[i] by u and
# multiply a[0] (the minimum after sorting) by u; minimize the total sum.
n=int(input())
a=[int(o) for o in input().split()]
a=sorted(a)
# NOTE(review): `bob` and `aa` are written but their values are never read
# afterwards -- they look like leftover debugging state.
bob=a[:]
ansarr=[]
sumfinal=sum(a)
sum1=sumfinal
# Baseline candidate: do nothing, keep the original sum.
ansarr.append(sum(a))
aa=a[:]
sum1=sumfinal-0
bob.sort()
for i in range(1,n):
    # Try moving each divisor of a[i] onto the smallest element a[0].
    u=printDivisors(a[i])
    for j in range(len(u)):
        bob[0]=0
        # Gain on a[0]: a[0]*u[j] replaces a[0].
        sumfinal+=(a[0]*u[j]-a[0])
        bob[0]=1
        # Loss on a[i]: a[i]/u[j] replaces a[i].
        sumfinal-=(a[i]-a[i]/u[j])
        bob[0]=2
        ansarr.append(sumfinal)
        bob[0]=0
        # Reset to the untouched total before trying the next divisor.
        sumfinal=sum1
bob[0]=0
# Take the minimum over all candidate sums.
uhoy=9999999999
for i in range(len(ansarr)):
    if ansarr[i]<uhoy:
        uhoy=ansarr[i]
#bob.sort()
print(int(uhoy))
39caecba45e7eba770a211cc53cd7a40a4fe9e7e | Python | vtheno/lang | /Lex.py | UTF-8 | 3,103 | 3.078125 | 3 | [] | no_license | #coding=utf-8
# add lex support float number
class Ident(object):
    """A token wrapping an identifier symbol."""

    def __init__(self, sym):
        self.sym = sym

    def __repr__(self):
        # Renders as id{'sym'} -- the braces come from the set's str().
        return "id" + str({self.sym})
class GetNextTokenErr(Exception):
    """Raised when the lexer cannot produce another token."""
    pass
class Lex(object):
    """A small configurable tokenizer.

    spectab maps a symbol's first character to the characters that may
    follow it (for multi-character operators such as '->'), keywords is a
    list of reserved words, and separators lists token-splitting characters.
    """
    def __init__(self,spectab,keywords,separators):
        self.spectab = spectab # spectab : {str:[str]}
        self.keywords = keywords # keywords : [str]
        self.separators = separators
    def IsDigit(self,x : str ) -> bool :
        # True for ASCII decimal digits.
        return '0' <= x <= '9'
    def IsLetter(self,x : str ) -> bool :
        # True for ASCII letters only.
        return "a" <= x <= "z" or "A" <= x <= "Z"
    def IsLetterOrDigit(self,x : str) -> bool:
        return ("a" <= x <= "z" or "A" <= x <= "Z") or "0" <= x <= "9"
    def IsSeparator(self,x : str) -> bool :
        return x in self.separators#x == " " or x == "\n" or x == "\t"
    def getTail(self,p,tok,lst):
        # Consume characters from lst while predicate p holds, appending
        # them to tok; leaves the unconsumed remainder in self.inp.
        temp = lst
        while temp:
            x,temp = temp[0],temp[1:]
            if p(x):
                tok += x
            else:
                # Push back the non-matching character.
                temp = [x] + temp
                self.inp = temp
                return tok
        else:
            # Input exhausted: the whole list was consumed.
            self.inp = temp
            return tok
    def getSymbol(self,tok,lst):
        # Extend a symbol token while the next character is allowed to
        # follow it according to spectab; remainder goes to self.inp.
        temp = lst
        while temp:
            x,temp = temp[0],temp[1:]
            #print( x,self.spectab.get(tok,[]) )
            if x in self.spectab.get(tok,[]):
                tok += x
            else:
                self.inp = [x] + temp
                return tok
        else:
            self.inp = [ ]
            return tok
    def Tokenise(self,inp):
        # Split the input string into a list of token strings.
        self.inp = list(inp)
        result = [ ]
        while self.inp:
            _x,l1 = self.inp[0],self.inp[1:]
            l = [_x] + l1
            if self.IsSeparator(_x):
                self.inp = l1
            else:
                if self.inp == []:
                    raise GetNextTokenErr("{} length < 1 !".format(self.inp))
                elif len(self.inp) == 1:
                    # A single trailing character is its own token.
                    t,self.inp = self.inp[0],self.inp[1:]
                else:
                    x,xs = self.inp[0],self.inp[1:]
                    c,cs = xs[0],xs[1:]
                    if "a" <= x <= "z" or "A" <= x <= "Z":#self.IsLetter(x):
                        # Identifier/keyword: letter then letters-or-digits.
                        buf = f'{x}'
                        t = self.getTail(self.IsLetterOrDigit,buf,xs)
                    elif '0' <= x <= '9':#self.IsDigit(x):
                        # Integer literal: a run of digits.
                        buf = f'{x}'
                        t = self.getTail(self.IsDigit,buf,xs)
                    else:
                        if c in self.spectab.get(x,[]):
                            # symbol
                            t = self.getSymbol(''.join([x,c]),cs)
                        else:
                            # Single-character symbol.
                            t,self.inp = x,xs
            # NOTE(review): this append sits *outside* the else branch, so a
            # separator iteration re-appends the previous token (and a
            # leading separator hits an unbound `t`). Looks like it should
            # be indented one level deeper -- confirm intent before fixing.
            result = result + [t]
        return result#[r if r in self.keywords else Ident(r) for r in result]
"""
SpecTab = {
"-":[">"],
":":[":"],
}
keywords = ["+","-","*","/","->","::"]
separators = ["\n","/"]
lex = Lex(SpecTab,keywords,separators)
inp = /index.jpg
print( lex.Tokenise(inp) )
print( {Ident('c'):233} )
"""
__all__ = ["Lex","GetNextTokenErr"]
| true |
92064c618d7b9d7e77b51a1eca4ac1e28be68c19 | Python | kusum95/Elastic_Application | /AppTier/appInstance.py | UTF-8 | 5,203 | 2.84375 | 3 | [] | no_license | import schedule
import subprocess
import boto3
import json
import botocore
import os
import time
INPUT_BUCKET_NAME='cse546-input-p1' #input s3 bucket to download images for classifier
OUTPUT_BUCKET_NAME='cse546-output-p1' #output s3 bucket to store results
# schedule a job to check for any messages available in the SQS request queue
def check():
    # Runs on every scheduler tick; delegates to the real polling routine.
    check_message_queue()
    print("Checking SQS...")
def check_message_queue():
    """Poll the SQS request queue once.

    If a message is available, pause the polling schedule, process the
    message, and re-arm the schedule; otherwise terminate this instance.
    """
    response = get_message()
    if response is not None and response.body is not None:
        print("Message Found in SQS....")
        # Bug fix: the original line `schedule.CancelJob` was a bare
        # attribute access (a no-op), so a *new* `check` job was added after
        # every processed message and the jobs multiplied. Actually remove
        # the pending job(s) before the long-running processing step.
        schedule.clear()
        process_message(response.body)
        # Processing done: re-arm the periodic poll.
        schedule.every(10).seconds.do(check)
    else:
        # Queue drained: this worker is no longer needed.
        print("Queue Empty")
        terminate_instance()
def terminate_instance():
    """Terminate the EC2 instance this code is currently running on."""
    # `ec2metadata` reports this machine's own instance id.
    instance_id = subprocess.check_output(
        ["ec2metadata", "--instance-id"], universal_newlines=True).strip()
    ec2 = boto3.client('ec2')
    ec2.terminate_instances(
        InstanceIds=[
            instance_id
        ]
    )
def get_message():
    """Fetch at most one message from the SQS request queue.

    The message is deleted immediately after being received, so delivery
    is at-most-once: if processing later fails, the message is lost.
    Returns the message object, or None when the queue is empty.
    """
    sqs = boto3.resource('sqs')
    queue = sqs.Queue('https://sqs.us-east-1.amazonaws.com/992611621996/CSE546_RequestQueue.fifo')
    response = queue.receive_messages(
        AttributeNames=[
            'All'
        ],
        MaxNumberOfMessages=1,
        VisibilityTimeout=120,
        WaitTimeSeconds=2
    )
    # Delete (ack) the message so no other worker picks it up, then return it.
    for message in response:
        message.delete()
        return message
def process_message(body):
    """Decode one SQS request message and classify the referenced image."""
    payload = json.loads(body)
    filename = payload['image_filename']
    if filename is None:
        return
    # Pull the image from the input bucket, then run the classifier on it.
    download_file_from_s3(filename)
    run_classifier(file=filename)
#method to download the image from s3 input bucket based on the url provided in SQS request queue message
def download_file_from_s3(key):
    # The local file gets the same name as the S3 object key.
    s3 = boto3.resource('s3')
    try:
        s3.Bucket(INPUT_BUCKET_NAME).download_file(key, key)
    except botocore.exceptions.ClientError as excep:
        # 404 means the object is missing; any other client error is
        # unexpected and re-raised.
        if excep.response['Error']['Code'] == "404":
            print("The image doesn't exist in s3.")
        else:
            raise
#method to execute the provided classifier on the downloaded image and save the result to stdout.txt and push the result to SQS Response queue
def run_classifier(file):
    """Run the image classifier on *file*, publish the result to the SQS
    response queue, and upload the result text to S3.

    NOTE(review): the file name is interpolated straight into a shell
    command via os.system -- safe only if queue input is trusted.
    """
    if file is not None:
        os.system("python3 ~/classifier/image_classification.py "+file+" > classification_result.txt")
        # Remove the downloaded image once classified to free disk space.
        if os.path.exists(file):
            os.remove(file)
        message=create_SQS_message(file,'classification_result.txt')
        sqs=boto3.resource('sqs')
        queue=sqs.Queue('https://sqs.us-east-1.amazonaws.com/992611621996/CSE546_ResponseQueue.fifo')
        response=queue.send_message(
            MessageBody=message,
            MessageGroupId='CSE546Project')
        upload_result(file,'classification_result.txt')
#method to create message that has to be sent to SQS Response queue. Message is json with image file name and result
def create_SQS_message(file,resultfile):
    """Build the JSON payload for the SQS response queue.

    Reads the first line of *resultfile* (the classifier's output) and
    returns a JSON string with the image file name and that result.
    Uses a context manager so the file handle is always closed (the
    original used explicit open/close).
    """
    with open(resultfile, "r") as file_data:
        result = file_data.readline()
    print(file,result)
    message={}
    message['image_filename']=file
    message['result']=result
    return json.dumps(message)
#method to upload the result to s3 output bucket
def upload_result(key,resultfile):
    """Rewrite *resultfile* as '<image name>,<classification>' and upload it
    to the S3 output bucket under the image's base name with a .txt suffix.

    OUTPUT_BUCKET_NAME is a module-level constant defined elsewhere in this
    file. Keys prefixed with an upload timestamp ('...2021_<name>') have the
    prefix stripped so the result line carries the original image name.
    """
    s3 = boto3.resource('s3')
    try:
        # Base name (without extension) used as the uploaded object's name.
        name=key.split('.')[:-1]
        with_data=None  # placeholder comment removed; reading result below
        file_data=open(resultfile,"r")
        result=file_data.readline()
        file_data.close()
        # Strip the '..2021_' upload-timestamp prefix, if present, to recover
        # the original image name -- TODO confirm this naming convention.
        if '2021_' in key:
            img_name=key.split('2021_')[-1]
            img_final_name=img_name.split('.')[0]
        else:
            img_final_name=key.split('.')[0]
        result=img_final_name+','+result.replace('\n','')
        #print(result)
        file_data=open(resultfile,"w")
        file_data.write(result)
        file_data.close()
        s3.Bucket(OUTPUT_BUCKET_NAME).upload_file(resultfile,".".join(name)+".txt")
    except botocore.exceptions.ClientError as excep:
        if excep.response['Error']['Code'] == "404":
            print("The text does not exist in s3.")
        else:
            raise
# Arm the poller: run check() every 10 seconds.
schedule.every(10).seconds.do(check)
#run until the server is terminated
while True:
    schedule.run_pending()
    time.sleep(10)
| true |
63c4ae18d1dd1479b1e3684adad4d714bb3de60b | Python | mccullerlp/python-declarative | /test/test_properties.py | UTF-8 | 1,393 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test of the argparse library
TODO: use automated features
"""
from __future__ import (division, print_function, absolute_import)
from declarative import (
OverridableObject,
mproperty,
NOARG,
)
oldprint = print
print_test_list = []
def print(*args):
oldprint(*args)
if len(args) == 1:
print_test_list.append(args[0])
else:
print_test_list.append(args)
class OOT(OverridableObject):
"""
Runs the argparse test setup
"""
@mproperty
def A(self, val = NOARG):
if val is NOARG:
val = 'A'
print(val)
return val
@mproperty
def B(self, val = NOARG):
if val is NOARG:
val = 'B'
print(val)
return val
def test_mproperty():
print_test_list[:] = []
test = OOT()
assert(print_test_list == [])
print_test_list[:] = []
test = OOT()
test.A
assert(print_test_list == ['A'])
print_test_list[:] = []
test = OOT(B = 'BB')
test.A
test.B
assert(print_test_list == ['A', 'BB'])
class TBadAccess(OverridableObject):
"""
Runs the argparse test setup
"""
@mproperty
def no_args(self):
print('A')
return 'A'
@mproperty
def bad_set(self):
None.Test
return 'B'
if __name__ == '__main__':
test_mproperty
| true |
f767b17b3eb135ea2719b899685611267f4136df | Python | KeithDinh/Coding-Interview-Practices | /LeetCode/C#/Medium/Maximum DIfference Between Node and Ancestor.py | UTF-8 | 1,344 | 3.265625 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # LeetCode 1026: maximum absolute difference between a node's value and
    # any of its ancestors' values.
    def maxAncestorDiff(self, root: TreeNode) -> int:
        """Return the largest |ancestor.val - descendant.val| in the tree."""
        if not root:
            return 0
        # diff is a one-element list so the recursion can update it in place.
        diff =[0]
        maxVal, minVal = self.findMaxMinSubTree(root, diff)
        # Redundant with the update done inside the recursive call for root,
        # but harmless: max() is idempotent here.
        diff[0] = max(diff[0], max(abs(root.val - maxVal), abs(root.val-minVal)))
        return diff[0]
    def findMaxMinSubTree(self, current: TreeNode, diff):
        """Return (max, min) value in the subtree rooted at *current*,
        updating diff[0] with the best ancestor/descendant difference seen."""
        if not current.left and not current.right:
            return (current.val, current.val)
        maxVal = minVal = 0
        if not current.left:
            maxVal, minVal = self.findMaxMinSubTree(current.right, diff)
        elif not current.right:
            maxVal, minVal = self.findMaxMinSubTree(current.left, diff)
        else:
            maxVal1, minVal1 = self.findMaxMinSubTree(current.left, diff)
            maxVal2, minVal2 = self.findMaxMinSubTree(current.right, diff)
            maxVal = max(maxVal1, maxVal2)
            minVal = min(minVal1, minVal2)
        # current is an ancestor of everything in its subtree; compare it
        # against the subtree's extremes.
        diff[0] = max(diff[0], max(abs(current.val - maxVal), abs(current.val-minVal)))
        return (max(maxVal, current.val), min(minVal, current.val))
238f4f3778e1b52b275bf3ffdf9aee029492d569 | Python | askdjango/snu-web-2016-09 | /class-20160928/report/박정훈_경영/a1_1.py | UTF-8 | 356 | 3.75 | 4 | [] | no_license | number = int(input("출력하고자 하는 구구단의 숫자를 입력하세요. 1단부터 9단까지 가능합니다. : "))
while number < 1 or number > 9 :
number = int(input("1단부터 9단까지만 출력가능합니다. 1 이상 9이하의 숫자를 입력하세요."))
for x in range (1,10) :
print(number, 'x',x,'=',number*x)
| true |
cfa9f38bb40feebe2d368689af2ac90bb73abf0f | Python | lostmarinero/slcsp | /rate_helpers.py | UTF-8 | 2,216 | 3.390625 | 3 | [] | no_license | from helpers import isfloat
def pull_unique_rate_silver_plans(plan_list):
    '''
    Return the plans with a metal_level of 'Silver', keeping only the first
    plan seen for each distinct 'rate' value.

    Rewritten from a comprehension that relied on the `not seen.add(x)`
    side-effect hack -- same behavior, explicit and readable.
    '''
    seen_rates = set()
    unique_silver = []
    for plan in plan_list:
        if plan['metal_level'] == 'Silver' and plan['rate'] not in seen_rates:
            seen_rates.add(plan['rate'])
            unique_silver.append(plan)
    return unique_silver
def pull_second_lowest_plan(list_of_values):
    '''
    Given a list of plan dicts, return the plan with the second-lowest
    'rate', considering only entries whose rate is a parseable string.
    Returns None for an empty list or fewer than two valid plans.
    '''
    if list_of_values == []:
        return None
    # Keep only plans whose 'rate' is a string that parses as a float.
    valid_plans = []
    for plan in list_of_values:
        rate = plan.get('rate')
        if isinstance(rate, str) and isfloat(rate):
            valid_plans.append(plan)
    # Sorted descending, the second-lowest is the second element from the end.
    ranked = sorted(valid_plans,
                    key=lambda plan: float(plan['rate']),
                    reverse=True)
    if len(ranked) < 2:
        return None
    return ranked[-2]
def pull_second_lowest_rate(all_plans):
    '''
    Return the 'rate' of the plan with the second-lowest rate, or None
    when no such plan exists.
    '''
    plan = pull_second_lowest_plan(all_plans)
    return plan['rate'] if plan is not None else None
def pull_benchmark_plan(plan_list):
    '''
    Return the benchmark plan: among the unique-rate Silver plans in
    *plan_list*, the one with the second-lowest rate (None when fewer
    than two Silver plans exist).
    '''
    silver_plans = pull_unique_rate_silver_plans(plan_list)
    return pull_second_lowest_plan(silver_plans)
def pull_benchmark_rate(plan_list):
    '''
    Return the rate of the benchmark (second-lowest Silver) plan, or None
    when the list is empty or no benchmark plan exists.
    '''
    if plan_list == []:
        return None
    plan = pull_benchmark_plan(plan_list)
    return plan['rate'] if plan is not None else None
| true |
86a7205c581ec5c92e225979556ecc10dfa1fc2b | Python | arianaolson419/AccessibleCooking | /app/helper_functions/conversions.py | UTF-8 | 11,444 | 2.53125 | 3 | [] | no_license | from flask_mongoalchemy import *
from bson.objectid import *
from app.helper_functions.media import video_id_from_url
from app.document_models.recipe_documents import Recipe
from app.document_models.tip_documents import Tip
from app.document_models.object_documents import Instruction, Ingredient, Equipment
import logging
import pdb
import pytz
import re
# import requests
from app import db
def mongo_to_dict(obj):
    """Get dictionary from mongoengine object
    id is represented as a string
    obj A mongodb object that will be converted to a dictionary
    """
    # Built as (field_name, value) pairs and converted to a dict at the end.
    return_data = []
    if obj is None:
        return None
    # converts the mongoDB id for documents to a string from an ObjectID object
    if isinstance(obj, Document):
        return_data.append(("id",str(obj.id)))
    for field_name in obj._fields:
        if field_name in obj: # check if field is populated
            # 'id' was already handled above for Documents.
            if field_name in ("id",):
                continue
            data = obj[field_name]
            # Dispatch on the declared field type: lists and embedded
            # documents recurse; everything else is converted scalar-wise.
            if isinstance(obj._fields[field_name], ListField):
                return_data.append((field_name, list_field_to_dict(data)))
            elif isinstance(obj._fields[field_name], EmbeddedDocumentField):
                return_data.append((field_name, mongo_to_dict(data)))
            elif isinstance(obj._fields[field_name], DictField):
                return_data.append((field_name, data))
            else:
                return_data.append((field_name, mongo_to_python_type(obj._fields[field_name], data)))
    return dict(return_data)
def request_to_recipe_search(request):
    """Build a recipe search dictionary from a flask request or plain dict.

    Comma-separated string fields are split into lists and 'difficulty'
    is normalized to a single-element list. The input dict is modified
    in place and returned.
    """
    if isinstance(request, dict):
        req_dict = request
    else:
        req_dict = request_to_dict(request)

    def as_list(value):
        return value if isinstance(value, list) else value.split(',')

    difficulty_map = {
        'beginner': ['beginner'],
        'intermediate': ['intermediate'],
        'advanced': ['advanced'],
    }
    preprocessing = {
        'recipe_name': as_list,
        'ingredients': as_list,
        'equipment': as_list,
        'tags': as_list,
        'tags_not': as_list,
        'difficulty': lambda value: difficulty_map.get(value, None),
    }
    search_dict = req_dict
    for field, transform in preprocessing.items():
        if field in search_dict:
            search_dict[field] = transform(search_dict[field])
    return search_dict
def request_to_tip_search(request):
    """Build a tip search dictionary from a flask request or plain dict.

    Same normalization as request_to_recipe_search, keyed on 'tip_name'
    instead of 'recipe_name'. The input dict is modified in place and
    returned.
    """
    if isinstance(request, dict):
        req_dict = request
    else:
        req_dict = request_to_dict(request)

    def as_list(value):
        return value if isinstance(value, list) else value.split(',')

    difficulty_map = {
        'beginner': ['beginner'],
        'intermediate': ['intermediate'],
        'advanced': ['advanced'],
    }
    preprocessing = {
        'tip_name': as_list,
        'ingredients': as_list,
        'equipment': as_list,
        'tags': as_list,
        'tags_not': as_list,
        'difficulty': lambda value: difficulty_map.get(value, None),
    }
    search_dict = req_dict
    for field, transform in preprocessing.items():
        if field in search_dict:
            search_dict[field] = transform(search_dict[field])
    return search_dict
def list_field_to_dict(list_field):
    """
    Convert a list of mongodb objects into plain Python values:
    embedded documents become dicts (recursively), everything else is
    run through the scalar type conversion.
    """
    converted = []
    for element in list_field:
        if isinstance(element, EmbeddedDocument):
            converted.append(mongo_to_dict(element))
        else:
            converted.append(mongo_to_python_type(element, element))
    return converted
def mongo_to_python_type(field, data):
    """
    Convert *data* based on the mongoengine field type:
    ObjectIds become strings, Decimal and Boolean values pass through
    unchanged, and anything else is stringified.
    """
    if isinstance(field, ObjectIdField):
        return str(data)
    if isinstance(field, (DecimalField, BooleanField)):
        return data
    return str(data)
def request_to_dict(request):
    """Convert incoming flask requests for objects into a dict"""
    if request.is_json:
        req_dict = request.get_json() # get_dict returns python dictionary object
    else:
        # to_dict(flat=False) wraps every value in a list.
        req_dict = request.values.to_dict(flat=False)
    obj_dict = {}
    for k, v in req_dict.items():
        # The to_dict method returns values as lists, which is necessary for
        # the values with the name 'tag', but none of the other fields.
        if k == 'tag' or k == 'tip':
            obj_dict[k] = v
        elif k == 'difficulty':
            obj_dict['difficulty'] = req_dict['difficulty'][0]
        else:
            # Unwrap the single value and spell out fraction notation
            # (e.g. "1/2" -> "Half") for later text-to-speech use.
            obj_dict[k] = check_for_fractions(v[0])
    return obj_dict
def dict_to_recipe(request_dict, recipe=None):
    """Create a new Recipe document, or update *recipe* in place, from a
    request dictionary.

    The 'ingredients', 'equipment' and 'instructions' values are
    newline-separated strings; each line becomes a numbered embedded
    document. Saves and returns the Recipe.

    BUG FIX: in the update branch the original assigned
    `new_recipe.media_url` (an undefined name there), raising NameError
    whenever an existing recipe used the 'Audio' media type.
    """
    ingredients = [Ingredient(line=line.strip(), line_num=i)
                   for i, line in enumerate(request_dict['ingredients'].split('\n'))]
    equipment = [Equipment(line=line.strip(), line_num=i)
                 for i, line in enumerate(request_dict['equipment'].split('\n'))]
    instructions = [Instruction(line=line.strip(), line_num=i)
                    for i, line in enumerate(request_dict['instructions'].split('\n'))]
    if recipe:
        recipe.recipe_name=request_dict['recipe_name']
        recipe.description=request_dict['description']
        recipe.media_type=request_dict['media_type']
        recipe.ingredients=ingredients
        recipe.equipment=equipment
        recipe.instructions=instructions
        recipe.difficulty=request_dict['difficulty']
        recipe.servings=request_dict['servings']
        recipe.time=request_dict['time']
        recipe.tags=request_dict['tag']
        # Existing tip links are cleared on update.
        recipe.tips=[]
        if request_dict['media_type'] == 'Video':
            recipe.video_id = video_id_from_url(request_dict['media_url'])
        elif request_dict['media_type'] == 'Audio':
            recipe.media_url = request_dict['media_url']
        recipe.save()
        return recipe
    else:
        new_recipe = Recipe(
            recipe_name=request_dict['recipe_name'],
            description=request_dict['description'],
            media_type=request_dict['media_type'],
            ingredients=ingredients,
            equipment=equipment,
            instructions=instructions,
            difficulty=request_dict['difficulty'],
            servings=request_dict['servings'],
            time=request_dict['time'],
            tags=request_dict['tag'],
            tips=[])
        if request_dict['media_type'] == 'Video':
            new_recipe.video_id = video_id_from_url(request_dict['media_url'])
        elif request_dict['media_type'] == 'Audio':
            new_recipe.media_url = request_dict['media_url']
        new_recipe.save()
        return new_recipe
def dict_to_tip(request_dict):
    """Create, save and return a Tip document from a request dictionary.

    Equipment/ingredient/technique lines are lower-cased for matching;
    instruction lines keep their case.
    """
    # NOTE(review): leftover debug print of the raw request payload.
    print(request_dict)
    new_tip = Tip(
        tip_name=request_dict['tip_name'],
        media_type=request_dict['media_type'],
        media_url=request_dict['media_url'],
        video_id=video_id_from_url(request_dict['media_url']),
        difficulty=request_dict['difficulty'],
        description=request_dict['description'],
        equipment=[line.strip().lower() for line in request_dict['equipment'].split('\n')],
        ingredients=[line.strip().lower() for line in request_dict['ingredients'].split('\n')],
        techniques=[line.strip().lower() for line in request_dict['techniques'].split('\n')],
        instructions=[line.strip() for line in request_dict['instructions'].split('\n')],
        tags=request_dict['tag'])
    new_tip.save()
    return new_tip
def form_to_recipe_dict(formdata):
    """Translate recipe-search form fields into search-dict keys,
    dropping the 'select' control and empty-list values."""
    field_names = {'search': 'recipe_name',
                   'tag_select': 'tags'}
    translated = {}
    for field, value in formdata.items():
        if field == 'select' or value == []:   # expand this as needed
            continue
        translated[field_names[field]] = value
    return translated
def check_for_fractions(ingred):
    """Spell out common fraction notations ("1/2", "½", ...) in each line of
    *ingred* (a newline-separated string) so they can be read aloud.

    Pattern order matters: mixed numbers ("2 1/2") are handled before
    bare fractions, so the dict must stay in insertion order.
    """
    replacements = {r"(\d) (1\/2)":r"\1 and a half",
                    r"(\d) (1\/3)":r"\1 and a third",
                    r"(\d) (1\/4)":r"\1 and a quarter",
                    r"(\d) (1\/8)":r"\1 and an eighth",
                    r"(\d) (3\/4)":r"\1 and three quarters",
                    r"(\d) (2\/3)":r"\1 and two thirds",
                    r"^1\/2":"Half", r"(\D) (1\/2)":r"\1 half",
                    r"^1\/3":"One third", r"(\D) (1\/3)":r"\1 one third",
                    r"^1\/4":"One quarter", r"(\D) (1\/4)":r"\1 one quarter",
                    r"^1\/8":"One eighth", r"(\D) (1\/8)":r"\1 one eigth",
                    r"^3\/4":"Three quarters", r"(\D) (3\/4)":r"\1 three quarters",
                    r"^2\/3":"Two thirds", r"(\D) (2\/3)":r"\1 two thirds",
                    r'(\d) (\½)':r"\1 and a half",
                    r"(\d) (\⅓)":r"\1 and a third",
                    r"(\d) (\¼)":r"\1 and a quarter",
                    r"(\d) (\⅛)":r"\1 and an eighth",
                    r"(\d) (\¾)":r"\1 and three quarters",
                    r"(\d) (\⅔)":r"\1 and two thirds",
                    r"^\½":"Half",
                    r"^\⅓":"One third",
                    r"^\¼":"One quarter",
                    r"^\⅛":"One eighth",
                    r"^\¾":"Three quarters",
                    r"^\⅔":"Two thirds"}
    normalized_lines = []
    for raw_line in ingred.split("\n"):
        # Apply every replacement, in order, to this line.
        for pattern, spelled in replacements.items():
            raw_line = re.sub(pattern, spelled, raw_line, flags=re.U)
        normalized_lines.append(raw_line)
    return "\n".join(normalized_lines)
def form_to_tip_dict(formdata):
    """Translate tip-search form fields into search-dict keys,
    dropping the 'select' control and empty-list values."""
    field_names = {'search': 'tip_name',
                   'tag_select': 'tags'}
    translated = {}
    for field, value in formdata.items():
        if field == 'select' or value == []:   # expand this as needed
            continue
        translated[field_names[field]] = value
    return translated
def get_all_recipe_text(recipe_obj):
    """Create a large string of the text in a recipe's equipment, ingredient,
    and instruction lists to be used for searching for relevant tags.
    """
    # All of the fields listed give lists of strings.
    fields = ['ingredients', 'equipment', 'instructions']
    # BUG FIX: the original did ' '.join([recipe_obj[field]]) -- joining a
    # one-element list is a no-op for strings and a TypeError for lists.
    # Join the lines of every field with spaces instead.
    return ' '.join(' '.join(recipe_obj[field]) for field in fields)
def connect_line_and_tip(recipe_obj, tips):
    """Attach tips to recipe lines.

    *tips* maps '<Type>-<line_num>' keys (Type in Instruction/Ingredient/
    Equipment) to tip ids; the placeholder value "Add Tips" means no tip
    was chosen for that line. Saves the recipe after each attachment.
    """
    for matcher, tip in tips.items():
        if tip != "Add Tips":
            tip_obj = Tip.query.get_or_404(tip)
            [tip_type, line_num] = matcher.split('-')
            line_num = int(line_num)
            match_dict = {'Instruction':recipe_obj.instructions,
                          'Ingredient':recipe_obj.ingredients,
                          'Equipment':recipe_obj.equipment}
            # NOTE(review): the return value of set_tip is bound but unused.
            category = match_dict[tip_type][line_num].set_tip(tip, tip_obj.tip_name)
            recipe_obj.save()
| true |
f35d4b9b890b0624c06fbcd32411500750c6c41e | Python | shihyuuuuuuu/LeetCode_practice | /prob1282.py | UTF-8 | 316 | 2.703125 | 3 | [] | no_license | class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
groups = {}
for uid, i in enumerate(groupSizes):
groups[i] = groups.get(i, [])
groups[i].append(uid)
return [groups[i][j:j+i] for i in groups for j in range(0, len(groups[i]), i)]
| true |
5b16eac549a6034cb0a6c45a387a331cfeb4e71e | Python | whitej6/DeviceRename | /DeviceRename.py | UTF-8 | 2,151 | 2.765625 | 3 | [] | no_license | import netmiko
from getpass import getpass
''' User input for password not displayed on screen '''
def define_password():
    """Prompt twice for the TACACS+ password with hidden input; repeat
    until both entries match and the password is non-empty. Only a
    mismatch prints the retry message (an empty match silently loops)."""
    while True:
        candidate = getpass('Enter TACACS+ Password: ')
        confirmation = getpass('Re-enter TACACS+ Password to Verify: ')
        if candidate != confirmation:
            print('Passwords Did Not Match Please Try Again')
            continue
        if candidate:
            return candidate
''' Formatting devices.txt into list to be passed to for loop '''
def reformat_devices(devices):
    """Parse the open devices file into a {hostname: address} dict.
    Each non-blank line is expected to read '<name> <address>'."""
    mapping = {}
    for entry in devices.read().strip().splitlines():
        fields = entry.split()
        mapping[fields[0]] = fields[1]
    return mapping
''' Common exceptions that could cause issues'''
exceptions = (netmiko.ssh_exception.NetMikoTimeoutException,
    netmiko.ssh_exception.NetMikoAuthenticationException)

# Banner
print('~'*79)
print('~'*26+' Cisco Device Rename Script '+'~'*25)
print('~'*79)

''' Get Variables '''
username = input('Enter TACACS+ Username: ')
password = define_password()
devices = open('.\\devices\\devices.txt','r')
devices = reformat_devices(devices)
device_type = 'cisco_ios'

''' Loop for devices '''
# For each {hostname: ip} pair: connect, compare the running hostname to the
# expected one, and rewrite + save it if they differ.
for device in devices:
    try:
        ''' Connection Break '''
        print('*'*79)
        print('Connecting to:',device)
        ''' Connection Handler '''
        connection = netmiko.ConnectHandler(ip=devices[device], device_type=device_type, username=username, password=password)
        ''' Check if hostname is correct '''
        # NOTE(review): the leading space in ' sh run' is presumably deliberate
        # (keeps the command out of some shells' history) -- confirm.
        output = connection.send_command(' sh run | in hostname')
        if not device in output:
            print('Updating hostname')
            connection.send_command_timing('config t')
            connection.send_command_timing('hostname '+device)
            connection.send_command_timing('end')
            connection.send_command_timing('write memory')
        else:
            print('Device has correct hostname')
            pass
    except exceptions as exception_type:
        # Timeout or auth failure: report and continue with the next device.
        print('Failed to ', device, exception_type)
        print('*'*79)
| true |
38a2a6575a8c3deb44876971c751b73ef3dcaa10 | Python | jordanmslack/pytorch-plagiarism-detection | /methods.py | UTF-8 | 2,374 | 3.328125 | 3 | [] | no_license | import re
import operator
def create_datatype(df, train_value, test_value, datatype_var, compare_dfcolumn, operator_of_compare, value_of_compare,
                    sampling_number, sampling_seed):
    """Label rows of *df* in column *datatype_var* as train or test.

    Rows where `operator_of_compare(df[compare_dfcolumn], value_of_compare)`
    holds are first marked *train_value*; then up to *sampling_number* rows
    per (Task, compare_dfcolumn) group are re-marked *test_value* using
    *sampling_seed* for reproducibility. Mutates *df* in place.
    """
    # Subset of rows matching the comparison (e.g. Category > 0).
    df_subset = df[operator_of_compare(df[compare_dfcolumn], value_of_compare)]
    # Reset the label column on the subset to the train value.
    df_subset = df_subset.drop(columns=[datatype_var])
    df_subset.loc[:, datatype_var] = train_value
    # Sample up to sampling_number rows per (Task, column) group for testing.
    df_sampled = df_subset.groupby(['Task', compare_dfcolumn], group_keys=False).apply(
        lambda x: x.sample(min(len(x), sampling_number), random_state = sampling_seed))
    df_sampled = df_sampled.drop(columns = [datatype_var])
    df_sampled.loc[:, datatype_var] = test_value
    # Propagate test labels back into the subset...
    for index in df_sampled.index:
        df_subset.loc[index, datatype_var] = test_value
    # ...and the subset's labels back into the original frame.
    for index in df_subset.index:
        df.loc[index, datatype_var] = df_subset.loc[index, datatype_var]
def train_test_dataframe(clean_df, random_seed=100):
    """Return a copy of *clean_df* with a 'Datatype' column labelling each
    row 'orig', 'train' or 'test'.

    Plagiarized rows (Category > 0) sample 1 test row per group; original
    rows (Category == 0) sample 2.
    """
    new_df = clean_df.copy()
    new_df.loc[:,'Datatype'] = 0
    create_datatype(new_df, 1, 2, 'Datatype', 'Category', operator.gt, 0, 1, random_seed)
    create_datatype(new_df, 1, 2, 'Datatype', 'Category', operator.eq, 0, 2, random_seed)
    # Map the numeric codes to readable labels (0 = never relabelled).
    mapping = {0: 'orig', 1: 'train', 2: 'test'}
    new_df.Datatype = [mapping[item] for item in new_df.Datatype]
    return new_df
def process_file(file):
    """Read an open text file and return its contents lower-cased with every
    non-alphanumeric character replaced by a space and space runs collapsed.

    BUG FIX: the original called re.sub(" ", " ", ...) twice -- replacing a
    space with a space is a no-op, so repeated spaces were never collapsed.
    """
    all_text = file.read().lower()
    # Replace punctuation and all whitespace (incl. tabs/newlines) by spaces.
    all_text = re.sub(r"[^a-zA-Z0-9]", " ", all_text)
    # Collapse runs of spaces produced by the substitution above.
    all_text = re.sub(r" +", " ", all_text)
    return all_text
def create_text_column(df, file_directory='data/'):
    '''
    Reads in the files, listed in a df and returns that df with an additional column, `Text`.
    :param df:
        A dataframe of file information including a column for `File`
    :param file_directory:
        the main directory where files are stored
    :return:
        A dataframe with processed text
    '''
    text_df = df.copy()
    text = []
    for row_i in df.index:
        filename = df.iloc[row_i]['File']
        file_path = file_directory + filename
        # Decode errors are ignored so one bad file doesn't abort the run.
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as file:
            file_text = process_file(file)
            text.append(file_text)
    # One processed-text entry per row, in index order.
    text_df['Text'] = text
    return text_df
| true |
e6bd649fa3fc8a2ece7e9ea4b03718664a1e630e | Python | xjr7670/book_practice | /MasteringDataMiningwithPython/chapter4/basicNetworkMetrics5.py | UTF-8 | 1,088 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 28 22:03:23 2017
@author: cavin
"""
import networkx as nx
# Load the weighted edge list and compute per-node degree.
g = nx.read_weighted_edgelist('data/edgelist64.csv')
graphDegree = nx.degree(g)
# One layout, reused for both draw calls so nodes line up.
pos = nx.spring_layout(g)
degree_values = [item[1] for item in graphDegree]
# Draw the whole graph, sizing each node by 10x its degree.
nx.draw(g,
        pos,
        node_size=[v * 10 for v in degree_values],
        with_labels=False,
        font_size=8)
# Overlay a fixed list of users in green at a uniform size.
nx.draw_networkx_nodes(g,
                       pos,
                       nodelist=['tirsen',
                                 'shen',
                                 'mlee',
                                 'ged',
                                 'objo',
                                 'stellsmi',
                                 'cowboyd',
                                 'asong',
                                 'christkv',
                                 'hisnice',
                                 'duelin_markers',
                                 'stillflame'],
                       node_size=300,
                       node_color='g')
246e1e4021b1161445aa7ea448886acc6dab9a3b | Python | TengXu/CS-2015 | /CS 111/ps2pr3.py | UTF-8 | 1,747 | 4.0625 | 4 | [] | no_license | #
# ps2pr3.py - Problem Set 2, Problem 3
#
# Indexing and slicing puzzles
#
# name: teng xu
# email: xt@bu.edu
# 1
def mult(n, m):
    """ Recursively compute the product of two integers n and m by
    repeated addition; negative n is handled by sign-flipping.
    """
    if n == 0:
        return 0
    if n < 0:
        return -mult(-n, m)
    return m + mult(n - 1, m)
# 2
def dot(l1, l2):
    """ Recursively compute the dot product of two number lists.
    Returns 0.0 when the lists are empty or their lengths differ.
    """
    if len(l1) != len(l2) or l1 == []:
        return 0.0
    return l1[0] * l2[0] + dot(l1[1:], l2[1:])
# 3
def letter_score(letter):
    """ Return the Scrabble tile value of a lowercase letter.

    Anything that is not a lowercase letter scores 0. (In the original,
    the final `else: return 0` was nested inside the letter-group chain
    and unreachable, so non-letter input implicitly returned None, which
    broke scrabble_score's addition.)
    """
    scores = {1: 'aeilnorstu', 2: 'dg', 3: 'bcmp', 4: 'fhvwy',
              5: 'k', 8: 'jx', 10: 'qz'}
    for value, letters in scores.items():
        if letter in letters:
            return value
    return 0
# 4
def scrabble_score(word):
    """ Return the total Scrabble score of a string of lowercase letters
    (0 for the empty string).
    """
    return sum(letter_score(ch) for ch in word)
| true |
1b7db7a7a9160b76249903130e248ba5a2532808 | Python | vm2591/StochasticGD | /Tester.py | UTF-8 | 824 | 2.953125 | 3 | [] | no_license | import GradientDescent as gd
import Plotter as pl
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Fetch the Iris dataset directly from the UCI repository (requires network).
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
# First 100 rows are setosa/versicolor; encode labels as -1 / +1.
y = df.iloc[0:100,4].values
y = np.where(y == 'Iris-setosa' , -1 , 1)
# Features: sepal length (col 0) and petal length (col 2).
X = df.iloc[0:100 , [0,2]].values
# Standardize each feature to zero mean / unit variance.
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean())/X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean())/X[:,1].std()
# Train the stochastic-gradient Adaline and plot its decision regions.
ada = gd.AdalineSGD(n_iter = 15, eta = 0.01)
ada.fit(X_std,y)
pl.plot_decision_regions(X_std,y,classifier=ada)
plt.title('Gradient Descent')
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc = 'upper left')
plt.show()
# Plot the cost per epoch to visualize convergence.
plt.plot(range(1, len(ada.cost_)+1), ada.cost_, marker='o')
plt.xlabel('iterations')
plt.ylabel('Squared Error Sum')
plt.show()
bece17e97ec4c52c60dcf6eddc07504154489a50 | Python | andrew-christianson/Polyglot-Euler | /Problem 6.py | UTF-8 | 411 | 3.53125 | 4 | [
"MIT"
] | permissive | # Constraied to one-liners for this one
from __future__ import print_function
# I'm relatively satisfied here. All computation is one line, following pep8
s, r = sum, list(range(101)); a = s(r) ** 2 - s(i ** 2 for i in r)
print("The answer to Euler Probelm 6 is", a)
# A true one liner disregarding pep8 could be:
# print("The answer to Euler Problem 6 is", sum(range(101))**2-sum(i**2 for i in range(101)))
| true |
d6b4a3fbe50c6804acd4dadf9037cd49a22f6d0a | Python | reichlj/PythonBsp | /Schulung/py05_listcompr/lb_26_defaultdict.py | UTF-8 | 563 | 3.671875 | 4 | [] | no_license | from collections import defaultdict
def letter_frequency(s):
    """Return (letter, relative_frequency) pairs for the alphabetic
    characters of *s*, sorted by descending frequency then alphabetically.

    NOTE: frequencies are relative to len(s) -- the full string including
    spaces and punctuation -- preserving the original behavior.
    """
    # Idiom: defaultdict(int) instead of defaultdict(lambda: 0).
    counts = defaultdict(int)
    for ch in s.lower():
        if ch.isalpha():
            counts[ch] += 1
    pairs = [(ch, round(n / len(s), 4)) for ch, n in counts.items()]
    pairs.sort(key=lambda item: (-item[1], item[0]))
    return pairs
# Demo on a short string.
s = 'Monty Python'
x = letter_frequency(s)
for element in x:
    print(element)
# Demo on a local text file (must exist in the working directory).
print('1984.txt')
# NOTE(review): the file handle is never closed; consider `with open(...)`.
s = open('1984.txt').read()
x = letter_frequency(s)
for element in x:
    print(element)
369246abe2eee6158e9eb7c339e804313fcf2f87 | Python | DougWilkinson/led-dotclock | /node.py | UTF-8 | 3,247 | 2.578125 | 3 | [] | no_license | import urandom
from neopixel import NeoPixel
from sensorclass import Sensor
from machine import Pin
import time
# ledclock2
# updated 2/1/2021
def set_nightlight(brightlevel):
    """Set all 13 NeoPixels to the same white level (0-255) and push
    the update to the strip."""
    #print("brightlevel: " + str(brightlevel))
    global led
    for x in range(13):
        led[x] = (brightlevel,brightlevel,brightlevel)
    led.write()
# Hardware: a 13-pixel NeoPixel strip on GPIO 5.
ledpin = Pin(5, Pin.OUT)
led = NeoPixel(ledpin, 13)
# m maps logical clock positions to physical LED indices -- presumably the
# strip's wiring order around the dial; index 12 is the extra/center pixel.
m = [6,5,4,3,2,1,0,11,10,9,8,7,12]
# Clock state flags and current/previous hand positions (12-position dial).
state = False
statechange = False
timechange = True
nldelay = 60
nightlight = False
heartbeat = True
hour = 0
minute = 0
lasthour = 0
lastminute = 0
second = 1
lastsecond = 0
# True once a real time has been received over MQTT.
gottime = False
# MQTT-backed brightness setting (0-255), default 40.
brightness = Sensor("brightness", initval=40)
def main():
    """Run the LED dot-clock loop forever.

    Each iteration: service MQTT, fade the seconds pixel out/in over
    ~0.5s/1s, free-run the time until a real time arrives via MQTT
    (Sensor.lasthour/lastminute), then redraw the hour (red) and minute
    (green) hands whenever the time or brightness changes.
    """
    global led
    global gottime
    global state
    global statechange
    global timechange
    global nldelay
    global brightness
    global nightlight
    global heartbeat
    global hour
    global minute
    global lasthour
    global lastminute
    global second
    global lastsecond
    secfade = time.ticks_ms()
    set_nightlight(1)
    Sensor.MQTTSetup("ledclock")
    # Sentinel: no time received yet.
    Sensor.lasthour = -1
    secfade = time.ticks_ms()
    print("End Setup... Starting loop")
    while True:
        Sensor.lastblink = time.time()
        Sensor.Spin()
        # Milliseconds since the current second's tick started.
        secbright = time.ticks_ms() - secfade
        fadetime = 500
        # Fade the previous second's pixel (blue channel) out over 500 ms.
        if secbright < fadetime:
            led[m[11-lastsecond]] = (led[m[11-lastsecond]][0],led[m[11-lastsecond]][1], int(brightness.value * (fadetime - secbright) / fadetime)+1)
            led.write()
        # Fade the current second's pixel in over 1 s.
        if secbright < 1001:
            led[m[11-second]] = (led[m[11-second]][0],led[m[11-second]][1], int(brightness.value * secbright / 1000))
            led.write()
        # Before a real time arrives, tick rapidly (every ~100 ms).
        if (secbright > 100 ) and not gottime:
            secbright = 5001
            timechange = True
            led[m[11-lastsecond]] = (0,0,0)
        # Free-running carry: roll minutes/hours while no real time is known.
        if second == 11 and not gottime:
            lastminute = minute
            minute += 1
            if minute > 11:
                minute = 0
                lasthour = hour
                hour += 1
                if hour > 11:
                    hour = 0
        # Advance the second hand every 5 s (12 positions per minute).
        if secbright > 5000:
            lastsecond = second
            second = second + 1
            if second == 12:
                second = 0
            secfade = time.ticks_ms()
        # A new real time arrived via MQTT: sync the hands to it.
        if (Sensor.lastminute != minute) and (Sensor.lasthour >= 0):
            set_nightlight(1)
            gottime = True
            lasthour = hour
            lastminute = minute
            hour = Sensor.lasthour
            if hour > 12:
                hour = hour - 12
            if hour == 0:
                hour = 12
            # Minute hand snaps to 5-minute marks on the 12-position dial.
            minute = int(Sensor.lastminute / 5)
            if minute == 0:
                minute = 12
            Sensor.lastminute = minute
            timechange = True
        # Redraw hands on a time change or brightness update.
        if timechange or brightness.triggered:
            # Dim the previous hand positions back to the faint background.
            led[m[12-lasthour]] = (1,1,1)
            led[m[12-lastminute]] = (1,1,1)
            led[m[11-lastsecond]] = (1,1,1)
            led.write()
            # Hour hand in red; minute hand in green (keeping red if shared).
            led[m[12-hour]] = (brightness.value,0,0)
            led[m[12-minute]] = (led[m[12-minute]][0],brightness.value,0)
            led.write()
            brightness.triggered = False
            timechange = False
| true |
8a942f99e54a5b9a18b235bc79179cabd2025ba4 | Python | ymccarter/flashcard_project | /codeacademy/Reggie_Linear_Regression.py | UTF-8 | 146 | 2.90625 | 3 | [] | no_license | def get_y(m,b,x):
return m*x+b
print(get_y(1, 0, 7))
print(get_y(1, 0, 7) == 7)
print(get_y(5, 10, 3) == 25)
#def calculate_error(m, b):
| true |
2d1e0e359a21d7f1a17eee27a980f32381e13859 | Python | SpicyGarlicAlbacoreRoll/AI_Water | /scripts/make_vrt.py | UTF-8 | 1,427 | 2.625 | 3 | [] | no_license | import json
import os
import re
from argparse import ArgumentParser
from collections import Counter
from osgeo import gdal
PROJECTION = re.compile(r'AUTHORITY\["([A-Z]+)","([0-9]+)"\]')
def main(path: str, vrtname: str):
    """Build a GDAL VRT mosaic from all .tif files in *path*.

    Detects each tiff's projection, re-projects minority tiffs in place
    to the most common projection, then writes *vrtname* in *path*.
    """
    path_and_proj = []
    proj_counter = Counter()
    for fname in os.listdir(path):
        if not fname.endswith(".tif"):
            continue
        fpath = os.path.join(path, fname)
        info = gdal.Info(fpath, options=['-json'])
        info = json.loads(info)['coordinateSystem']['wkt']
        # The last AUTHORITY[...] entry in the WKT is taken as the overall
        # projection (e.g. EPSG:4326) -- presumably the outermost CRS.
        m = re.findall(PROJECTION, info)
        typ, zone = m[-1]
        proj = f"{typ}:{zone}"
        path_and_proj.append((fpath, proj))
        proj_counter.update([proj])
    # Majority vote decides the target projection.
    dst_proj, count = proj_counter.most_common(1)[0]
    print(f"Most common projection with {count} votes: {dst_proj}")
    for fpath, proj in path_and_proj:
        if proj == dst_proj:
            continue
        print(f"Reprojecting {fpath}")
        # In-place warp: overwrites the source tiff.
        gdal.Warp(fpath, fpath, srcSRS=proj, dstSRS=dst_proj)
    vrtpath = os.path.join(path, vrtname)
    print(f"Creating VRT: {vrtpath}")
    gdal.BuildVRT(vrtpath, list(map(lambda x: x[0], path_and_proj)))
if __name__ == '__main__':
    # CLI: make_vrt.py <tiff folder> <output vrt name>
    p = ArgumentParser()
    p.add_argument("path", help="path to the folder with individual tiffs")
    p.add_argument("vrtname", help="name of the resulting vrt file")
    args = p.parse_args()
    main(args.path, args.vrtname)
| true |
6ee80366bda59548968cf69b116d6e0c9cdbd1cb | Python | okassov/dvbcastlib | /code/libs/dvbobjects/generator/NITGenerator.py | UTF-8 | 3,243 | 2.53125 | 3 | [] | no_license | import os
from dvbobjects.utils.SectionLength import *
from dvbobjects.utils.Write import *
from dvbobjects.PSI.NIT import *
from SQL.NITSQL import *
from SQL.SQLMain import *
#############################
# Network Information Table #
#############################
def nit(network_object_id, network_id, network_data):
    """Generate the DVB NIT sections for one network and write them to
    output\\nit_<object_id>.sec (Windows-style path).

    Transport streams are split across sections by check_length; each
    chunk becomes one network_information_section.
    """
    nit_file_name = "output\\nit_" + str(network_object_id) + ".sec"
    nit_sections = []
    # Get list of ts_lists
    sections_ts = check_length(
        nit_loops(
            network_data,
            network_id)[0],
        network_data,
        "NIT",
        network_id = network_id)
    # Generate NIT sections
    if len(sections_ts) != 0:
        for idx, i in enumerate(sections_ts):
            # NOTE(review): nit_loops is called twice per section here; if it
            # is pure this is just redundant work, but null_list() below
            # suggests module-level state -- confirm before hoisting.
            nit = network_information_section(
                network_id = network_id,
                network_descriptor_loop = nit_loops(
                    i,
                    network_id = network_id)[1], # Get first loop items
                transport_stream_loop = nit_loops(
                    i,
                    network_id = network_id)[2], # Get second loop items
                version_number = 1,
                section_number = idx,
                last_section_number = len(sections_ts) - 1
            )
            nit_sections.append(nit)
        write_section(nit_file_name, nit_sections)
    else:
        pass
def regenerate_all_nit():
    '''This function regenerate all NIT'''
    all_networks = get_all_networks()
    # Rows come back as (object_id, network_id, ...) tuples.
    networks = [
        {
            "network_object_id": network[0],
            "network_id": network[1]
        } for network in all_networks
    ]
    for network in networks:
        network_data = sql_api_nit(network["network_object_id"], network["network_id"]) # Get network information with transports
        if network_data != None and len(network_data["transports"]) != 0:
            nit(network["network_object_id"], network["network_id"], network_data) # Generate Sections
            null_list("NIT") # Null section list for next loop
        else:
            print ("Not found any transports in network with ID: " + str(network["network_object_id"]))
def regenerate_changed_nit(network_object_ids):
    """Rebuild the NIT section files only for the given network object ids."""
    changed = [get_network(object_id) for object_id in network_object_ids]
    if not changed:
        print ("Error! Not found any networks.")
        return
    for row in changed:
        object_id, net_id = row[0], row[1]
        # Fetch the network description together with its transports.
        data = sql_api_nit(object_id, net_id)
        if data is not None and len(data["transports"]) != 0:
            nit(object_id, net_id, data)   # emit the section file
            null_list("NIT")               # reset shared section state for the next network
        else:
            print ("Not found any transports in network with ID: " + str(object_id))
73c5c8b3a45b52f804a93eeab72b552d5136afd1 | Python | LuckFXY/python | /practice_for_python/FigureCanvas.py | UTF-8 | 1,267 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 12 11:17:34 2017
@author: rain
"""
from tkinter import *
class FigureCanvas(Canvas):
    """A Canvas that draws one of four figures chosen at construction time.

    figureType selects the shape: 0=rectangle, 1=oval, 2=polygon, 3=arc.
    """

    def __init__(self, container, figureType, width=100, height=100):
        super().__init__(container, width=width, height=height)
        # Bug fix: figureType was never stored, so drawFigure() crashed on
        # the missing attribute.
        self.__figure = figureType

    def drawFigure(self):
        """Draw the figure selected by the constructor's figureType."""
        # Bug fix: dispatch through bound methods instead of bare names.
        func_list = [self.displayRect, self.displayOval,
                     self.displayPolygon, self.displayArc]
        func_list[self.__figure]()

    def displayRect(self):
        w = int(self["width"])
        h = int(self["height"])
        # Bug fix: this class *is* the canvas (Canvas subclass); there is no
        # self.canvas attribute, so draw on self directly.
        self.create_rectangle(10, 10, w - 10, h - 10, tags="rect")

    def displayOval(self):
        w = int(self["width"])
        h = int(self["height"])
        # Bug fix: tag was the integer 1, which clearCanvas() never deletes;
        # use "oval" so clearing works.
        self.create_oval(10, 10, w - 10, h - 10, fill="red", tags="oval")

    def displayPolygon(self):
        w = int(self["width"])
        h = int(self["height"])
        # NOTE(review): the original drew an arc here despite the method
        # name; kept to preserve the visible output. TODO: confirm whether
        # create_polygon was intended.
        self.create_arc(10, 10, w - 10, h - 10, start=0,
                        extent=90, width=8, fill="red", tags="arc")

    def displayArc(self):
        w = int(self["width"])
        h = int(self["height"])
        # Bug fix: use the widget's own size instead of hard-coded 190x190,
        # consistent with the other display methods.
        self.create_arc(10, 10, w - 10, h - 10, start=0,
                        extent=90, width=8, fill="red", tags="arc")

    def clearCanvas(self):
        """Remove every figure previously drawn by this widget."""
        self.delete("rect", "oval", "arc", "polygon", "line", "string")
d41f299ac90de3f783496f78a18a50f47aea53ac | Python | TangHa0/Realization-of-Handwritten-Chinese-Characters-Recognition | /openfile.py | UTF-8 | 935 | 3.046875 | 3 | [] | no_license | import pickle,pprint
# Load the pickled recognition results and the character dictionary.
# (pickle detects the protocol version automatically on load.)
with open('result.dict', 'rb') as handle:
    results = pickle.load(handle)
with open("char_dict", "rb") as handle:
    char_dict = pickle.load(handle)

# Dump the character dictionary sorted by value (ascending), one
# "<key> <value> " entry per line.
with open("dict.txt", "w") as out:
    for key, values in sorted(char_dict.items(), key=lambda item: item[1]):
        out.write('{0} {1} '.format(key, values) + "\n")

# One "<top probability> <top index> <groundtruth>" line per test sample.
with open("result.txt", "w") as out:
    for i in range(len(results['prob'])):
        out.write('{0} {1} {2}'.format(results['prob'][i][0],
                                       results['indices'][i][0],
                                       results['groundtruth'][i]) + "\n")
| true |
a9b2ba2b8a512543d7a39c936444f19e0c7c436c | Python | kitt10/master_thesis_2016 | /py/scripts/kitt_classify.py | UTF-8 | 3,493 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
scripts.kitt_classify
~~~~~~~~~~~~~~~~~~~~~
This script classifies testing data with a trained classifier provided by kitt :-).
@arg clf : name of the classifier file
"""
import matplotlib as mpl
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['xtick.labelsize'] = 15
mpl.rcParams['ytick.labelsize'] = 15
mpl.rcParams['legend.fontsize'] = 18
import argparse
import numpy as np
from shelve import open as open_shelve
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
from kitt_nn.nn_structure.kitt_net import NeuralNet
from kitt_nn.nn_tool.nn_function import print_cm
from functions import load_params
from termcolor import colored
def parse_arguments(argv=None):
    """Parse the command-line options for the classification script.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse fall back to ``sys.argv[1:]`` - a
            backward-compatible generalization that also allows testing.

    Returns:
        argparse.Namespace with ``clf`` (classifier file name) and
        ``dataset`` (dataset name) attributes; both are required flags.
    """
    parser = argparse.ArgumentParser(description='Classifies testing data.')
    parser.add_argument('-c', '--clf', type=str, required=True,
                        help='Classifier filename to classify with')
    parser.add_argument('-ds', '--dataset', type=str, required=True,
                        help='Dataset to classify on')
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = parse_arguments()
    # Trained nets are shelved under ../cache/trained/<name>.net
    clf_dir = '../cache/trained/'+args.clf+'.net'
    ''' Loading the classifier and testing data '''
    clf = open_shelve(clf_dir, 'c')
    # The shelved 'net' entry is a 4-tuple: (structure, weights, biases, labels).
    nn_classifier = clf['net']
    structure = nn_classifier[0]
    weights = nn_classifier[1]
    biases = nn_classifier[2]
    labels = nn_classifier[3]
    dataset_dir = args.dataset
    print '\n\n ## Classification : training parameters:', clf['training_params']
    clf.close()
    # Rebuild the network object and restore its trained parameters.
    net = NeuralNet(program=None, name=str(structure), structure=structure)
    net.weights = weights
    net.biases = biases
    net.labels = labels
    net.map_params()
    dataset = open_shelve('../cache/datasets/'+dataset_dir+'.ds', 'c')
    ''' Classifying '''
    ''' Getting results on testing set '''
    print '\n\n ## Testing...'
    y_pred = net.predict(dataset['x']['testing'])
    c_accuracy = accuracy_score(y_true=np.array(dataset['y']['testing']), y_pred=y_pred)
    c_report = classification_report(np.array(dataset['y']['testing']), y_pred)
    cm = confusion_matrix(y_true=np.array(dataset['y']['testing']), y_pred=y_pred, labels=net.labels)
    # Row-normalize the confusion matrix (per-true-class fractions).
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print '\n # Kitt net accuracy score on testing data:', colored(str(c_accuracy), 'green')
    print '\n # Kitt net classification report on testing data:\n', colored(str(c_report), 'cyan')
    print '\n # Kitt net confusion matrix on testing data:\n'
    print_cm(cm=cm, labels=net.labels)
    print '\n'
    print_cm(cm=cm_normalized, labels=net.labels, normed=True)
    # 14 terrain classes (ids 9 and 16+ intentionally absent); the
    # hard-coded range(14) below matches len(terrain_ids).
    terrain_ids = (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15)
    terrain_types = load_params('terrain_types')[0]
    terrains = [terrain_types[str(t_id)] for t_id in terrain_ids]
    plt.matshow(cm_normalized, vmin=0, vmax=1)
    plt.colorbar()
    plt.xticks(range(14), terrains, rotation=45)
    plt.yticks(range(14), terrains)
    # Annotate every cell whose fraction is at least 1%.
    for t1_i, terrain1 in enumerate(terrains):
        for t2_i, terrain2 in enumerate(terrains):
            if cm_normalized[t1_i][t2_i] >= 0.01:
                plt.text(t2_i, t1_i, round(cm_normalized[t1_i][t2_i], 2), va='center', ha='center', fontsize=12)
    plt.show()
    #plt.savefig('../../thesis/img/amter_classification_nn_cm.eps', bbox_inches='tight', pad_inches=0.1)
    dataset.close()
| true |
0a6e5941aa027cb47ca51a77ee6c478b96139e36 | Python | Ramos159/BuffettBot | /cogs/info.py | UTF-8 | 2,736 | 2.953125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | from discord.ext import commands
class Info(commands.Cog):
    """
    Info Module

    Contains commands with information regarding the bot.
    Inherits from the cog class, as all command cogs do

    ...

    Attributes
    ----------
    bot : commands.Bot
        Bot instance from main.py

    Methods
    -------
    info() -> None
        sets up the command group
    invite() -> None
        sends an invite message to the requested channel
    github() -> None
        sends the github link for this bot to requested channel
    ping() -> None
        sends the bot's current gateway latency
    contact() -> None
        sends a message detailing method of contact to owner
    stock() -> None
        sends a message about the source of stock information
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.group()
    async def info(self, ctx):
        """Get information regarding to the bot"""
        # Group entry point only; the real work happens in the subcommands.
        return

    @info.command()
    async def invite(self, ctx):
        """
        Sends an invite message with link to the channel command was used in
        """
        # Shows the "Bot is typing..." indicator while composing the reply.
        await ctx.trigger_typing()
        invite = 'https://discord.com/oauth2/authorize?client_id=746604823759290399&permissions=392256&scope=bot'
        await ctx.send('>>> Use this link to invite me to your server.\n'
                       "all permisions are **REQUIRED** for the bot to work, please don't remove any!\n"
                       f"{invite}"
                       )

    @info.command()
    async def github(self, ctx):
        """
        Sends a github repository link to the channel it was called in
        """
        await ctx.trigger_typing()
        await ctx.send("https://github.com/Ramos159/BuffettBot")

    @info.command()
    async def ping(self, ctx):
        """
        Get latency number from bot
        """
        await ctx.trigger_typing()
        # bot.latency is in seconds; report it in whole milliseconds.
        await ctx.send(f">>> Trading and browsing r/wallstreetbets at {round(self.bot.latency * 1000)} ms")

    @info.command()
    async def contact(self, ctx):
        """
        Sends contact info for owner.
        """
        await ctx.send(
            f">>> If you want to gift someone Tesla stock, **edwin#9454** will gladly accept them"
        )

    @info.command()
    async def stock(self, ctx):
        """Sends information about stock info source"""
        await ctx.send(">>> We use https://finnhub.io/ information for all our stock related commands.")
def setup(bot):
    """
    Setup function for the cog in this file.

    Run when the cog is loaded via load_extension() in main.py;
    the bot parameter is passed in automatically during loading.

    ...

    Parameters
    ----------
    bot: Bot
        Bot instance from main.py
    """
    bot.add_cog(Info(bot))
| true |
8f3be37bad53e1d7ec919450f5c28f3e6ce34e38 | Python | thnglhu/NetVis | /Visual/Canvas/port.py | UTF-8 | 1,237 | 2.65625 | 3 | [] | no_license | class Port:
    # region Declaration
    def __init__(self, name, device, port_id=None, mac_address=None):
        """One network interface on a device; may be attached to one link."""
        self.name = name
        self.device = device
        self.link = None                              # set by connect()
        # Fall back to the object's identity when no explicit id is given.
        # NOTE(review): a falsy port_id such as 0 is also replaced.
        self.id = port_id if port_id else id(self)
        self.mac_address = mac_address
        self.active = True                            # toggled by enable()/disable()
def save(self):
result = {
'id': self.id,
'name': self.name,
}
if self.mac_address:
result['mac_address'] = self.mac_address
return result
    def destroy(self, collector):
        """Tear down the attached link (if any), forwarding *collector* to it."""
        if self.link:
            self.link.destroy(collector)
# endregion
# region Logical
    def connect(self, link):
        """Attach *link* to this port and let the owning device subscribe to it."""
        self.link = link
        self.device.subscribe(link)
    def disconnect(self):
        """Ask the owning device to detach this port.

        NOTE(review): self.link is not cleared here - presumably the
        device's disconnect() does that; confirm.
        """
        self.device.disconnect(self)
    def send(self, frame):
        """Forward *frame* onto the link, but only when both this port and
        the link are active; otherwise the frame is silently dropped."""
        if self.active and self.link and self.link.active:
            self.link.send(frame, self)
    def enable(self):
        """Mark the port active and propagate the state to the attached link."""
        self.active = True
        if self.link:
            self.link.enable()
    def disable(self):
        """Mark the port inactive and propagate the state to the attached link."""
        self.active = False
        if self.link:
            self.link.disable()
    def receive(self, frame):
        """Hand an incoming *frame* to the owning device and return its result."""
        return self.device.receive(frame, self)
# endregion
| true |
6b5ecf59aee91287debfc7a264002ca531ac0eb0 | Python | hasin-abrar/Machine-Learning | /Decision-Tree-with-Adaboost/MainCode/DecisionTreeFull.py | UTF-8 | 19,422 | 3.015625 | 3 | [] | no_license | # pre processing
import math
import random
import datetime
import numpy as np
import pandas as pd
class PreProcessing(object):
    """Dataset cleaning helpers: missing-value imputation, mode/mean
    replacement, label encoding and information-gain based binarization
    of numeric columns.

    ``examples`` is expected to be a 2-D numpy object array whose last
    column is the class label.
    """
    def __init__(self, examples):
        self.examples = examples
    # takes a list as input and gives the mode; most_common(1)[0][0] is the
    # most frequent element (earliest-counted wins ties)
    def Most_Common(self, lst):
        from collections import Counter
        data = Counter(lst)
        return data.most_common(1)[0][0]
    # missing_col = Find_Missing_Col(examples,4)
    def Find_Missing_Col(self,attr_length):
        """Return indices of columns containing a missing value.

        NOTE(review): iterates the module-level ``examples`` instead of
        ``self.examples`` - only works when that global exists.
        """
        missing_col = []
        for i in range(attr_length):
            for e in examples:
                if e[i] == " ":
                    # print(e[i])
                    missing_col.append(i)
                elif isinstance(e[i], str):
                    continue
                    # print(type(e[i]))
                    # print(len(e[i]),e[i])
                if math.isnan(e[i]):
                    missing_col.append(i)
                    break
        return missing_col
    # number_type is a list
    # number_type = [1,2]
    # Replace_With_Mean(examples,number_type)
    def Replace_With_Mean(self, examples_df, examples, number_type):
        """Replace NaNs in the listed numeric columns with the column mean
        (in place).

        NOTE(review): ``examples_df`` is unused; also sklearn's Imputer was
        removed in newer releases (SimpleImputer is the replacement).
        """
        from sklearn.preprocessing import Imputer
        imputer = Imputer(missing_values='NaN',
                          strategy='mean', axis=0)
        for n in number_type:
            imputer = imputer.fit(examples[:, n:(n + 1)])
            examples[:, n:(n + 1)] = imputer.transform(examples[:, n:(n + 1)])
    # examples = Remove_Useless_Rows(examples)
    def Remove_Useless_Rows(self, examples):
        """Drop rows whose label (last column) is NaN and return the result.

        NOTE(review): np.delete() while iterating the original array shifts
        subsequent row indices, so consecutive NaN rows may be missed.
        """
        index = -1
        for e in examples:
            index += 1
            last_value = e[-1]
            # print(last_value,index)
            if isinstance(last_value, str):
                continue
            if math.isnan(last_value):
                # print(index)
                examples = np.delete(examples, index, 0)
        # print(examples)
        return examples
    def Replace_With_Mode(self, examples, string_type):
        """Replace NaNs in the listed categorical columns with the column
        mode (in place; strings are left untouched)."""
        for s in string_type:
            single_col = examples[:, s]
            _max_appearance = self.Most_Common(single_col)
            for j in range(len(single_col)):
                if isinstance(single_col[j], str):
                    continue
                if math.isnan(single_col[j]):
                    single_col[j] = _max_appearance
    def GetBooleanEntropy(self, yes, no):
        """Binary entropy of a yes/no split (0 for a pure split).

        NOTE(review): unlike DecisionTree.GetBooleanEntropy, this version
        does not guard against yes + no == 0 (ZeroDivisionError).
        """
        succ_prob = (yes / (yes + no))
        if succ_prob == 0:
            return 0
        elif succ_prob == 1:
            return 0
        # print ("succ : ",succ_prob)
        return -(succ_prob * math.log2(succ_prob) + (1 - succ_prob) * math.log2((1 - succ_prob)))
    def Get_Split_Val(self, examples, index):
        """Find the numeric threshold for column *index* that maximizes
        information gain against the class column.

        NOTE(review): assumes raw class labels equal to the literal "Yes";
        run before DoLastColEncoding().
        """
        # selected_col = examples
        sorted_col = sorted(examples, key=lambda k: k[index])
        print(sorted_col)
        # Initial split below the smallest value.
        start = sorted_col[0][index] - 10
        # end = sorted_col[0] + 10
        class_col = examples[:, -1]
        # yes/no[0] counts examples below the candidate split, [1] above it.
        yes = []
        no = []
        yes.append(0)
        yes.append(0)
        no.append(0)
        no.append(0)
        pos = neg = 0
        # print(class_col)
        for c in class_col:
            if c == "Yes":
                yes[1] += 1
            else:
                no[1] += 1
        pos = yes[1]
        neg = no[1]
        # print(yes, no)
        init_entropy = self.GetBooleanEntropy(yes[1], no[1])
        _max = 0
        split = start
        # Sweep candidate midpoints between consecutive sorted values,
        # shifting one example across the split per step.
        for j in range(len(sorted_col) - 1):
            mid = (sorted_col[j][index] + sorted_col[j + 1][index]) / 2
            remainder_attrb_entropy = 0
            if sorted_col[j][-1] == "Yes":
                yes[0] += 1
                yes[1] -= 1
            else:
                no[0] += 1
                no[1] -= 1
            for k in range(2):
                remainder_attrb_entropy += ((yes[k] + no[k]) / (pos + neg)) * self.GetBooleanEntropy(yes[k], no[k])
            gain = init_entropy - remainder_attrb_entropy
            if gain > _max:
                _max = gain
                split = mid
        # print(split)
        return split
    def Binarization(self, examples, num_type):
        """Binarize each numeric column in place: -1 if <= best split, +1 above."""
        for n in num_type:
            split_val = self.Get_Split_Val(examples, n)
            # print("##########")
            # print("attribute ", n, " : ", split_val)
            changed_col = examples[:, n]
            for i in range(len(changed_col)):
                if changed_col[i] <= split_val:
                    changed_col[i] = -1 # making all values having same type
                else:
                    changed_col[i] = +1
    def GetAttributeList(self, dataframe):
        """Return [0 .. n_attributes-1], excluding the label column."""
        attr_list = []
        for i in range(len(list(dataframe)) - 1):
            attr_list.append(i)
        return attr_list
    def GetAtrributeLength(self, dataframe):
        # Number of attribute columns (label column excluded).
        return len(dataframe.columns) - 1
    def GetAtrributeMapping(self,examples,attr_length):
        """Return ({attr -> list of distinct values}, {attr -> column index})."""
        attr_mapping = {}
        index = {}
        for i in range(attr_length):
            single_col = examples[:,i]
            attr_types = list( set(single_col) )
            attr_name = i
            attr_mapping[attr_name] = attr_types
            index[attr_name] = i
        return attr_mapping, index
    def DoLastColEncoding(self,examples,last_col,choice):
        """Encode the label column in place: last_col[0] -> 1, everything
        else -> -1. For choice == 2 a trailing '.' is stripped first
        (the adult-census labels end with a period)."""
        y = examples[:,-1]
        for i in range(len(examples)):
            y[i] = y[i].strip()
            if choice == 2:
                y[i] = y[i].strip('.')
            if y[i] == last_col[0]:
                y[i] = 1
            else:
                y[i] = -1
        return y
    def GetTrainTestSplit(self,x,y,split_size):
        """Deterministic sklearn train/test split (fixed random_state=60)."""
        # from sklearn.cross_validation import train_test_split
        from sklearn.model_selection import train_test_split
        return train_test_split(x, y,test_size=split_size,random_state=60)
class Node:
    """A decision-tree node.

    Internal nodes (isLeaf == 0) hold an attribute in ``val`` and one
    child per attribute value; leaves (isLeaf == 1) hold a class label.
    """

    def __init__(self, val, isLeaf):
        self.val = val
        self.isLeaf = isLeaf
        self.child = []          # ordered subtrees, one per attribute value

    def insert(self, subtree):
        """Append *subtree* (itself a Node) as the next child."""
        self.child.append(subtree)
class DecisionTree:
    """ID3-style decision-tree learner with AdaBoost support.

    Labels are encoded +1/-1. ``attr_mapping`` maps each attribute to its
    list of possible values; ``index`` maps each attribute to its column
    index in a feature row; ``depth_max`` bounds the recursion depth
    (1 produces decision stumps for AdaBoost).
    """
    def __init__(self,attr_mapping,index,depth_max):
        self.attr_mapping = attr_mapping
        self.index = index
        self.depth_max = depth_max
    def setMaxDepth(self,depth):
        self.depth_max = depth
    def GetBooleanEntropy(self,yes,no):
        """Binary entropy of a yes/no count pair; 0 for empty or pure splits."""
        if (yes + no) == 0:
            # print("WHAT")
            return 0
        succ_prob = (yes / (yes + no))
        if succ_prob == 0:
            return 0
        elif succ_prob == 1:
            return 0
        else:
            # print ("succ : ",succ_prob)
            return -(succ_prob * math.log2(succ_prob) + (1 - succ_prob) * math.log2((1 - succ_prob)))
    # attribute is a String, index is an integer
    def Importance(self,attribute,x_train,y_train, index):
        """Information gain of splitting x_train on *attribute* (column *index*)."""
        yes = no = 0
        remainder_attrb_entropy = 0
        for y in y_train:
            if y == 1: #this means class "Yes"
                yes+=1
            else:
                no+=1
        attr_entropy = self.GetBooleanEntropy(yes,no)
        # all the attribute values of that attribute = list
        # attr_vals is a list
        attr_vals = self.attr_mapping[attribute]
        # Per-attribute-value positive/negative counts.
        pos = []
        neg = []
        for j in range(len(attr_vals)):
            pos.append(0)
            neg.append(0)
        for i in range(len(x_train)):
            for j in range(len(attr_vals)):
                # example has the same attribute value
                if x_train[i][index] == attr_vals[j] :
                    if y_train[i] == 1:
                        pos[j] += 1
                    else:
                        neg[j] += 1
                    break
        for k in range(len(attr_vals)):
            weight = ((pos[k] + neg[k])/(yes+no) )
            # print(weight)
            remainder_attrb_entropy += weight* self.GetBooleanEntropy(pos[k],neg[k] )
        return attr_entropy - remainder_attrb_entropy
    # attributes is a list of attribute(String)
    def Dec_Tree_Learning(self,x_train,y_train,attributes,par_x_train,par_y_train,depth):
        """Recursive ID3 learning; returns the root Node of the (sub)tree.

        par_x_train/par_y_train are the parent's examples, used for the
        plurality fallback when x_train is empty.
        """
        same_class = 1
        yes = no = 0
        # Depth cutoff: fall back to the majority label.
        if depth >= self.depth_max:
            return self.Plurality_Value(y_train)
        for y in y_train:
            if y == 1:
                yes += 1
            else:
                no += 1
            if yes >0 and no >0:
                same_class = 0
                break
        if len(x_train) == 0:
            return self.Plurality_Value(par_y_train)
        elif same_class == 1:
            # All remaining examples share one label: emit a leaf.
            if yes > 0 :
                return Node(1,1)
            else :
                return Node(-1,1)
        elif len(attributes) == 0:
            return self.Plurality_Value(y_train)
        else:
            # Pick the attribute with the highest information gain.
            _max = -1
            root = attributes[0]
            for a in attributes: # 'a' is an int
                importance = self.Importance(a,x_train,y_train,self.index[a])
                if importance > _max:
                    _max = importance
                    root = a
            tree = Node(root,0)
            attribute_list = self.attr_mapping[root]
            for a in attribute_list: # each a is a attribute value
                child_x_train = []
                child_y_train = []
                for i in range(len(x_train)):
                    # attribute index and its corresponding value in example e[index[root]]
                    if x_train[i][self.index[root]] == a:
                        child_x_train.append(x_train[i])
                        child_y_train.append(y_train[i])
                # Recurse without the chosen attribute.
                new_attributes = []
                for a in attributes:
                    if a == root:
                        continue
                    new_attributes.append(a)
                subtree = self.Dec_Tree_Learning(child_x_train,child_y_train,new_attributes,x_train,y_train,depth+1)
                tree.insert(subtree)
            return tree
    def Plurality_Value(self,y_val):
        """Leaf Node carrying the majority label of y_val (-1 wins ties)."""
        yes = no = 0
        for y in y_val:
            if y == 1:
                yes+=1
            else:
                no+=1
        if yes > no:
            return Node(1,1) # 1st 1 = Yes
        else:
            return Node(-1,1) # 1st 0 = No
    def Prediction(self,x_test, node):
        """Classify one feature row by walking the tree from *node*."""
        if node.isLeaf == 1 :
            return node.val
        attr = node.val
        attr_list = self.attr_mapping[attr]
        indx = self.index[attr]
        found = False
        next_node = Node(0,0)
        for i in range(len(attr_list)):
            if x_test[indx] == attr_list[i]:
                found = True
                next_node = node.child[i]
                break
        if found != True :
            # Unseen attribute value: fall back to a default label.
            # NOTE(review): Plurality_Value() is given the *feature row*
            # here, not a label list - this looks like a bug; confirm.
            print(indx," Default in Searching !",x_test)
            defaultNode = self.Plurality_Value(x_test)
            return defaultNode.val
        else:
            return self.Prediction(x_test,next_node)
    def Adaboost(self,x_train,y_train, k_count, attributes):
        """AdaBoost.M1 over k_count weak learners (depth-limited trees);
        returns a Weighted_Majority ensemble."""
        h = []
        z = []
        weight = []
        x_train_index = []
        y_train_index = []
        # Start with uniform example weights.
        for i in range(len(x_train)):
            weight.append((1 / len(x_train)))
            x_train_index.append(i)
            y_train_index.append(i)
        # print(weight)
        for k in range(k_count):
            z.append(0.0)
            node = Node(0,0)
            h.append(node)
            next_x_train = []
            next_y_train = []
            # Resample the training set proportionally to the weights.
            # data = examples_dataframe.sample(len(examples_dataframe), weights=weight)
            data = np.random.choice(x_train_index, len(x_train_index), p=weight)
            for ind in data:
                next_x_train.append(x_train[ind])
                next_y_train.append(y_train[ind])
            h[k] = self.Dec_Tree_Learning(next_x_train,next_y_train, attributes,[], [], 0)
            # Weighted training error of this round's hypothesis.
            error = 0
            for j in range(len(x_train)):
                if self.Prediction( x_train[j],h[k]) != y_train[j]:
                    error += weight[j]
            if error > 0.5:
                # NOTE(review): decrementing the loop variable has no effect
                # in a Python for loop - this round is simply skipped, not
                # retried, and h[k]/z[k] keep their placeholder values.
                k -= 1
                print("K KOMSEEEE")
                continue
            # print(k," Error : ",error)
            # Down-weight correctly classified examples, then renormalize.
            # NOTE(review): error == 0 would divide by zero in log10 below.
            for j in range(len(x_train)):
                if self.Prediction( x_train[j],h[k]) == y_train[j]:
                    weight[j] *= (error / (1 - error))
            # weight = preprocessing.normalize(weight)
            weight = [ float(i) / sum(weight) for i in weight ]
            z[k] = math.log10((1 - error) / error)
        return Weighted_Majority(h,z)
    def Prediction_Stump(self,weighted_majority, k_count, x_test):
        """Weighted-majority vote of the first k_count ensemble members;
        returns +1 or -1."""
        val = 0
        h = weighted_majority.h
        z = weighted_majority.z
        for i in range(k_count):
            pred = self.Prediction( x_test,h[i])
            # print (pred, z[i])
            val += ( pred* z[i] )
        # print("final : ",val)
        if val > 0:
            return 1
        else:
            return -1
class Weighted_Majority:
    """AdaBoost ensemble: hypothesis trees plus their vote weights.

    h[i] is the i-th weak learner (a Node tree) and z[i] its weight.
    """

    def __init__(self, h, z):
        self.h = h
        self.z = z
def PreProcessData(dataset_frame,number_type,replace_with_mean,dropping_col,last_col,choice):
    """Clean a raw dataframe and return the encoded learning inputs.

    Returns (x, y, attr_mapping, index, attr_list, pre_processing) where x
    holds the binarized feature columns and y the +1/-1 labels.
    """
    # Normalize the two missing-value markers to NaN and drop unwanted columns.
    dataset_frame = dataset_frame.replace(' ', np.NaN)
    dataset_frame = dataset_frame.replace('?', np.NaN)
    dataset_frame.drop(dataset_frame.columns[dropping_col], axis=1, inplace=True)
    examples = dataset_frame.iloc[:, :].values
    examples_df = dataset_frame.iloc[:, :]
    # x = dataset_frame.iloc[:, :-1].values
    # y = dataset_frame.iloc[:, -1].values
    if choice == 0:
        # Keep all positive rows plus a few random extra rows (class
        # rebalancing for the tiny demo dataset).
        # NOTE(review): loops forever if fewer than extra_sample_count
        # candidate rows exist outside examples_filtered_index.
        extra_sample_count = 7
        examples_filtered = []
        examples_filtered_index = []
        for i in range(len(examples)):
            if examples[i][-1] == 1:
                examples_filtered.append(list(examples[i]) )
                examples_filtered_index.append(i)
        indx_count = 0
        while(True):
            rand_index = random.randint(0,len(examples) - 1)
            if rand_index not in examples_filtered_index:
                examples_filtered.append(list(examples[rand_index]) )
                indx_count += 1
                if indx_count == extra_sample_count:
                    break
        random.shuffle(examples_filtered)
        print(examples)
        examples_filtered = np.array(examples_filtered)
        print("########### FILTERED ##############")
        print(examples_filtered)
        examples = examples_filtered
    pre_processing = PreProcessing(examples)
    attr_list = pre_processing.GetAttributeList(examples_df)
    # attr_length does not include label (last col)
    attr_length = pre_processing.GetAtrributeLength(dataset_frame)
    # Every column that is not numeric is imputed with its mode.
    replace_with_mode = []
    for i in range(attr_length):
        if i in number_type:
            continue
        replace_with_mode.append(i)
    examples = pre_processing.Remove_Useless_Rows(examples)
    pre_processing.Replace_With_Mean(examples_df, examples, replace_with_mean)
    pre_processing.Replace_With_Mode(examples, replace_with_mode)
    for n in number_type:
        examples[:,n:(n+1)] = examples[:,n:(n+1)].astype(np.float64)
    pre_processing.Binarization(examples, number_type)
    attr_mapping, index = pre_processing.GetAtrributeMapping(examples, attr_length)
    y = pre_processing.DoLastColEncoding(examples, last_col,choice)
    x = examples[:, :-1]
    return x, y, attr_mapping, index, attr_list,pre_processing
##################### START #######################
# Select the dataset: 0 = Data.csv demo, 1 = 1/1.csv, 2 = adult census
# (separate test file), 3 = 3/3.csv (all columns numeric).
choice = 0
dropping_col = []
number_type = []
dataset_test = []
if choice == 0:
    dataset = pd.read_csv("Data.csv")
    last_col = ["1", "0"]
    dropping_col = []
    replace_with_mean = number_type = [1, 2]
elif choice == 1:
    dataset = pd.read_csv("1/1.csv")
    last_col = ["Yes", "No"]
    dropping_col = [0]
    replace_with_mean = number_type = [4, 17, 18]
elif choice == 2:
    dataset = pd.read_csv("2/2.csv",header = None)
    dataset_test = pd.read_csv("2/2_test.csv",header = None)
    last_col = ["<=50K", ">50K"]
    dropping_col = []
    number_type = [0, 2, 4, 10, 11, 12]
    replace_with_mean = [0,2,4,12]
elif choice == 3:
    dataset = pd.read_csv("3/3.csv")
    last_col = ["1", "0"]
    dropping_col = []
    # Every attribute column is numeric for this dataset.
    attrb_length = len(dataset.columns) - 1
    for i in range(attrb_length):
        number_type.append(i)
    # number_type = [0, 2, 4, 10, 11, 12]
    print(number_type,len(number_type))
    replace_with_mean = number_type
else:
    dataset = pd.read_csv("Data.csv")
    last_col = ["Yes", "No"]
    dropping_col = []
    replace_with_mean = number_type = [1, 2]
x, y, attr_mapping, index, attr_list,pre_processing = PreProcessData(dataset,number_type, replace_with_mean,dropping_col,last_col,choice)
'''
dataset = dataset.replace(' ',np.NaN)
dataset = dataset.replace('?',np.NaN)
dataset.drop(dataset.columns[dropping_col], axis=1, inplace=True)
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
examples = dataset.iloc[:, :].values
examples_df = dataset.iloc[:, :]
pre_processing = PreProcessing(examples)
attr_list = pre_processing.GetAttributeList(examples_df)
# attr_length does not include label (last col)
attr_length = pre_processing.GetAtrributeLength(dataset)
for i in range(attr_length):
    if i in number_type:
        continue
    string_type.append(i)
# missing_col = pre_processing.Find_Missing_Col(attr_length)
# print(missing_col)
examples = pre_processing.Remove_Useless_Rows(examples)
pre_processing.Replace_With_Mean(examples_df,examples,number_type)
pre_processing.Replace_With_Mode(examples,string_type)
pre_processing.Binarization(examples,number_type)
attr_mapping, index = pre_processing.GetAtrributeMapping(examples,attr_length)
y = pre_processing.DoLastColEncoding(examples,last_col)
x = examples[:,:-1]
'''
# The adult dataset ships its own test split; otherwise hold out 20%.
if choice == 2:
    x_train = x
    y_train = y
    x_test, y_test,*rest = PreProcessData(dataset_test, number_type, replace_with_mean, dropping_col, last_col,choice)
else:
    x_train, x_test, y_train, y_test = pre_processing.GetTrainTestSplit(x,y,split_size=0.2)
# print(x_train,"\n",y_train,"\n", x_test ,"\n", y_test)
# print(examples)
############### Decision Tree Learning ############
# Full-depth tree for plain learning, depth-1 stumps for AdaBoost.
decision_tree = DecisionTree(attr_mapping,index, math.inf)
decision_tree_adaboost = DecisionTree(attr_mapping,index,1)
print(len(x_train), len(x_test))
tree = decision_tree.Dec_Tree_Learning(x_train,y_train,attr_list,[],[],0)
not_match = match = 0
# print(tree)
for i in range(len(x_test)):
    # y_test[i] = y_test[i].strip()
    # if choice == 2:
    #     y_test[i] = y_test[i].strip('.')
    if decision_tree.Prediction(x_test[i],tree) == y_test[i]:
        match += 1
        # print("Match")
    else:
        not_match += 1
        # print("Does not match")
print(match,not_match)
accuracy = (match)/ (match + not_match) * 100
print("Decision Tree : ",accuracy,"%","Time : ",datetime.datetime.now().time())
print("*******Adaboost*******")
# Evaluate AdaBoost for several ensemble sizes.
k_list = [5,10,15,20]
for k in k_list:
    k_count = k
    weighted_majority = decision_tree_adaboost.Adaboost(x_train,y_train,k_count,attr_list)
    not_match = match = 0
    for i in range(len(x_test)):
        if decision_tree_adaboost.Prediction_Stump(weighted_majority,k_count,x_test[i]) == y_test[i]:
            match += 1
            # print("Match")
        else:
            not_match += 1
            # print("Does not match")
    accuracy = (match)/ (match + not_match) * 100
    print("LoopCount : ",k," accuracy : ",accuracy,"%","Time : ",datetime.datetime.now().time())
# '''
ab1a715e0bbaae5f768af042b90040b288c150cd | Python | reesporte/euler | /17/p17.py | UTF-8 | 1,618 | 3.796875 | 4 | [] | no_license | """
project euler problem 17
i kept misspelling forty as fourty and so i kept trying a thousand different
ways to solve the problem and the real problem with my code was the spelling of the
word FORTY
i am So angry
"""
def please_god():
    """Print the total number of letters used when writing out every number
    from 1 to 1000 in words, British usage ("and" after "hundred"),
    not counting spaces or hyphens. Project Euler problem 17.
    """
    units = ["", "one", "two", "three", "four", "five", "six", "seven",
             "eight", "nine", "ten", "eleven", "twelve", "thirteen",
             "fourteen", "fifteen", "sixteen", "seventeen", "eighteen",
             "nineteen"]
    tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty",
            "seventy", "eighty", "ninety"]

    def letter_count(n):
        # Recursive spelling length for 1 <= n <= 1000.
        if n == 1000:
            return len("onethousand")
        if n >= 100:
            total = letter_count(n // 100) + len("hundred")
            remainder = n % 100
            if remainder:
                total += len("and") + letter_count(remainder)
            return total
        if n < 20:
            return len(units[n])
        return len(tens[n // 10]) + len(units[n % 10])

    print(sum(letter_count(i) for i in range(1, 1001)))

if __name__ == '__main__':
    please_god()
| true |
1d22eca056d357759d265d228f253f40c6f22440 | Python | DLTarasi/LPTHW | /ex3.py | UTF-8 | 1,163 | 4.21875 | 4 | [] | no_license | # prints I will now count my chickens:
# Header line for the chicken-counting section.
print("I will now count my chickens:")
# 30.0 / 6.0 is evaluated first (division binds tighter), then added to 25.0 -> 30.0
print("Hens", 25.0 + 30.0 / 6.0)
# 25.0 * 3.0 = 75.0, then 75.0 % 4.0 = 3.0, subtracted from 100.0 -> 97.0
print("Roosters", 100.0 - 25.0 * 3.0 % 4.0)
# Header line for the egg-counting section.
print("I will now count the eggs:")
# % and / bind tighter than + and -: 4.0 % 2.0 = 0.0 and 1.0 / 4.0 = 0.25 first -> 6.75
print(3.0 + 2.0 + 1.0 - 5.0 + 4.0 % 2.0 - 1.0 / 4.0 + 6.0)
# Prints the question as plain text.
print ("Is it true that 3 + 2 < 5 - 7?")
# Evaluates the comparison 5.0 < -2.0 -> False
print(3.0 + 2.0 < 5.0 - 7.0)
# Prints the label, then the computed sum -> 5.0
print("What is 3 + 2?", 3.0 + 2.0)
# Prints the label, then the computed difference -> -2.0
print("What is 5-7?", 5.0-7.0)
# Plain commentary on the False result above.
print("Oh, that's why it's False.")
# Plain transition line.
print("How about some more.")
# Each of the following prints a label and a boolean comparison of 5.0 and -2.0
print("Is it greater?", 5.0 > -2.0)
print("is it greater or equal?", 5.0 >= -2.0)
print("Is it less or equal?", 5.0 <= -2.0)
| true |
611f061c1a018d8a4fdaecfe7ce866b514442a2a | Python | Gorazor/leetcode | /栈/32. 最长有效括号.py | UTF-8 | 728 | 3.28125 | 3 | [] | no_license | class Solution:
def longestValidParentheses(self, s: str) -> int:
self.max_length=0
stack=[-1]
for i,c in enumerate(s):
if c=='(':
stack.append(i)
else:
tmp=stack.pop()
if not stack:
stack.append(i)
else:
length=i-stack[-1]
self.max_length=max(self.max_length,length)
return self.max_length
#https://leetcode-cn.com/problems/longest-valid-parentheses/solution/zui-chang-you-xiao-gua-hao-by-leetcode-solution/
# The stack is initialized with -1 as a sentinel index.
ec220f980666423ff543b622ca5a636360f8deda | Python | NordThing/TopCut | /AutoPilot/v1.py | UTF-8 | 11,185 | 2.625 | 3 | [] | no_license | #2020-09-13 Latest updated
#Author Henrik Allberg @henrikallberg
#Start of the autopilot for the lawnmower
#First it will just take use of the Magnetometer and GPS from a network stream
#Will put waypoints in a vector - would be nice to have an alogritm for that later
#Should make this program works first
import os
from gps import *
import string
from pynmea import nmea
import socket
import sys
import RPi.GPIO as GPIO
from time import sleep
#Imported numpy to use array inside the program especially for the waypoints
import numpy as np
#This to use the heading/bearing from the magnetometer hmc5883l
import hmc5883l as HMC
#This is to calculate the distance and angle to the next waypoint - this is used for the autopilot
import gpscalculatemodule as GPSCalc
#Initialisation for the calculated distance
#and for the calculated course angle to the next waypoint
distance = 0
angle = 0
#Motor speed constants (used as PWM-style magnitudes by the turn helpers below)
FORWARD_VEL = 230
SLOW_TURN_VEL = 220
PIVOT_WHEEL_VEL = 50
FAST_TURN_VEL = 180
#Calibration constants for the motors in differential drive config.
# e. g. if the left motor is slower than the right one, you can add some
# % to the left and reduce the same % to the right, and vice versa.
K_RIGHT_MOTOR = 1.0
K_LEFT_MOTOR = 1.0
vel = 255
#Tolerance how near you need to be the waypoint to set it as reached; higher value = less sensitive
TOLERANCE_RADIUS = 1.0
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
#Initialise motor 1 controller pins on L298 (A/B = direction, E = enable)
Motor1A = 16
Motor1B = 18
Motor1E = 32
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1E,GPIO.OUT)
#Initialise motor 2 controller pins on L298
Motor2A = 21
Motor2B = 23
Motor2E = 33
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2E,GPIO.OUT)
# NOTE(review): these lines set direction A-high/B-low and raise both
# enables immediately at import time, i.e. the motors start as soon as
# the script is launched - confirm this is intended.
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor1E,GPIO.HIGH)
GPIO.output(Motor2E,GPIO.HIGH)
#Function to turn both motors on
def motor_on():
    # Set both motors to the A-high/B-low direction (assumed forward -
    # confirm wiring) and raise both H-bridge enable pins.
    GPIO.output(Motor1A,GPIO.HIGH)
    GPIO.output(Motor1B,GPIO.LOW)
    GPIO.output(Motor1E,GPIO.HIGH)
    GPIO.output(Motor2A,GPIO.HIGH)
    GPIO.output(Motor2B,GPIO.LOW)
    GPIO.output(Motor2E,GPIO.HIGH)
#Function to turn both motors off
def motor_off():
    # Drop both H-bridge enable pins; direction pins are left as-is.
    GPIO.output(Motor1E,GPIO.LOW)
    GPIO.output(Motor2E,GPIO.LOW)
#Load the waypoints the program will follow: one "lat,lon" float pair per
#row, first (header) row skipped.
nav_waypoint = np.loadtxt("waypoints.txt", dtype=np.float64, delimiter=",", skiprows=1)
#nav_waypoint = np.array([[1,2],[3,4],[5,6]])
#Highest usable waypoint index for the navigation loop.
#Subtracting 1 because index_waypoints starts at 0 while len() counts the
#waypoints from 1, so len(nav_waypoint) - 1 is the last valid index.
num_waypoints = (len(nav_waypoint)) - 1
#Index of the currently targeted waypoint.
index_waypoints = 0
#Sets the module-level waypoint_lat/waypoint_lon globals so the rest of
#the program can read the currently targeted waypoint.
def act_waypoint(index_waypoints):
    # Print the "lat, lon" pair of the active waypoint for debugging.
    print 'Actual waypoint'
    print(nav_waypoint[index_waypoints,0:2])
    global waypoint_lat
    global waypoint_lon
    waypoint_lat = nav_waypoint[index_waypoints,0]
    waypoint_lon = nav_waypoint[index_waypoints,1]
#Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Define the server address and port from which the NMEA
# stream should be read.
server_address = ('localhost', 50022)
try:
    # Connect the socket to the port where the RTK server is listening
    sock.connect(server_address)
    print '-------------------------------------------'
    print >>sys.stderr, 'connecting to <%s> port <%s>' % server_address
    print '-------------------------------------------'
except:
    # Bare except: log whatever went wrong, then re-raise so a failed
    # connection aborts the script instead of being swallowed.
    print "Unexpected error:", sys.exc_info()[0]
    raise
# Read each line from the NMEA stream.
def readlines(sock, recv_buffer=4096, delim='\n'):
    """Generator yielding complete lines read from *sock*.

    Data is received in chunks of *recv_buffer* bytes and buffered until a
    *delim* separator is seen; iteration ends when recv() returns an empty
    string (peer closed the connection).

    Bug fix: the split previously hard-coded '\\n' even though the search
    used *delim*, so a caller-supplied delimiter was silently ignored.
    """
    buffer = ''
    data = True
    while data:
        data = sock.recv(recv_buffer)
        buffer += data
        while buffer.find(delim) != -1:
            # Split off exactly one complete line on the chosen delimiter.
            line, buffer = buffer.split(delim, 1)
            yield line
    return
# Create an instance of an GPGGA object (NMEA sentence parser).
gpgga = nmea.GPGGA()
# Constants for defining error ranges for the stepped proportional control
MAX_HEADING_ANGLE = 180
MIN_HEADING_ANGLE = 5
ANGLE_RANGE_DIV = 0.25
def Forward(val):
    """Drive both wheels forward at speed *val*, scaled per motor.

    Bug fix: the body referenced the undefined name `vel` while the
    parameter is named `val`, so every call raised NameError.
    """
    # NOTE(review): GPIO.output is given a scaled speed value here while the
    # turn helpers use GPIO.PWM -- confirm the intended duty-cycle mechanism.
    GPIO.output(Motor1E, val * K_RIGHT_MOTOR)
    GPIO.output(Motor2E, val * K_LEFT_MOTOR)
# NOTE(review): the four turn helpers below build GPIO.PWM objects into
# locals that are never start()ed or stored, so they appear to have no
# lasting effect on the motors -- confirm the intended PWM handling.
def Turn_Left(vel):
 pwmMotor1 = GPIO.PWM(Motor1E, vel * K_RIGHT_MOTOR)
 pwmMotor2 = GPIO.PWM(Motor2E, PIVOT_WHEEL_VEL * K_LEFT_MOTOR)
def Turn_Right(vel):
 pwmMotor1 = GPIO.PWM(Motor1E, PIVOT_WHEEL_VEL * K_RIGHT_MOTOR)
 pwmMotor2 = GPIO.PWM(Motor2E, vel * K_LEFT_MOTOR)
def Turn_Left_Fast_Spin(vel):
 pwmMotor1 = GPIO.PWM(Motor1E, vel * K_RIGHT_MOTOR)
 pwmMotor2 = GPIO.PWM(Motor2E, vel * K_LEFT_MOTOR)
def Turn_Right_Fast_Spin(vel):
 pwmMotor1 = GPIO.PWM(Motor1E, vel * K_RIGHT_MOTOR)
 pwmMotor2 = GPIO.PWM(Motor2E, vel * K_LEFT_MOTOR)
#Stopping all motors on rover (both enable lines low)
def Stop():
 GPIO.output(Motor1E,GPIO.LOW)
 GPIO.output(Motor2E,GPIO.LOW)
#Stepped proportional heading control: choose a turn/forward command from the
#signed heading error (degrees) between desired and actual bearing.
def Control_Navigation(act_heading,dir_heading):
 heading_error = (dir_heading - act_heading)
 # print heading_error
 # Wrap the error into [-180, 180] so turns take the shorter direction.
 if heading_error < -180:
  heading_error = heading_error + 360
 elif heading_error > 180:
  heading_error = heading_error -360
 print heading_error
 #Turn right: small positive error -> slow turn, large -> fast spin
 if (heading_error > MIN_HEADING_ANGLE and heading_error <= MAX_HEADING_ANGLE * ANGLE_RANGE_DIV):
  Turn_Right(SLOW_TURN_VEL)
  print"Turn right"
 elif (heading_error > MAX_HEADING_ANGLE * ANGLE_RANGE_DIV and heading_error <= MAX_HEADING_ANGLE):
  Turn_Right_Fast_Spin(FAST_TURN_VEL)
  print"Turn right fast"
 elif (heading_error < -MIN_HEADING_ANGLE and heading_error >= -MAX_HEADING_ANGLE * ANGLE_RANGE_DIV):
  Turn_Left(SLOW_TURN_VEL)
  print"Turn left"
 elif (heading_error < -MAX_HEADING_ANGLE * ANGLE_RANGE_DIV and heading_error >= -MAX_HEADING_ANGLE):
  Turn_Left_Fast_Spin(FAST_TURN_VEL)
  print"Turn left fast"
 elif (heading_error >= -MIN_HEADING_ANGLE and heading_error <= MIN_HEADING_ANGLE):
  Forward(FORWARD_VEL)
  print"Forward"
 else:
  print"Not defined"
# Main loop: parse each $GPGGA sentence from the NMEA stream and navigate
# the rover waypoint-by-waypoint.
try:
 for line in readlines(sock):
  #print line
  if line[0:6] == '$GPGGA':
   os.system('clear')
   #method for parsing the NMEA sentence
   gpgga.parse(line)
   lats = gpgga.latitude
   # Print some information about the position of the
   # mobile station / robot.
   print '-------------------------------------------'
   print >>sys.stderr, 'connecting to <%s> port <%s>' % server_address
   print '-------------------------------------------'
   print '\n-------------------------------------------'
   print '              READ RTK NMEA STREAM           '
   print '-------------------------------------------'
   #Latitude direction
   lat_dir = gpgga.lat_direction
   #Longitude values
   longitude = gpgga.longitude
   #Longitude direction
   long_dir = gpgga.lon_direction
   #GPS time stamps
   time_stamp = gpgga.timestamp
   #Antenna altitude
   alt = gpgga.antenna_altitude
   lats = gpgga.latitude
   longs = gpgga.longitude
   #convert degrees,decimal minutes to decimal degrees
   # The source for the decimal calcualtion was:
   # http://dlnmh9ip6v2uc.cloudfront.net/tutorialimages/Python_and_GPS/gpsmap.py
   lat1 = (float(lats[2]+lats[3]+lats[4]+lats[5]+lats[6]+lats[7]+lats[8]))/60
   lat = (float(lats[0]+lats[1])+lat1)
   long1 = (float(longs[3]+longs[4]+longs[5]+longs[6]+longs[7]+longs[8]+longs[9]))/60
   long = (float(longs[0]+longs[1]+longs[2])+long1)
   print '\n------------ rover information --------------'
   print 'lat:  ',lat
   print 'long: ',long
   print 'direction: ', HMC.bearing()
   print '-------------------------------------------'
   #Calling the function act_waypoint() to get the waypoint that we are heading against
   #To call waypoint_lat and waypoint_lon act_waypoint has to be run before so globals variables are defined
   act_waypoint(index_waypoints)
   #Call of the module gpc-calculate-module with the GPS coordinates
   distance, angle = GPSCalc.calc(lat, long, waypoint_lat, waypoint_lon)
   # Normalise the bearing to the waypoint into [0, 360).
   if angle < 0:
    angle += 360
   else:
    pass
   Control_Navigation(HMC.bearing(),angle)
   print '\n------------ waypoint info ----------------'
   print 'lat:  ',waypoint_lat
   print 'long: ',waypoint_lon
   print 'direction: ', round(angle, 1)
   print 'distance to waypoint: ', round(distance,1), 'm'
   # Advance to the next waypoint once inside the tolerance circle.
   # NOTE(review): the "all waypoints done" branch is only reachable while
   # the rover is still OUTSIDE the tolerance radius -- confirm ordering.
   if round(distance,1) < TOLERANCE_RADIUS:
    print('You have reached waypoint',index_waypoints)
    index_waypoints = 1 + index_waypoints
    print('You will now head against',index_waypoints)
    sleep(3)
   elif index_waypoints == num_waypoints:
    print('Yoh have been on all waypoints - exit')
    print('Rover is going to home position')
    Stop()
    break
   else:
    print('You are too far away from the waypoint')
   # print('You have reached waypoint',index_waypoints)
   # index_waypoints = 1 + index_waypoints
   # print('You will now head against',index_waypoints)
   # sleep(3)
   print 'total waypoints: ',num_waypoints + 1
   print 'wayponints left: The rover is on',index_waypoints + 1, ' of total  ',num_waypoints + 1
   print '-------------------------------------------'
   print '--------Waypoints from file ---------------'
   print '-------------------------------------------'
except:
 print "Unexpected error:", sys.exc_info()[0]
 raise
| true |
509b82aea0e1b808d6112bed492bce1785ce27ff | Python | RadwanDuadu/Privacy-Source | /HOG detector/dlib_detectorTest.py | UTF-8 | 2,298 | 2.84375 | 3 | [] | no_license | import dlib
import cv2
import glob
import os
import time
# initialize dlib's face detector (HOG-based); NOTE: despite the original
# comment, no facial-landmark predictor is created in this script
print("[INFO] loading facial detector")
detector = dlib.get_frontal_face_detector()
def rect_to_bb(rect):
	"""Convert a dlib rectangle into an (x, y, w, h) bounding-box tuple."""
	left, top = rect.left(), rect.top()
	width = rect.right() - left
	height = rect.bottom() - top
	return (left, top, width, height)
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
	"""Resize *image* preserving its aspect ratio.

	Give exactly one of *width* / *height*; the other dimension is scaled
	to match. With neither given the image is returned unchanged.

	Bug fix: *height* was accepted but ignored, and width=None (the
	default) crashed with a TypeError in the ratio computation.
	"""
	(h, w) = image.shape[:2]
	if width is None and height is None:
		# nothing to do -- keep the original dimensions
		return image
	if width is not None:
		# calculate the ratio of the width and construct the dimensions
		r = width / float(w)
		dim = (width, int(h * r))
	else:
		# scale by the requested height instead
		r = height / float(h)
		dim = (int(w * r), height)
	# resize the image and return it
	resized = cv2.resize(image, dim, interpolation=inter)
	return resized
textFile = open('/home/pi/Desktop/recordTime.txt', 'w')
# loop over the still images on disk, blur every detected face and record
# the per-image processing time
for filename in glob.iglob('/home/pi/Desktop/pos/*.pgm', recursive=True):
	# grab the frame from the threaded video stream, resize it to
	# have a maximum width of 400 pixels, and convert it to
	# grayscale
	startTime = time.time()
	head, tail = os.path.split(filename)
	print("working on pic: " + tail)
	frame = cv2.imread(filename)
	frame = resize(frame, width=400)
	result_image = frame.copy()
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	# detect faces in the grayscale frame
	rects = detector(gray, 0)
	# loop over the face detections
	for rect in rects:
		# compute the bounding box of the face and draw it on the
		(bX, bY, bW, bH) = rect_to_bb(rect)
		cv2.rectangle(frame, (bX, bY), (bX + bW, bY + bH),
			(255, 255, 0), 5)
		sub_face = frame[bY:bY + bH, bX:bX + bW]
		# apply a gaussian blur on this new recangle image
		sub_face = cv2.GaussianBlur(sub_face, (23, 23), 30)
		# merge this blurry rectangle to our final image
		result_image[bY:bY + sub_face.shape[0], bX:bX + sub_face.shape[1]] = sub_face
	print(tail + ': ' + str(time.time() - startTime) + " Seconds \n")
	textFile.write(tail + ': ' + str(time.time() - startTime) + " Seconds \n")
	path = '/home/pi/Desktop/blur'
	cv2.imwrite(os.path.join(path, tail), result_image)
textFile.close()
| true |
ad259aa8a4f84792e07248bf1fb8b5bec2dd2bc8 | Python | williamwang8901/Algorithm-Data-Structure | /LeetCode_Python/Rotate_Array_189.py | UTF-8 | 786 | 3.3125 | 3 | [] | no_license | import pdb
class Solution(object):
    def rotate(self, nums, k):
        """
        Rotate *nums* right by *k* positions in place, O(1) extra space.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if k <= 0 or len(nums) == 0:
            return
        length = len(nums)
        k %= length
        # Classic three-reversal rotation: reverse all, then each part.
        self.reverse(nums, 0, length - 1)
        self.reverse(nums, 0, k - 1)
        self.reverse(nums, k, length - 1)

    def reverse(self, nums, start, end):
        """Reverse nums[start..end] in place.

        Bug fix: a leftover pdb.set_trace() debug breakpoint was removed.
        """
        while start < end:
            nums[start], nums[end] = nums[end], nums[start]
            start += 1
            end -= 1
if __name__ == '__main__':
    # Smoke test: rotating [1, 2, 3] right by 1 yields [3, 1, 2].
    solution = Solution()
    nums = [1,2,3]
    k = 1
    solution.rotate(nums, k)
| true |
a60d81d7b3e9363b61b912d36080e6eec982f705 | Python | buxuele/algo_snippet | /junk/127_word_dragon.py | UTF-8 | 313 | 3.28125 | 3 | [] | no_license | # author: fanchuangwater@gmail.com
# date: 2020/4/12 下午9:59
# 目的:
# beginWord = "hit"
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Pops nums[i] whenever it equals its successor, printing the list each pass.
# NOTE(review): i is incremented even after a pop, so the element that slid
# into position i is never re-checked and some duplicate pairs survive --
# confirm whether full de-duplication of runs was intended.
nums = [1,1,1,1, 1, 2, 2, 2, 2,2,3]
i = 0
while i < len(nums) - 1:
    if nums[i] == nums[i+1]:
        nums.pop(i)
    print(nums)
    i += 1
print("in the end :", nums)
| true |
def walk_modules(module, name="", depth=-1):
    """Generator that walks a PyTorch module tree.

    Recurses through named children until *depth* levels have been consumed
    (depth == -1 means unlimited) or a leaf module is reached, yielding a
    (dotted_name, module) tuple for each stopping module.
    """
    children = list(module.named_children())
    if depth == 0 or not children:
        # Reached the requested depth or a leaf: emit this module.
        yield (name, module)
        return
    for child_name, child_module in children:
        dotted = child_name if name == "" else name + "." + child_name
        yield from walk_modules(child_module, dotted, depth - 1)
cc9053d4f541a3e13c981ea6b60ad60a8a1954e3 | Python | cfbanks/data-533-lab4 | /duel_learnspells_test.py | UTF-8 | 1,548 | 2.703125 | 3 | [] | no_license | from potterworld.sub2 import learn_spells
from potterworld.sub2 import duel as dl
import unittest
class TestDuel_LearnSpells(unittest.TestCase):
    """Unit tests for the potterworld.sub2 learn_spells and duel modules."""
    @classmethod
    def setUpClass(cls):
        # One shared learn_spells instance for the whole test class.
        print("\nsetUpClass\n")
        cls.learn_spells = learn_spells.learn_spells()
    def setUp(self):
        print('setUp')
        # Fresh snapshot of the known spell names before every test.
        self.spell_keys_list = list(self.learn_spells.spells_dict.keys())
    def test_learn_spells(self):
        """Spell dictionary contents, random casting, wand-movement patterns."""
        self.valid_set = ('l','r','u','d', 'random')
        self.assertEqual(self.learn_spells.spells_dict['Reparo'], 'Repair things that are broken.')
        self.assertIn(self.learn_spells.cast_a_spell(), self.spell_keys_list)
        pattern = self.learn_spells.get_wand_movement_pattern()
        print(pattern)
        if pattern == 'random':
            self.assertEqual(pattern, 'random')
        else:
            # Every step of a concrete pattern must be a valid wand move.
            for i in pattern:
                self.assertIn(i, self.valid_set)
    def test_duel(self):
        """duel_voldemort returns (outcome in {0,1}, spell, spell)."""
        self.wand_movement_pattern = 'random'
        self.assertIn(dl.duel_voldemort(self.wand_movement_pattern)[0], [0,1])
        self.assertIn(dl.duel_voldemort(self.wand_movement_pattern)[1], self.spell_keys_list)
        self.assertIn(dl.duel_voldemort(self.wand_movement_pattern)[2], self.spell_keys_list)
        self.assertEqual(len(dl.duel_voldemort(self.wand_movement_pattern)), 3)
    def tearDown(self):
        print('tearDown')
        self.spell_keys_list = None
    @classmethod
    def tearDownClass(cls):
        print('teardownClass')
        cls.learn_spells = None
| true |
61134c6a602710d24fd40304f3d10c9371e41eaa | Python | mehdi1902/natural_computational_tools | /arc_diagram.py | UTF-8 | 1,693 | 3.703125 | 4 | [] | no_license | """
Very simple application for vizualizing the arc diagrams
@ Mehdi Saman Booy
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge, Arc, Circle
import numpy as np
from sys import argv
def _arc(i, j, width=1, linestyle='-', color='black'):
    """Build an unfilled semicircular Arc patch joining positions i and j."""
    center = ((i + j) / 2., 0)
    diameter = abs(i - j)
    return Arc(center, diameter, diameter, 0, 0, 180, linewidth=width,
               edgecolor=color, fill=False, linestyle=linestyle)
def _circle(i, r=.05):
    """Filled black marker of radius *r* centred at (i, 0)."""
    return Circle((i, 0), r, color='black', fill=True)
def arc_diagram(x, linestyle='-', color='black', width=.5, self_loop='same'):
    """Draw an arc diagram for pairing list *x* on the current pyplot figure.

    x[i] == j draws an arc between positions i and j; positions whose value
    equals the self-loop marker are drawn as plain nodes only.
    self_loop (str): 'same' means a self-loop of i is encoded as x[i] == i,
                     '-1' means it is encoded as x[i] == -1.
    Returns the matplotlib Axes holding the diagram.
    """
    plt.clf()
    ax = plt.gca()
    # Baseline connecting the first and last node.
    plt.plot([0, len(x)-1], [0, 0], color='black', linewidth=.7)
    plt.axis('off')
    for i in range(len(x)):
        j = x[i]
        ax.add_patch(_circle(i))
        sl_val = -1 if self_loop=='-1' else i
        if j != sl_val:
            c = _arc(i, j, width=width, linestyle=linestyle, color=color)
            ax.add_patch(c)
    plt.axis('scaled')
    return ax
def phrantheses_to_pairing_list(str, self_loop='same'):
    """Convert a dot-bracket string into a pairing list.

    Matching '(' / ')' positions point at each other; every other character
    maps to itself (self_loop='same') or to -1 (self_loop='-1').
    """
    pairing = [0] * len(str)
    open_positions = []
    for i, ch in enumerate(str):
        if ch == '(':
            open_positions.append(i)
        elif ch == ')':
            j = open_positions.pop()
            pairing[i], pairing[j] = j, i
        else:
            pairing[i] = -1 if self_loop == '-1' else i
    return list(pairing)
if __name__ == '__main__':
    # Two sample dot-bracket structures rendered as arc diagrams.
    d1 = '((((....))))'
    d2 = '..((((..))))'
    for i in [d1, d2]:
        plt.figure()
        ax = arc_diagram(phrantheses_to_pairing_list(i), width=.8, linestyle='--')
        plt.show()
| true |
ab25e617a29b34e14c93a199b4f18ccf2176045c | Python | fm1randa/curso-desenvolvimentoweb-python-django | /aulas/modulos/modulo_math.py | UTF-8 | 170 | 4 | 4 | [] | no_license | import math
print(math.sqrt(5)) # square root
print(math.floor(5.9)) # round down to the integer part
print(math.ceil(5.1)) # round up to the next integer
print(math.factorial(3)) # factorial: 3! == 6
def triangular_number():
    """Interactively read integers and report their triangular numbers.

    Loops until the user types 'done'. Each positive input n is converted
    with n*(n+1)/2, echoed, and collected; the collected list is printed
    once the loop ends.
    """
    results = []
    entry = input("Enter a number, or enter 'done' :")
    while entry != 'done':
        value = int(entry)
        if value > 0:
            triangular = int((value * (value + 1)) / 2)
            print("The triangular number for", value, "is", triangular)
            results.append(triangular)
        entry = input("Enter a number, or enter 'done'")
    print(results)
def main():
    # Entry point: run the interactive triangular-number loop.
    triangular_number()
main()
| true |
ed4b5e4538b17cd8ed78b1ad69bfbd7e48ae050d | Python | pystatic/pystatic | /pystatic/arg.py | UTF-8 | 6,105 | 2.6875 | 3 | [
"MIT"
] | permissive | import copy
import itertools
from pystatic.error.errorcode import *
if TYPE_CHECKING:
from pystatic.typesys import TypeIns
from pystatic.infer.util import ApplyArgs
class Arg(object):
    """A single formal parameter: name, annotation and default-value info."""

    def __init__(self, name, ann: "TypeIns", default=None, valid=False):
        """valid: True when this argument carries a default value."""
        self.name = name
        self.ann = ann
        self.default = default
        self.valid = valid

    def __str__(self):
        from pystatic.predefined import any_ins

        # Annotation is only shown when it is not the implicit Any.
        pieces = [self.name]
        if self.ann != any_ins:
            pieces.append(": " + str(self.ann))
        if self.valid:
            pieces.append(" = ...")
        return "".join(pieces)
class Argument(object):
    """Full argument specification of a callable."""

    def __init__(self):
        self.posonlyargs: List[Arg] = []
        self.args: List[Arg] = []
        self.kwonlyargs: List[Arg] = []
        # vararg/kwarg hold the bare Arg; the star prefix is not stored in the name
        self.vararg: Optional[Arg] = None
        self.kwarg: Optional[Arg] = None

    def get_arg_namelist(self) -> List[str]:
        """Return every argument name, preserving declaration order."""
        names = [arg.name for arg in itertools.chain(self.posonlyargs, self.args)]
        if self.vararg:
            names.append(self.vararg.name)
        names.extend(arg.name for arg in self.kwonlyargs)
        if self.kwarg:
            names.append(self.kwarg.name)
        return names

    def __str__(self):
        parts = [str(arg) for arg in self.posonlyargs + self.args]
        if self.vararg:
            parts.append("*" + str(self.vararg))
        parts.extend(str(arg) for arg in self.kwonlyargs)
        if self.kwarg:
            parts.append("**" + str(self.kwarg))
        return "(" + ", ".join(parts) + ")"
def copy_argument(argument: Argument):
    """Shallow-copy *argument*: the list containers are duplicated so the
    copy can be mutated independently, while the Arg objects are shared."""
    duplicate = Argument()
    for list_field in ("posonlyargs", "args", "kwonlyargs"):
        setattr(duplicate, list_field, copy.copy(getattr(argument, list_field)))
    duplicate.vararg = argument.vararg
    duplicate.kwarg = argument.kwarg
    return duplicate
def match_argument(argument: Argument, applyargs: "ApplyArgs",
                   callnode: Optional[ast.AST]) -> List[ErrorCode]:
    """Check the actual arguments *applyargs* against the formal parameter
    specification *argument*.

    Returns the list of argument errors found: IncompatibleArgument for type
    mismatches, TooMoreArgument / TooFewArgument for arity problems
    (each arity error is reported at most once, via too_more_arg).
    """
    from pystatic.consistent import is_consistent
    errorlist = []
    missing_args: List[str] = []
    too_more_arg = False
    # Record a type-incompatibility error for one (formal, actual) pair.
    def match_arg(arg: Arg, name: str, typeins: "TypeIns", node: ast.AST):
        if not is_consistent(arg.ann, typeins):
            errorlist.append(IncompatibleArgument(node, name, arg.ann,
                                                  typeins))
    i_apply_arg = 0  # index of args scanned in applyargs argument list
    len_apply_arg = len(applyargs.args)
    args = applyargs.args  # args part of applyargs argument list
    # Positional-only parameters must be satisfied by positional actuals.
    for arg in argument.posonlyargs:
        if i_apply_arg >= len_apply_arg:
            missing_args.append(arg.name)
        else:
            # match posonly arguments
            match_arg(arg, arg.name, args[i_apply_arg].value,
                      args[i_apply_arg].node)
            i_apply_arg += 1
    i_param_arg = 0  # index of args scanned in parameter argument list
    len_param_arg = len(argument.args)
    param_args = argument.args  # args part of parameter argument list
    # Pair remaining positional actuals with ordinary parameters.
    while i_param_arg < len_param_arg:
        if i_apply_arg >= len_apply_arg:
            break
        else:
            # match arg
            match_arg(
                param_args[i_param_arg],
                param_args[i_param_arg].name,
                args[i_apply_arg].value,
                args[i_apply_arg].node,
            )
            i_apply_arg += 1
            i_param_arg += 1
    kwargs = applyargs.kwargs
    if i_param_arg >= len_param_arg:
        # args part of parameter argument list are all matched
        if i_apply_arg < len_apply_arg:
            # use args in applyargs to match keyword argument of parameter
            if not argument.vararg:
                if not too_more_arg:
                    errorlist.append(TooMoreArgument(callnode))
                    too_more_arg = True
            else:
                # match *args
                while i_apply_arg < len_apply_arg:
                    match_arg(
                        argument.vararg,
                        "*" + argument.vararg.name,
                        args[i_apply_arg].value,
                        args[i_apply_arg].node,
                    )
                    i_apply_arg += 1
    else:
        # args part of applyargs are all matched
        assert i_apply_arg >= len_apply_arg
        while i_param_arg < len_param_arg:
            # match keyword argument in applyargs with args of parameter args
            cur_argname = param_args[i_param_arg].name
            if cur_argname in kwargs:
                match_arg(
                    param_args[i_param_arg],
                    cur_argname,
                    kwargs[cur_argname].value,
                    kwargs[cur_argname].node,
                )
                kwargs.pop(cur_argname)
            else:
                missing_args.append(cur_argname)
            i_param_arg += 1
    # Keyword-only parameters can only be satisfied from kwargs.
    for arg in argument.kwonlyargs:
        if arg.name in kwargs:
            # match kwonlyargs of parameter
            target_ins = kwargs[arg.name]
            match_arg(arg, arg.name, target_ins.value, target_ins.node)
            kwargs.pop(arg.name)
        else:
            missing_args.append(arg.name)
    # Anything still left in kwargs must be absorbed by **kwarg or is excess.
    if len(kwargs):
        if argument.kwarg:
            for apply_kwarg in kwargs.values():
                # match **kwargs
                match_arg(
                    argument.kwarg,
                    "**" + argument.kwarg.name,
                    apply_kwarg.value,
                    apply_kwarg.node,
                )
        else:
            if not too_more_arg:
                too_more_arg = True
                errorlist.append(TooMoreArgument(callnode))
    if missing_args:
        errorlist.append(TooFewArgument(callnode, missing_args))
    return errorlist
| true |
3e3761e9f3675d08bbfde6eb8e339462fcde006d | Python | m-zakeri/IUSTCompiler | /language_apps/expr2/expr2main.py | UTF-8 | 1,006 | 2.5625 | 3 | [
"MIT"
] | permissive | """
Main script for grammar Expr2
"""
__version__ = '0.1.0'
__author__ = 'Morteza'
from antlr4 import *
from language_apps.expr2.gen.Expr2Lexer import Expr2Lexer
from language_apps.expr2.gen.Expr2Parser import Expr2Parser
from language_apps.expr2.expr2listener import *
# Step 0: Give an input
input_string = 'y = (2 + 5 + (3*6)) * 8 / 2'
# Step 1: Convert input to a byte stream
stream = InputStream(input_string)
# Step 2: Create lexer
lexer = Expr2Lexer(stream)
# Step 3: Create a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create parser
parser = Expr2Parser(token_stream)
# Step 5: Create parse tree
parse_tree = parser.start()
# Step 6: Adding a listener (swap in TreeAddressCode for code generation)
my_listener = DummyListener()
# my_listener = TreeAddressCode()
#ParseTreeWalker.DEFAULT.walk(t=parse_tree, listener=my_listener)
# Step 7: Walk the tree, firing listener callbacks on every node
walker = ParseTreeWalker()
walker.walk(listener=my_listener, t=parse_tree)
# Debug aid: dump the non-hidden tokens produced by the lexer
# lexer.reset()
# for token in lexer.getAllTokens():
#     if token.channel != 1:
#         print(token.type)
| true |
7c635e92fc6119bdd0bb38fc1501de528257d504 | Python | JingkaiTang/github-play | /next_life_and_part/way_or_different_hand/get_case_under_old_problem.py | UTF-8 | 202 | 2.609375 | 3 | [] | no_license |
#! /usr/bin/env python
def child_and_first_day(str_arg):
    """Echo the given string to stdout."""
    print(str_arg)


def time(str_arg):
    """Print *str_arg* (via the helper), then the marker line 'have_week'."""
    child_and_first_day(str_arg)
    print('have_week')


if __name__ == '__main__':
    time('problem')
| true |
a6e2084a79f221f6da1cb693c24288996b26a5b0 | Python | jdfr/Foreground-detection-for-moving-cameras-with-stochastic-approximation | /UtilBM.py | UTF-8 | 8,746 | 2.5625 | 3 | [] | no_license | import numpy as np
import scipy.signal as ssig
import imageio as i
def readImg(imname):
    # Load an image file into a float64, Fortran-ordered array.
    return np.array(i.imread(imname), dtype=np.float64, order='F')
#FROM ExtractFeatures.m
def ExtractFeatures(VideoFrame,SelectedFeatures):
NumRowsImg=VideoFrame.shape[0]
NumColsImg=VideoFrame.shape[1]
NumFeatures=len(SelectedFeatures)
FeatureFrame=np.zeros((NumRowsImg,NumColsImg,NumFeatures), order='F')
for NdxFeature in range(NumFeatures):
MyFeature=SelectedFeatures[NdxFeature]
if MyFeature>=0 and MyFeature<=2:
# Red, green and blue channels
FeatureFrame[:,:,NdxFeature]=VideoFrame[:,:,MyFeature]/255
elif MyFeature>=3 and MyFeature<=5:
# Normalized red, green and blue channels
SumFrame=VideoFrame.sum(axis=2)
SumFrame[SumFrame==0]=1
FeatureFrame[:,:,NdxFeature]=VideoFrame[:,:,MyFeature-3]/SumFrame
elif MyFeature>=6 and MyFeature<=11:
# Haar-like features considered in Han & Davis (2012)
SumFrame=VideoFrame.sum(axis=2)
MyFilter=HFilter(MyFeature-6)
#equivalences SCIPY <=> MATLAB:
# signal.convolve2d(img, np.rot90(fil), mode='same') <=> imfilter(img, rot90(fil, 2), 0, 'conv')
# signal.correlate2d(img, fil, mode='same') <=> imfilter(img, fil, 0, 'corr')
FeatureFrame[:,:,NdxFeature]=ssig.correlate2d(SumFrame, MyFilter, mode='same')/(3*255*np.abs(MyFilter).sum())
elif MyFeature==12:
# Gradient in the horizontal direction, considered in Han & Davis (2012)
SobelFilter=np.array([[-1,0,1],[-2,0,2],[-1,0,1]], order='F')
SumFrame=VideoFrame.sum(axis=2)
FeatureFrame[:,:,NdxFeature]=ssig.correlate2d(SumFrame, SobelFilter, mode='same')/(3*255*np.abs(SobelFilter).sum())
elif MyFeature==13:
# Gradient in the vertical direction, considered in Han & Davis (2012)
SobelFilter=np.array([[-1,0,1],[-2,0,2],[-1,0,1]], order='F').T
SumFrame=VideoFrame.sum(axis=2)
FeatureFrame[:,:,NdxFeature]=ssig.correlate2d(SumFrame, SobelFilter, mode='same')/(3*255*np.abs(SobelFilter).sum())
elif MyFeature==14:
# Red channel of the current pixel, normalized with the current pixel
# and the pixel immediately to the left of the current one
#ShiftedFrame=circshift(VideoFrame,[1 0 0])
ShiftedFrame=np.roll(VideoFrame,1, axis=0)
SumFrame=np.sum(VideoFrame+ShiftedFrame,axis=2)
SumFrame[SumFrame==0]=1
FeatureFrame[:,:,NdxFeature]=6*VideoFrame[:,:,1]/SumFrame
elif MyFeature==15:
# Green channel of the pixel immediately to the lower right of the
# current one, normalized with the current pixel
ShiftedFrame=np.roll(VideoFrame,(-1,-1), axis=(0,1))
SumFrame=VideoFrame.sum(axis=2)
SumFrame[SumFrame==0]=1
FeatureFrame[:,:,NdxFeature]=3*ShiftedFrame[:,:,1]/SumFrame
elif MyFeature>=16 and MyFeature<=18:
# Red, green and blue channels, median filtered
FeatureFrame[:,:,NdxFeature]=ssig.medfilt(VideoFrame[:,:,MyFeature-16], (5,5))/255
elif MyFeature>=19 and MyFeature<=21:
# Normalized red, green and blue channels, median filtered
SumFrame=VideoFrame.sum(axis=2)
SumFrame[SumFrame==0]=1
NormFrame=VideoFrame[:,:,MyFeature-19]/SumFrame
FeatureFrame[:,:,NdxFeature]=ssig.medfilt2d(32768*NormFrame,(5,5))/32768
elif MyFeature==22:
# Tiny Haar-like feature (I)
MyFilter=np.array([[1,0,1],[1,0,1],[1,0,1]], order='F')
SumFrame=VideoFrame.sum(axis=2)
FeatureFrame[:,:,NdxFeature]=ssig.correlate2d(SumFrame,MyFilter,mode='same')/(3*6*255)
elif MyFeature==23:
# Tiny Haar-like feature (II)
MyFilter=np.array([[1,1,1],[1,0,1],[1,1,1]], order='F')
SumFrame=VideoFrame.sum(axis=2)
FeatureFrame[:,:,NdxFeature]=ssig.correlate2d(SumFrame,MyFilter)/(3*8*255)
return FeatureFrame
#FROM ExtractFeatures.m
# Filters corresponding to the Haar-like features considered in:
# Han, B. and Davis, L.S. (2012). Density-Based Multifeature Background
# Subtraction with Support Vector Machine. IEEE Transactions on Pattern
# Analysis and Machine Intelligence 34(5), 1017-1023.
def HFilter(NdxFilter):
    """Return one of the six 9x9 Haar-like filters from Han & Davis (2012).

    The result is always a float64, Fortran-ordered array.
    """
    if NdxFilter == 0 or NdxFilter == 1:
        # Three vertical bands: ones | zeros | ones.
        fil = np.ones((9, 9))
        fil[:, 3:6] = 0
    elif NdxFilter == 2:
        # Checkerboard quadrants separated by a 0.5-valued cross.
        fil = np.zeros((9, 9))
        fil[:4, :4] = 1
        fil[5:, 5:] = 1
        fil[4, :] = 0.5
        fil[:, 4] = 0.5
    elif NdxFilter == 3 or NdxFilter == 4:
        # Left half ones, right half zeros, 0.5 boundary column.
        fil = np.ones((9, 9))
        fil[:, 4] = 0.5
        fil[:, 5:] = 0
    elif NdxFilter == 5:
        # All ones with a 3x3 hole in the centre.
        fil = np.ones((9, 9))
        fil[3:6, 3:6] = 0
    return np.array(fil, dtype=np.float64, order='F')
def matlab_style_gauss2D(shape=(3,3),sigma=0.5):
    """2-D Gaussian kernel matching MATLAB's fspecial('gaussian', shape, sigma).

    The kernel is normalised to sum to 1; negligible tail values (below
    machine epsilon relative to the peak) are zeroed first, as fspecial does.
    """
    half_r = (shape[0] - 1.) / 2.
    half_c = (shape[1] - 1.) / 2.
    rows, cols = np.ogrid[-half_r:half_r + 1, -half_c:half_c + 1]
    kernel = np.exp(-(cols * cols + rows * rows) / (2. * sigma * sigma))
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total
    return kernel


# Shared 3x3 smoothing kernel used by estimateNoise below.
SmoothingFilter = matlab_style_gauss2D((3, 3), 0.5)
def estimateNoise(model):
    # noise = estimateNoise(model)
    # Estimates the per-channel noise of a sequence from the previously
    # computed background 'model' (needs model.Mu and model.Dimension).
    # R.M.Luque and Ezequiel Lopez-Rubio -- February 2011
    # The mean of the scene is used as the original frame
    MuImage = np.array(np.squeeze(shiftdim(model.Mu,2,1)), dtype=np.float64, order='F')
    # The smoothing approach is applied (Gaussian blur, channel by channel)
    SmoothFrame=np.zeros(MuImage.shape, order='F')
    for idx in range(MuImage.shape[2]):
        SmoothFrame[:,:,idx] = ssig.correlate2d(MuImage[:,:,idx],SmoothingFilter, mode='same')
    # The difference between the two images is obtained
    dif = np.square(MuImage - SmoothFrame)
    # A 0.01-winsorized mean is applied instead of the standard mean because
    # the first measure is more robust and certain extreme values are removed
    dif2 = dif.reshape((dif.shape[0]*dif.shape[1],model.Dimension), order='F')
    dif3 = np.sort(dif2,axis=0)
    # NOTE(review): np.max(dif3.shape) picks the larger axis, which equals the
    # pixel count only when rows >= Dimension -- confirm for tiny frames.
    idx = int(np.round(np.max(dif3.shape)*0.99))
    for NdxDim in range(model.Dimension):
        # Clamp the top 1% of squared differences to an in-range source row.
        # NOTE(review): the idx-2 offset looks ad hoc -- confirm.
        dif3[idx:,NdxDim] = dif3[idx-2,NdxDim]
    noise = np.mean(dif3,axis=0)
    return noise
def shiftdim(x, n=None, nargout=2):
    """NumPy port of MATLAB's shiftdim.

    n > 0 shifts dimensions to the left (dropping leading singletons when
    possible, otherwise rotating the axes); n < 0 prepends |n| singleton
    dimensions; n=None removes all leading singleton dimensions.
    Returns (shifted_array, n) when nargout > 1, else just the array.
    """
    x = np.asanyarray(x)
    shape = x.shape
    # Index of the first non-singleton dimension (0 if all are singleton).
    first_ns = next((axis for axis, extent in enumerate(shape) if extent > 1), 0)
    if n is None:
        n = first_ns
    if n > 0:
        n = n % x.ndim
    if n > 0:
        if n <= first_ns:
            # Only singleton dims are dropped: a plain reshape suffices.
            x = x.reshape(shape[n:])
        else:
            # Otherwise rotate the axis order.
            x = x.transpose(np.roll(range(x.ndim), -n))
    elif n < 0:
        x = x.reshape((1,) * (-n) + x.shape)
    return (x, n)[slice(nargout) if nargout > 1 else 0]
def padarray_replicate_both(x, numPads):
    """Replicate-pad *x* by numPads[d] elements on BOTH sides of each
    dimension d, returning a Fortran-ordered copy (MATLAB padarray style)."""
    widths = np.column_stack((numPads, numPads))
    return np.pad(x, widths, 'edge').copy(order='F')
def multiprod1001(a, b):
    """Emulate MATLAB multiprod(a, b, [1 0], [0 1]) for same-shaped a and b.

    a of shape (a1, rest...) is viewed as (a1, 1, rest...) and b as
    (1, b1, rest...); broadcasting then yields
    out[i, j, ...] = a[i, ...] * b[j, ...].
    """
    a_view = a.reshape((a.shape[0], 1) + a.shape[1:])
    b_view = b.reshape((1,) + b.shape)
    return a_view * b_view
d5f114246e15ddc0613c3095d90fcea8f43ef76c | Python | zhimu66/Python-Crypto | /DES_CFB.py | UTF-8 | 1,110 | 3.203125 | 3 | [] | no_license | from Crypto.Cipher import DES
from Crypto import Random
BS = DES.block_size
# PKCS#7-style padding: append k copies of chr(k), where k pads up to BS.
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
# Strip the padding by reading the count from the last byte (Python 2 str).
unpad = lambda s : s[0:-ord(s[-1])]
class DESCipher:
    """DES in CFB mode with a random IV prepended to the ciphertext.

    Python 2 only (relies on str.decode('hex') / .encode('hex')).
    NOTE(review): single DES is cryptographically broken; demo use only.
    """
    def __init__(self, key):
        # The key arrives hex-encoded; store the raw BS-byte key.
        self.key = key.decode("hex")
    def encrypt(self, pt):
        """Pad *pt*, encrypt under a fresh random IV, return hex(iv || ct)."""
        pt = pad(pt)
        iv = Random.new().read(DES.block_size);
        cipher = DES.new(self.key, DES.MODE_CFB, iv)
        # Return iv || ct
        return (iv+cipher.encrypt(pt)).encode('hex')
    def decrypt(self, ct):
        """Hex-decode *ct*, split off the IV, decrypt and remove padding."""
        ct = ct.decode("hex")
        # Extract iv from ciphertext
        iv = ct[:BS]
        ct = ct[BS:]
        cipher = DES.new(self.key, DES.MODE_CFB, iv)
        return unpad(cipher.decrypt(ct))
if __name__== "__main__":
key = '012345678'
plaintext = 'abcdefghijkmnopqa';
key = key.encode('hex')
# Truncate hex encoded key to block size, so 2x BS
key=key[:2*BS]
crypto = DESCipher(key)
ciphertext = crypto.encrypt(plaintext)
print "Cipher text - %s" % ciphertext
plaintext = crypto.decrypt(ciphertext)
print "Plain text - %s" % plaintext | true |
75d2f4a7d96213df8ace263dea96563837194806 | Python | linzihan-backforward/PyTorchTransformer | /Transformer/Decoder.py | UTF-8 | 3,403 | 2.578125 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
from MultiHeadAttention import MultiHeadAttention
from PositionalWiseFeedForward import PositionalWiseFeedForward
from PositionalEncoding import PositionalEncoding
def padding_mask(seq_k, seq_q):
    """
    :param seq_k: key sequence, shape [B, L_k]
    :param seq_q: query sequence, shape [B, L_q]
    :return: boolean mask of shape [B, L_q, L_k], True wherever the key
             token is `PAD` (token id 0)
    """
    query_len = seq_q.size(1)
    # `PAD` is 0
    is_pad = seq_k.eq(0)
    return is_pad.unsqueeze(1).expand(-1, query_len, -1)
def sequence_mask(seq):
    """
    Future-position (upper-triangular) mask for decoder self-attention.

    :param seq: tensor of shape (B, L); only its shape is used
    :return: uint8 tensor of shape (B, L, L) where 1 marks the positions a
             step must not attend to (strictly above the diagonal)
    """
    batch_size, seq_len = seq.size()
    future = torch.triu(torch.ones((seq_len, seq_len), dtype=torch.uint8),
                        diagonal=1)
    return future.unsqueeze(0).expand(batch_size, -1, -1)
class DecoderLayer(nn.Module):
    """One Transformer decoder layer: masked self-attention, encoder-decoder
    attention, then a position-wise feed-forward network.

    NOTE(review): a single MultiHeadAttention instance is reused for both the
    self-attention and the context-attention sub-layers, so they share
    weights -- confirm this is intentional (the original paper uses two
    separate attention blocks).
    """
    def __init__(self, model_dim, num_heads=8, ffn_dim=2048, dropout=0.0):
        super(DecoderLayer, self).__init__()
        self.attention = MultiHeadAttention(model_dim, num_heads, dropout)
        self.feed_forward = PositionalWiseFeedForward(model_dim, ffn_dim, dropout)
    def forward(self,
                dec_inputs,
                enc_outputs,
                self_attn_mask=None,
                context_attn_mask=None):
        """Run one decoder layer; returns
        (dec_output, self_attention, context_attention)."""
        # self attention, all inputs are decoder inputs
        dec_output, self_attention = self.attention(
            dec_inputs, dec_inputs, dec_inputs, self_attn_mask)
        # context attention
        # query is decoder's outputs, key and value are encoder's inputs
        dec_output, context_attention = self.attention(
            enc_outputs, enc_outputs, dec_output, context_attn_mask)
        # decoder's output, or context
        dec_output = self.feed_forward(dec_output)
        return dec_output, self_attention, context_attention
class Decoder(nn.Module):
    """Stack of DecoderLayers with token and positional embeddings."""
    def __init__(self,
                 vocab_size,
                 max_seq_len,
                 num_layers=6,
                 model_dim=512,
                 num_heads=8,
                 ffn_dim=2048,
                 dropout=0.0):
        super(Decoder, self).__init__()
        self.num_layers = num_layers
        self.decoder_layers = nn.ModuleList(
            [DecoderLayer(model_dim, num_heads, ffn_dim, dropout) for _ in
             range(num_layers)])
        # Index 0 is reserved for `PAD`, hence vocab_size + 1 embedding rows.
        self.seq_embedding = nn.Embedding(vocab_size + 1, model_dim, padding_idx=0)
        self.pos_embedding = PositionalEncoding(model_dim, max_seq_len)
    def forward(self, inputs, inputs_len, enc_output, context_attn_mask):
        """Decode token ids `inputs` ([B, L]) against `enc_output`;
        returns (output, self_attentions, context_attentions)."""
        # NOTE(review): .type(torch.LongTensor) silently moves the data to
        # CPU -- confirm behaviour with CUDA inputs.
        inputs = inputs.type(torch.LongTensor)
        output = self.seq_embedding(inputs)
        output += self.pos_embedding(inputs_len)
        self_attention_padding_mask = padding_mask(inputs, inputs)
        seq_mask = sequence_mask(inputs)
        # Forbid attending to PAD positions and to future positions.
        self_attn_mask = torch.gt((self_attention_padding_mask + seq_mask), 0)
        self_attentions = []
        context_attentions = []
        for decoder in self.decoder_layers:
            output, self_attn, context_attn = decoder(
                output, enc_output, self_attn_mask, context_attn_mask)
            self_attentions.append(self_attn)
            context_attentions.append(context_attn)
        return output, self_attentions, context_attentions
| true |
def getMaxMoney():
    """Greedy two-pointer pass over n prices read from stdin.

    On 1-based step t the cheaper of the two remaining end elements is
    consumed and earns price * t; the total is printed.

    Bug fix: the original first line was corrupted by a fused metadata row.
    NOTE(review): the SPOJ TRT ("Treats for the Cows") optimum needs a DP
    over (start, end); this greedy choice is not guaranteed optimal --
    confirm intent before relying on the answer.
    """
    n = int(input())
    a = []
    for i in range(n):
        a.append(int(input()))
    # done[] marks consumed ends; only done[start] is ever consulted.
    done = [False for i in range(n)]
    ans = 0
    start = 0
    end = n - 1
    for i, v in enumerate(a):
        if a[start] < a[end] and done[start] == False and start < n:
            done[start] = True
            ans += a[start] * (i + 1)
            start = start + 1
        else:
            done[end] = True
            ans += a[end] * (i + 1)
            end = end - 1
    print(ans)


if __name__ == '__main__':
    getMaxMoney()
| true |
a7560ff9c520f8829aa352473b3fb2e98b4527db | Python | sebastianceloch/wd_io | /lab5/zadanie2.py | UTF-8 | 196 | 3.453125 | 3 | [] | no_license | class Kwadrat():
def __init__(self, x):
self.x = x
self.y = x
def __add__(self, kwadrat):
return self.x + kwadrat.x
# Demo: adding two squares dispatches to Kwadrat.__add__, so this prints
# the sum of their side lengths (11).
kw = Kwadrat(5)
kw1 = Kwadrat(6)
print(kw+kw1)
27370914284271dce19b72d6f8870789e56c8c89 | Python | ajha17/wikidetox | /conversation_reconstruction_local_pipeline/test_utils/query.py | UTF-8 | 1,971 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | import requests
import json
import os
def query_with_end(title, end):
    """Fetch one API page (rvlimit='max', ~50 items) of revisions for `title`.

    `end` is the revision id used as rvstartid, i.e. revisions are returned
    from `end` going backwards in time.  The caller (get_revisions) handles
    paging by re-invoking this with a new rvstartid, so no API 'continue'
    handling is needed here (the original's continuation loop was dead code:
    it stored the whole `continue` dict and tested a key that never appears
    at the top level of the response).

    Returns (revisions, page_id, namespace).
    Raises RuntimeError if the API reports an error or returns no query data.
    """
    params = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'titles': title,
        'rvprop': 'ids|timestamp|user|content|userid|sha1',
        'rvlimit': 'max',
        'rvstartid': end,
    }
    result = requests.get('http://en.wikipedia.org/w/api.php', params=params).json()
    if 'error' in result:
        # The original raised the undefined name `Error`; use a real exception.
        raise RuntimeError(result['error'])
    if 'warnings' in result:
        print(result['warnings'])
    if 'query' not in result:
        # The original silently returned None here, which made the caller
        # fail later on tuple unpacking; fail loudly instead.
        raise RuntimeError('API response contains no query data: %s' % result)
    page = list(result['query']['pages'].keys())[0]
    page_data = result['query']['pages'][page]
    return page_data['revisions'], page, page_data['ns']
def get_revisions(title, end='773232766', output_dir='json_dumps'):
    """Download every revision of `title` newer than revision `end`, rename
    the MediaWiki API fields to the local schema, dump the result to
    `<output_dir>/<page_id>_<title>_<ns>.json`, and return (ns, revisions).

    Paging is done by repeatedly calling query_with_end with the last seen
    revision id as the new starting point.
    """
    revisions = []
    page_id = None
    ns = None
    while True:
        batch, page_id, ns = query_with_end(title, end)
        if revisions:
            # The first revision of a follow-up batch repeats the last
            # revision of the previous one (rvstartid is inclusive).
            revisions.extend(batch[1:])
        else:
            revisions = batch
        if len(batch) < 50:
            break  # a short batch means there is nothing left to page through
        end = batch[-1]['revid']
    revisions = revisions[::-1]  # oldest revision first
    for rev in revisions:
        # Rename API fields to the local schema and attach page metadata.
        rev['text'] = rev.pop('*')
        rev['page_id'] = page_id
        rev['page_title'] = title
        rev['user_id'] = rev.pop('userid')
        rev['user_text'] = rev.pop('user')
        rev['rev_id'] = rev.pop('revid')
    filename = '%s_%s_%s.json' % (page_id, title, ns)
    with open(os.path.join(output_dir, filename), 'w') as w:
        json.dump(revisions, w)
    return ns, revisions
| true |
fdfde4d0cae5460b2b8e236f7f6e5071e9902cd5 | Python | daimessdn/py-incubator | /exercise list (py)/praktikum/latprak2/beasiswa.py | UTF-8 | 375 | 3.265625 | 3 | [] | no_license | # 12217070
# Dimas Wihandono
# 3 September 2018
# Lab exercise: Scholarship
# Prints the scholarship category derived from the GPA (ip) and the
# parents' income (pot).
# DICTIONARY
# ip, pot = float
# ALGORITHM
ip = float(input(""))
pot = float(input(""))
if ip >= 3.5:
    # a high GPA wins the top category regardless of income
    print(4)
elif pot < 1:
    print(1)
elif pot < 5:
    # mid income bracket: GPA decides between categories 2 and 3
    print(3 if ip >= 2 else 2)
else:
    print(0)
| true |
56647bf27790caa6e4cd2a0ef70f6b82713324da | Python | hrishikesh38/hrishi-choco | /Untitled.py | UTF-8 | 962 | 3 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Jupyter-notebook export: pandas Series/DataFrame basics.
# NOTE(review): cells are in run order and statements depend on the
# variables defined above them - do not reorder.
# In[1]:
import numpy as np
import pandas as pd
# In[3]:
# Read local sample files (assumed to sit next to the notebook - TODO confirm).
df = pd.read_csv('example.txt')
df
# In[6]:
df = pd.read_csv('example.csv')
df
# In[7]:
# Duplicate imports from a later notebook session (harmless).
import pandas as pd
import numpy as np
# In[8]:
from numpy.random import randn
np.random.seed(101)
# In[11]:
# 5x4 frame of seeded standard-normal values.
df = pd.DataFrame(randn(5,4),index='A B C D E'.split(),columns='W X Y Z'.split())
# In[12]:
df
# In[13]:
# Single-column selection (returns a Series).
df['W']
# In[15]:
# Multi-column selection (returns a DataFrame).
df[['W','Z']]
# In[16]:
# Derived column.
df['NEW'] = df['W'] + df['Y']
# In[17]:
df
# In[26]:
df=df.drop('NEW',axis=1)
# In[19]:
# Label-based scalar and fancy indexing with .loc.
df.loc['B','Y']
# In[20]:
df.loc[['A','B'],['W','Y']]
# In[21]:
# Boolean masks and boolean indexing (NaN where the mask is False).
df>0
# In[22]:
df<0
# In[27]:
df[df<0]
# In[2]:
import numpy as np
import pandas as pd
# In[3]:
# Series construction from a list / array / dict.
labels = ['a','b','c']
my_list = [10,20,30]
arr = np.array([10,20,30])
d = {'a':10,'b':20,'c':30}
# In[4]:
pd.Series(data=my_list)
# In[5]:
pd.Series(data=my_list,index=labels)
# In[ ]:
| true |
from subprocess import run, CalledProcessError

# Execute `ls /bin`; with check=True a non-zero exit status raises
# CalledProcessError, which we report instead of letting it propagate.
command = ["ls", "/bin"]
try:
    ret = run(command, check=True)
except CalledProcessError as e:
    print("*** Caught exception:", e)
else:
    print("ret =", ret)
| true |
def sum_first_n(n):
    """Return 1 + 2 + ... + n using the closed-form formula n*(n+1)/2.

    Replaces a recursive helper that shadowed the built-in `sum`, hit the
    recursion limit for large n, and never terminated for negative n.

    Raises ValueError when n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    return n * (n + 1) // 2


if __name__ == '__main__':
    # NOTE(review): eval()/raw_input() kept from the original Python 2
    # script; eval of user input is unsafe - prefer int(raw_input(...)).
    x = eval(raw_input("Enter a number\n"))
    print("The sum to " + str(x) + " natural numbers is: " + str(sum_first_n(x)))
e7e46bc47293282bfdfd83614b300fbb52ce93f3 | Python | semapu/DateParse | /xor.py | UTF-8 | 1,497 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 10:46:57 2017
@author: CTTC
"""
#Librerias a importar
import numpy as np
#keras permite dos APIs --> functional/sequential
from keras.models import Sequential
#keras afrece muchos tipos de capas. En nuetro caso DENSE
from keras.layers.core import Dense
#input
data_input = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
#output
data_output = np.array([[0],[1],[1],[0]], "float32")
#inicialización del modelo
model = Sequential()
#hidden layer
model.add(Dense(16, input_dim=2, activation='relu'))
#output layer sin especificar la dimensión de la entrada
model.add(Dense(1, activation='sigmoid'))
#proceso de aprendizaje
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['binary_accuracy'])
#entrenamiento
model.fit(data_input, data_output, nb_epoch=800040, verbose=2)
#evaluamos el modelo
loss_and_metrics = model.evaluate(data_input, data_output)
#función nativa de predict
print (model.predict(data_input).round()) #En la realidad seria el test-set no del trainning
#obtención de los pesos
for layer in model.layers:
weights = layer.get_weights() # list of numpy arrays
#print(weights)
weights1 = np.array(model.layers[0].get_weights())
print("\nPesos entre la INPUT y la HIDDEN layer")
print(weights1)
weights2 = np.array(model.layers[1].get_weights())
print("\nPesos entre la HIDDEN y la OPUTPUT layer")
print(weights2)
| true |
4a819d36ff732504476256a92e0405b7ab375b8e | Python | Mnenmenth/Python | /Main.py | UTF-8 | 2,285 | 3.0625 | 3 | [] | no_license | import pygame
# Snake-style game: the player steers the python, eats food to grow, and
# wins at max_score.  (pygame itself is imported on the line above.)
from Python import Python
from Food import Food
pygame.init()
pygame.display.set_caption('Python')
# Window dimensions.
screen_width, screen_height = (640, 480)
screen = pygame.display.set_mode((640, 480))
# Game objects: the snake starting at (200, 200) and the first food item.
python = Python((200, 200))
food = Food()
pygame.font.init()
game_over_font = pygame.font.SysFont(pygame.font.get_default_font(), 30)
game_over_text = game_over_font.render('GAME OVER', 16, (255, 255, 0))
max_score = 50
score = 0
score_font = pygame.font.SysFont(pygame.font.get_default_font(), 20)
score_text = score_font.render("Score: 0/{0}".format(max_score), 16, (255, 255, 0))
# Main game loop: one iteration per tick (see the 250 ms delay below).
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()
        elif event.type == pygame.KEYDOWN:
            # Escape quits; any other key is treated as a movement command.
            if event.key != pygame.K_ESCAPE:
                python.move_python(event.key)
            else:
                pygame.quit()
                quit()
    if score >= max_score:
        # Win state: show the message and block in this inner loop until the
        # player closes the window or presses Escape.
        game_over_text = game_over_font.render("YOU WIN!", 16, (255, 255, 0))
        pygame.display.get_surface().blit(game_over_text,
                                          game_over_text.get_rect(center=(screen_width/2, screen_height/2)))
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        pygame.quit()
                        quit()
            pygame.display.update()
    screen.fill((0, 0, 0))
    food.draw()
    # python.draw() apparently returns a falsy value on a fatal collision.
    # NOTE(review): the game-over text is drawn but the loop keeps running -
    # confirm whether play should stop here like in the win state above.
    if not python.draw():
        pygame.display.get_surface().blit(game_over_text,
                                          game_over_text.get_rect(center=(screen_width/2, screen_height/2)))
    if food.is_eaten(python.head_rect()):
        # Grow the snake, bump the score, refresh the label, respawn food.
        python.add_tail_segment()
        score += 1
        score_text = score_font.render("Score: {0}/{1}".format(score, max_score), False, (255, 255, 0))
        food = Food()
    pygame.display.get_surface().blit(score_text, (screen_width - score_text.get_width() - 50,
                                                   screen_height - score_text.get_height() - 10))
    pygame.display.update()
    pygame.time.delay(250)
| true |
a6f16c1c558380571bc5abbf8669dba01c228229 | Python | nemanjatesic/Masinsko | /ml_d1_rn1-17_y_z/4.py | UTF-8 | 13,219 | 3 | 3 | [] | no_license | import os
import pandas as pd
import re
import numpy as np
import random
from nltk.tokenize import wordpunct_tokenize
from nltk.stem import PorterStemmer
import sys
class MultinomialNaiveBayes:
    """Multinomial Naive Bayes classifier over bag-of-words features.

    Sparse feature vectors are lists of (word_index, count) pairs; an index
    of -1 marks a word that was pruned from the vocabulary and is ignored.
    Class 0 is "negative", class 1 is "positive".
    """

    def __init__(self, nb_classes, nb_words, pseudocount):
        self.nb_classes = nb_classes      # number of target classes
        self.nb_words = nb_words          # vocabulary size
        self.pseudocount = pseudocount    # Laplace smoothing constant
        self.like = np.zeros((self.nb_classes, self.nb_words))         # P(word | class)
        self.occurrences = np.zeros((self.nb_classes, self.nb_words))  # word counts per class
        self.numberOfFeatures = 0   # total training documents seen
        self.numberOfPositive = 0   # documents labelled 1
        self.numberOfNegative = 0   # documents labelled 0

    def add_feature_vector_tmp(self, feature_vector, classPN):
        """Accumulate one sparse training document for class `classPN` (0 or 1)."""
        self.numberOfFeatures += 1
        if classPN == 1:
            self.numberOfPositive += 1
        else:
            self.numberOfNegative += 1
        for index, value in feature_vector:
            # index == -1 marks a word not present in the (pruned) vocabulary
            if index != -1:
                self.occurrences[classPN][index] += value

    def fit(self):
        """Compute class priors and smoothed likelihoods from accumulated counts."""
        # P(class): fraction of training documents per class.
        self.priors = np.asarray([
            self.numberOfNegative / self.numberOfFeatures,
            self.numberOfPositive / self.numberOfFeatures,
        ])
        print('Priors:')
        print(self.priors)
        # P(word_i | class) with Laplace (add-pseudocount) smoothing.  The
        # denominator is per-class only, so it is hoisted out of the inner
        # loop (the original recomputed it for every word).
        for c in range(self.nb_classes):
            down = np.sum(self.occurrences[c]) + self.nb_words * self.pseudocount
            for w in range(self.nb_words):
                self.like[c][w] = (self.occurrences[c][w] + self.pseudocount) / down
        print('Finished fitting')

    def predict(self, bow):
        """Classify a dense bag-of-words vector `bow` of length nb_words.

        Returns the index of the class with the highest log-posterior.
        """
        probs = np.zeros(self.nb_classes)
        for c in range(self.nb_classes):
            prob = np.log(self.priors[c])
            for w in range(self.nb_words):
                cnt = bow[w]
                prob += cnt * np.log(self.like[c][w])
            probs[c] = prob
        # Pick the class with the highest probability.
        prediction = np.argmax(probs)
        return prediction

    def predict_tmp(self, bow):
        """Classify a sparse bag-of-words vector: a list of (index, count) pairs."""
        probs = np.zeros(self.nb_classes)
        for c in range(self.nb_classes):
            prob = np.log(self.priors[c])
            for index, value in bow:
                # Words that occurred too rarely were dropped from the
                # vocabulary; they are encoded with index -1 and skipped.
                if index != -1:
                    prob += value * np.log(self.like[c][index])
            probs[c] = prob
        # Pick the class with the highest probability.
        prediction = np.argmax(probs)
        return prediction

    def best_tweets(self, vocabulary, amount=5):
        """Return [top-negative, top-positive] (word, count) lists ranked by
        raw per-class occurrence count.

        `amount` is clamped to the vocabulary size, so a small vocabulary no
        longer raises IndexError (the original crashed when amount > len(vocab)).
        """
        negatives = [(vocabulary[i], self.occurrences[0][i])
                     for i in range(len(self.occurrences[0]))]
        positives = [(vocabulary[i], self.occurrences[1][i])
                     for i in range(len(self.occurrences[1]))]
        negatives.sort(key=lambda x: x[1], reverse=True)
        positives.sort(key=lambda x: x[1], reverse=True)
        top = min(amount, len(negatives), len(positives))
        return [negatives[:top], positives[:top]]

    def best_lr_tweets(self, vocabulary, amount=5):
        """Return [highest-LR, lowest-LR] word lists, where
        LR(word) = count_positive(word) / count_negative(word).

        Only words seen at least 10 times in each class are considered,
        which avoids division by zero and noisy ratios.  `amount` is
        clamped to the number of qualifying words (the original raised
        IndexError when fewer than `amount` words qualified).
        """
        ratios = []
        for i in range(len(self.occurrences[0])):
            if self.occurrences[0][i] >= 10 and self.occurrences[1][i] >= 10:
                ratios.append((vocabulary[i],
                               self.occurrences[1][i] / self.occurrences[0][i]))
        ratios.sort(key=lambda x: x[1], reverse=True)
        top = min(amount, len(ratios))
        best = ratios[:top]
        worst = ratios[::-1][:top]
        return [best, worst]
def print_progress_bar(i, max, postText):
    """Redraw a 10-slot text progress bar on the current terminal line.

    i / max is the completed fraction; postText is appended after the
    percentage.  The leading carriage return makes successive calls
    overwrite the same line.
    """
    width = 10
    fraction = i / max
    bar = '=' * int(width * fraction)
    sys.stdout.write('\r')
    sys.stdout.write('[%-10s] %d%% %s' % (bar, int(100 * fraction), postText))
    sys.stdout.flush()
def remove_mentions(obj):
    """Strip @username mentions from a tweet."""
    mention = re.compile(r'@\w+')
    return mention.sub('', obj)
def remove_links(obj):
    """Drop http(s) URLs (and one trailing space, if present) from a tweet."""
    url = re.compile(r'http.?://[^\s]+[\s]?')
    return url.sub('', obj)
def remove_hashtag(obj):
    """Keep hashtag words but drop the leading '#'."""
    hashtag = re.compile(r'#([^\s]+)')
    return hashtag.sub(r'\1', obj)
def remove_symbols(obj):
    """Remove every character that is not an ASCII letter or whitespace."""
    non_letters = re.compile(r'[^a-zA-Z\s]')
    return non_letters.sub('', obj)
def too_many_chars(obj):
    """Collapse any run of a repeated character down to a single occurrence
    (e.g. 'coool' -> 'col')."""
    repeats = re.compile(r'(.)\1+')
    return repeats.sub(r'\1', obj)
def numocc_score(word, doc):
    """Binary presence score: 1 when `word` appears in `doc`, else 0."""
    return int(word in doc)
def create_random_indexes(maximum):
    """Return the indexes 0..maximum-1 in a uniformly random order.

    Uses list(range(...)) instead of the original manual append loop;
    random.shuffle produces the same permutation for the same RNG state.
    """
    random_index = list(range(maximum))
    random.shuffle(random_index)
    return random_index
def load_data(num_of_rows):
    """Load sentiment labels (column 1) and tweet texts (column 2) from
    data/twitter.csv located next to this script.

    num_of_rows limits how many rows are read; None reads the whole file.
    Returns {'y': [labels], 'x': [texts]}.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    fileName = os.path.join(dir_path, 'data', 'twitter.csv')
    print('Loading file...')
    # Read the file once.  The original read it twice, and the first pass
    # had no explicit encoding, which could raise UnicodeDecodeError on
    # this Latin-1 encoded dataset.
    frame = pd.read_csv(fileName, sep=',', usecols=[1, 2], nrows=num_of_rows,
                        encoding="ISO-8859-1")
    data = dict()
    data['y'] = frame.iloc[:, 0].tolist()
    data['x'] = frame.iloc[:, 1].tolist()
    return data
# Fixed seed so the train/test shuffle below is reproducible.
random.seed(7465633)
# Number of tweets we want to load.
max = 100000
# Number of rows we want to read from the file.
num_of_rows = max
if num_of_rows > 90000:
    # None makes pandas load everything automatically.
    num_of_rows = None
data = load_data(num_of_rows)
corpus = data['x']
# Hand-picked stop words (includes post-stemming forms such as 'thi', 'wa').
forbidden = ['and', 'to', 'have', 'get', 'now', 'thi', 'oh', 'got', 'am', 'he', 'back', 'lt', 'gt', 'quot', 'amp',
             'ned', 'so', 'at', 'it', 'my', 'that', 'is', 'in', 'have', 'me', 'im', 'so', 'be', 'out', 'wa', '']
stop_punc = set(forbidden)
# Maps each word to its number of occurrences.
dictonary = dict()
clean_corpus = []
porter = PorterStemmer()
cnt = 0
current_index = -1
# Tweet filtering / normalisation.
print('Cleaning the corpus...')
for doc in corpus:
    if cnt % (max//100) == 0:
        print_progress_bar(cnt, max, 'cleaned')
    cnt += 1
    current_index += 1
    # Strip mentions, URLs, hashtag markers and non-letter symbols.
    doc = remove_mentions(doc)
    doc = remove_links(doc)
    doc = remove_hashtag(doc)
    doc = remove_symbols(doc)
    words = wordpunct_tokenize(doc)
    # Lower-case, de-hashtag, drop stop words, keep alphabetic tokens,
    # collapse repeated characters, stem, then drop stop words once more
    # (stemming can produce new stop-word forms).
    words_filtered = [w.lower() for w in words]
    words_filtered = [remove_hashtag(w) for w in words_filtered]
    words_filtered = [w for w in words_filtered if w not in stop_punc]
    words_filtered = [w for w in words_filtered if w.isalpha()]
    words_filtered = [too_many_chars(w) for w in words_filtered]
    words_filtered = [porter.stem(w) for w in words_filtered]
    words_filtered = [w for w in words_filtered if w not in stop_punc]
    # Record how many times each word occurred.
    for word in words_filtered:
        key = dictonary.get(word, 0)
        dictonary[word] = key + 1
    # Check that at least one word survived the filtering.
    if len(words_filtered) > 0:
        clean_corpus.append(words_filtered)
    else:
        # Everything was filtered out: skip this tweet and also pop its
        # label from data['y'] so texts and labels stay aligned.
        data['y'].pop(current_index)
        current_index -= 1
    if cnt == max:
        break
print_progress_bar(max, max, 'cleaned')
# Update max in case some tweets were dropped.
max = len(clean_corpus)
print('\nCreating the vocab...')
vocab_set = set()
for doc in clean_corpus:
    for word in doc:
        vocab_set.add(word)
vocab = list(vocab_set)
# MUST stay: set iteration order differs between runs, sort for determinism.
vocab.sort()
# If there are more than 10000 distinct words, keep only the best 10000.
if len(vocab) > 10000:
    list_of_all_words = []
    # Build a list of (word, occurrence_count) tuples.
    for word in vocab:
        key = dictonary.get(word, 0)
        list_of_all_words.append((word, key))
    # Sort by occurrence count, most frequent first.
    list_of_all_words.sort(key=lambda x: x[1], reverse=True)
    out_list = []
    # Keep only words that appear at least 10 times across all tweets.
    for i in range(10000):
        if list_of_all_words[i][1] >= 10:
            out_list.append(list_of_all_words[i][0])
    vocab = out_list
print('Feature vector size: ', len(vocab))
model = MultinomialNaiveBayes(nb_classes=2, nb_words=len(vocab), pseudocount=1)
# Random indexes so we do not have to shuffle the data itself.
random_index = create_random_indexes(max)
# Bag of Words model.
print('Creating BOW features...')
feature_vector_size = len(vocab)
# Map every word to its index in the vocabulary.
vocab_dic = dict()
for i in range(feature_vector_size):
    vocab_dic[vocab[i]] = i
# Build feature vectors from 80% of the data for training.
for i in range(int(max*0.8)):
    # Take one tweet from the corpus.
    doc_idx = random_index[i]
    doc = clean_corpus[doc_idx]
    doc_set = set()
    for word in doc:
        doc_set.add(word)
    # Instead of a feature vector the size of the vocabulary, we build a
    # list of tuples where the first item is the word's vocabulary index
    # and the second is how often it occurs in this tweet - effectively
    # compressing [0,0,0,1,0,0,5,0,0,0,2] into [(3,1),(6,5),(10,2)].
    # Since tweets contain few words, this is a big optimisation: each
    # feature vector holds ~10 entries instead of 10000.
    new_feature_vector = []
    for word in doc_set:
        number_of_occ = numocc_score(word, doc)
        new_feature_vector.append((vocab_dic.get(word, -1), number_of_occ))
    # Hand the model the feature vector and the tweet's label data['y'][doc_idx].
    model.add_feature_vector_tmp(new_feature_vector, data['y'][doc_idx])
# Once all feature vectors are in, fit the model.
model.fit()
brojTacnih = 0  # number of correct predictions
class_names = ['Negative', 'Positive']
confusion_matrix = []
true_negatives = 0
true_positives = 0
false_negative = 0
false_positives = 0
print('Checking for test set...')
# Build feature vectors from the remaining 20% of the data for testing.
for i in range(int(max*0.8), max):
    # Same sparse-feature construction as for the training split above.
    doc_idx = random_index[i]
    doc = clean_corpus[doc_idx]
    doc_set = set()
    for word in doc:
        doc_set.add(word)
    new_feature_vector = []
    for word in doc_set:
        number_of_occ = numocc_score(word, doc)
        new_feature_vector.append((vocab_dic.get(word, -1), number_of_occ))
    # Predict whether the tweet is positive or negative.
    prediction = model.predict_tmp(new_feature_vector)
    if class_names[prediction] == 'Positive':
        if data['y'][doc_idx] == 1:
            true_positives += 1
            brojTacnih += 1
        else:
            false_positives += 1
    if class_names[prediction] == 'Negative':
        if data['y'][doc_idx] == 0:
            true_negatives += 1
            brojTacnih += 1
        else:
            false_negative += 1
confusion_matrix.append([true_negatives, false_positives])
confusion_matrix.append([false_negative, true_positives])
print('Confusion matrix [[TN,FP],[FN,TP]]\n', confusion_matrix)
print('Proecenat tacnosti : ', (true_positives+true_negatives) / (true_positives+true_negatives+false_negative+false_positives) * 100)
# Sample output of the raw-count rankings:
# negative = [('i', 15900.0), ('the', 8377.0), ('a', 6364.0), ('you', 6064.0), ('but', 3971.0)]
# positive = [('i', 13712.0), ('you', 11874.0), ('the', 11238.0), ('a', 9189.0), ('for', 5839.0)]
print(model.best_tweets(vocab, amount=5))
# Sample output of the likelihood-ratio rankings:
# negative = [('sad', 0.067), ('sadli', 0.075), ('por', 0.125), ('upset', 0.137), ('depres', 0.14)]
# positive = [('folowfriday', 20.43), ('vip', 13.083), ('welcom', 13.0), ('recomend', 10.54), ('congrat', 8.64)]
print(model.best_lr_tweets(vocab, amount=5))
# The LR metric: a value above 1 means the word occurs more often in positive
# tweets, below 1 more often in negative ones; the further from 1, the more
# drastic the difference.  LR == 20 means the word appears 20x more often in
# positive tweets; LR == 0.067 (i.e. 1/0.067) means ~15x more often in
# negative ones.  The top-LR words need not coincide with the raw-count top
# words: words frequent in both classes usually have an LR between 0.5-1.5.
print('Done.')
| true |
673980962211a4cc84cd07fe4d0e8239c341b37f | Python | jaynedu/digital-image-processing | /canny.py | UTF-8 | 4,462 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time    : 2020/6/24 15:04
# @Author  : Du Jing
# @FileName: canny.py
# @Usage   : Canny
import cv2
import numpy as np
import matplotlib.pyplot as plt
"""
Canny edge detection pipeline:
1. Convolve the original image with a Gaussian kernel to get a slightly
   blurred image; this denoises, since derivatives are noise-sensitive.
2. Compute the gradient with the first-order Sobel operators.
3. Non-maximum suppression: keep only pixels that are local maxima along
   the gradient direction, discarding non-edge pixels.
4. Double thresholding suppresses false edges and links true ones:
    pixels below the low threshold are considered non-edges;
    pixels above the high threshold are considered edges;
    pixels between the two thresholds count as edges only when adjacent
    to an already-established edge pixel, otherwise not.
"""
img = cv2.imread('src/lena.jpg')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32)
"""Step 1 Gaussian"""
from image_filter import gauss_kernel, image_convolution
kernel_size = 5
kernel_gauss = gauss_kernel(kernel_size, 1.5)
# Spatial-domain convolution, kept only for the comparison plot at the end.
gaussian = image_convolution(img_gray, kernel_gauss)
# NOTE(review): this value is overwritten a few lines below by the
# FFT-domain product - dead store; the FFT path is what is actually used.
fft_gaussian = np.fft.fft2(gaussian)
# Zero-pad the kernel to the image size so the element-wise FFT product
# implements the (circular) convolution.
kernel_gauss_pad = np.pad(kernel_gauss, [[0, img_gray.shape[0]-kernel_size], [0, img_gray.shape[1]-kernel_size]])
fft_kernel = np.fft.fft2(kernel_gauss_pad)
fft_img_gray = np.fft.fft2(img_gray)
fft_gaussian = fft_img_gray * fft_kernel
ifft_gaussian_show = cv2.convertScaleAbs(abs(np.fft.ifft2(fft_gaussian)))
"""Step 2 Gradient"""
# Sobel kernels for the horizontal (x) and vertical (y) derivatives.
op_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
op_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
kernel_sobel_x = np.zeros_like(fft_gaussian, dtype=np.float32)
kernel_sobel_x[:op_x.shape[0], :op_x.shape[1]] = op_x
fft_kernel_sobel_x = np.fft.fft2(kernel_sobel_x)
kernel_sobel_y = np.zeros_like(fft_gaussian, dtype=np.float32)
kernel_sobel_y[:op_y.shape[0], :op_y.shape[1]] = op_y
fft_kernel_sobel_y = np.fft.fft2(kernel_sobel_y)
# Gradients of the smoothed image, back in the spatial domain.
Gx = np.real(np.fft.ifft2(fft_kernel_sobel_x * fft_gaussian))
Gy = np.real(np.fft.ifft2(fft_kernel_sobel_y * fft_gaussian))
# Gradient magnitude and direction (degrees).
G = (Gx ** 2 + Gy ** 2) ** 0.5
Theta = np.arctan2(Gy, Gx) * 180 / np.pi
"""Step 3 Non-Max Suppression"""
local_maximum = np.zeros_like(G, dtype=np.float32)
h, w = G.shape[:2]
angle = abs(Theta)
# Keep a pixel only when its magnitude is >= both neighbours along the
# (quantised) gradient direction; border pixels are skipped.
for i in range(1, h-1):
    for j in range(1, w-1):
        # 0 degrees
        if (0<=angle[i, j]<22.5) or (157.5<=angle[i, j]<=180) :
            if (G[i, j] >= G[i, j+1]) and (G[i, j] >= G[i, j-1]):
                local_maximum[i, j] = G[i, j]
        # 45 degrees
        elif (22.5<=angle[i, j]<67.5):
            if (G[i, j] >= G[i-1, j+1]) and (G[i, j] >= G[i+1, j-1]):
                local_maximum[i, j] = G[i, j]
        # 90 degrees
        elif (67.5<=angle[i, j]<112.5):
            if (G[i, j] >= G[i-1, j]) and (G[i, j] >= G[i+1, j]):
                local_maximum[i, j] = G[i, j]
        # 135 degrees
        elif (112.5<=angle[i,j]<157.5):
            if (G[i, j] >= G[i-1, j-1]) and (G[i, j] >= G[i+1, j+1]):
                local_maximum[i, j] = G[i, j]
"""Step 4 Double Thresholding"""
threshold = np.zeros_like(local_maximum, np.float32)
strong = 1
weak = 0.5
maximum = np.max(local_maximum)
print("maximum:", maximum)
# Low/high thresholds as fractions of the maximum suppressed magnitude.
low = 0.1 * maximum
high = 0.25 * maximum
h, w = local_maximum.shape[:2]
for i in range(h):
    for j in range(w):
        if local_maximum[i, j] >= high:
            threshold[i, j] = strong
        elif local_maximum[i, j] >= low:
            threshold[i, j] = weak
"""Step 5 Tracking"""
h, w = threshold.shape[:2]
# Promote a weak pixel to strong only if one of its 8 neighbours is strong;
# otherwise drop it.  Out-of-range accesses on the border are ignored.
for i in range(h):
    for j in range(w):
        if threshold[i, j] == weak:
            try:
                if threshold[i, j+1] == strong or \
                        threshold[i, j-1] == strong or \
                        threshold[i+1, j] == strong or \
                        threshold[i-1, j] == strong or \
                        threshold[i+1, j+1] == strong or \
                        threshold[i+1, j-1] == strong or \
                        threshold[i-1, j+1] == strong or \
                        threshold[i-1, j-1] == strong:
                    threshold[i, j] = strong
                else:
                    threshold[i, j] = 0
            except IndexError:
                pass
# Scale {0, 1} to {0, 255} for display.
threshold *= 255
edge = cv2.convertScaleAbs(threshold)
# Side-by-side comparison of the intermediate images and the final edges.
show_list = [img_gray, gaussian, ifft_gaussian_show, edge]
title_list = ['raw gray', 'gaussian', 'ifft_gaussian', 'edge']
plt.figure(figsize=(12, 3))
nrow, ncol = 1, 4
for i, img in enumerate(show_list):
    plt.subplot(nrow, ncol, i+1)
    plt.imshow(img, 'gray')
    plt.title(title_list[i])
plt.show()
| true |
7d834eaec80c48506cbff69fa06d099e33d42b6c | Python | rpplayground/CS814 | /practical1_miu/miu_breadth_first_search.py | UTF-8 | 1,737 | 3.484375 | 3 | [] | no_license | # University of Strathclyde - MSc Artificial Intelligence and Applications
# CS814 - Artificial Intelligence for Autonomous Systems
# Assignment 1 - Part 2 - MUI Next States Function
# File Created - 15th October 2019 - Barry Smart
#
# ABOUT:
# This file contains the function that...
# We will use the next_states function, so let's import that:
from miu_extend_path import extend_path
def breadth_first_search(goal_string):
# TODO - check that goal string contains only the letters M, I or U.
# Initialise the agenda and visited list
agenda = [["MI"]]
visited_list = []
# Initialise the counter for calls to next_states
extend_path_counter = 0
# Maximum agenda size
maximum_agenda_length = 0
while True:
# Pop the first path from the agenda
current_path = agenda.pop(0)
# Track the maximum length of agenda encountered
agenda_length = len(agenda)
if agenda_length > maximum_agenda_length:
maximum_agenda_length = agenda_length
# Extract the last state from the current path
last_state = current_path[-1]
# Compare it with the goal state
if last_state == goal_string:
# If it is equal break from the while loop
break
else:
# Else we need call the extend_path function
new_paths, visited_list = extend_path(current_path, visited_list)
# Then add these paths to the END of the agenda
agenda = agenda + new_paths
# Increment the extend_paths counter by 1
extend_path_counter = extend_path_counter + 1
goal_path = current_path
return goal_path, extend_path_counter, agenda_length, maximum_agenda_length | true |
a8f54eeca445b85ce27e9ff2beb944dd98fb2ad7 | Python | tielushko/The-Modern-Python-3-Bootcamp | /Section 34 - Regular Expressions/substitute_regex.py | UTF-8 | 291 | 3.3125 | 3 | [] | no_license | import re
text = "Last night Mrs. Daisy and Mr. White murdered Ms. Chow"
pattern = re.compile(r'(Mr\.|Mrs\.|Ms\.)([A-Za-z]) ([a-z])+', re.IGNORECASE)
#1st arg - the string you want to work as a sub, and second the sting in which matches occured
print(pattern.sub("\g<2>\g<1>", text))
| true |
c6b21447acd80b34864984c122a27205fa66ebb3 | Python | aevear/Stonktastic | /src/stonktastic/optimization/optimizeRanFor.py | UTF-8 | 5,469 | 2.890625 | 3 | [
"MIT"
] | permissive | """
.. module:: optimizeRanFor
:synopsis: Preforms optimization scenarios for Random Forest and reports on best configuration options
"""
import itertools
import time
import pandas as pd
from stonktastic.config.config import ranForEstimators, ranForVariables
from stonktastic.machinelearning.prepDataSets import prepareRanForData
from stonktastic.machinelearning.ranForest import runRandomForest
class ranForOptResultClass:
"""
Class for holding processed values for Jupyter notebook analysis
Values:
optSubSet (list): List of best indicators to use for Random Forest in terms of accuracy/resource cost
subDf (dataframe): Dataframe with all subset/time/score values for graphing
optEstimators (int): Most optimum number of trees to use in random forest calculations
estimatorsDf (dataframe): Dataframe with all estimator amounts as well as the time it took to process and the accuracy of those predictions
"""
def __init__(
self,
optSubSet=[""],
subDf=pd.DataFrame(),
optEstimators=100,
estimatorsDf=pd.DataFrame(),
):
self.optSubSet = optSubSet
self.subDf = subDf
self.optEstimators = optEstimators
self.estimatorsDf = estimatorsDf
def ranForVariableOpt(stonk):
"""
Generates a random forest model and test the model with varying indicators of values and ranks them based on accuracy/time.
Args:
stonk (str): stock ticker that will be used for optimization
:returns:
list: Top list of indicators in terms of time/accuracy for Random Forest using the stock ticker provided
dataframe: complete dataframe for stock ticker with all subsets, times and scores
"""
ranForVariables = [
"SAR",
"RSI",
"CCI",
"MACDHist",
"BBUpperBand",
"BBMiddleBand",
"BBLowerBand",
"EMA",
"Chaikin",
"StochK",
"StochD",
"WILLR",
]
combinationOfColumnValues = []
for k in range(0, len(ranForVariables) + 1):
for subset in itertools.combinations(ranForVariables, k):
subset = subset + (("Close", "date"))
if len(subset) > 4:
combinationOfColumnValues.append(subset)
resultsList = []
for subset in combinationOfColumnValues:
startTime = time.time()
xValueList, yValueList, date = prepareRanForData(stonk, subset)
_, results = runRandomForest(xValueList, yValueList, date, ranForEstimators)
timeToRun = time.time() - startTime
resultsList.append(
[
subset,
results,
str(timeToRun),
str(results / timeToRun),
int(len(subset)),
]
)
df = pd.DataFrame(
resultsList, columns=["subset", "results", "time", "score", "numOfVariables"]
)
df.sort_values(by="score", ascending=False, inplace=True)
df = df.reset_index(drop=True)
optSubSet = df["subset"][0]
return (optSubSet, df)
# Degrees of Polynomial
def ranForEstimatorOpt(stonk):
"""
Generates a random forest model and test the model with varying amounts of decision trees (estimators) of values and ranks them based on accuracy/time.
Args:
stonk (str): stock ticker that will be used for optimization
:returns:
int: Optimum number of estimators in terms of time/accuracy for Random Forest using the stock ticker provided
dataframe: complete dataframe for stock ticker with all estimators, times and scores
"""
estimatorOptions = range(10, 1000, 10)
resultsList = []
for estOption in estimatorOptions:
startTime = time.time()
xValueList, yValueList, date = prepareRanForData(stonk, ranForVariables)
_, results = runRandomForest(xValueList, yValueList, date, estOption)
timeToRun = time.time() - startTime
resultsList.append(
[estOption, float(results), float(timeToRun), float(results / timeToRun)]
)
df = pd.DataFrame(resultsList, columns=["estOption", "results", "time", "score"])
df.sort_values(by="score", ascending=False, inplace=True)
df = df.reset_index(drop=True)
optPolyValue = df["estOption"][0]
print(df.head(10))
return (optPolyValue, df)
def runRanForOptimization(stonk):
"""
Full optimization test for Random Forest looking at both the *indicator subsets* and *estimators*
The class has defaults loaded in so you do not have to run both optimizers at once.
Args:
stonk (str): the stock ticker that will be used for optimization
:return:
ranForOptResultClass (class): Class storing the top subset and estimators as well as full dataframes with the complete results from both optimization test.
"""
ranOptResults = ranForOptResultClass()
optSubSet, subDf = ranForVariableOpt(stonk)
ranOptResults.optSubSet = optSubSet
ranOptResults.subDf = subDf
optEstimators, estimatorsDf = ranForEstimatorOpt(stonk)
ranOptResults.optEstimators = optEstimators
ranOptResults.estimatorsDf = estimatorsDf
print("==========================")
print("Random Forest Optimization")
print("==========================")
print(f"{stonk} | Optimized Variables : {', '.join(list(optSubSet))}")
print(f"{stonk} | Optimized Estimators : {optEstimators}")
return ranOptResults
| true |
def bubble_sort(lst):
    """Sort `lst` in place (ascending), print the result, and return it.

    Each outer pass bubbles the smallest remaining element down to
    position i by swapping out-of-order adjacent pairs from the tail.
    The original printed three debug lines per comparison; those debug
    prints were removed, and the sorted list is now also returned.
    """
    for i in range(len(lst) - 1):
        # Walk from the tail towards position i, swapping inverted pairs.
        for k in range(len(lst) - 1, i, -1):
            if lst[k] < lst[k - 1]:
                lst[k - 1], lst[k] = lst[k], lst[k - 1]
    print(lst)
    return lst


bubble_sort([99,6,5,2,9,4,33,1,9.77,32,45,3,0.4444,0.000666])
| true |