blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3531e324c88e1571f8eea1443ae0420f6e7f1b46 | Python | arnab415/cmdcalc.exe | /calc.py | UTF-8 | 1,060 | 3.515625 | 4 | [] | no_license | import sys
import argparse as ap
def calc(args):
    """Dispatch the arithmetic operation named by ``args.o`` on ``args.f`` and ``args.s``.

    :param args: parsed argparse namespace with attributes f (float), s (float), o (str)
    :return: the numeric result, or a usage-message string for an unknown operation
    """
    if args.o == "add":
        return args.f + args.s
    elif args.o == "sub":
        return args.f - args.s
    elif args.o == "mul":
        return args.f * args.s
    elif args.o == "div":
        # BUG FIX: the original repeated this identical "div" branch twice;
        # the second copy was unreachable dead code and has been removed.
        return args.f / args.s
    else:
        return "This is a calculator in cmd.Please enter the first number with argument --f (Number),and second number with --s (Number) and operation --o (add,sub,mul,div)"
if __name__ == '__main__':
    # Build the CLI: two float operands (--f, --s) and an operation name (--o).
    parser = ap.ArgumentParser()
    parser.add_argument("--f", type=float, default=1.0,
                        help=("Please enter first number."))
    parser.add_argument("--s", type=float, default=3.0,
                        help=("Please enter second number."))
    parser.add_argument("--o", type=str, default="",
                        help=("Please enter the operation."))
    args = parser.parse_args()
    # Write the result (or the usage message) without a trailing newline.
    sys.stdout.write(str(calc(args))) | true |
ffdb741624bfcfb31d69ce87dac93bff0f85f35b | Python | jwills15/garden | /Python Files/GardenPi/testingScripts/ConfigTest/configTest.py | UTF-8 | 412 | 2.765625 | 3 | [] | no_license | import configparser
config = configparser.ConfigParser()
# Load the currently selected valve index persisted in the ini file.
config.read('configfile.ini')
# Advance to the next valve.
current = int(config['DEFAULT']['whichValve'])
current += 1
config['DEFAULT']['whichValve'] = str(current)
print(config['DEFAULT']['whichValve'])
# Wrap back around to valve 0 once the index passes the last valve
# (presumably 4 valves numbered 0-3 — confirm against the hardware setup).
if current >= 4:
    config['DEFAULT']['whichValve'] = '0'
    print(config['DEFAULT']['whichValve'])
# Persist the updated index for the next run of the script.
with open('configfile.ini', 'w') as configfile:
    config.write(configfile)
| true |
6b1df47677fe311d221c5b8aef62d6b206778749 | Python | EmotionalBeast/muse | /animation.py | UTF-8 | 7,273 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python3
#coding: utf-8
#@author: Lazy Yao
#@email: none
#@date: 2020/07/10 14:08
import os, json
# Lower-case letter suffixes used for unique layer names, and digit characters 1-9.
CH = [chr(code) for code in range(ord("a"), ord("z") + 1)]
NUM = [str(digit) for digit in range(1, 10)]
class AnimationData(object):
    """Wraps a Lottie-style animation json plus a txt listing of image names.

    Provides renaming of the layers that reference those images and several
    lookups over the parsed json dictionary.  Refactored: the original had
    the same layer-walking loops copy-pasted four times in ``replaceNM`` and
    twice more across ``getLayersNM``/``getCloneLayersNM``; the shared logic
    now lives in the private helpers ``_rename_refs`` and ``_collect_nm``
    while every public method keeps its original name and behaviour.
    """

    def __init__(self, path1, path2):
        self.txt = path1    # path to the txt file listing the image names
        self.json = path2   # path to the animation json file
        self.img, self.clone_img = self.getPictureName()
        self.dic = self.getJsonDic()
        self.index = []     # unique 'nm' values already handed out by getValue()

    def getPictureName(self):
        """Parse the txt file and return (original image ids, art-filter clone ids).

        Line 1 lists the original images, an optional line 2 the clones;
        the 'img' prefix is normalised to 'image' and extensions are dropped.
        """
        img = []
        clone_img = []
        with open(self.txt, "r") as f:
            content = f.read()
        lines = [part for part in content.split("\n") if part != '']
        print(lines)
        if len(lines) == 1:
            pic_list = [name for name in lines[0].split(" ") if name != ""]
        if len(lines) == 2:
            pic_list = [name for name in lines[0].split(" ") if name != ""]
            clone_list = [name for name in lines[1].split(" ") if name != ""]
            for name in clone_list:
                clone_img.append(name.replace("img", "image").split(".")[0])
        # NOTE(review): with zero or more than two non-empty lines,
        # `pic_list` is unbound and a NameError follows (same as the
        # original) — confirm the input format is always 1 or 2 lines.
        for name in pic_list:
            img.append(name.replace("img", "image").split(".")[0])
        return img, clone_img

    def getJsonDic(self):
        """Load and return the animation json file as a dict."""
        with open(self.json, "r") as f:
            return json.loads(f.read(), strict=False)

    def _rename_refs(self, id_list):
        """Give every layer whose refId is in `id_list` a fresh unique 'nm'.

        Walks the top-level layers first, then the layers nested inside each
        asset, preserving the suffix-assignment order of getValue().
        """
        for layer in self.dic["layers"]:
            if "refId" in layer.keys() and layer["refId"] in id_list:
                layer["nm"] = self.getValue(layer["refId"])
        for asset in self.dic["assets"]:
            if "layers" in asset.keys():
                for layer in asset["layers"]:
                    if "refId" in layer.keys() and layer["refId"] in id_list:
                        layer["nm"] = self.getValue(layer["refId"])

    def replaceNM(self):
        """Rename all layers referencing known images, then rewrite the json file."""
        # Original images first, then the art-filter clones — same order as
        # the original duplicated loops, so the generated suffixes match.
        self._rename_refs(self.img)
        self._rename_refs(self.clone_img)
        with open(self.json, "w") as f:
            jsonStr = json.dumps(self.dic, sort_keys=True, indent=2, ensure_ascii=False)
            f.write(jsonStr)

    def getValue(self, value):
        """Return refId (underscores stripped) + first unused a-z suffix.

        The generated name is registered in self.index; returns None when
        all 26 suffixes for this refId are taken.
        """
        base = value.replace("_", "")
        for suffix in CH:
            candidate = base + suffix
            if candidate not in self.index:
                self.index.append(candidate)
                return candidate
        print(self.index)
        return None

    def _collect_nm(self, id_list):
        """Map each refId from `id_list` to the list of 'nm' values of the
        layers referencing it (top-level layers first, then asset layers)."""
        result = {}

        def visit(layers):
            for layer in layers:
                ref = layer.get("refId")
                if ref in id_list:
                    result.setdefault(ref, []).append(layer["nm"])

        visit(self.dic["layers"])
        for asset in self.dic["assets"]:
            if "layers" in asset.keys():
                visit(asset["layers"])
        return result

    def getLayersNM(self):
        """Layer-name map restricted to the original images."""
        return self._collect_nm(self.img)

    def getCloneLayersNM(self):
        """Layer-name map restricted to the art-filter clone images."""
        return self._collect_nm(self.clone_img)

    def getImageContentSize(self):
        """Map each known image id to its [width, height] taken from the assets."""
        sizes = {}
        for asset in self.dic["assets"]:
            if asset["id"] in self.img or asset["id"] in self.clone_img:
                sizes[asset["id"]] = [asset["w"], asset["h"]]
        return sizes

    def ignore(self):  # bool
        """True when any original image is referenced by more than one layer."""
        nm_map = self.getLayersNM()
        for image in nm_map.keys():
            if len(nm_map[image]) > 1:
                return True
        return False
| true |
f660cb27588393aea1544f587b1791c3865499c3 | Python | Nordenbox/Nordenbox_Python_Fundmental | /闰年.py | UTF-8 | 206 | 3.609375 | 4 | [] | no_license |
# Leap-year checker: the standard Gregorian rule plus the proposed
# 3200-year and 172800-year long-cycle corrections.
temp = input("输入年份:")
YEAR = int(temp)
# Leap if divisible by 4 but not 100, or by 400 but not 3200, or by 172800.
if (YEAR % 4 == 0 and YEAR % 100 != 0) or (YEAR % 400 == 0 and YEAR % 3200 != 0) or YEAR % 172800 == 0:
    print ("闰年")
else:
    print ("非闰年") | true |
f59daa123a5de65e88cd2bef8717c678a0017fcb | Python | HunterCSci127/HunterCSci127.github.io | /files/cunyLocations.py | UTF-8 | 1,067 | 3.25 | 3 | [] | no_license | import folium
import pandas as pd
import webbrowser #display html file
import os #use to find directory
#Use pandas (alias pd) to read a csv file,
#save the return data frame object in variable cuny.
cuny = pd.read_csv('cunyLocations.csv')
#Create a map object centered at 40.75, -74.125,
#save in variable mapCUNY.
mapCUNY = folium.Map(location = [40.75, -74.125])
# One marker per campus row: purple for "Senior Colleges", blue otherwise.
for index, row in cuny.iterrows():
    lat = row["Latitude"]
    lon = row["Longitude"]
    name = row["Campus"]
    if row["College or Institution Type"] == "Senior Colleges":
        collegeIcon = folium.Icon(color="purple")
    else: collegeIcon = folium.Icon(color="blue")
    #create a marker, specify its latitude, longitude,
    #pop up name, and icon, save in variable newMarker.
    newMarker = folium.Marker([lat, lon], popup=name, icon=collegeIcon)
    newMarker.add_to(mapCUNY)
filename = 'cunyLocationsSenior.html'
#save mapCUNY to filename
mapCUNY.save(outfile = filename)
#display html using open method of webbrowser class
webbrowser.open('file://' + os.path.realpath(filename))
| true |
910e516bbeafd1aa15ce11925d57fb648257014d | Python | S0c5/learningpython | /ex18-21.py | UTF-8 | 563 | 3.234375 | 3 | [] | no_license | # This is a file of execerices 18-19-20-21 of learning python.
from sys import argv
from os.path import exists
def print_file(f):
    # Print the entire contents of an already-open file object (Python 2 print statement).
    print f.read()
def line(f):
    """Return the next line read from the open file object *f*."""
    next_line = f.readline()
    return next_line
def print_exist(file_name):
    # Report whether `file_name` exists on disk and return the boolean
    # (Python 2 print statement).
    flag = exists(file_name)
    print "Exist file name? ",flag
    return flag
def rewind(f):
    """Move the file object's read position back to the very beginning."""
    f.seek(0)
# Usage (Python 2): python ex18-21.py <file_name>
script_name, file_name = argv
print "the script name is ", script_name
file_tmp = open(file_name)
print_exist(file_name)
# Show the first line, rewind, then dump the whole file.
print "line : ",line(file_tmp),"-"*20
rewind(file_tmp)
print "\t*content file: \n","#"*50
print_file(file_tmp)
# NOTE(review): file_tmp is never closed — confirm whether a close() is wanted.
print "#"*50
| true |
46269e12ceecc692da7ea0de00b1a8f424b8f4d2 | Python | robin0371/servem | /server/validate.py | UTF-8 | 884 | 2.890625 | 3 | [] | no_license | from cerberus import Validator
# Validation schema for the request body (cerberus).
STATUS_SCHEMA = {
    'device_id': {
        'type': 'string', 'regex': r'^[a-z]{1,10}[_]{1}\d+$', 'required': True},
    # BUG FIX: cerberus string-length rules are 'minlength'/'maxlength';
    # 'min'/'max' compare *values* and did not enforce a 16-character id.
    'request_id': {'type': 'string', 'minlength': 16, 'maxlength': 16, 'required': True},
    'status': {'type': 'string', 'required': True},
    'data': {'type': 'dict', 'required': True},
}
def validate_status_request(body):
    """Validate a device-status request body against STATUS_SCHEMA.

    :param body: request body as a dict
    :type body: dict
    :return: validation flag plus the error dict when the body is invalid
    :rtype: tuple(bool, dict)
    """
    checker = Validator()
    is_valid = checker.validate(body, STATUS_SCHEMA)
    return is_valid, checker.errors
| true |
47b3f170ee269dcd95e24ce5c44c277ca28098b2 | Python | Aly-Elgharabawy/TestCNN | /CNN.py | UTF-8 | 2,717 | 3.0625 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import torchvision
import torchvision.transforms as transforms
# Widen tensor console printing; explicitly keep autograd enabled (the default).
torch.set_printoptions(linewidth=120)
torch.set_grad_enabled(True)
# Download (first run only) and load the FashionMNIST training split,
# converting each 28x28 grayscale image to a float tensor.
train_set = torchvision.datasets.FashionMNIST(
    root = './data/FashionMNIST',
    train = True,
    download = True,
    transform = transforms.Compose([transforms.ToTensor()])
)
class Network(nn.Module):
    """Small CNN for 28x28 single-channel FashionMNIST images.

    Two conv/relu/max-pool stages followed by a three-layer fully connected
    head producing 10 raw class logits.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 -> 6 -> 12 channels, 5x5 kernels.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        # Classifier head: 12 channels * 4 * 4 spatial = 192 flattened features.
        self.fc1 = nn.Linear(in_features=192, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=60)
        self.out = nn.Linear(in_features=60, out_features=10)

    def forward(self, t):
        # Stage 1: conv -> relu (clamps negatives to 0) -> 2x2 max pool
        # (keeps the max of each 2x2 region).
        t = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
        # Stage 2: same pattern with the second convolution.
        t = F.max_pool2d(F.relu(self.conv2(t)), kernel_size=2, stride=2)
        # Flatten and run the fully connected head; last layer yields logits.
        t = F.relu(self.fc1(t.reshape(-1, 192)))
        t = F.relu(self.fc2(t))
        return self.out(t)
def get_num_correct(predictions, targets):
    """Count how many rows of `predictions` have their argmax equal to `targets`."""
    matches = predictions.argmax(dim=1).eq(targets)
    return matches.sum().item()
network = Network()
data_loader = torch.utils.data.DataLoader(train_set, batch_size=100)
optimizer = optim.Adam(network.parameters(), lr=0.01)

losses = []  # per-batch loss history over the whole run
for epoch in range(5):
    # BUG FIX: per-epoch counters. The original accumulated total_correct
    # across all 5 epochs but divided by a single epoch's sample count,
    # so the reported "accuracy" could exceed 100%.
    total_loss = 0
    total_correct = 0
    for batch in data_loader:
        images, labels = batch                 # training inputs and targets
        preds = network(images)                # forward pass through the CNN
        loss = F.cross_entropy(preds, labels)  # loss vs. the true labels
        optimizer.zero_grad()                  # grads accumulate by default; reset first
        loss.backward()                        # backprop through the graph
        optimizer.step()                       # Adam gradient-descent update
        total_loss += loss.item()
        losses.append(loss.item())
        total_correct += get_num_correct(preds, labels)
    accuracy = total_correct / len(train_set) * 100
    # BUG FIX: report the real epoch number (the original hard-coded 0).
    print("epoch :", str(epoch), "total_correct: ", str(total_correct), "loss: ", str(total_loss))
    print("\n Accuracy = " + str(accuracy) + "%")
# BUG FIX: start at index 0 (the original skipped the first recorded loss).
for j in range(len(losses)):
    print(str(losses[j]) + "\n")
| true |
78d62f17ad76341ed05ecab509e512768e12a2ba | Python | chrysalisDVT/python-basics | /merge_sort.py | UTF-8 | 1,266 | 3.828125 | 4 | [] | no_license | def split(base_list):
    """ Splits the list and returns the left and right sub list"""
    # Midpoint via floor division; for odd lengths the left half is the shorter one.
    list_mid_pointer=len(base_list)//2
    return base_list[:list_mid_pointer],base_list[list_mid_pointer:]
def merge_sorted_list(left_sublist, right_sublist):
    """Merge two already-sorted lists and return the combined sorted list.

    ROBUSTNESS FIX: the original indexed both sublists before checking for
    exhaustion, so an *initially* empty sublist raised IndexError; empty
    inputs are now handled.  Ties still take the right element first, as in
    the original strict `<` comparison.
    """
    left_index = right_index = 0
    sorted_list = []
    # Merge while both sides still have elements.
    while left_index < len(left_sublist) and right_index < len(right_sublist):
        if left_sublist[left_index] < right_sublist[right_index]:
            sorted_list.append(left_sublist[left_index])
            left_index += 1
        else:
            sorted_list.append(right_sublist[right_index])
            right_index += 1
    # Append whatever remains on either side (at most one is non-empty).
    sorted_list += left_sublist[left_index:]
    sorted_list += right_sublist[right_index:]
    return sorted_list
def merge_sort(target_data):
    """Recursively merge-sort `target_data` and return a new sorted list.

    BUG FIX: the original only treated length 1 as the base case, so an
    empty list recursed forever (split([]) yields two empty lists); lists
    of length <= 1 are now returned as-is.
    """
    if len(target_data) <= 1:
        return target_data
    # Split in half, sort each half, then merge the sorted halves.
    left_sub_list, right_sub_list = split(target_data)
    return merge_sorted_list(merge_sort(left_sub_list), merge_sort(right_sub_list))

print(merge_sort([99,88,77,66]))
| true |
0e38835a2fad49506313eff19c21518ba49be086 | Python | thiagoabreu93/ed-not-2021-2 | /cursoemvideo/exercicios/ex007.py | UTF-8 | 158 | 4.09375 | 4 | [] | no_license | n1 = float(input('Digite a Nota 1: '))
n2 = float(input('Digite a Nota 2: '))
print('A média entre {:.1f} e {:.1f} é: {:.1f}'.format(n1, n2, (n1+n2)/2))
| true |
b1ef59f157f1eb864bd4e9a2e793d7de0c133de1 | Python | Solotzy/Scraping | /PythonScraping/ch5/04TableToCsv.py | UTF-8 | 684 | 2.828125 | 3 | [] | no_license | # coding: utf-8
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("https://zh.wikipedia.org/wiki/%E6%96%87%E4%BB%B6%E7%BC%96%E8%BE%91%E5%99%A8%E6%AF%94%E8%BE%83")
bsObj = BeautifulSoup(html, "html.parser")
# 主对比表格是当前页面上的第一个表格
table = bsObj.findAll("table", {"class":"wikitable"})[0]
rows = table.findAll("tr")
csvFile = open("files/editors.csv", 'wt', newline='', encoding='utf-8')
writer = csv.writer(csvFile)
try:
for row in rows:
csvRow = []
for cell in row.findAll(['td', 'th']):
csvRow.append(cell.get_text())
writer.writerow(csvRow)
finally:
csvFile.close() | true |
6a3b27886c99f76914707391db58b61b8d7716e0 | Python | RounakChatterjee/SEM2_Assignment4 | /Codes/Chi_Sq_Test.py | UTF-8 | 2,148 | 3.875 | 4 | [] | no_license | '''
CHI SQUARED TEST FOR RANDOM NUMBERS
=============================================================
Author : Rounak Chatterjee
Date : 01/06/2020
=============================================================
The Chi squared test is one of the ways to check whether a random number generator's
performance. If we consider a problem in which we know the analyitcal probability of
outcome of an event as p_i and by the random number genrator we produce
n samples using the requisite probability distribution and get event i as
Y_i times, then we define a value V as:
V = Sum{i = 1 to N}{(Y_i-n*p_i)^2/n*p_i}, where n is the number of samples
generated by the random numer and N is the totalnumber of possible events.
Once we have V we can use it to calculate The Chi Squared statistics from it.
To to that we can use scipy module stats.chi2.cdf and depending on the output we can
write whether the genrator is good or bad.
The criterions are :
if value of V = v(obtained)
then p(V>v) = 1.0 - chi squared stat(V) = x (say)
then:
x<0.01 : "Not Sufficient"
0.01<x<0.05 : "suspect"
0.05<x<0.1 : "almost suspect"
0.1<x<0.9 : "sufficiently Random"
For our problem we have twpo dice system so N = 12 and given two observed
counts. On them we perform the chi square test.
'''
import numpy as np
import scipy.stats as s
def criterion(v):
x = 1.0 - s.chi2.cdf(v,10.0)
if(x<0.01):
return "Not Sufficiently Random"
elif(0.01<x and x<0.05):
return "Suspect"
elif(0.05<x and x<0.1):
return"Almost Suspect"
else:
return "Sufficiently Random"
dice_prob = np.array([1.0,2.0,3.0,4.0,5.0,6.0,5.0,4.0,3.0,2.0,1.0],dtype = np.float64)
dice_prob = dice_prob/36.0
Y_1 = np.array([4,10,10,13,20,18,18,11,13,14,13],dtype = np.float64)
Y_2 = np.array([3,7,11,15,19,24,21,17,13,9,5],dtype = np.float64)
n1 = sum(Y_1)
n2 = sum(Y_2)
v1 = 0.0
v2 = 0.0
for i in range(len(dice_prob)):
v1 = v1+ (Y_1[i]-n1*dice_prob[i])**2.0/(n1*dice_prob[i])
v2 = v2+ (Y_2[i]-n2*dice_prob[i])**2.0/(n2*dice_prob[i])
print("The Test results are :\nSet1 :",criterion(v1))
print("Set2 : ",criterion(v2))
| true |
c5b6cbf00a0b6690696f80f18bee9ab6318b7832 | Python | ibrahim272941/python_projects | /combination.py | UTF-8 | 171 | 3.078125 | 3 | [] | no_license | new=[1,2,3]
new_1=[]
r,l=0,0
for i in new:
new_1.append(new[l:]+new[:r])
a=list(reversed(new))
new_1.append(a[l:]+a[:r])
l+=1
r+=1
print(sorted(new_1)) | true |
42281f6ce39543ce372b92e8e6d9de1215f79a11 | Python | yutianji888/CV-Python-Basic | /ch25-Hough直线变换/25.1-OpenCV中的霍夫变换-HoughLines.py | UTF-8 | 1,298 | 3.53125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# __author__ = 'corvin'
"""
cv2.HoughLines()。
返回值就是( ρ; θ)。 ρ 的单位是像素, θ 的单位是弧度。这个函数的第一个参
数是一个二值化图像,所以在进行霍夫变换之前要首先进行二值化,或者进行
Canny 边缘检测。第二和第三个值分别代表 ρ 和 θ 的精确度。第四个参数是
阈值,只有累加其中的值高于阈值时才被认为是一条直线,也可以把它看成能
检测到的直线的最短长度(以像素点为单位)
"""
import cv2
import numpy as np
img = cv2.imread('../data/sudoku.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 10, 50, apertureSize=3)
cv2.imshow("edges", edges)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 180)
print("Len of lines:", len(lines))
for line in lines:
rho, theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
# cv2.imwrite('houghlines3.jpg',img)
cv2.imshow("houghlines3.jpg", img)
cv2.waitKey(1000)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
ca2425801ca87e2e193b607ea567e755cae07f8f | Python | HuZhenghang/Coursera-practices | /memory.py | UTF-8 | 1,951 | 3.078125 | 3 | [] | no_license | import simplegui
import random
list= [1,2,2,3,7,1,5,8,3,5,4,4,6,7,6,8]
list_turn=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
state=0
last_number=-1
def mouse_handler(position):
global state
global list_turn
global last_number
number=position[0]/50
if state==0:
state=1
list_turn[number]=1
last_number=number
elif state==1:
state=2
list_turn[number]=1
if list[last_number]==list[number]:
list_turn[number]=2
list_turn[last_number]=2
else :
count=0
for i in list_turn:
if i==1 :
list_turn[count]=0
count=count+1
state=1
list_turn[number]=1
last_number=number
def button_handler():
global state
global list
global list_turn
list=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
list_turn=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
j=0
state=0
for i in list:
count=3
while count>=2:
count=0
rnumber=random.randrange(1,9)
for k in list:
if k==rnumber:
count=count+1
if count<=1:
list[j]=rnumber
j=j+1
def draw_handler(canvas):
width=20
cishu=0
for i in list :
if list_turn[cishu] == 1 or list_turn[cishu]==2:
canvas.draw_text(str(i), (width, 60), 40, 'white')
canvas.draw_line((width+30, 0), (width+30, 100), 1, 'orange')
else :
canvas.draw_line((width+30, 0), (width+30, 100), 1, 'orange')
width+=50
cishu+=1
frame = simplegui.create_frame("Memory", 800, 100)
frame.set_draw_handler(draw_handler)
button1 = frame.add_button('Reset', button_handler)
frame.set_mouseclick_handler(mouse_handler)
frame.start() | true |
6b507a11623f7a3da6673a8ba426fa7a05df176a | Python | jhubar/PI | /Python/SEIR_extended.py | UTF-8 | 21,718 | 2.875 | 3 | [] | no_license | import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import minimize
import math
"""
=======================================================================================================
Best version to date (sec_methode).
WARNING: known convergence problem in the fitting routines.
=======================================================================================================
"""
class SEIR():
    """SEIR epidemic model extended with a hospitalisation compartment.

    Compartments: S (susceptible), E (exposed), I (infectious),
    H (hospitalised), R (recovered).  The fitting routines expect the
    dataset as a numpy array whose columns follow the pre-processed
    dataframe of the calling script: 0 = day, 1 = num_positive,
    3 = num_hospitalised, 4 = cumulative hospitalisations,
    7 = cumul_positive.
    """

    def __init__(self):
        # Parameter's values
        self.N = 999999
        self.beta = 0.3865   # transmission rate
        self.gamma = 1/7     # 7 days average to be cured
        self.sigma = 1/3     # 3 days average incubation time
        self.hp = 1/20.89    # Hospit probability: proportion of infected people who begin hospitalized
        self.hcr = 0         # Hospit Cure Rate: probability of recovering while hospitalised

    def set_hospit_prop(self, hospit_prop):
        # NOTE(review): stores the value in `hospit_prop`, but the model
        # equations read `self.hp` — confirm which attribute was intended.
        self.hospit_prop = hospit_prop

    def differential_bis(self, state, time, beta, gamma, sigma, hp, hcr):
        """
        Differential equations of the model (variant: hospitalisation drawn
        directly at the end of the incubation period).
        """
        S, E, I, H, R = state

        dS = -(beta * S * I) / (S + E + I + H + R)
        dE = (beta * S * I) / (S + E + I + H + R) - E * sigma
        # Note: the hospitalised share is removed here; hospitalised people
        # no longer take part in the contagion.
        dI = (1 - hp) * (E * sigma) - (gamma * I)
        dH = hp * (E * sigma) - hcr * H
        dR = gamma * I + hcr * H

        return dS, dE, dI, dH, dR

    def differential(self, state, time, beta, gamma, sigma, hp, hcr):
        """
        Differential equations of the model (hospitalisation drawn from the
        infectious compartment at rate hp * gamma).
        """
        S, E, I, H, R = state

        dS = -(beta * S * I) / (S + E + I + H + R)
        dE = (beta * S * I) / (S + E + I + H + R) - E * sigma
        dI = (E * sigma) - (gamma * I) - (I * hp * gamma)
        dH = (I * hp * gamma) - hcr * H
        dR = gamma * I + hcr * H

        return dS, dE, dI, dH, dR

    def predict(self, S_0, E_0, I_0, H_0, R_0, duration):
        """
        Integrate the model over `duration` days from the given initial
        compartment sizes; returns rows of (t, S, E, I, H, R).
        """
        # Initialisation vector:
        initial_state = (S_0, E_0, I_0, H_0, R_0)
        # Time vector:
        time = np.arange(duration)
        # Parameters vector
        parameters = (self.beta, self.gamma, self.sigma, self.hp, self.hcr)
        # Solve differential equations:
        predict = odeint(func=self.differential,
                         y0=initial_state,
                         t=time,
                         args=parameters)
        return np.vstack((time, predict[:, 0], predict[:, 1], predict[:, 2], predict[:, 3], predict[:, 4])).T

    def fit(self, dataset):
        """Fit beta and hcr with scipy.optimize on the hospitalisation curve."""
        # Set initial state.
        # BUG FIX: the original read the module-level name `df_np`, which is
        # undefined in this scope (it is a local of the calling script) and
        # raised a NameError; the `dataset` argument is used instead.
        H_0 = dataset[0][7]
        E_0 = 3 * dataset[1][1]    # a third of these should turn positive at t+1
        I_0 = dataset[0][1] - H_0  # hospitalised people no longer take part in the contagion
        S_0 = 999999 - H_0 - I_0 - E_0
        R_0 = 0
        initial_state = (S_0, E_0, I_0, H_0, R_0)
        time = dataset[:, 0]
        # Optimisation:
        method = 'fit_on_hospit'
        start_values = [self.beta, self.gamma, self.sigma, self.hp, self.hcr]
        bounds = [(0, 1), (1/7, 1/7), (1/3, 1/3), (1/20.89, 1/20.89), (0, 1)]
        res = minimize(self.SSE, np.asarray(start_values), args=(initial_state, time, dataset, method), method='L-BFGS-B', bounds=bounds)
        print(res)
        # Set new parameters:
        self.beta = res.x[0]
        self.hcr = res.x[4]

    def fit_beta(self, dataset):
        """Grid-search beta in [0, 1] on cumulative positives; plot the SSE curve."""
        # Set initial state:
        H_0 = dataset[0][7]
        E_0 = 3 * dataset[1][1]    # a third of these should turn positive at t+1
        I_0 = dataset[0][1] - H_0  # hospitalised people no longer take part in the contagion
        S_0 = 999999 - H_0 - I_0 - E_0
        R_0 = 0
        initial_state = (S_0, E_0, I_0, H_0, R_0)
        time = dataset[:, 0]
        # Iterative optimisation:
        range_size = 1000
        beta_range = np.linspace(0, 1, range_size)
        best = (math.inf, 0)
        SSE = []
        for b in range(0, range_size):
            parameters = (beta_range[b], self.gamma, self.sigma, self.hp, self.hcr)
            sse = self.SSE(parameters, initial_state, time, dataset, method='fit_on_cumul_positive')
            SSE.append(sse)
            if sse < best[0]:
                best = (sse, beta_range[b])
        print("Iterative best fit. Best value for beta = {} with sse = {}".format(best[1], best[0]))
        # Set the best value of beta:
        self.beta = best[1]
        # Plot the SSE as a function of beta:
        plt.plot(beta_range, SSE, c='blue', label='SSE evolution')
        plt.yscale('log')
        plt.xlabel('beta value')
        plt.show()

    def fit_hcr(self, dataset):
        """Grid-search hcr in [0, 1] on the hospitalisation curve; plot the SSE curve."""
        # Set initial state:
        H_0 = dataset[0][7]
        E_0 = 3 * dataset[1][1]    # a third of these should turn positive at t+1
        I_0 = dataset[0][1] - H_0  # hospitalised people no longer take part in the contagion
        S_0 = 999999 - H_0 - I_0 - E_0
        R_0 = 0
        initial_state = (S_0, E_0, I_0, H_0, R_0)
        time = dataset[:, 0]
        # Iterative optimisation:
        range_size = 1000
        hcr_range = np.linspace(0, 1, range_size)
        best = (math.inf, 0)
        SSE = []
        for b in range(0, range_size):
            parameters = (self.beta, self.gamma, self.sigma, self.hp, hcr_range[b])
            sse = self.SSE(parameters, initial_state, time, dataset, method='fit_on_hospit')
            SSE.append(sse)
            if sse < best[0]:
                best = (sse, hcr_range[b])
        print("Iterative best fit. Best value for hcr = {} with sse = {}".format(best[1], best[0]))
        # Set the best value of hcr:
        self.hcr = best[1]
        # Plot the SSE as a function of hcr:
        plt.plot(hcr_range, SSE, c='blue', label='SSE evolution')
        plt.yscale('log')
        plt.xlabel('hcr value')
        plt.show()

    def fit_scipy(self, dataset):
        """
        Step 1: fit beta (in [0, 1], no prior information), gamma in
        [1/10, 1/4] and sigma in [1/5, 1] (both intervals given in the
        assignment), matching the cumulative testing data against the sum
        of the I, H and R curves.
        """
        time = dataset[:, 0]
        # Set initial state:
        initial_state = (1000000 - dataset[0][1] - 3*dataset[2][1], 3 * dataset[2][1], dataset[0][1], dataset[0][4], 0)
        start_values = [self.beta, self.gamma, self.sigma, self.hp, self.hcr]
        bounds = [(0, 1), (1/10, 1/4), (1/5, 1), (0, 1), (self.hcr, self.hcr)]
        res = minimize(self.SSE, np.asarray(start_values), args=(initial_state, time, dataset, 'fit_on_cumul_mixt'),
                       method='L-BFGS-B', bounds=bounds)
        print(res)
        self.beta = res.x[0]
        self.gamma = res.x[1]
        self.sigma = res.x[2]
        """
        Step 2: only hcr remains, fitted on the (non-cumulative)
        hospitalisation curve.
        NOTE: minimize() fails to converge on this sub-problem while the
        plain grid search converges fine, hence the iterative fit below.
        """
        self.fit_hcr(dataset)

    def fit_on_sigma(self, dataset):
        """Grid-search sigma in [0, 1] on cumulative positives; plot the SSE curve."""
        # Set initial state:
        H_0 = dataset[0][7]
        E_0 = 3 * dataset[1][1]    # a third of these should turn positive at t+1
        I_0 = dataset[0][1] - H_0  # hospitalised people no longer take part in the contagion
        S_0 = 999999 - H_0 - I_0 - E_0
        R_0 = 0
        initial_state = (S_0, E_0, I_0, H_0, R_0)
        time = dataset[:, 0]
        # Iterative optimisation:
        range_size = 1000
        sigma_range = np.linspace(0, 1, range_size)
        best = (math.inf, 0)
        SSE = []
        done1 = False
        for b in range(0, range_size):
            parameters = (self.beta, self.gamma, sigma_range[b], self.hp, self.hcr)
            sse = self.SSE(parameters, initial_state, time, dataset, method='fit_on_cumul_positive')
            SSE.append(sse)
            if sigma_range[b] >= 0.3333 and not done1:
                # Debug print of the SSE around the theoretical sigma = 1/3.
                done1 = True
                print("SSE for sigma = {} = {}".format(sigma_range[b], sse))
            if sse < best[0]:
                best = (sse, sigma_range[b])
        print("Iterative best fit. Best value for sigma = {} with sse = {}".format(best[1], best[0]))
        # Set the best value of sigma:
        self.sigma = best[1]
        # Plot the SSE as a function of sigma:
        plt.plot(sigma_range, SSE, c='blue', label='SSE evolution')
        plt.yscale('log')
        plt.xlabel('sigma value')
        plt.show()

    def fit_on_gamma(self, dataset):
        """Grid-search gamma in [0, 1] on cumulative positives; plot the SSE curve."""
        # Set initial state:
        H_0 = dataset[0][7]
        E_0 = 3 * dataset[1][1]    # a third of these should turn positive at t+1
        I_0 = dataset[0][1] - H_0  # hospitalised people no longer take part in the contagion
        S_0 = 999999 - H_0 - I_0 - E_0
        R_0 = 0
        initial_state = (S_0, E_0, I_0, H_0, R_0)
        time = dataset[:, 0]
        # Iterative optimisation:
        range_size = 1000
        gamma_range = np.linspace(0, 1, range_size)
        best = (math.inf, 0)
        SSE = []
        done1 = False
        for b in range(0, range_size):
            # BUG FIX: the grid value must occupy the gamma slot (2nd) of the
            # parameter tuple; the original passed it in the sigma slot while
            # keeping self.gamma fixed, so gamma was never actually explored.
            parameters = (self.beta, gamma_range[b], self.sigma, self.hp, self.hcr)
            sse = self.SSE(parameters, initial_state, time, dataset, method='fit_on_cumul_positive')
            SSE.append(sse)
            if gamma_range[b] >= 0.3333 and not done1:
                # Debug print of the SSE around gamma = 1/3.
                done1 = True
                print("SSE for gamma = {} = {}".format(gamma_range[b], sse))
            if sse < best[0]:
                best = (sse, gamma_range[b])
        print("Iterative best fit. Best value for gamma = {} with sse = {}".format(best[1], best[0]))
        # Set the best value of gamma:
        self.gamma = best[1]
        # Plot the SSE as a function of gamma:
        plt.plot(gamma_range, SSE, c='blue', label='SSE evolution')
        #plt.yscale('log')
        plt.xlabel('gamma value')
        plt.show()

    def SSE(self, parameters, initial_state, time, data, method='fit_on_hospit'):
        """Sum of squared errors between model predictions and `data` for the
        chosen fitting target (`method` selects which curves are compared)."""
        # Set parameters:
        params = tuple(parameters)
        # Make predictions:
        predict = odeint(func=self.differential,
                         y0=initial_state,
                         t=time,
                         args=params)
        if method == 'fit_on_hospit':
            # Fit only on the hospitalisation curve.
            sse = 0.0
            for i in range(0, len(time)):
                sse += (data[i][3] - predict[i][3])**2
            return sse
        if method == 'fit_on_hospit_cumul':
            # When hcr == 0 the hospitalisation curve can be fitted against
            # the cumulative hospitalisations (no one leaves H).
            sse = 0.0
            for i in range(0, len(time)):
                sse += (data[i][4] - predict[i][3])**2
            return sse
        if method == 'fit_on_cumul_positive':
            # Cumulative positives vs. the sum of the I, H and R curves.
            sse = 0.0
            for i in range(0, len(time)):
                sse += (data[i][7] - predict[i][2] - predict[i][3] - predict[i][4])**2
            return sse
        if method == 'fit_on_cumul_mixt':
            # Combined target: cumulative positives plus the hospitalisation curve.
            sse = 0.0
            for i in range(0, len(time)):
                sse += (data[i][7] - predict[i][2] - predict[i][3] - predict[i][4]) ** 2
                sse += (data[i][4] - predict[i][3]) ** 2
            return sse
def plot_predict_and_compare(df, pred, args='predict'):
    # Plot predicted epidemic curves and/or compare them with the observed
    # dataframe.  `args` is a flag string that may contain 'predict',
    # 'no_S' (hide the susceptible curve), 'compare' and 'log'.
    if 'predict' in args:
        # just print predicted epidemic curves
        if "no_S" not in args:
            plt.plot(pred[:, 0], pred[:, 1], c='black', label="S")
        plt.plot(pred[:, 0], pred[:, 2], c='yellow', label="E")
        plt.plot(pred[:, 0], pred[:, 3], c='red', label="I")
        plt.plot(pred[:, 0], pred[:, 4], c='purple', label="H")
        plt.plot(pred[:, 0], pred[:, 5], c='blue', label='R')
        plt.show()
    if 'compare' in args:
        # Observed points: cumulative positives (red) and hospitalised (blue).
        plt.scatter(df['Day'], df['cumul_positive'], c='red')
        plt.scatter(df['Day'], df['num_hospitalised'], c='blue')
        cumul_positive = []
        hospit = []
        for i in range(0, len(df['Day'].to_numpy())):
            cumul_positive.append(pred[i][3] + pred[i][4] + pred[i][5])  # sum of I, H and R
            hospit.append(pred[i][4])
        plt.plot(df['Day'], cumul_positive, c='red')
        plt.plot(df['Day'], hospit, c='blue')
        if "log" in args:
            plt.yscale('log')
        plt.show()
def first_method():
"""
Import the dataset and add informations
"""
url = "https://raw.githubusercontent.com/ADelau/proj0016-epidemic-data/main/data.csv"
df = pd.read_csv(url, sep=",", header=0)
# Delete the first line:
df = df.drop([0], axis=0)
# Insert cumul_positive column at the end
cumul_positive = df["num_positive"].to_numpy()
cumul_positive[0] = 20
for i in range(1, len(cumul_positive)):
cumul_positive[i] += cumul_positive[i - 1]
df.insert(7, "cumul_positive", cumul_positive)
# print(df)
# Make a numpy version of the dataframe:
df_np = df.to_numpy()
# Init the model:
model = SEIR()
# Set initial state
H_0 = df_np[0][7]
E_0 = 3 * df_np[1][1] # Vu qu'un tiers de ce nombre devront être positifs à t+1
I_0 = df_np[0][1] - H_0 # Les hospitalisés ne participent plus à la contagion
S_0 = 999999 - H_0 - I_0 - E_0
R_0 = 0
""" *****************************************************************************
ETAPE 1:
Paramètres donnés par le prof:
- Gamma: 1/7
- Sigma: 1/3
Première étape = déterminer la relation entre les hospitalisations et les infectés
Pour ça on analyse la proportion moyenne entre la courbe cumulée des tests positifs
et la courbe cumulée des hospitalisés.
On remarque que à partir de j 15, ce rapport est constant et que la courbe des positifs est
20.896229143831444 * celle des hospitalisés. (Standard deviation = 0.9312720467999785)
On a donc notre pramètre hp.
*****************************************************************************
"""
# Find the linear relation between the two curves:
posit = df['cumul_positive'].to_numpy()
hospit = df['num_cumulative_hospitalizations'].to_numpy()
factor = []
for i in range(15, len(posit)): # Begin after 15 days because stabilisation of the rapport
factor.append(posit[i] / hospit[i])
factor = np.array(factor)
# Predict positive curve from this:
predict_cumul_positive = df['num_cumulative_hospitalizations'].to_numpy()
predict_cumul_positive = predict_cumul_positive * np.mean(factor)
# Set the value to the model.
model.set_hospit_prop(1 / np.mean(factor))
print('"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""')
print(" Analyze of the proportion of hospitalized in the positives case")
print(" Average = {}".format(np.mean(factor)))
print(" Standard deviation = {}".format(np.std(factor)))
plt.plot(df['Day'], df['cumul_positive'], c='blue', label='Cumul_positive')
plt.plot(df['Day'], df['num_cumulative_hospitalizations'], c='red', label='Cumul_hospit')
plt.plot(df['Day'], predict_cumul_positive, c='green', label='Predicted positives from hospit')
plt.ylabel('nb people')
plt.xlabel('time in days')
plt.title('Compare cumul positive and cumul hospit in dataset')
plt.show()
""" *****************************************************************************
ETAPE 2:
Maintenant que nous savons la proportion de contaminations qui finissent
hospitalisées (et donc qui ne participent plus à la contamination, nous pouvons
fiter le paramètre Beta.
Nous pouvons le fitter sur le cumul des tests positifs à partir de la somme des
courbes I, H, et R. On peut le faire avec une valeur de hcr = 0 (pas de guérison
pour les hospitalisés, car sa valeur n'entre pas en compte ici.
*****************************************************************************
"""
model.fit_beta(df_np)
""" *****************************************************************************
ETAPE 3:
Il ne reste plus qu'à fiter le parametre hcr, qui est le taux de guérison
chez les hospitalisés
*****************************************************************************
"""
model.fit_hcr(df_np)
""" *****************************************************************************
Nous pouvons maintenant comparer les simulations et les données ainsi que
dessiner des prédictions à long terme.
*****************************************************************************
"""
predictions = model.predict(S_0, E_0, I_0, H_0, R_0, duration=150)
plot_predict_and_compare(df, predictions, args='predict compare')
predictions = model.predict(S_0, E_0, I_0, H_0, R_0, duration=50)
plot_predict_and_compare(df, predictions, args='predict compare no_S log')
print("final value for model's parameters: ")
print(" beta = {}".format(model.beta))
print(" gamma = {}".format(model.gamma))
print(" sigma = {}".format(model.sigma))
print(" hp = {}".format(model.hp))
print(" hcr = {}".format(model.hcr))
def sec_method():
"""
Import the dataset and add informations
"""
url = "https://raw.githubusercontent.com/ADelau/proj0016-epidemic-data/main/data.csv"
df = pd.read_csv(url, sep=",", header=0)
# Delete the first line:
df = df.drop([0], axis=0)
# Insert cumul_positive column at the end
cumul_positive = df["num_positive"].to_numpy()
for i in range(1, len(cumul_positive)):
cumul_positive[i] += cumul_positive[i - 1]
df.insert(7, "cumul_positive", cumul_positive)
# print(df)
# Make a numpy version of the dataframe:
df_np = df.to_numpy()
# Init the model:
model = SEIR()
# Set initial state
H_0 = df_np[0][7]
E_0 = 3 * df_np[1][1] # Vu qu'un tiers de ce nombre devront être positifs à t+1
I_0 = df_np[0][1] - H_0 # Les hospitalisés ne participent plus à la contagion
S_0 = 999999 - H_0 - I_0 - E_0
R_0 = 0
""" *****************************************************************************
ETAPE 1:
Trouver le paramètre hp, de la même façon que dans methode 1
*****************************************************************************
"""
# Find the linear relation between the two curves:
posit = df['cumul_positive'].to_numpy()
hospit = df['num_cumulative_hospitalizations'].to_numpy()
factor = []
for i in range(15, len(posit)): # Begin after 15 days because stabilisation of the rapport
factor.append(posit[i] / hospit[i])
factor = np.array(factor)
# Predict positive curve from this:
predict_cumul_positive = df['num_cumulative_hospitalizations'].to_numpy()
predict_cumul_positive = predict_cumul_positive * np.mean(factor)
# Set the value to the model.
model.set_hospit_prop(1 / np.mean(factor))
print('"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""')
print(" Analyze of the proportion of hospitalized in the positives case")
print(" Average = {}".format(np.mean(factor)))
print(" Standard deviation = {}".format(np.std(factor)))
plt.plot(df['Day'], df['cumul_positive'], c='blue', label='Cumul_positive')
plt.plot(df['Day'], df['num_cumulative_hospitalizations'], c='red', label='Cumul_hospit')
plt.plot(df['Day'], predict_cumul_positive, c='green', label='Predicted positives from hospit')
plt.ylabel('nb people')
plt.xlabel('time in days')
plt.title('Compare cumul positive and cumul hospit in dataset')
plt.show()
""" *****************************************************************************
ETAPE 2:
On fit le modèle:
- D'abord les paramètres beta, gamma et sigma. Le paramètre hp a été fixé
à l'étape 1 et le paramètre hcr est fixé à zéro car n'intervient pas
dans le cas ou l'on fit sur les courbes I+R+H
- Après on fit hcr sur la courbe des hospitalisations.
*****************************************************************************
"""
model.fit_scipy(df_np)
model.fit_on_sigma(df_np)
#model.fit_beta(df_np)
#model.fit_on_gamma(df_np)
model.fit_scipy(df_np)
""" *****************************************************************************
Nous pouvons maintenant comparer les simulations et les données ainsi que
dessiner des prédictions à long terme.
*****************************************************************************
"""
predictions = model.predict(S_0, E_0, I_0, H_0, R_0, duration=150)
plot_predict_and_compare(df, predictions, args='predict compare')
predictions = model.predict(S_0, E_0, I_0, H_0, R_0, duration=50)
plot_predict_and_compare(df, predictions, args='predict compare no_S log')
print("final value for model's parameters: ")
print(" beta = {}".format(model.beta))
print(" gamma = {}".format(model.gamma))
print(" sigma = {}".format(model.sigma))
print(" hp = {}".format(model.hp))
print(" hcr = {}".format(model.hcr))
if __name__ == "__main__":
#first_method()
sec_method()
| true |
c0d661ba7c34b4cd1fe7a0d90f87043f8bd48b22 | Python | prostomusa/Solutionpython | /task_4/SRC/task4.py | UTF-8 | 804 | 3.671875 | 4 | [] | no_license | def compare_two_string(x, y):
if "*" in x:
return("Неверный формат данных")
if "*" not in y and len(x) != len(y):
return ("KO")
tern = y.split("*")
p = 0
if tern[0] != "":
if tern[0] != x[:len(tern[0]):]:
return("KO")
if tern[-1] != "":
if tern[-1] != x[-len(tern[0])::]:
return("KO")
for i in tern:
if i == "":
continue
else:
try:
z = x.index(i, p)
p = z + len(i)
except ValueError:
return("KO")
return("OK")
if __name__ == "__main__":
x = input("Введите первую строку: ")
y = input("Введите вторую строку: ")
print(compare_two_string(x, y))
| true |
4b6227deb3bbea45f6b015ffd559448c95b39e9d | Python | frix360/nlp-project | /Questions.py | UTF-8 | 970 | 3.421875 | 3 | [] | no_license | class Questions:
def __init__(self):
self.questions = {}
self.__init_questions()
def __init_questions(self):
self.questions = {
'color': [
'What is the color of the button?',
'What is the button\'s color?',
'Color of the button is?',
'Button\'s color is?'
],
'size': [
'What is the size of a button?',
'Button size is?',
'What is the button\'s size?',
'What is the size?',
'Size is?'
],
'text': [
'What does the button say?',
'What is the text of the button?',
'What is written on the button?'
]
}
def get_types(self):
return list(self.questions.keys())
def get_questions_by_type(self, question_type):
return self.questions[question_type]
| true |
022b75b0a2c69d5491ced09b58bdd4dfe03472df | Python | rhbelson/WhenInRome | /main.py | UTF-8 | 9,182 | 2.859375 | 3 | [] | no_license | import chord_quality_identifier
import chroma_to_notes
import convert_labels_to_roman_numerals
import csv
import notes_to_chroma
import os
import runMelisma
import sys
import transposition
if len(sys.argv) != 2:
print "usage: python main.py [midifile]"
quit()
args = sys.argv
midifile = args[1]
keys = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
highest_score = 0
correct_key = ""
correct_romanNumerals = []
chordsAndPitches, lowestNotesInChord, start_times = runMelisma.get_chords_and_pitches(midifile)
# print start_times
# for i in chordsAndPitches:
# print i
# Check if Transposition is Necessary: Get rid of funky chords
num_transposition_steps = 0
for l in range(len(chordsAndPitches)):
if "bb" in chordsAndPitches[l][0]:
num_transposition_steps = 1
new_chords = []
for i in range(len(chordsAndPitches)):
new_chord_label = transposition.transpose(chordsAndPitches[i][0], num_transposition_steps)
new_notes = []
for note in chordsAndPitches[i][1]:
new_notes.append(transposition.transpose(note, num_transposition_steps))
new_chords.append((new_chord_label, new_notes))
chordsAndPitches = new_chords
break # leave function
if "##" in chordsAndPitches[l][0]:
num_transposition_steps = -1
new_chords = []
for i in range(len(chordsAndPitches)):
new_chord_label = transposition.transpose(chordsAndPitches[i][0], num_transposition_steps)
new_notes = []
for note in chordsAndPitches[i][1]:
new_notes.append(transposition.transpose(note, num_transposition_steps))
new_chords.append((new_chord_label, new_notes))
chordsAndPitches = new_chords
break # leave function
# print chords
# update chordsAndPitches to reflect transposition
# if len(chords) > 0:
# chordsAndPitches1 = []
# for m in range(len(chordsAndPitches)):
# chordsAndPitches1.append((chords[m], chordsAndPitches[m][1]))
# chordsAndPitches = chordsAndPitches1
# for i in chordsAndPitches:
# print i
for i in range(len(keys)):
chordsWithQuality = []
for j in range(len(chordsAndPitches)):
chordsWithQuality.append(
chord_quality_identifier.chord_quality_identifier(chordsAndPitches[j][0], chordsAndPitches[j][1]))
# romanNumerals = convert_labels_to_roman_numerals.label_to_rn(chordsWithQuality, lowestNotesInChord, 'C')
romanNumerals = convert_labels_to_roman_numerals.label_to_rn(chordsWithQuality, lowestNotesInChord, keys[i])
# print 'key: ' + keys[i] + ';Roman Numerals: ',
# print romanNumerals
# Scoring Is Calculated Here
num_minor_tonic = 0
num_major_tonic = 0
score = 0
if romanNumerals[0]=="I":
score+=5
for k in range(len(romanNumerals)):
if romanNumerals[k] is None:
continue
if romanNumerals[k][0] == "I":
if len(romanNumerals[k]) > 1:
if romanNumerals[k][1] != "I" and romanNumerals[k][1] != "V":
score += 1
num_major_tonic += 1
else:
score += 1
num_major_tonic += 1
if romanNumerals[k][0] == "i":
if len(romanNumerals[k]) > 1:
if romanNumerals[k][1] != "i" and romanNumerals[k][1] != "v":
score += 1
num_major_tonic += 1
else:
score += 1
num_major_tonic += 1
if romanNumerals[k][0] == "V" or romanNumerals[k][0] == "v":
if len(romanNumerals[k]) > 1:
if romanNumerals[k][1] != "i" and romanNumerals[k][1] != "I":
score += 1
else:
score += 1
if romanNumerals[k][0:1] == "IV" or romanNumerals[k][0:1] == "iv":
score += 1
if romanNumerals[k][0:1] == "IV" or romanNumerals[k][0:1] == "iv":
score += 1
if k >= 0 and romanNumerals[k - 1] is not None and romanNumerals[k] is not None:
if romanNumerals[k - 1][0] == "V" and romanNumerals[k][0] == "I":
actuallyV = False
if len(romanNumerals[k - 1]) > 1:
if romanNumerals[k - 1][1] != "I":
actuallyV = True
else:
actuallyV = True
actuallyI = False
if len(romanNumerals[k]) > 1:
if romanNumerals[k][1] != "I" and romanNumerals[k][1] != "V":
actuallyI = True
else:
actuallyI = True
if actuallyV and actuallyI:
score += 2
elif romanNumerals[k - 1][0] == "V" and romanNumerals[k][0] == "i":
actuallyV = False
if len(romanNumerals[k - 1]) > 1:
if romanNumerals[k - 1][1] != "I":
actuallyV = True
else:
actuallyV = True
actuallyI = False
if len(romanNumerals[k]) > 1:
if romanNumerals[k][1] != "i" and romanNumerals[k][1] != "v":
actuallyI = True
else:
actuallyI = True
if actuallyV and actuallyI:
score += 2
# print keys[i], score, romanNumerals
if score > highest_score:
highest_score = score
correct_romanNumerals = romanNumerals
correct_key = keys[i]
# Check Transposition
if num_transposition_steps == 1:
orig_key = notes_to_chroma.ntc(correct_key)
orig_key = int(orig_key + 1)
correct_key = chroma_to_notes.ctn(orig_key)
elif num_transposition_steps == -1:
orig_key = notes_to_chroma.ntc(correct_key)
orig_key = int(orig_key - 1)
correct_key = chroma_to_notes.ctn(orig_key)
if num_major_tonic > num_minor_tonic:
correct_key += ' major'
else:
correct_key += ' minor'
os.system('midicsv-1.1/midicsv ' + midifile + ' > output.csv')
with open('output.csv') as midiCsv:
csvReader = csv.reader(midiCsv, delimiter=',')
outputFile = open('labeled_output.csv', 'wb')
trackNumber = 0
tempo = 0
ppqn = 24
endTime = 0
start_times_in_ticks = []
current_start_time = 0
for row in csvReader:
row = [x.strip(' ') for x in row]
if row[0] == '0' and row[1] == '0' and row[2] == 'Header':
trackNumber = int(row[4])
ppqn = int(row[5])
string_to_write = str(row[0]) + ', ' + str(row[1]) + ', ' + str(row[2]) + ', ' + str(row[3]) + ', ' + str(
trackNumber + 1) + ', ' + str(row[5])
outputFile.write(string_to_write + '\n')
elif row[0] == '1' and row[1] == '0' and row[2] == 'Tempo':
tempo = int(row[3])
string_to_write = str(row[0]) + ', ' + str(row[1]) + ', ' + str(row[2]) + ', ' + str(row[3])
outputFile.write(string_to_write + '\n')
for i in range(len(correct_romanNumerals)):
bpm = int(60000000. / tempo)
start_time_in_ticks = int(start_times[i] / ((1./float(ppqn)) * (1./bpm) * (1000*60)))
start_times_in_ticks.append(start_time_in_ticks)
# print len(start_times_in_ticks)
# print len(correct_romanNumerals)
elif row[0] == str(trackNumber):
while current_start_time < len(start_times_in_ticks) and \
start_times_in_ticks[current_start_time] < int(row[1]):
string_to_write = ""
if current_start_time == 0:
string_to_write = str(row[0]) + ', ' + str(
start_times_in_ticks[current_start_time]) + ', Text_t, \"' + correct_key + ': ' + str(
correct_romanNumerals[current_start_time]) + '\"'
else:
string_to_write = str(row[0]) + ', ' + str(
start_times_in_ticks[current_start_time]) + ', Text_t, \"' + str(
correct_romanNumerals[current_start_time]) + '\"'
outputFile.write(string_to_write + '\n')
current_start_time += 1
string_to_write = ""
for i in range(len(row)-1):
string_to_write += row[i] + ', '
string_to_write += row[len(row)-1]
outputFile.write(string_to_write + '\n')
else:
endTime = int(row[1])
string_to_write = ""
for i in range(len(row)-1):
string_to_write += row[i] + ', '
string_to_write += row[len(row)-1]
outputFile.write(string_to_write + '\n')
outputFile.close()
# print midifile
new_file_name = midifile.strip('melisma2003/midifiles/kp/')
# print new_file_name
os.system('midicsv-1.1/csvmidi labeled_output.csv > ' + str(new_file_name) + 'mid')
print 'This excerpt is in the key of: ' + correct_key
print correct_romanNumerals
| true |
f845d1ad54cc6c868ed411f0d47b21ea5f6c5ac9 | Python | kunjur-shreesha/HackerEarth | /Basic-Programming/Palindromic String.py | UTF-8 | 119 | 3.640625 | 4 | [] | no_license | def rev(x):
return x[::-1]
str1=input()
str2=rev(str1)
if str1==str2:
print("YES")
else:
print("NO") | true |
2eff5caf1dc664376f158305b42adb1cbe4f2937 | Python | heiye1024/Django_Blog | /blog_run订单系统基本完成,版本6/system/forms.py | UTF-8 | 2,096 | 2.5625 | 3 | [] | no_license | import re
from django import forms
class LinkForm(forms.Form):
txtTitle = forms.CharField(label='网站名称',max_length=24,error_messages={
'required':'请输入网站名称'
})
txtUserName = forms.CharField(label='联系人姓名',max_length=6,error_messages={
'required':'请输入联系人姓名'
})
txtUserTel = forms.CharField(label='联系人电话',max_length=11,required=False,error_messages={
'max_length':'请输入11位数的手机号码'
})
txtEmail = forms.EmailField(label='邮箱地址',required=False,error_messages={
'invalid':'邮箱格式不对'
})
txtSiteUrl = forms.URLField(label='网站网址',error_messages={
'required':'请输入网站网址'
})
txaArticle = forms.CharField(label='网站描述',widget=forms.TextInput,error_messages={
'required':'请输入网站描述'
})
def clean_txtUserTel(self):
txtUserTel = self.cleaned_data.get('txtUserTel')
pattern = r'^1[3-9][0-9]{9}$'
if not re.search(pattern, txtUserTel):
raise forms.ValidationError('请输入正确的手机号码')
return txtUserTel
def clean_txaArticle(self):
txaArticle = self.cleaned_data.get('txaArticle')
if len(txaArticle) > 500:
raise forms.ValidationError('网站描述不得超过500个字')
return txaArticle
# 这个应该在forms.Model这个底下才有效
# def save(self):
# obj = super().save()
# txtTitle = self.cleaned_data.get('txtTitle')
# txtUserName = self.cleaned_data.get('txtUserName')
# txtUserTel = self.cleaned_data.get('txtUserTel')
# txtEmail = self.cleaned_data.get('txtEmail')
# txtSiteUrl = self.cleaned_data.get('txtSiteUrl')
# txaArticle = self.cleaned_data.get('txaArticle')
# obj.web_name = txtTitle
# obj.contact_man = txtUserName
# obj.phone = txtUserTel
# obj.email = txtEmail
# obj.web_link = txtSiteUrl
# obj.web_description = txaArticle
# obj.save()
| true |
b78b46437ef59bf050ddee2df7d37c5c1530ca39 | Python | hjorthjort/advent2020 | /day11/11.py | UTF-8 | 1,961 | 2.96875 | 3 | [] | no_license | from copy import deepcopy
with open('input.txt') as f:
inp = f.read()
arounds1 = {}
def around1(pos_x, pos_y, max_x, max_y):
if (pos_x, pos_y) in arounds1:
return arounds1[(pos_x, pos_y)]
positions = [(x, y) for x in range(pos_x-1, pos_x+2) for y in range(pos_y-1, pos_y+2) if max_x > x >= 0 and max_y > y >= 0 and not (x == pos_x and y == pos_y) ]
arounds1[(pos_x, pos_y)] = positions
return positions
def main(tolerance, around):
grid = inp.splitlines()
grid = [ [c for c in s] for s in grid]
new = deepcopy(grid)
unstable = True
while unstable:
unstable = False
for y in range(len(grid)):
for x in range(len(grid[y])):
a = around(x, y, len(grid[y]), len(grid))
curr = grid[y][x]
status = [grid[y][x] for (x,y) in a]
occ = status.count('#')
free = status.count('L')
if curr == '.':
pass
elif occ == 0 and curr == 'L':
new[y][x] = '#'
unstable = True
elif occ >= tolerance and curr == '#':
new[y][x] = 'L'
unstable = True
grid = new
new = deepcopy(grid)
tot = 0
for l in grid:
tot += l.count('#')
print(tot)
main(4, around1)
arounds2 = {}
ds = [(x, y) for x in range(-1, 2) for y in range(-1, 2) if not (x == 0 and y == 0)]
grid = inp.splitlines()
def around2(x, y, max_x, max_y):
if (x, y) in arounds2:
return arounds2[(x,y)]
positions = []
(cx, cy) = (x, y)
for (dx, dy) in ds:
(cx, cy) = (x + dx, y + dy)
while 0 <= cx < max_x and 0 <= cy < max_y:
if not grid[cy][cx] == '.':
positions.append((cx, cy))
break
cx += dx
cy += dy
arounds2[(x, y)] = positions
return positions
main(5, around2)
| true |
340bf6eaf2ff862f412a10f5e065252b53caca4c | Python | canberkaslan/pythonexamples | /example_sets_002/05_for_loops.py | UTF-8 | 809 | 3.40625 | 3 | [] | no_license | #names = ['ali','veli','murtaza']
#for x in names:
# print(f'my name is {x}')
#
#name = 'Cabbar Can'
#
#for x in name:
# print(x)
#
#tuple = [(1,2),(3,4),(5,6),(7,8)]
#for x in tuple:
# for y in x:
# print(y)
#tuple = [(1,2),(3,4),(5,6),(7,8)]
#for x,y in tuple:
# print(x,y)
#
#x = {'x1':1,'x2':2,'x3':3,'x4':4} # Dictionary
#for key,value in x.items():
# print(key)
# print(value)
urunler = [
{'name':'samsung s6','price':'3000'},
{'name':'samsung s7','price':'4000'},
{'name':'samsung s8','price':'5000'},
{'name':'samsung s9','price':'6000'},
{'name':'samsung s10','price':'7000'}
]
# Ürün fiyatlarının toplamını bulun
toplam = 0
for urun in urunler:
fiyat = int(urun['price'])
toplam += fiyat
print('toplam ürün fiyatı : ', toplam)
| true |
5ec34640c3926a265e028b3253ac8891abf85785 | Python | charan3/NoteBook | /models/BlogModel.py | UTF-8 | 1,581 | 2.59375 | 3 | [] | no_license | from google.appengine.ext import db
from datetime import datetime
from .UserModel import UserModel
import logging
class BlogModel(db.Model):
title = db.StringProperty(required=True) # type: str
content = db.TextProperty(required=True) # type: str
# username of writer
author = db.ReferenceProperty(UserModel, collection_name='posts') # type:UserModel
# to count the likes for the post and store the user_id in the list
likes = db.ListProperty(item_type=long, write_empty_list=True) # type: list
likescount = db.IntegerProperty(default=0)
created_time = db.DateTimeProperty(auto_now_add=True) # type:datetime
modified_time = db.DateTimeProperty(auto_now=True) # type:datetime
def get_content(self, truncate=False):
if truncate:
return ("\n".join(self.content.split('\n')[:10])).replace('\n', "<br/>")
return self.content.replace("\n", "<br/>")
def get_created_time(self):
return self.created_time.strftime("%b %d, %y at %I:%M %p")
def get_perma_link(self):
return "/blog/%s" % (self.key().id())
def get_author_name(self):
return "Anonymous" if not self.author else self.author.name
def get_modified_time(self):
return self.modified_time.strftime("%b %d, %y at %I:%M %p")
def get_comments_ordered(self):
# return comments ordered based on their created time
return self.comments.order('created_time')
def get_likes_count(self):
"""
:return: number of likes for the post
"""
return len(self.likes)
| true |
8cfb02f119fd628fcb16ed4d41dda260c352b0b0 | Python | raymondbutcher/pretf | /pretf/pretf/collections.py | UTF-8 | 3,170 | 2.765625 | 3 | [
"MIT"
] | permissive | from functools import wraps
from typing import Any, Callable, Generator, Iterable, Sequence, Union
from .parser import get_outputs_from_block
from .render import call_pretf_function, unwrap_yielded
from .variables import VariableStore, VariableValue, get_variable_definitions_from_block
class Collection(Iterable):
def __init__(
self, blocks: Sequence[Union[dict, "Collection"]], outputs: dict
) -> None:
self.__blocks = blocks
self.__outputs = outputs
def __getattr__(self, name: str) -> Any:
if name in self.__outputs:
return self.__outputs[name]
raise AttributeError(f"output not defined: {name}")
def __iter__(self) -> Generator[dict, Any, None]:
for block in self.__blocks:
if isinstance(block, Collection):
yield from block
else:
yield block
def collect(func: Callable) -> Callable:
"""
This is a decorator used to create a collection. Collections are similar
to Terraform modules except the resources are included in the root
module rather than under a named module.
Decorated functions should:
* Accept a single argument "var"
* Yield pretf.api.tf blocks
* Optionally including "variable" blocks to define inputs
* Optionally including "output" blocks to define outputs
When using a collection, any required inputs defined by variable blocks
must be passed in as keyword arguments. Any outputs defined by output
blocks can be accessed as attributes of the collection.
"""
@wraps(func)
def wrapped(**kwargs: dict) -> Collection:
# Create a store to track variables.
var_store = VariableStore()
# Load variable values from kwargs passed into the collection function.
for key, value in kwargs.items():
var_value = VariableValue(name=key, value=value, source="kwargs")
var_store.add(var_value)
# Call the collection function, passing in "path", "terraform" and "var" if required.
gen = call_pretf_function(func=func, var=var_store.proxy(func.__name__))
blocks = []
outputs = {}
yielded = None
while True:
try:
yielded = gen.send(yielded)
except StopIteration:
break
for block in unwrap_yielded(yielded):
# Use variable blocks to update the variable store.
var_def = None
for var_def in get_variable_definitions_from_block(
block, func.__name__
):
var_store.add(var_def)
# Use output blocks to update the output values.
output = None
for output in get_outputs_from_block(block):
name = output["name"]
value = output["value"]
outputs[name] = value
# Use any other blocks in the resulting JSON.
if not var_def and not output:
blocks.append(block)
return Collection(blocks, outputs)
return wrapped
| true |
35a9971e9ac361001bdb83255d50bc4977392f2c | Python | MacIver-Lab/Ergodic-Information-Harvesting | /SimulationCode/ErgodicHarvestingLib/ergodic.py | UTF-8 | 14,918 | 2.625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import numpy as np
from scipy.integrate import trapz, quad, solve_ivp
from scipy.interpolate import interp1d
from ErgodicHarvestingLib.utils import matmult
class ProjectionBasedOpt(object):
def __init__(self, nx, nu, R, time, Quinit):
"""
Class to represent an optimization problem for a system with dynamic constraints.
:nx dimension of state
:nu dimension of the control
"""
self.nx = nx # Dimension of State X
self.nu = nu # Dimension of Control U
self.mass = 1.0 # Mass of the Dynamics Model
self.Q = np.eye(self.nx)
self.R = R * np.eye(self.nu)
self.Quinit = Quinit # weight for the initial control
self.P1 = 1.0
self.Qn = 1.0
self.Rn = 1.0
self.Qk = 1.0
self.Rk = 1.0
self.time = time
odeDeltaT = time[1] - time[0]
self.odeParam = {
"method": "RK23",
"t_span": (time[0], time[-1]),
"rtol": 1e-4,
"atol": 1e-7,
"t_eval": time,
"min_step": odeDeltaT,
"first_step": odeDeltaT,
"max_step": odeDeltaT,
}
self.odeIntegrator = lambda fun, y0: solve_ivp(
fun, y0=y0, **self.odeParam
).y.flatten()
def peqns(self, t, pp, Al, Bl, Rn, Qn):
if t > self.time[-1] or t < self.time[0]:
return 0
pp = np.array(pp).flatten()
pp = pp.reshape(self.nx, self.nx)
matdiffeq = (
matmult(pp, Al(t)) + matmult(Al(t), pp) - matmult(pp, Bl(t), Bl(t), pp) + Qn
)
return matdiffeq.flatten()
def reqns(self, t, rr, Al, Bl, a, b, Psol, Rn, Qn):
if t > self.time[-1] or t < self.time[0]:
return np.array([0.0])
t = self.time[-1] - t
matdiffeq = (
matmult((Al(t) - matmult(Bl(t), Bl(t), Psol(t))), rr)
+ a(t)
- matmult(Psol(t), Bl(t), b(t))
)
return matdiffeq.flatten()
def veqns(self, zz, Al, Bl, a, b, Psol, Rsol, Rn, Qn):
vmatdiffeq = matmult(-Bl, Psol, zz) - matmult(Bl, Rsol) - b
return vmatdiffeq
def zeqns(self, t, zz, Al, Bl, a, b, Psol, Rsol, Rn, Qn):
if t > self.time[-1] or t < self.time[0]:
return 0
vmateq = self.veqns(zz, Al(t), Bl(t).T, a(t), b(t), Psol(t), Rsol(t), Rn, Qn)
matdiffeq = matmult(Al(t), zz) + matmult(Bl(t), vmateq)
return matdiffeq.flatten()
def Ksol(self, X, U):
time = self.time
P1 = np.array([1.0])
soln = self.odeIntegrator(
lambda t, y: self.peqns(
t, y, self.A_interp, self.B_interp, self.Rk, self.Qk
),
P1,
)
# Convert the list to a numpy array.
psoln = np.array(soln).reshape(len(soln), 1)
K = np.empty((time.shape[0], self.nx))
for tindex, t in np.ndenumerate(time):
K[tindex, :] = matmult(self.B_current[tindex, 0], psoln[tindex])
self.K = K
return K
def Psol(self, X, U, time):
P1 = np.array([1.0])
soln = self.odeIntegrator(
lambda t, y: self.peqns(
t, y, self.A_interp, self.B_interp, self.Rn, self.Qn
),
P1,
)
soln = np.array(soln).reshape(len(soln), 1)
return soln
def Rsol(self, X, U, P_interp, time):
rinit2 = np.array([0])
Qn = 1.0
Rn = 1.0
soln = self.odeIntegrator(
lambda t, y: self.reqns(
t,
y,
self.A_interp,
self.B_interp,
self.a_interp,
self.b_interp,
P_interp,
Rn,
Qn,
),
rinit2,
)
soln = np.array(soln)
soln = np.flip(soln, 0).reshape(len(soln), 1)
return soln
# pointwise dynamics linearizations
def fofx_pointwise(self, X, U):
return U
def fofx(self, t, X, U):
if t > self.time[-1] or t < self.time[0]:
return 0
return U(t)
def dfdx_pointwise(self, x, u):
return np.array([0])
def dfdx(self):
time = self.time
dfdxl = np.empty([time.shape[0], self.nx])
for tindex, _ in np.ndenumerate(time):
dfdxl[tindex, :] = np.array([0])
self.A_current = dfdxl
return dfdxl
def dfdu_pointwise(self, x, u):
return np.array([1])
def dfdu(self):
time = self.time
dfdul = np.empty([time.shape[0], self.nx])
for tindex, _ in np.ndenumerate(time):
dfdul[tindex, :] = np.array([1])
self.B_current = dfdul
return dfdul
def cost_pointwise(self, x, u):
R = self.R
return 0.5 * matmult(u, R, u)
def cost(self, X, U):
cost = np.empty(self.time.shape[0])
for tindex, _ in np.ndenumerate(self.time):
cost[tindex] = self.cost_pointwise(X[tindex], U[tindex])
return trapz(cost, self.time) # Integrate over time
def eval_cost(self):
# return the evaluated cost function
return self.cost(self.X_current, self.U_current)
def dldu_pointwise(self, x, u):
# return the pointwise linearized cost WRT state
return matmult(self.R, u)
def dldx_pointwise(self, x, u):
# return pointwise linearized cost WRT input
return np.array([0.0])
def dldx(self):
# evaluate linearized cost WRT state
X = self.X_current
U = self.U_current
time = self.time
dldxl = np.empty((time.shape[0], self.nx))
for tindex, _ in np.ndenumerate(time):
dldxl[tindex, :] = self.dldx_pointwise(X[tindex], U[tindex])
self.a_current = dldxl
return self.a_current
def dldu(self):
# evaluate linearized cost WRT input
X = self.X_current
U = self.U_current
time = self.time
dldul = np.empty((time.shape[0], 1))
for tindex, _ in np.ndenumerate(time):
dldul[tindex, :] = self.dldu_pointwise(X[tindex], U[tindex])
dldul[0, :] += self.uinit * self.Quinit # initial control
self.b_current = dldul
return dldul
def dcost(self, descdir):
# evaluate directional derivative
dX = descdir[0]
dU = descdir[1]
time = self.time
dc = np.empty(time.shape[0])
for tindex, _ in np.ndenumerate(time):
dc[tindex] = matmult(self.a_current[tindex], dX[tindex]) + matmult(
self.b_current[tindex], dU[tindex]
)
intdcost = trapz(dc, time)
return intdcost
def descentdirection(self):
# solve for the descent direction by
X = self.X_current
U = self.U_current
time = self.time
Ps = self.Psol(X, U, time)
self.P_current = Ps
P_interp = interp1d(time, Ps.T)
Rs = self.Rsol(X, U, P_interp, time).flatten()
self.R_current = Rs
r_interp = interp1d(time, Rs.T)
zinit = -matmult(P_interp(0) ** -1, r_interp(0))
soln = self.odeIntegrator(
lambda t, y: self.zeqns(
t,
y,
self.A_interp,
self.B_interp,
self.a_interp,
self.b_interp,
P_interp,
r_interp,
self.Rn,
self.Qn,
),
zinit,
)
# Convert the list to a numpy array.
zsoln = np.array(soln)
zsoln = zsoln.reshape(time.shape[0], 1)
vsoln = np.empty(U.shape)
for tindex, t in np.ndenumerate(time):
vsoln[tindex] = self.veqns(
zsoln[tindex],
self.A_current[tindex],
self.B_current[tindex],
self.a_current[tindex],
self.b_current[tindex],
Ps[tindex],
Rs[tindex],
self.Rn,
self.Qn,
)
return [zsoln, vsoln]
def simulate(self, X0, U):
time = self.time
U_interp = interp1d(time, U.T)
# Solve ODE
soln = self.odeIntegrator(lambda t, y: self.fofx(t, y, U_interp), X0)
# Convert the list to a numpy array.
xsoln = np.array(soln).reshape(len(soln), 1)
return xsoln
def proj(self, t, X, K, mu, alpha):
if type(X) is float:
X = np.array(X)
if t > self.time[-1] or t < self.time[0]:
return 0
uloc = mu(t) + matmult(K(t), (alpha(t).T - X.T))
return uloc
def projcontrol(self, X, K, mu, alpha):
uloc = mu + matmult(K, (alpha.T - X.T))
return uloc
    def project(self, X0, traj):
        """Project a (state, input) curve onto the trajectory manifold.

        ``traj = [alpha, mu]`` is the (possibly infeasible) curve; a
        time-varying feedback gain K tracks alpha while the true dynamics
        are integrated from X0.  Returns ``np.array([xsoln, usoln])``.
        """
        time = self.time
        alpha = traj[0]
        mu = traj[1]
        # solve for riccatti gain
        Ks = self.Ksol(alpha, mu)
        K_interp = interp1d(time, Ks.T)
        mu_interp = interp1d(time, mu.T)
        alpha_interp = interp1d(time, alpha.T)
        # Solve ODE
        soln = self.odeIntegrator(
            lambda t, y: self.proj(t, y, K_interp, mu_interp, alpha_interp), X0
        )
        # Convert the list to a numpy array.
        xsoln = np.array(soln).reshape(len(soln), 1)
        usoln = np.empty(mu.shape)
        # Recover the applied control pointwise from the feedback law.
        for tindex, _ in np.ndenumerate(time):
            usoln[tindex, :] = self.projcontrol(
                xsoln[tindex], Ks[tindex], mu[tindex], alpha[tindex]
            )
        return np.array([xsoln, usoln])
def update_traj(self, X, U):
self.X_current = X
self.U_current = U
self.dfdx()
self.dfdu()
self.dldx()
self.dldu()
self.A_interp = interp1d(self.time, self.A_current.T)
self.B_interp = interp1d(self.time, self.B_current.T)
self.a_interp = interp1d(self.time, self.a_current.T)
self.b_interp = interp1d(self.time, self.b_current.T)
class ErgodicOpt(ProjectionBasedOpt):
    """Projection-based optimizer whose objective adds an ergodic-coverage
    metric (trajectory Fourier coefficients vs. a target distribution) and a
    soft barrier keeping the 1-D workspace coordinate inside [0, wlimit].

    Fix over the original: ``np.product`` (removed in NumPy 2.0) is replaced
    by ``np.prod``; a dead ``pass`` in ``set_pdf`` was dropped.
    """

    def __init__(self, nx, nu, ergParam, uinit):
        super().__init__(
            nx, nu, R=ergParam.wControl, time=ergParam.time, Quinit=ergParam.wInitCtrl
        )
        self.barrcost = ergParam.wBarrCost  # weight on the barrier penalty
        self.ergcost = ergParam.wErgCost  # weight on the ergodic metric
        self.Nfourier = ergParam.nFourier  # number of cosine basis functions
        self.uinit = uinit
        self.dimw = 1  # workspace dimension
        self.wlimit = 1.0  # workspace is [0, wlimit]
        self.tRes = ergParam.tRes  # Time Resolution
        self.res = ergParam.res  # EID spatial resolution
        self.eidTime = ergParam.eidTime
        self.xlist = np.linspace(0.0, 1.0, self.tRes)
        # set up a grid over the frequency
        klist = np.arange(self.Nfourier)
        # Sobolev-style weights Lambda_k: low frequencies count more.
        s = (float(self.dimw) + 1.0) / 2.0
        self.Lambdak = 1.0 / (1.0 + klist ** 2) ** s
        self.klist = klist / self.wlimit * np.pi
        # hk[k] normalizes the k-th cosine basis to unit L2 norm on [0, 1].
        self.hk = np.zeros(self.Nfourier).flatten()
        for index in range(self.Nfourier):
            integ = quad(lambda x: (np.cos(x * self.klist[index])) ** 2, 0.0, 1.0)
            self.hk[index] = np.sqrt(integ[0])

    def normalize_pdf(self):
        """Rescale ``self.pdf`` in place so its mean over the grid is 1."""
        # np.prod replaces np.product, an alias removed in NumPy 2.0.
        self.pdf /= np.sum(self.pdf) / np.prod(self.pdf.shape)

    def set_pdf(self, pdf):
        """Resample the target distribution onto the internal grid, normalize
        it, and cache its Fourier coefficients uk."""
        pdfInterp = interp1d(self.eidTime, pdf)
        self.pdf = pdfInterp(self.xlist)
        self.normalize_pdf()
        self.calculate_uk(self.pdf)

    def calculate_ergodicity(self):
        """Ergodic metric sum_k Lambda_k (ck - uk)^2; ck and uk must already
        have been computed (ckeval / calculate_uk)."""
        self.erg = np.sum(self.Lambdak * (self.ck - self.uk) ** 2)
        return self.erg

    def barrier(self, xk):
        """Time-integrated quadratic penalty for leaving [0, wlimit]."""
        barr_cost = np.zeros(xk.shape[0])
        xk = xk.flatten()
        too_big = xk[np.where(xk > self.wlimit)]
        barr_cost[np.where(xk > self.wlimit)] = np.square(too_big - self.wlimit)
        too_small = xk[np.where(xk < 0)]
        barr_cost[np.where(xk < 0)] += np.square(too_small)
        barr_cost = trapz(barr_cost, self.time)
        return barr_cost

    def Dbarrier(self, xk):
        """Pointwise derivative of :meth:`barrier` with respect to the state."""
        xk = xk.flatten()
        dbarr_cost = np.zeros(xk.shape).reshape(xk.size, 1)
        too_big = xk[np.where(xk > self.wlimit)]
        dbarr_cost[np.where(xk > self.wlimit), 0] = 2.0 * (too_big - self.wlimit)
        too_small = xk[np.where(xk < 0)]
        dbarr_cost[np.where(xk < 0), 0] = 2.0 * too_small
        return dbarr_cost

    def calculate_uk(self, pdf):
        """Fourier coefficients of the target distribution on the grid."""
        self.uk = np.zeros(self.Nfourier).flatten()
        for index in range(len(self.uk)):
            uk_interior = pdf / self.hk[index]
            basis_part = np.cos(self.klist[index] * self.xlist)
            # Riemann-sum quadrature with cell width wlimit / res.
            uk_interior *= self.wlimit / self.res * basis_part
            self.uk[index] = np.sum(uk_interior)

    def ckeval(self):
        """Fourier coefficients of the current trajectory's time-averaged
        spatial distribution (cached in ``self.ck``)."""
        X = self.X_current
        time = self.time
        T = time[-1]
        # change coordinates from configuration to ergodic workspace
        W = X.flatten()
        self.ck = np.zeros(self.Nfourier).flatten()
        for index in range(len(self.ck)):
            ck_interior = 1.0 / (self.hk[index] * T)
            basis_part = np.cos(self.klist[index] * W)
            ck_interior = ck_interior * basis_part
            self.ck[index] = trapz(ck_interior, time)

    def akeval(self):
        """Derivative of the ergodic metric WRT the trajectory (chain rule);
        cached in ``self.ak`` as a column vector."""
        X = self.X_current
        time = self.time
        T = time[-1]
        xlist = X.flatten()
        outerchain = 2.0 * self.Lambdak * (self.ck - self.uk) / (self.hk * T)
        ak = []
        for index in range(self.Nfourier):
            # these are chain rule terms, get added
            term = outerchain[index]
            basis_part = -self.klist[index] * np.sin(self.klist[index] * xlist)
            term *= basis_part
            ak.append(term)
        summed_ak = np.sum(np.array(ak), axis=0)
        self.ak = np.array(summed_ak).reshape(summed_ak.size, 1)
        return self.ak

    def evalcost(self):
        """Total objective: control cost + weighted barrier + weighted ergodicity."""
        cost = self.cost(self.X_current, self.U_current)
        barr_cost = self.barrcost * self.barrier(self.X_current)
        erg_cost = self.ergcost * self.calculate_ergodicity()
        J = barr_cost + erg_cost + cost
        return J

    def dldx(self):
        """Linearized cost WRT state: ergodic gradient plus barrier gradient."""
        X = self.X_current
        self.a_current = self.ergcost * self.ak + self.barrcost * self.Dbarrier(X)
        return self.a_current

    def update_traj(self, X, U):
        """Adopt ``(X, U)``, refresh ergodic coefficients, linearizations and
        their time-interpolants."""
        self.X_current = X
        self.U_current = U
        self.ckeval()
        self.akeval()
        self.dfdx()
        self.dfdu()
        self.dldx()
        self.dldu()
        self.A_interp = interp1d(self.time, self.A_current.T)
        self.B_interp = interp1d(self.time, self.B_current.T)
        self.a_interp = interp1d(self.time, self.a_current.T)
        self.b_interp = interp1d(self.time, self.b_current.T)
| true |
243ba7f7644d3f65c9644a2077f42b3321e07fa3 | Python | LucasSQPardo/coursera-Python_Data_Structure | /python_data_structure/open_read_write_file/processing_files.py | UTF-8 | 1,051 | 3.8125 | 4 | [] | no_license | def lengthOfFile(fileVectorVar):
    # Number of characters in the file's full contents string.
    length = len(fileVectorVar)
    return length
def foundSomething(fileVariable):
    """Ask the user for a keyword and print every line of the open file
    that contains it, stripped of surrounding whitespace."""
    keyword = input("Keyword: ")
    for line in fileVariable:
        if keyword not in line:
            continue
        print(line.strip())
def readIt(fileVariable):
    """Print the file line by line (each print adds its own newline, so
    lines that already end in a newline come out double-spaced)."""
    for currentLine in fileVariable:
        print(currentLine)
def main():
    """Prompt for a file name, open the file and let the user search it
    for a keyword (see foundSomething)."""
    fileName = input('Put the name of the file you want to open: ')
    # Default to a .txt extension when the user gives none.
    if '.' not in fileName:
        print('since you did not specified wich extension you want to open, the .txt will be added for default')
        fileName = fileName+'.txt'
    try:
        fHand = open(fileName)
    except OSError:
        # Catch only file-system errors: the original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit and hid real bugs.
        print(f'there was no file: {fileName} ')
        # SystemExit(1) replaces quit(), which depends on the `site` module
        # and exited with status 0 even on failure.
        raise SystemExit(1)
    try:
        foundSomething(fHand)
    finally:
        # The original never closed the handle.
        fHand.close()
if __name__ == "__main__":
    main()
| true |
4e34cdcd7e5670b6d28090f6f6a0b30367ed7004 | Python | mukutkhandelwal/Face-Detection-Using-Deep-learning | /video_detection.py | UTF-8 | 2,276 | 2.734375 | 3 | [] | no_license | # required libraries
import numpy as np
import argparse
import imutils
import time
import cv2
# creating the command line argument for passing the image,model,weights of model and optional Confidence
ap = argparse.ArgumentParser()
# The -i/--image flag belonged to the single-image variant of this script.
# ap.add_argument('-i','--image',required=True,help = 'path of img')
ap.add_argument('-p','--prototxt',required=True,help = 'path to prototxt file ')
ap.add_argument('-m','--model',required=True,help='path to model')
ap.add_argument('-c','--confidence',type = float,default= 0.5,help='min filter')
args = vars(ap.parse_args())
# Load the Caffe face-detection model (architecture prototxt + weights).
print("[info] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"],args["model"])
# Open the default webcam and give it a moment to warm up.
print("[info] starting video stream")
vs= cv2.VideoCapture(0)
time.sleep(2)
# Main capture loop: one iteration per video frame.
while True:
    # Grab a frame and resize it to a width of 400 px.
    _,frame = vs.read()
    frame = imutils.resize(frame,400)
    (h,w) = frame.shape[:2]
    # Convert the frame to a 300x300 blob; the tuple is the model's
    # per-channel mean subtraction.
    blob = cv2.dnn.blobFromImage(cv2.resize(frame,(300,300)),1.0,(300,300),(104.0,177.0,123.0))
    # Forward pass: one row per candidate detection.
    net.setInput(blob)
    detections = net.forward()
    # Draw a box and confidence label for each detection above the threshold.
    for i in range(0,detections.shape[2]):
        confidence = detections[0,0,i,2]
        if confidence< args['confidence']:
            continue
        # Box coordinates are normalized [0, 1]; scale back to frame size.
        box = detections[0,0,i,3:7] * np.array([w,h,w,h])
        (startX,startY,endX,endY) = box.astype("int")
        text = "{:.2f}%".format(confidence*100)
        # Put the label above the box unless that would leave the frame.
        y = startY-10 if startY-10>10 else startY +10
        cv2.rectangle(frame,(startX,startY),(endX,endY),(0,0,255),2)
        cv2.putText(frame,text,(startX,y),cv2.FONT_HERSHEY_SIMPLEX,0.45,(0,0,255),2)
    cv2.imshow("Frame",frame)
    key = cv2.waitKey(1) & 0xFF
    # Press "q" to quit the video window.
    if key ==ord("q"):
        break
# Release the camera and close all OpenCV windows.
vs.release()
cv2.destroyAllWindows()
# vs.()
| true |
d9c743fa60434ce1b11e7488da5c26364866cd18 | Python | Irou1/Bridge-Application | /Logic/New_Game.py | UTF-8 | 9,482 | 2.546875 | 3 | [
"MIT"
] | permissive | from tkinter import *
import tkinter as tk
import random

root = tk.Tk()
root.geometry("800x800")
canvas = tk.Canvas(root, width=800, height=800)
canvas.pack()

# Card face images: one PhotoImage per (suit, rank), bound to the module
# names h1..h13, d1..d13, c1..c13, s1..s13.  These exact names are required
# because getImage() builds them dynamically (e.g. "h12") and the card
# display code eval()s them.  Replaces 52 copy-pasted assignments.
_DECK_DIR = 'C:\\Users\\JORGEALEJANDRO\\OneDrive\\Python_Tkinter\\deck'
for _letter, _suit_name in (('h', 'Heart'), ('d', 'Diamond'),
                            ('c', 'Club'), ('s', 'Spades')):
    for _rank in range(1, 14):
        globals()['%s%d' % (_letter, _rank)] = tk.PhotoImage(
            file='%s\\%s%d.gif' % (_DECK_DIR, _suit_name, _rank))

# Suit / no-trump symbols used by the bidding table buttons.
heart = tk.PhotoImage(file=_DECK_DIR + '\\heart.gif')
spades = tk.PhotoImage(file=_DECK_DIR + '\\spades.gif')
club = tk.PhotoImage(file=_DECK_DIR + '\\club.gif')
diamond = tk.PhotoImage(file=_DECK_DIR + '\\diamond.gif')
nt = tk.PhotoImage(file=_DECK_DIR + '\\nt.gif')

# Build and shuffle a 52-card deck; each card is [rank, suit-letter].
ranks = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
suits = ['c', 'd', 'h', 's']
cards = [[rank, suit] for rank in ranks for suit in suits]
random.shuffle(cards)

# Deal round-robin into four hands (replaces the copy-pasted if-chain).
player1 = []
player2 = []
player3 = []
player4 = []
_hands = (player1, player2, player3, player4)
for bycard in range(52):
    _hands[bycard % 4].append(cards[bycard])

# Sort each hand by suit letter, then rank.
for _hand in _hands:
    _hand.sort(key=lambda row: (row[1], row[0]))
def getImage(ls):
    """Map a [rank, suit] card (e.g. [12, 'h']) to its image variable
    name (e.g. 'h12')."""
    suit = ls[1]
    rank = ls[0]
    return suit + str(rank)
# Display player 1's hand along the bottom of the canvas, overlapping cards
# spaced 20 px apart; eval() resolves the image variable name from getImage.
card1 = canvas.create_image(280, 700, image=eval(getImage(player1[0])))
card2 = canvas.create_image(300, 700, image=eval(getImage(player1[1])))
card3 = canvas.create_image(320, 700, image=eval(getImage(player1[2])))
card4 = canvas.create_image(340, 700, image=eval(getImage(player1[3])))
card5 = canvas.create_image(360, 700, image=eval(getImage(player1[4])))
card6 = canvas.create_image(380, 700, image=eval(getImage(player1[5])))
card7 = canvas.create_image(400, 700, image=eval(getImage(player1[6])))
card8 = canvas.create_image(420, 700, image=eval(getImage(player1[7])))
card9 = canvas.create_image(440, 700, image=eval(getImage(player1[8])))
card10 = canvas.create_image(460, 700, image=eval(getImage(player1[9])))
card11 = canvas.create_image(480, 700, image=eval(getImage(player1[10])))
card12 = canvas.create_image(500, 700, image=eval(getImage(player1[11])))
card13 = canvas.create_image(520, 700, image=eval(getImage(player1[12])))
#class for bidding table
class Popout(tk.Frame):
    """Bidding-table overlay: a 7x5 grid of bid buttons (levels 1-7 by
    strain) plus Double / Next / Close controls."""

    def __init__(self, parent):
        tk.Frame.__init__(self, parent, background="black", padx=10, pady=10)
        title = tk.Label(self, text="Bidding Table", font=("Helvetica", 16),
                         background="black", foreground="white")
        double_btn = tk.Button(self, text="Double", background="black", foreground="white")
        pass_btn = tk.Button(self, text="Next", background="black", foreground="white")
        close_btn = tk.Button(self, text="Close", background="black", foreground="white")
        self.grid_columnconfigure(0, weight=1)
        self.grid_rowconfigure(1, weight=1)
        title.grid(row=0, column=0, columnspan=6, sticky="nsew")
        for a in range(7):
            # Bid-level label (1..7) in column 0.
            tk.Label(self, text=str(a+1), font=("Helvetica", 12),
                     background="black", foreground="white").grid(row=(a+1), column=0)
            # One button per strain in ascending bridge order:
            # clubs, diamonds, hearts, spades, no-trump.
            # BUG FIX: column 4 previously reused the club image; it now
            # shows spades (that image was loaded but never used).
            for col, img in enumerate((club, diamond, heart, spades, nt), start=1):
                tk.Button(self, image=img).grid(row=(a+1), column=col)
        double_btn.grid(row=8, column=0, columnspan=2)
        pass_btn.grid(row=8, column=2, columnspan=2)
        close_btn.grid(row=8, column=4, columnspan=2)
# Show the bidding table centered over the canvas.
p = Popout(root)
p.place(relx=.5, rely=.5, anchor="center")
def click(event):
    """Move the canvas item under the mouse pointer to the table centre."""
    # CURRENT is the tkinter tag naming the item under the pointer; the
    # find call returns an empty tuple when nothing is there.
    if not canvas.find_withtag(CURRENT):
        return
    canvas.coords(CURRENT, 400, 400)
# Left-click moves the clicked card to the middle of the table.
canvas.bind('<Button-1>', click)
root.mainloop()
| true |
8b11962ac004d8a745989786a8297508660a6d1e | Python | SrtaCamelo/TextMining2018.2 | /Mineracao_L02/1qst_l02.py | UTF-8 | 3,413 | 3.1875 | 3 | [] | no_license | #Mineração de Texto 2018.2
#Raissa Camelo Salhab
#Lista 02, Questão 01
#--------------------------------------Word Clouds with NLTK-----------------------------------------------------
#-------------Imports-----------
import nltk
from nltk.stem import WordNetLemmatizer
#from nltk.corpus import stopwords
from nltk.tree import Tree
import nltk.parse.api
from pycorenlp import StanfordCoreNLP
#---------------------------------------Itens 1 & 2 & 3--------------------------------------------------------------
#------------Function Definitions---------
#----------File Opener----------------
def openFile(path):
    """Return the entire contents of the text file at *path* as one string."""
    # 'with' closes the handle even if read() raises; plain 'r' replaces the
    # original 'r+' because the file is only ever read (and 'r+' fails on
    # read-only files).
    with open(path, 'r') as fileHandle:
        return fileHandle.read()
#---------StopWords Definition--------
def extractStopWords(data, stopWords):
    """Return the tokens of *data* that are not stop words.

    *stopWords* is the raw text of the stop-word file; it is tokenized with
    nltk exactly as before.  The tokens go into a set so each membership
    test is O(1) instead of scanning the whole token list per word.
    """
    stopSet = set(nltk.word_tokenize(stopWords))
    return [word for word in data if word not in stopSet]
#-------Open / Read File-------- #Switch file names for different quest itens
path = 'C:/Users/Familia Camelo/Documents/Raissa Camelo/Mineracao_rinaldo/AULA_03/LE_02/NLP.txt'
path2 = 'C:/Users/Familia Camelo/Documents/Raissa Camelo/Mineracao_rinaldo/AULA_03/LE_02/Corpus_en_NER.txt'
pathStopWords = 'C:/Users/Familia Camelo/Documents/Raissa Camelo/Mineracao_rinaldo/AULA_03/LE_02/stopwords.txt'
#--------Call the file----------- #Choose the item path and comment others
my_file_data = openFile(path2) #Switch between path and path2 for different itens
stopWords = openFile(pathStopWords)#Only use for StopWords in item 3
#----Tokenize to fetch words--------------
tokens = nltk.word_tokenize(my_file_data)
#----------------------------------------
#------Filter StopWords----------- #Only use this stage for item 3, comment otherwise
tokens = extractStopWords(tokens,stopWords)
#--------------Data----------------------
# Penn Treebank POS tags for nouns and verbs.
noun_tags = ['NN','NNP','NNS','NNPS']
verb_tags = ["VB","VBD","VBG","VBN","VBP","VBZ"]
#----------------------------------------
#----POS TAG words to get noums--------------
nouns = []
unic_nouns = []
nouns_frequency = []
pos_tagged = nltk.pos_tag(tokens)
#-----------Isolate Nouns or Verbs------------
for word,tag in pos_tagged:
    if tag in verb_tags: #Switch between noun_tags and verb_tags for both questions
        nouns.append(word)
#------------------------------------
#----Fetch 20 most frquent noums-----
# most_common(20) yields (word, count) pairs in descending frequency.
freq = nltk.FreqDist(w.lower() for w in nouns)
for i in freq.most_common(20):
    unic_nouns.append(i[0])
    nouns_frequency.append(i[1])
#-----Lemmanizing Words------------
lemmas = []
wordnet_lemmatizer = WordNetLemmatizer()
for word in unic_nouns:
    lemma = wordnet_lemmatizer.lemmatize(word,)
    lemmas.append(lemma)
#-----------------------------------
#--------Word Cloud----------------- #Print to generate Word Clouds
""""
for i in range(len(lemmas)):
    print(str(nouns_frequency[i])+ " " + lemmas[i])
"""
#-------------------------------------Item 4-----------------------------------------------------------
# Constituency-parse one sentence with a local Stanford CoreNLP server
# (must already be running on port 9000) and draw the parse tree.
sentence = "The last love letter I wrote was probably about 10 years ago."
#tokenized = nltk.word_tokenize(sentence)
parse = StanfordCoreNLP('http://localhost:9000')
output = parse.annotate(sentence, properties={
    'annotators': 'parse',
    'outputFormat': 'json'
})
tree1 = output['sentences'][0]['parse'] + ""
treeFinal = Tree.fromstring(tree1)
treeFinal.draw()
#t = Tree.
#t.draw()
| true |
680b19634072384d70df43227e0a7585193c2ce0 | Python | Leozoka/ProjetosGH | /002.py | UTF-8 | 48 | 2.78125 | 3 | [] | no_license | msg = ('Python ')
# rstrip() removes the trailing whitespace from 'Python '.
msg = msg.rstrip()
print(msg)
| true |
968548f55263fe86b872dd7f7636e2de06d22ae1 | Python | benjiaming/leetcode | /test_group_anagrams.py | UTF-8 | 550 | 3.203125 | 3 | [] | no_license | import unittest
from group_anagrams import Solution
class TestSolution(unittest.TestCase):
    """Unit test for Solution.groupAnagrams (imported from group_anagrams)."""
    def test_group_anagrams(self):
        """The anagram groups must match regardless of ordering."""
        solution = Solution()
        anagrams = [
            ["ate","eat","tea"],
            ["nat","tan"],
            ["bat"]
        ]
        result = solution.groupAnagrams(
            ["eat", "tea", "tan", "ate", "nat", "bat"]
        )
        # Sort each inner group and the outer list so the comparison is
        # independent of the order groupAnagrams returns them in.
        self.assertEqual(
            sorted([sorted(r) for r in result]),
            sorted(anagrams)
        )
if __name__ == '__main__':
    unittest.main()
c776d7c4104055f1b39004529e27f7c40173781a | Python | arpitsomani8/Python-Programming-Projects | /Image Processing-Enhance Your Image/Flipping the image/Flipping_the_image.py | UTF-8 | 294 | 3.21875 | 3 | [] | no_license | """
@author: Arpit Somani
"""
#flipping the image
from PIL import Image
# Open the source image.
img=Image.open("obtained.png")
# Mirror it horizontally (left-right flip).
transposed_img=img.transpose(Image.FLIP_LEFT_RIGHT)
# Save the flipped copy to a new file.
transposed_img.save("corrected.png")
print("Done Flipping")
| true |
7de41f23fc40801f58c6a8b0921521ad9c630a4e | Python | KKP127/pythonexample | /ex.py | UTF-8 | 373 | 4.59375 | 5 | [] | no_license | # WARNING! We put a end=' ' at the end of each print line. This tells print to not end
# the line with a newline character and go to the next line
print("How old are you?:",end=' ')
age=input()
print("How tall are you?:",end=' ')
tall=input()
print("How much do you weight?:",end=' ')
weight=input()
# All three answers are strings; the f-string below just interpolates them.
print(f"So you are {age} year old,{tall} tall and {weight} weight")
| true |
78c48930f2523275956a8172ddfe29c3975c9b2d | Python | emmanuelgonzalezcota/PythonCourse | /3.1.2.8 Loops LAB continue Ugly Vowel Eater.py | UTF-8 | 142 | 3.28125 | 3 | [] | no_license | # Prompt the user to enter a word
# and assign it to the userWord variable.
for letter in userWord:
    # Complete the body of the for loop.
753fc7c3490e097832ca657549d9c080ef47608c | Python | violasignorile01/sqlalchemy-challenge | /app.py | UTF-8 | 4,452 | 2.59375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Reflect the SQLite database and map its two tables to ORM classes.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
# create weather app
app = Flask(__name__)
# Find the most recent measurement date, then the date one year earlier;
# year_before ends up as a "YYYY-MM-DD" string used by the route filters.
latest_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
latest_date = list(np.ravel(latest_date))[0]
latest_date = dt.datetime.strptime(latest_date, "%Y-%m-%d")
latest_year = int(dt.datetime.strftime(latest_date, "%Y"))
latest_month = int(dt.datetime.strftime(latest_date, "%m"))
latest_day = int(dt.datetime.strftime(latest_date, "%d"))
year_before = dt.date(latest_year, latest_month, latest_day) - dt.timedelta(days=365)
year_before = dt.datetime.strftime(year_before, "%Y-%m-%d")
@app.route("/")
def home():
return (
f"Welcome to Surf's Up API - Hawaii<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/stations ~~~~~ a list of all weather observation stations<br/>"
f"/api/v1.0/precipitaton ~~ the latest year of precipitation data<br/>"
f"/api/v1.0/tobs ~~ the latest year of temperature data<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
f"/api/v1.0/2013-06-08 ~~ low, high, and average temp for dates given and each date after<br/>"
f"/api/v1.0/2013-06-08/2014-06-08 ~~ low, high, and average temp for dates given and each date up to and including end date<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
f"~ data available from 2010-01-01 to 2017-08-23 ~<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
)
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.name).all()
all_stations = list(np.ravel(results))
return jsonify(all_stations)
@app.route("/api/v1.0/precipitaton")
def precipitation():
results = (
session.query(Measurement.date, Measurement.prcp, Measurement.station)
.filter(Measurement.date > year_before)
.order_by(Measurement.date)
.all()
)
precip_data = []
for result in results:
precip_dict = {result.date: result.prcp, "Station": result.station}
precip_data.append(precip_dict)
return jsonify(precip_data)
@app.route("/api/v1.0/tobs")
def tobs():
results = (
session.query(Measurement.date, Measurement.tobs, Measurement.station)
.filter(Measurement.date > year_before)
.order_by(Measurement.date)
.all()
)
temp_data = []
for result in results:
temp_dict = {result.date: result.tobs, "Station": result.station}
temp_data.append(temp_dict)
return jsonify(temp_data)
@app.route("/api/v1.0/<start>")
def start(start):
sel = [
Measurement.date,
func.min(Measurement.tobs),
func.avg(Measurement.tobs),
func.max(Measurement.tobs),
]
results = (
session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date) >= start)
.group_by(Measurement.date)
.all()
)
dates = []
for result in results:
date_dict = {}
date_dict["Date"] = result[0]
date_dict["Low Temp"] = result[1]
date_dict["Avg Temp"] = result[2]
date_dict["High Temp"] = result[3]
dates.append(date_dict)
return jsonify(dates)
@app.route("/api/v1.0/<start>/<end>")
def startEnd(start, end):
sel = [
Measurement.date,
func.min(Measurement.tobs),
func.avg(Measurement.tobs),
func.max(Measurement.tobs),
]
results = (
session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date) >= start)
.filter(func.strftime("%Y-%m-%d", Measurement.date) <= end)
.group_by(Measurement.date)
.all()
)
dates = []
for result in results:
date_dict = {}
date_dict["Date"] = result[0]
date_dict["Low Temp"] = result[1]
date_dict["Avg Temp"] = result[2]
date_dict["High Temp"] = result[3]
dates.append(date_dict)
return jsonify(dates)
if __name__ == "__main__":
app.run(debug=True)
| true |
57679ec31088d9e30a9381d3e51d9d4aebf9384e | Python | PHI-base/phi-nets | /Python-code/resolving_overlapping_domains.py | UTF-8 | 7,885 | 2.75 | 3 | [
"MIT"
] | permissive | #************ THE PROGRAM TO RESOLVE OVERLAPPING BETWEEN DOMAINS IN PROTEINS *************
# Author: Elzbieta Janowska-Sedja, 16/06/2019
# As input file hmmer file with domain signatures is used.
# The format of the file is described below
#*******************************************************************************************
# The general rules for solving overlapping problem were adopted from previous study (Seidl et al., 2011)
#*******************************************************************************************
#!/usr/bin/python
import sys
#*********WORKING DIRECTORY*******************************************************************
workDir1 = "your_working_directory"
#************** INPUT FILE *******************************************************************
file1 = "%s/your_input_file_with_hmmer_output.txt" % (workDir1)
#
# hmmer output is a tab-delimited file wih a following format including 9 columns:
# protein_id protein_description pfam_domain_id pfam_domain_description start_domain_coordinates end_domain_coordinates score e-value -log10(e-value)
#**********************************************************************************************
#************** OUTPUT FILES ******************************************************************
file2 = "%s/output_file_with_overlapping_domains_to_be_resolved_manualy_based_on_the_sore_in_matrix_included.txt" % (workDir1)
#file3 includes all proteins even the one where manual solving of overlapping is required. Thus, once resolve the manual overlapping, the domains for protein not resolved automaticaly (proteins from output file2) need to be updated in file3
file3 = "%s/output_for_all_domains_per_protein.txt" % (workDir1)
#**********************************************************************************************
# Accumulators and I/O handles shared by the functions and main loop below.
# repDom maps each protein id to the list of its domainInstance hits.
repDom = dict()
fh1 = open (file1, "r")
fh2 = open (file2, "a")
fh3 = open (file3, "w")
t = tuple()
set1=set()
set2= set()
set3 = set()
#********************************************************************************************
class domainInstance:
    """One Pfam domain hit on a protein.

    Attributes: domainId / domainDescr (Pfam accession and description),
    start / end (alignment coordinates, stored as floats), length
    (end - start), evalue (the -log10 e-value column) and score (the
    HMMER score column).
    """
    def __init__(self, did, descr, start, end, evalue, score=None):
        self.domainId = did
        self.domainDescr = descr
        self.start = float(start)
        self.end = float(end)
        self.length = float(end) - float(start)
        self.evalue = float(evalue)
        # BUG in the original: `score` was silently read from the
        # module-global variable set by the parsing loop below, not passed
        # in.  It is now an explicit parameter; when omitted we fall back to
        # the global so existing 5-argument callers keep working.
        if score is None:
            score = globals().get('score')
        self.score = float(score)
    def __str__(self):
        return self.domainId
#********************************************************************************************
def findOverlappingGroups(group, value):
    """Collect the connected groups of mutually overlapping domains.

    *group* maps each domainInstance to its list of pairwise rule codes
    against every domain in *value* (codes come from applyRules: -1 appears
    to mean no conflict, 0/1 which side wins -- confirm against applyRules).
    Any 0 entry marks the domain as part of an unresolved overlap.
    Writes a trace of every domain's rule vector to fh2.
    """
    ovearlappingGroups = list()
    for dom, rules in group.items():
        print dom.domainId, rules
        fh2.write("\n%s\t%s\n" %(dom.domainId, rules))
        if 0 in rules:
            # Skip domains already collected into an earlier group.
            processed = False
            for ovearlappingGroup in ovearlappingGroups:
                if ovearlappingGroup.__contains__(dom):
                    processed = True
                    break
            if processed == True:
                continue
            # Depth-first search gathers everything reachable via overlaps.
            ovearlappingGroup = depthFirstSeardch(dom, group, value, list())
            ovearlappingGroups.append(ovearlappingGroup)
    return ovearlappingGroups
#*********************************************************************************************
def depthFirstSeardch(dom, group, value, result):
    """Depth-first collection of every domain reachable from *dom*.

    An entry of -1 in a domain's rule vector means "no edge" to the domain
    at that index of *value*; anything else is followed.  *result*
    accumulates the visited domains (in visit order) and is returned.
    (Name typo kept for compatibility with existing callers.)
    """
    if dom in result:
        return result
    result.append(dom)
    for idx, rule in enumerate(group[dom]):
        if rule != -1:
            depthFirstSeardch(value[idx], group, value, result)
    return result
#*********************************************************************************************
def start_compare(a, b):
    """cmp-style comparison of two domain hits by start coordinate:
    -1 if a starts first, 1 if b starts first, 0 on a tie."""
    if a.start < b.start:
        return -1
    if a.start > b.start:
        return 1
    return 0
#**********************************************************************************************
def resolveOverlap(group, value):
    """Iteratively discard losing domains until no unresolved overlap remains.

    Domains whose rule vector contains no 0 have their "1" opponents removed
    from *group* and *value*; the pass repeats recursively while some vector
    still contains a 0.  If a pass removes nothing but conflicts remain, the
    overlap graph has a cycle: it is logged to fh2 for manual resolution and
    only the first member of each cyclic group is kept.
    Returns the retained domains.  NOTE(review): relies on the module
    globals `key`, `fh2` set by the main loop -- confirm before reuse.
    """
    remove = []
    removeZ = []
    for dom, rules in group.items():
        if 0 not in rules:
            for z in range(0, len(rules)):
                if rules[z] == 1:
                    remove.append(value[z])
                    removeZ.append(z)
    # Deduplicate and delete indices from the back so they stay valid.
    removeZ = list(set(removeZ))
    removeZ.sort()
    removeZ.reverse()
    for r in remove:
        try:
            del group[r]
        except KeyError:
            pass
    nextPass = False
    for z in removeZ:
        value.pop(z)
    # Shrink every remaining rule vector; any surviving 0 forces another pass.
    for dom, rules in group.items():
        for z in removeZ:
            rules.pop(z)
        if 0 in rules:
            nextPass = True
    retain = [x for x in value if x not in remove]
    if nextPass == True:
        if len(remove) == 0:
            # Nothing removable but conflicts remain: a cycle in the rules.
            print " found a loop",key
            fh2.write("\n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
            fh2.write("\nfound a loop %s\n" %key)
            fh2.write("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n")
            for d in retain:
                print d.domainId,
                fh2.write("%s\t" %d.domainId)
            fh2.write(" \n")
            print " "
            # Keep only the first member of each cyclic group.
            ovearlappingGroups = findOverlappingGroups(group, value)
            for ovearlappingGroup in ovearlappingGroups:
                print key, " ",
                #fh2.write("\n*****\n")
                #fh2.write("%s\t\n" %key)
                #for d in ovearlappingGroup:
                #print d.domainId,
                print ""
                for i in range (1, len(ovearlappingGroup)):
                    retain.remove(ovearlappingGroup[i])
            return retain
        else:
            retain = resolveOverlap(group, retain)
            #fh3.write("\n+++++%s\n" %group)
            return retain
def applyRules(l, k):
    """Pairwise verdict between two domain hits l and k.

    Returns -1 when the two hits do not meaningfully overlap (disjoint, or
    the shared span is under 10% of both lengths), 1 when l dominates k,
    and 0 when k dominates l.  Tie-breaking cascades: e-value gap > 5,
    then longer hit, then higher score (l wins exact ties).
    """
    # --- overlap screening: keep both if the conflict is negligible ---
    if l.end <= k.end:
        if l.end < k.start:
            return -1
        shared = l.end - k.start
        if shared / l.length < 0.1 and shared / k.length < 0.1:
            return -1
    if k.end <= l.end:
        if k.end < l.start:
            return -1
        shared = k.end - l.start
        if shared / l.length < 0.1 and shared / k.length < 0.1:
            return -1
    # --- e-value rule: a gap above 5 (in -log10 units) decides outright ---
    delta = l.evalue - k.evalue
    if abs(delta) > 5:
        return 1 if delta > 0 else 0
    # --- length rule ---
    if l.length != k.length:
        return 1 if l.length > k.length else 0
    # --- score rule (lengths equal at this point) ---
    return 1 if l.score > k.score else 0
#************************************************************************************************
#******************************MAIN PROGRAM******************************************************
#************************************************************************************************
# Pass 1: parse every domain hit from fh1 into repDom, keyed by gene (FG) id.
# NOTE(review): fh1/fh2/fh3, repDom and the domainInstance class are defined
# earlier in the file (above this excerpt); indentation below is reconstructed
# from a whitespace-mangled source.
for line in fh1.readlines():
    line= line.split('\t')
    fgId = line[0]#first column in the input file is FG id
    pfamAcc = line[2]#third column in the input file is pfam accession number
    pfamDescr = line[3]
    start = int(line[4])#fith column in the input file is a start coordinate of the domain
    end = int(line[5])#sixth column in the input file is a end coordinate of the domain
    if start > end:
        # Normalise reversed coordinates so start <= end.
        temp = start
        start = end
        end = temp
    eValue= float(line[8].replace('\n',''))#column 9 in the input file is -log10(e-value)
    score = float(line[6])#column 7 in the input file is a score
    # NOTE(review): score is parsed but not passed to domainInstance, although
    # applyRules later reads .score and .length attributes — presumably the
    # class derives them; confirm against the class definition above.
    t = domainInstance(pfamAcc,pfamDescr, start, end, eValue) # t is an instance of domainInstance class
    repDom.setdefault(fgId, list())#assigning key and value to dictionary repDom where value is a list
    repDom[fgId].append(t)#filling the value(list)with list of domains
c = 0
# Pass 2: per gene, compute the pairwise verdict matrix, resolve overlaps,
# and write the retained domains (sorted by start coordinate) to fh3.
for key, value in repDom.items():
    if len(value) >= 1:
        group = dict()
        for i in range (0, len(value)):
            rules = list()
            group.setdefault(value[i], rules)
            for j in range (0, len(value)):
                if value[i] == value [j]:
                    rules.append(-1)
                else:
                    rules.append(applyRules(value[i], value[j]))
        retain = resolveOverlap(group, value)
        result = list()
        retain = sorted(retain, cmp=start_compare)   # Python 2 cmp= sort
        for d in retain:
            result.append(d.domainId)
            #c = c+1
            #print c, key, result
            #fh3.write("%s\t%s\t%s\n" %(c, key, result))
            fh3.write("%s\t%s\t%s\t%s\t%s\n" %(key, d.domainId, d.domainDescr, d.start, d.end))
fh1.close()
fh2.close()
fh3.close()
#*****************************************************************************************
| true |
a4281863b19454d9a1d138063350f1e4c131b32f | Python | Keramas/RPG_Battle | /main.py | UTF-8 | 9,630 | 2.84375 | 3 | [] | no_license | from classes.game import Person, bcolors
from classes.magic import Spell
from classes.inventory import Item
import random
# Spells usable by the player
# Offensive spells:
staticBurst = Spell("Static Burst", 25, 600, "black")
gravitonCannon = Spell("Graviton Cannon", 250, 600, "black")
bash = Spell("Bash", 25, 600, "black")
electromagneticWave = Spell("Electromagnetic Wave", 40, 1200, "black")
systemOverload = Spell("System Overload", 14, 140, "black")
# Healing spells:
refresh = Spell("Refresh", 25, 620, "white")
refresh2 = Spell("Refresh II", 32, 1500, "white")
refresh3 = Spell("Refresh III", 50, 6000, "white")
# Spells usable by enemies
# Offensive spells:
sixShot = Spell("Static Burst", 25, 600, "black")
quickDraw = Spell("Graviton Cannon", 250, 600, "black")
chargedShot = Spell("Bash", 25, 600, "black")
dustEye = Spell("Electromagnetic Wave", 40, 1200, "black")
hackTech = Spell("System Overload", 14, 140, "black")
# Create items
potion = Item("Tonic", "potion", "Heals 50 HP", 50)
hipotion = Item("Hi-tonic", "potion", "Heals 100 HP", 100)
superpotion = Item("Super Tonic", "potion", "Heals 1000 HP", 1000)
elixir = Item("Nano Lubricant", "elixir", "Fully restores HP/MP of one party member", 9999)
megaelixir = Item("Super Nano Lubricant", "elixer", "Fully restores party's HP/MP", 9999)
grenade = Item("Thermobomb", "attack", "Deals 500 damage", 500)
player_spells = [staticBurst, gravitonCannon, bash, electromagneticWave, systemOverload, refresh, refresh2, refresh3]
player_items = [{"item": potion, "quantity": 15},
{"item": hipotion, "quantity": 5},
{"item": superpotion, "quantity": 5},
{"item": elixir, "quantity": 5},
{"item": megaelixir, "quantity": 2},
{"item": grenade, "quantity": 5}]
enemy_spells = [sixShot, quickDraw, chargedShot, dustEye, hackTech]
boss_spells = [sixShot, quickDraw, chargedShot, dustEye, hackTech]
# Instantiate People
player1 = Person("Talos", 3260, 132, 300, 34, player_spells, player_items)
player2 = Person("Rose ", 4160, 188, 311, 34, player_spells, player_items)
player3 = Person("Jin ", 3089, 174, 288, 34, player_spells, player_items)
enemy1 = Person("Goon ", 1250, 999999, 560, 325, enemy_spells, [])
enemy2 = Person("Vex ", 18200, 999999, 525, 25, enemy_spells, [])
enemy3 = Person("Goon ", 1250, 999999, 560, 325, enemy_spells, [])
players = [player1, player2, player3]
enemies = [enemy1, enemy2, enemy3]
running = True
print("==================================================================================")
print(" 01010100 01110010 01110101 01100101 01000010 01101001 01110100")
print(" _____ ______ _ _ ")
print(" |_ _| | ___ (_) | ")
print(" | |_ __ _ _ ___ | |_/ /_| |_")
print(" | | '__| | | |/ _ \ | ___ \ | __|")
print(" | | | | |_| | __/ | |_/ / | |_")
print(" \_/_| \__,_|\___| \____/|_|\__|")
print(" 01010100 01110010 01110101 01100101 01000010 01101001 01110100")
print(" ")
print("==================================================================================")
print("\n")
print("==================================================================================")
print(bcolors.FAIL + bcolors.BOLD + "Add story text here"
+ bcolors.ENDC)
print(bcolors.FAIL + bcolors.BOLD + "Add story text here!"
+ bcolors.ENDC)
print("==================================================================================")
print("\n")
# Player character stat UI
# One iteration == one battle round: status display, player phase, enemy
# phase, then an end-of-battle check.
while running:
    print("PARTY MEMBERS")
    print("===========================================================================================================")
    print("NAME HP MP")
    for player in players:
        player.get_stats()
    print("===========================================================================================================")
    print("\n")
    print("ENEMIES")
    for enemy in enemies:
        enemy.get_enemy_stats()

    # ---- Player phase ----
    for player in players:
        if not enemies:
            # All enemies fell earlier this round; skip the remaining turns
            # (otherwise choose_target would be called with an empty list).
            break
        player.choose_action()
        choice = input(" Choose action: ")
        index = int(choice) - 1
        if index == 0:
            # Plain physical attack.
            dmg = player.generate_damage()
            enemy = player.choose_target(enemies)
            enemies[enemy].take_damage(dmg)
            print("\n" + "You attacked " + enemies[enemy].name.replace(" ", "") + " for", dmg, "points of damage.")
            if enemies[enemy].get_hp() == 0:
                print("\n" + enemies[enemy].name.replace(" ", "") + " has been defeated.")
                del enemies[enemy]
        elif index == 1:
            # Cast a spell (white = heal self, black = damage a target).
            player.choose_magic()
            magic_choice = int(input(" Choose magic: ")) - 1
            if magic_choice == -1:
                continue
            spell = player.magic[magic_choice]
            magic_dmg = spell.generate_damage()
            current_mp = player.get_mp()
            if spell.cost > current_mp:
                print(bcolors.FAIL + "\nNot enough MP\n" + bcolors.ENDC)
                continue
            player.reduce_mp(spell.cost)
            if spell.type == "white":
                player.heal(magic_dmg)
                print(bcolors.OKBLUE + "\n" + spell.name, " heals for", str(magic_dmg), "HP." + bcolors.ENDC)
            elif spell.type == "black":
                enemy = player.choose_target(enemies)
                enemies[enemy].take_damage(magic_dmg)
                print(bcolors.OKBLUE + "\n" + spell.name + " deals", str(magic_dmg), "points of damage to " +
                      enemies[enemy].name.replace(" ", "") + bcolors.ENDC)
                if enemies[enemy].get_hp() == 0:
                    print("\n" + enemies[enemy].name.replace(" ", "") + " has been defeated.")
                    del enemies[enemy]
        elif index == 2:
            # Use an inventory item.
            player.choose_items()
            item_choice = int(input(" Choose item: ")) - 1
            if item_choice == -1:
                continue
            item = player.items[item_choice]["item"]
            if player.items[item_choice]["quantity"] == 0:
                print(bcolors.FAIL + "\n" + "None left..." + bcolors.ENDC)
                continue
            player.items[item_choice]["quantity"] -= 1
            if item.type == "potion":
                player.heal(item.prop)
                print(bcolors.OKGREEN + "\n" + item.name + " heals for", str(item.prop), "HP" + bcolors.ENDC)
            elif item.type == "elixir":
                # BUGFIX: this compared against "MegaElixir", a name no item
                # carries, so the party-wide restore never ran.  Match the
                # name the mega-elixir item is actually defined with.
                if item.name == "Super Nano Lubricant":
                    for i in players:
                        i.hp = i.maxhp
                        i.mp = i.maxmp
                else:
                    player.hp = player.maxhp
                    player.mp = player.maxmp
                print(bcolors.OKGREEN + "\n" + item.name + " fully restores HP/MP" + bcolors.ENDC)
            elif item.type == "attack":
                enemy = player.choose_target(enemies)
                enemies[enemy].take_damage(item.prop)
                print(bcolors.FAIL + "\n" + item.name + " deals", str(item.prop), "points of damage to " +
                      enemies[enemy].name.replace(" ", "") + bcolors.ENDC)
                if enemies[enemy].get_hp() == 0:
                    print("\n" + enemies[enemy].name.replace(" ", "") + " has been defeated.")
                    del enemies[enemy]
    print("\n")

    # ---- Enemy attack phase ----
    for enemy in enemies:
        if not players:
            break  # the whole party is down; no targets left
        enemy_choice = random.randrange(0, 2)
        if enemy_choice == 0:
            # Chose attack
            target = random.randrange(0, (len(players)))
            enemy_dmg = enemy.generate_damage()
            players[target].take_damage(enemy_dmg)
            print("\n" + enemy.name.replace(" ", "") + " attacks " + players[target].name.replace(" ", "") + " for",
                  enemy_dmg)
        elif enemy_choice == 1:
            spell, magic_dmg = enemy.choose_enemy_spell()
            enemy.reduce_mp(spell.cost)
            if spell.type == "white":
                enemy.heal(magic_dmg)
                print("\n" + bcolors.OKBLUE + spell.name + " heals " + enemy.name + " for", str(magic_dmg),
                      "HP." + bcolors.ENDC)
            elif spell.type == "black":
                target = random.randrange(0, (len(players)))
                players[target].take_damage(magic_dmg)
                print(bcolors.OKBLUE + "\n" + enemy.name.replace(" ", "") + "'s " + spell.name + " deals",
                      str(magic_dmg), "points of damage to " + players[target].name.replace(" ", "") + bcolors.ENDC)
                if players[target].get_hp() == 0:
                    print("\n" + players[target].name.replace(" ", "") + " has died.")
                    del players[target]
    print("\n")

    # ---- Check if battle is over ----
    # BUGFIX: defeated combatants are removed from their lists the moment
    # they hit 0 HP, so the old code (counting zero-HP entries that are no
    # longer present, and comparing the count to the magic number 2) could
    # never end the fight.  A side has lost exactly when its list is empty.
    if not enemies:
        print(bcolors.OKGREEN + "You win!" + bcolors.ENDC)
        running = False
    elif not players:
        print(bcolors.FAIL + "Your enemies have defeated you!" + bcolors.ENDC)
        running = False
| true |
8670c6324b69c686a79487becaaacb1ec4ef4326 | Python | acmore/OpenEmbedding | /laboratory/benchmark/summary.py | UTF-8 | 746 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | import os
import sys

# Collate benchmark logs: each file in the directory given by sys.argv[1]
# is named <key>_<nprocs>.out; the smallest "<secs>s - loss" epoch time found
# in it is placed in the column for its process count, and one row per key
# is printed as "key t1 t2 t4 t8".
_SLOT_BY_SUFFIX = {'_1.out': 0, '_2.out': 1, '_4.out': 2, '_8.out': 3}

times = dict()
for name in os.listdir(sys.argv[1]):
    best = 100000000  # sentinel: "no timing line seen yet"
    for line in open(sys.argv[1] + '/' + name):
        r = line.find('s - loss')
        l = line.find('-')
        if l > r:
            # The '-' found was past the marker; fall back to the ':' prefix.
            l = line.find(':')
        if l != -1 and r != -1:
            best = min(best, int(line[l + 1:r]))
    cut = len(name) - 6  # the "_N.out" suffix is 6 characters
    key, suffix = name[:cut], name[cut:]
    times.setdefault(key, [0, 0, 0, 0])
    if suffix in _SLOT_BY_SUFFIX:
        times[key][_SLOT_BY_SUFFIX[suffix]] = best
for key, value in sorted(times.items()):
    print(key, *value)
ed2d4f4816d1e6f5a3acb04644720dce6bbe0a2d | Python | skfo763/Problem_Solving | /backjoon/sorting/10989.py | UTF-8 | 377 | 3.484375 | 3 | [] | no_license | import sys
input = sys.stdin.readline
print = sys.stdout.write

# Counting sort (Baekjoon 10989): one bucket per possible value 1..10000,
# using raw stdin/stdout bindings above for speed.
counts = [0] * 10001
n = int(input().rstrip())
# Drop each input number into its bucket.
for _ in range(n):
    counts[int(input().rstrip())] += 1
# Walk the buckets in ascending order, emitting each value once per occurrence.
for number, seen in enumerate(counts):
    for _ in range(seen):
        print(str(number))
        print('\n')
a14e518b119bc26378ff78e0cbf3fdfe552a056a | Python | qs8607a/Algorithm-39 | /Euler/part2/p98.py | UTF-8 | 988 | 3.15625 | 3 | [] | no_license | from itertools import permutations
from collections import Counter,defaultdict
def issquare(x):
    """Return True when the non-negative integer x is a perfect square.

    Uses math.isqrt for an exact integer square root; the previous
    int(x**0.5) form goes through a 53-bit float and misclassifies large
    squares (e.g. (10**8 + 1)**2).
    """
    from math import isqrt
    return isqrt(x) ** 2 == x
def g(s1, s2):
    """Largest square reachable by a shared digit substitution.

    Tries every injective assignment of digits to the distinct letters of
    s1; whenever both substituted strings are leading-zero-free perfect
    squares, the larger of the two competes for the maximum.  Returns 0 if
    no assignment works.  (Progress is printed every 100000 assignments.)
    """
    letters = list(set(s1))
    width = len(letters)
    best = 0
    tried = 1
    for digits in permutations('0123456789', width):
        if tried % 100000 == 0:
            print(tried)  # coarse progress indicator for long runs
        tried += 1
        # One translate() call replaces the per-letter replace() chain;
        # letters and digits are disjoint character sets, so the mapping
        # is equivalent to sequential substitution.
        table = str.maketrans(dict(zip(letters, digits)))
        n1 = s1.translate(table)
        n2 = s2.translate(table)
        if n1[0] != '0' and n2[0] != '0' and issquare(int(n1)) and issquare(int(n2)):
            best = max(best, int(n1), int(n2))
    return best
def f1():
    """Scan txt/words.txt for anagram pairs and print each pair together
    with its best square mapping from g().

    Words are bucketed by their multiset of letters (via the string form of
    a Counter); only buckets holding exactly two words are reported.
    """
    raw = open('txt/words.txt').read().strip().split(',')
    words = sorted((w.strip('"') for w in raw), key=len, reverse=True)
    buckets = defaultdict(list)
    for word in words:
        buckets[hash(str(Counter(word)))].append(word)
    for pair in buckets.values():
        if len(pair) == 2:
            print(pair[0], pair[1], g(pair[0], pair[1]))
# Entry point: run the anagram-square search over the word list.
f1()
##print(g('INTRODUCE','REDUCTION'))
| true |
00be2045f248d1f5a874a224ecddaddc1ff7d1b7 | Python | YuliiaAntonova/codingbat | /warmup-1/parrot_trouble.py | UTF-8 | 543 | 4.09375 | 4 | [] | no_license |
# We have a loud talking parrot. The "hour" parameter is the
# current hour time in the range 0..23. We are in trouble if the parrot is
# talking and the hour is before 7 or after 20. Return True if we are in trouble.
# parrot_trouble(True, 6) → True
# parrot_trouble(True, 7) → False
# parrot_trouble(False, 6) → False
def parrot_trouble(talking, hour):
    """Return True when we are in trouble: the parrot is talking and the
    hour (0..23) is before 7 or after 20.

    The condition is already a boolean, so it is returned directly instead
    of branching into explicit True/False returns.
    """
    return talking and (hour < 7 or hour > 20)
# Quick demo of the three documented cases: expected True, False, False.
print(parrot_trouble(True, 6))
print(parrot_trouble(True, 7))
print(parrot_trouble(False, 6))
| true |
c965a6727c5551ceb6051e1e993cdcb90299966a | Python | NARMATHA-R/PYTHON-PRACTICE | /islower.py | UTF-8 | 46 | 3.21875 | 3 | [] | no_license | txt = "hello all!"
# str.islower(): True only if every cased character in txt is lowercase;
# for the "hello all!" sample defined above this prints True.
x = txt.islower()
print(x)
| true |
780e64202e6728eebf8e397f521634383c99ce36 | Python | s81320/dsw | /data-acquisition/get-content-tsp-02.py | UTF-8 | 1,497 | 2.625 | 3 | [] | no_license | from newspaper import Article
import time
import sys , os
import json

# Article counter: the first file written will be numbered i+1, so i must be
# the last number already assigned to an article.
i = 529  # for 14th of July

# Read the list of article URLs (one per line) collected for this date.
with open("links-tsp-2020-07-14-new.txt" , "r") as link_file :
    all_lines = link_file.readlines()

# Download, parse and archive each article as a zero-padded JSON file.
# NOTE(review): nesting of this loop (inside vs. after the `with`) is
# reconstructed; either placement behaves identically here.
for link in all_lines:
    link = link.replace('\n', '')
    article = Article(link)
    article.download()
    time.sleep(2)  # be polite to the server between downloads
    article.parse()
    article.nlp()
    article.fetch_images()
    ## generate a filename (sequential, zero-padded to five digits)
    i = i + 1
    filename = f'{i:05}'
    # should check, if file exists ...
    # Start from the page's OpenGraph metadata and enrich it.
    keep = article.meta_data['og']
    keep['authors'] = article.authors
    keep['text-link'] = filename
    keep['images-link'] = list(article.images)
    keep['publish-date'] = str(article.publish_date)
    keep['paper'] = 'tagesspiegel'
    # Tagesspiegel URLs end in ".../<8-digit id>.html"; slice the id out.
    keep['id'] = link[-13:-5]
    #keep['id'] = link
    keep['text'] = article.text
    # json.dumps returns the JSON as a str (json.dump writes straight to a
    # file object) — dumps is used so the text can be written explicitly.
    json_txt = json.dumps(keep, indent=4)
    with open(filename + ".json", 'w', encoding='utf-8') as file:
        file.write(json_txt)
    print("wrote " + str(i))

# 8 Jul: added the article id as the number in the filename (8888888.html)
# Added the title to the text file, so it now contains the title and the text.
# No direct article id is available, but the HTML files are encoded as numbers:
# 'url': 'https://www.tagesspiegel.de/politik/kommando-zurueck-was-gegen-die-wiedereinfuehrung-der-wehrpflicht-spricht/25978222.html',
# so we simply reuse the number from the file name.
| true |
733b0712e557cfeba0566d83f44e619fc2b1b89a | Python | vumeshkumarraju/class | /assesment1/code2.py | UTF-8 | 290 | 4.21875 | 4 | [] | no_license | #factorial of a number
print("\nwelcome to the program")
print("we are going to find the factorial of your inputed number.\n")
n = int(input("enter the number="))
fact = 1
print("THE FACTORIAL OF ",n,":-")
# Multiply downward from n to 2, echoing each factor of the product,
# then close the chain with the trailing "1 =" line.
for factor in range(n, 1, -1):
    fact *= factor
    print(factor, end=" x ")
print("1 = ",fact)
| true |
1c7193de57ec8c8b2e61e07e9ba67c892c87d480 | Python | Grievi/Pomodoro | /app/auth/v1/utilities/timer.py | UTF-8 | 711 | 3.3125 | 3 | [] | no_license | import time
class UserTimer():
    # NOTE(review): pomodoro takes the session length directly (there is no
    # self parameter), so it must be called as UserTimer.pomodoro(minutes);
    # the signature is kept as-is for compatibility with existing callers.
    def pomodoro(t):
        """Run four work sessions of t minutes, each followed by a
        10-minute break, printing an in-place mm:ss countdown."""
        print("Timer starts now!")
        for _ in range(4):
            # Work-session countdown.
            # BUGFIX: the original tested `while set_time:` but decremented a
            # different variable, so the loop never terminated; a single
            # countdown variable is both tested and decremented here.
            remaining = t * 60
            while remaining:
                mins, secs = divmod(remaining, 60)
                print('{:02d}:{:02d}'.format(mins, secs), end="\r")
                time.sleep(1)
                remaining -= 1
            print('its Break time!')
            # 10-minute break countdown.
            remaining = 10 * 60
            while remaining:
                mins, secs = divmod(remaining, 60)
                print('{:02d}:{:02d}'.format(mins, secs), end="\r")
                time.sleep(1)
                remaining -= 1
            print("Work Time!")
| true |
6c65489667ae145255d4fe3fd55e1f0af6305dde | Python | kunaldesign/python-program | /program 11.py | UTF-8 | 445 | 4.3125 | 4 | [] | no_license | #program using if...else statment to find the largest number
n1 = int(input("enter an 1st number: "))
n2 = int(input("enter an 2nd number: "))
n3 = int(input("enter an 3rd number: "))
# The built-in max() replaces the nested if/else ladder; on ties it yields
# the shared value, so the printed output matches the original branches.
print('{} is the largest.'.format(max(n1, n2, n3)))
exit()
| true |
68333cb2522912e371fb2d34e3f9fd4c75272705 | Python | staticfloat/libsquiggly | /libsquiggly/resampling/upfirdn/__init__.py | UTF-8 | 11,983 | 2.8125 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | # Copyright (c) 2009, Motorola, Inc
#
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Motorola nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from .Resampler import ResamplerRR, ResamplerRC, ResamplerCR, ResamplerCC
def enumdims(ary, dims=(0,), complement=False):
"""Enumerate over the given array dimensions
yielding the index tuple and resulting array in sequence,
using ":" for each of the complementary dimensions.
For example, if x is an array of shape (2,3,4),
iterdims(x, [0]) yields 2 arrays of shape (3,4)
iterdims(x, [1]) yields 3 arrays of shape (2,4)
iterdims(x, [2]) yields 4 arrays of shape (2,3)
iterdims(x, [0,1]) yields 6 arrays of shape (4)
iterdims(x, [0,2]) yields 8 arrays of shape (3)
iterdims(x, [1,2]) yields 12 arrays of shape (2)
iterdims(x, [0,1,2]) yields 24 arrays of shape () (i.e., 0-D)
iterdims(x, []) yields 1 array of shape (2,3,4)
...
"""
dimsNoNegative = []
for d in dims:
if d < 0:
dimsNoNegative.append(len(ary.shape)+d)
else:
dimsNoNegative.append(d)
dims = tuple(dimsNoNegative)
cdims = tuple([i for i in range(len(ary.shape)) if i not in dims])
if complement:
dims, cdims = cdims, dims
x = ary.transpose(*(tuple(dims) + cdims))
ndindexArgs = tuple([x.shape[i] for i in range(len(dims))])
for idxTuple in np.ndindex(*ndindexArgs):
yield idxTuple, x[idxTuple]
def iterdims(ary, dims=(0,), complement=False):
"""Like enumdims but yielding only the partial arrays, not
the index tuple as well
for xi in iterdims(x): <-- equivalent to "for xi in x:"
for xi in iterdims(x,[-1]): <-- yields x[...,0], x[...,1], ... etc
for xi in iterdims(x,[0,-1]): <-- yields x[0,...,0], x[0,...,1], ... etc
for xi in iterdims(x,[0,1]): <-- yields x[0,0,...], x[0,1,...], ... etc
"""
for idx, x in enumdims(ary, dims, complement):
yield x
def full_index(x):
"""Return a list of arrays to index into array x."""
idx = [np.arange(xs) for xs in x.shape]
for i in range(len(idx)):
idx[i].shape += (1,)*(len(x.shape)-i-1)
return idx
def dim2back(x, xdim=-1):
"""
Transpose ndarray so that a given dimension is moved to the back.
Parameters
----------
x : ndarray
Input array.
xdim : int, optional
Dimension to put at back "x" input array. (default=-1)
Returns
-------
y : ndarray
view of x transposed
"""
num_dims_x = len(x.shape)
if xdim < 0:
xdim = num_dims_x + xdim
return x.transpose(*(list(range(xdim)) + list(range(xdim+1, num_dims_x)) + [xdim]))
def back2dim(x, xdim=-1):
    """
    Transpose ndarray so that the back dimension is moved to a given position.
    Inverse of dim2back for the same xdim.

    Parameters
    ----------
    x : ndarray
        Input array.
    xdim : int, optional
        Dimension at which to put the back "x" input array dimension.
        (default=-1)

    Returns
    -------
    y : ndarray
        view of x transposed
    """
    ndim = len(x.shape)
    if xdim < 0:
        xdim += ndim
    # Insert the last axis at position xdim, shifting the rest right.
    order = list(range(xdim)) + [ndim - 1] + list(range(xdim, ndim - 1))
    return x.transpose(*order)
# Index into Resampler object type switchyard is
# (signal type complex, coefficient type complex) booleans
_SWITCH_YARD = {
    (False, False): ResamplerRR,   # real signal,    real coefficients
    (False, True):  ResamplerRC,   # real signal,    complex coefficients
    (True, False):  ResamplerCR,   # complex signal, real coefficients
    (True, True):   ResamplerCC    # complex signal, complex coefficients
}
def klass_lookup(signal=1., coefficients=1.):
    """Return the Resampler class matching the real/complex nature of the
    given signal and coefficient objects (see _SWITCH_YARD)."""
    key = (np.iscomplexobj(signal), np.iscomplexobj(coefficients))
    return _SWITCH_YARD[key]
class ResamplerBank(object):
    """
    A bank of Resampler objects, one per (broadcast) filter in ``h``.
    """
    def __init__(self, x, h, uprate=1, downrate=1, xdim=-1, hdim=-1):
        """
        Construct the ResamplerBank object.

        Parameters
        ----------
        x : array-like
            Input signal array. May be multi-dimensional (ND). The signals
            will be operated on along the "xdim" dimension of x.
            This is needed to determine how many Resamplers need to be created,
            since each one needs to retain state.
        h : array-like
            FIR (finite-impulse response) filter coefficients array. May be ND.
            The filters are along the "hdim" dimension of h.
        uprate : int, optional
            Upsampling rate. (default=1)
        downrate : int, optional
            Downsampling rate. (default=1)
        xdim : int, optional
            Dimension for "x" input signal array. (default=-1)
        hdim : int, optional
            Dimension for "h" coefficient array. (default=-1)
        """
        x = np.atleast_1d(x)
        h = np.atleast_1d(h)
        # Pick the real/complex Resampler implementation for this pairing.
        klass = klass_lookup(x, h)
        # Move the operating axes to the back so broadcasting lines up.
        x = dim2back(x, xdim)
        h = dim2back(h, hdim)
        # Broadcast a length-1 slice of x against h to size the bank.
        xi = full_index(x)
        xi[-1] = xi[-1][0:1]
        # BUGFIX: index with a tuple of arrays — indexing with a plain list
        # of arrays was deprecated in NumPy 1.15 and raises in current
        # releases.  (xx itself is ignored; only hh's shape matters.)
        xx, hh = np.broadcast_arrays(x[tuple(xi)], h)
        self.hh = hh
        # One stateful Resampler per filter row of the broadcast bank.
        bank = np.zeros(self.hh.shape[:-1], dtype=object)
        for idx, hi in enumdims(self.hh, (-1,), complement=True):
            bank[idx] = klass(uprate, downrate, hi)
        self.bank = bank
        self.r0 = self.bank.flat[0]
        # Ceiling of len(h)/uprate: taps per polyphase branch.
        self.coefs_per_phase = (h.shape[-1] + uprate - 1) // uprate
        self.xdim = xdim
        if np.iscomplexobj(x) or np.iscomplexobj(h):
            self.output_type = complex
        else:
            self.output_type = float

    def apply(self, x, all_samples=False):
        """
        Upsample, FIR filter, and downsample a signal or array of signals using
        the bank of Resampler objects.

        Parameters
        ----------
        x : array-like
            Input signal array. May be multi-dimensional (ND). The signals
            will be operated on along the "xdim" dimension of x.
        all_samples : bool, optional
            If True, feeds in zeros after the input signal to "drain" the
            resampler and get all the non-zero samples. (default=False)

        Returns
        -------
        y : ndarray
        """
        x = np.atleast_1d(x)
        x = dim2back(x, self.xdim)
        # htemp is ignored; broadcasting only establishes xx's bank shape.
        xx, htemp = np.broadcast_arrays(x, self.hh[..., 0:1])
        in_count = xx.shape[-1]
        if all_samples:
            in_count += self.coefs_per_phase - 1
            z = np.zeros((self.coefs_per_phase - 1,))   # zero "flush" input
        needed_out_count = self.r0.neededOutCount(in_count)
        y = np.zeros(xx.shape[:-1] + (needed_out_count,), dtype=self.output_type)
        for idx, xi in enumdims(xx, (-1,), complement=True):
            out_count = self.bank[idx].apply(xi, y[idx])
            if all_samples:
                # Drain the filter's internal state into the tail of y.
                self.bank[idx].apply(z, y[idx][out_count:])
        return back2dim(y, self.xdim)
def upfirdn(x, h, uprate=1, downrate=1, xdim=-1, hdim=-1, all_samples=True):
    """
    Upsample, FIR filter, and downsample a signal or array of signals.

    Parameters
    ----------
    x : array-like
        Input signal array. May be multi-dimensional (ND). The signals
        are operated on along the "xdim" dimension of x.
    h : array-like
        FIR (finite-impulse response) filter coefficients array. May be ND.
        The filters lie along the "hdim" dimension of h.
    uprate : int, optional
        Upsampling rate. (default=1)
    downrate : int, optional
        Downsampling rate. (default=1)
    xdim : int, optional
        Dimension for "x" input signal array. (default=-1)
    hdim : int, optional
        Dimension for "h" coefficient array. (default=-1)
    all_samples : bool, optional
        If True, feeds in zeros after the input signal to "drain" the
        resampler and get all the non-zero samples. (default=True)

    Returns
    -------
    y : float ndarray
        The output signal array, results along the "xdim" dimension.

    Notes
    -----
    Standard broadcasting rules apply to x and h over every dimension other
    than the sample dimensions selected by xdim and hdim, so one filter can
    serve many signal rows, or many filters one signal.  uprate/downrate are
    scalar and apply to all operations.  For ND input, xdim=-1 is the most
    efficient choice (anything else copies each signal first).

    Examples
    --------
    >>> upfirdn([1,1,1], [1,1,1]) # FIR filter
    array([ 1., 2., 3., 2., 1.])
    >>> upfirdn([1, 2, 3], [1], 3) # upsampling with zeros insertion
    array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])
    >>> upfirdn([1,2,3], [1,1,1], 3) # upsampling with sample-and-hold
    array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
    >>> upfirdn([1,1,1], [.5,1,.5], 2) # linear interpolation
    array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])
    >>> upfirdn(range(10), [1], 1, 3) # decimation by 3
    array([ 0., 3., 6., 9.])
    >>> upfirdn(range(10), [.5,1,.5], 2, 3) # linear interpolation, rate 2/3
    array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])
    >>> upfirdn(np.reshape(range(8), (4,2)), [1, 1], 2) # one filter, many rows
    array([[ 0., 0., 1., 1.],
           [ 2., 2., 3., 3.],
           [ 4., 4., 5., 5.],
           [ 6., 6., 7., 7.]])
    """
    # A throwaway bank performs the whole operation in one pass.
    return ResamplerBank(x, h, uprate, downrate, xdim, hdim).apply(x, all_samples)
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
if __name__ == '__main__':
    # Smoke test: upsample a random (2,3,4) batch by 3 along axis 0 with a
    # length-3 boxcar filter and print the result.
    h = np.ones((3,))
    x = np.random.randn(2,3,4)
    y = upfirdn(x, h, 3, 1, xdim=0)
    print(y)
| true |
17cdb43142264b2a9a0793c25a5fef29f2947bca | Python | pvithayathil/titanic | /titantic_explore_pv.py | UTF-8 | 7,198 | 3.125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import sklearn.linear_model as lm
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
# Import the RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
# Thanks for https://www.kaggle.com/arthurlu/titanic/exploratory-tutorial-titanic/notebook
# Thanks for https://www.kaggle.com/davidfumo/titanic/exploratory-tutorial-titanic-disaster/discussion
# For Helping with the Analysis
# Load Data
# NOTE(review): this script mixes Python 2 print statements with
# parenthesised prints — it only runs under Python 2 as written.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# Summarise both splits: shape plus describe() (count/mean/std/quartiles).
print "-------Training Basic Statistics-------"
print ("Dimension: {}".format(train.shape))
print train.describe()
print "-------Test Basic Statistics-------"
print ("Dimension: {}".format(test.shape))
print test.describe()
### GRAPHS ###
## Distribution Graphs ## Investigate
# One 2x3 figure comparing the train (orange) and test (purple) marginal
# distributions of Survived, Age, Fare, Pclass, Sex and Embarked.
plt.rc('font', size=10)
fig = plt.figure(figsize=(20, 10))
alpha = 0.5
train_color = '#e66101'
test_color = '#5e3c99'
# (0,0): Survived — train only; the test split has no Survived column.
ax = plt.subplot2grid((2,3), (0,0), colspan=1)
train.Survived.value_counts().plot(kind='bar', color=train_color, label='train', alpha=alpha)
#test.Survived.plot(kind ='bar', color=test_color,label='test', alpha=alpha)
ax.set_xlabel('Survived')
ax.set_title("Survived Distribution" )
plt.legend(loc='best')
# (0,1): Age histograms for both splits.
ax1 = plt.subplot2grid((2,3), (0,1))
train.Age.plot.hist(color=train_color, label='train', alpha=alpha)
test.Age.plot.hist(color=test_color, label='test', alpha=alpha)
ax1.set_xlabel('Age')
ax1.set_title("Age Distribution" )
plt.legend(loc='best')
# (0,2): Fare histograms (30 bins).
ax2 = plt.subplot2grid((2,3), (0,2), colspan=1)
train.Fare.plot.hist(bins = 30, color=train_color, label='train', alpha=alpha)
test.Fare.plot.hist(bins =30, color=test_color,label='test', alpha=alpha)
ax2.set_xlabel('Fare')
ax2.set_title("Fare Distribution" )
plt.legend(loc='best')
# (1,0): passenger-class frequencies.
ax3 = plt.subplot2grid((2,3), (1,0))
train.Pclass.value_counts().plot(kind='bar', color=train_color, label='train', alpha=alpha)
test.Pclass.value_counts().plot(kind='bar', color=test_color,label='test', alpha=alpha)
ax3.set_ylabel('Frequency')
ax3.set_xlabel('Pclass')
ax3.set_title("Pclass Distribution" )
plt.legend(loc='best')
# (1,1): sex frequencies.
ax4 = plt.subplot2grid((2,3), (1,1))
train.Sex.value_counts().plot(kind='bar', color=train_color, label='train', alpha=alpha)
test.Sex.value_counts().plot(kind='bar', color=test_color, label='test', alpha=alpha)
ax4.set_ylabel('Frequency')
ax4.set_xlabel('Sex')
ax4.set_title("What's the distribution of Sex?" )
plt.legend(loc='best')
# (1,2): embarkation-port frequencies.
ax5 = plt.subplot2grid((2,3), (1,2))
train.Embarked.value_counts().plot(kind='bar', color=train_color, label='train', alpha=alpha)
test.Embarked.value_counts().plot(kind='bar', color=test_color,label='test', alpha=alpha)
ax5.set_ylabel('Frequency')
ax5.set_xlabel('Embarked')
ax5.set_title("What's the distribution of Embarked?" )
plt.legend(loc='best')
plt.suptitle("Distribution Graphs of Train & Test",size=20)
#plt.tight_layout()
## Training Graphs ## Investigate
# Second figure (3x4 grid): survival broken down by age, sex and class on
# the training split only.  Red = died, blue = survived throughout.
import matplotlib.gridspec as gridspec
gs = gridspec.GridSpec(3,4)
plt.rc('font', size=10)
fig = plt.figure(figsize=(20, 10))
alpha = 0.5
f_color = '#d01c8b'
m_color = '#4dac26'
d_color = '#ca0020'
s_color = '#0571b0'
#### Age Graph
# Row 0: age density of the deceased vs. survivors (all, female, male).
ax = plt.subplot2grid((3,4), (0,0), colspan=2)
train[train.Survived==0].Age.plot(kind='density', color=d_color, label='Died', alpha=alpha)
train[train.Survived==1].Age.plot(kind='density', color=s_color, label='Survived', alpha=alpha)
ax.set_xlim([0, 100])
plt.ylabel('Frequency')
plt.title('Survival based on Age Distribution')
plt.legend(loc='best')
#### Age and Gender Graph
ax2 = plt.subplot2grid((3,4), (0,2), colspan=1,sharey = ax)
train[(train.Survived==0)&(train.Sex=='female')&(~train.Age.isnull())].Age.plot(kind='density', color=d_color, label='Died', alpha=alpha)
train[(train.Survived==1)&(train.Sex=='female')&(~train.Age.isnull())].Age.plot(kind='density', color=s_color, label='Survived', alpha=alpha)
ax2.set_xlim([0, 100])
plt.ylabel('Frequency')
plt.title('Survival base on Age & Female Distribution')
plt.legend(loc='best')
ax3 = plt.subplot2grid((3,4), (0,3), colspan=1,sharey = ax)
train[(train.Survived==0)&(train.Sex=='male')&(~train.Age.isnull())].Age.plot(kind='density', color=d_color, label='Died', alpha=alpha)
train[(train.Survived==1)&(train.Sex=='male')&(~train.Age.isnull())].Age.plot(kind='density', color=s_color, label='Survived', alpha=alpha)
ax3.set_xlim([0, 100])
plt.ylabel('Frequency')
plt.title('Survival base on Age & Male Distribution')
plt.legend(loc='best')
#### Graph by Gender
# Row 1: survival counts/rates by sex.
# NOTE(review): the female panel plots raw counts while the male panel is
# normalised by group size — likely an oversight; confirm intended scale.
df_male = train[train.Sex=='male'].Survived.value_counts().sort_index()
df_female = train[train.Sex=='female'].Survived.value_counts().sort_index()
ax4 = plt.subplot2grid((3,4), (1,0),colspan=2)
df_female.plot(kind='barh', color=f_color, label='Female', alpha=alpha)
ax4.set_xlabel('Rate')
ax4.set_yticklabels(['Died', 'Survived'])
ax4.set_title("Female Survival Rate" )
plt.legend(loc='best')
ax5 = plt.subplot2grid((3,4), (1,2),colspan=2,sharey=ax4)
(df_male/train[train.Sex=='male'].shape[0]).plot(kind='barh', color=m_color,label='Male', alpha=alpha)
ax5.set_xlabel('Rate')
ax5.set_yticklabels(['Died', 'Survived'])
ax5.set_title("Male Survival Rate" )
plt.legend(loc='best')
# Row 2: survival rates split by sex and by 3rd class vs. classes 1-2.
df_male3 = train[(train.Sex=='male')&(train.Pclass==3)].Survived.value_counts().sort_index()
df_male0 = train[(train.Sex=='male')&(train.Pclass<3)].Survived.value_counts().sort_index()
df_female3 = train[(train.Sex=='female')&(train.Pclass==3)].Survived.value_counts().sort_index()
df_female0 = train[(train.Sex=='female')&(train.Pclass<3)].Survived.value_counts().sort_index()
ax6 = plt.subplot2grid((3,4), (2,0))
(df_female0/train[(train.Sex=='female')&(train.Pclass<3)].shape[0]).plot(kind='barh', color=f_color,label='Female', alpha=alpha)
ax6.set_xlabel('Rate')
ax6.set_yticklabels(['Died', 'Survived'])
ax6.set_title("Female Not 3rd Class Survival Rate" )
plt.legend(loc='best')
ax7 = plt.subplot2grid((3,4), (2,1),sharey=ax6)
(df_female3/train[(train.Sex=='female')&(train.Pclass==3)].shape[0]).plot(kind='barh', color=f_color,label='Female', alpha=alpha)
ax7.set_xlabel('Rate')
ax7.set_title("Female 3rd Class Survival Rate" )
plt.legend(loc='best')
ax8 = plt.subplot2grid((3,4), (2,2),sharey=ax6)
(df_male0/train[(train.Sex=='male')&(train.Pclass<3)].shape[0]).plot(kind='barh', color=m_color,label='Male', alpha=alpha)
ax8.set_xlabel('Rate')
ax8.set_title("Male Not 3rd Class Survival Rate" )
plt.legend(loc='best')
ax9 = plt.subplot2grid((3,4), (2,3),sharey=ax6)
(df_male3/train[(train.Sex=='male')&(train.Pclass==3)].shape[0]).plot(kind='barh', color=m_color,label='Male', alpha=alpha)
ax9.set_xlabel('Rate')
ax9.set_title("Male 3rd Class Survival Rate" )
plt.legend(loc='best')
plt.suptitle("Titantic Training Data",size =20)   # NOTE(review): "Titantic" typo in displayed title
# Leftover experiments kept for reference:
#ax2 = plt.subplot2grid((3,4), (0,2), colspan=1)
#ax2 = plt.subplot(gs[0,2])
#train.boxplot(column='Age',by ='Survived')
#train[train.Survived==1].Age.plot(kind='density', color=s_color, label='Survived', alpha=alpha)
#plt.ylabel('Age')
#plt.title('Training Age Distribution')
#plt.legend(loc='best')
| true |
6583352a01a38143d0575cc310ab132e4a91ef80 | Python | jacekstamm/Python_Exercise | /exercises/calculator/Calculator.py | UTF-8 | 2,467 | 4.5625 | 5 | [] | no_license | def add(a, b):
return a + b
def substract(a, b):
    """Return the difference ``a - b`` (name kept as-is for existing callers)."""
    difference = a - b
    return difference
def multiplication(a, b):
    """Return the product of ``a`` and ``b``."""
    product = b * a
    return product
def divine(a, b):
    """Return ``a / b``; ZeroDivisionError propagates and is handled by the caller."""
    quotient = a / b
    return quotient
def power(a, b):
    """Return ``a`` raised to the power ``b``."""
    return pow(a, b)
def calculator(database=None):
    """Interactively run one calculation and offer follow-up actions.

    The user picks an operation, enters two numbers, and may save the
    formatted result string returned by process().

    ``database`` keeps the saved results alive across the recursive
    "calculate again" calls.  Previously a fresh local list was created on
    every call, so menu option 2 ("show saved") could never display results
    saved in an earlier round.  Calling ``calculator()`` with no argument
    still works, so existing callers are unaffected.
    """
    if database is None:
        database = []
    print("Wybierz działanie które chcesz wykonać:")
    print("1. Dodawanie")
    print("2. Odejmowanie")
    print("3. Mnożenie")
    print("4. Dzielenie")
    print("5. Potęgowanie")
    choice = input("Wpisz co wybrałeś: (1/2/3/4/5): ")
    num1 = float(input("Wpisz pierwszą liczbę: "))
    num2 = float(input("Wpisz drugą liczbę: "))
    last = process(choice, num1, num2)
    save = input("Czy chcesz zapisać swoje ostatnie działanie? Y/N: ")
    if save == "Y" or save == "y":
        database.append(last)
    print("Jakie chcesz teraz działanie wykonać?")
    print("1. Chcę coś obliczyc jeszcze raz.")
    print("2. Wyświetl ostatnie zapisane działanie")
    print("3. Zakończ program")
    again = input("Które działanie wybierasz? 1/2/3: ")
    if again == "1":
        calculator(database)           # pass history on so saves survive
    elif again == "2":
        for record in database:
            print(record)
        calculator(database)
    elif again == "3":
        print("Do zobaczenia!")
    return
def process(choice, num1, num2):
    """Execute the operation selected by ``choice`` on the two operands.

    Prints a human-readable equation and returns the compact
    ``"<a><op><b>=<result>"`` string that the caller stores as history.
    Division by zero prints a warning and returns None, as before.

    Improvements over the original: the five near-identical branches are
    collapsed into a dispatch table, and each result is computed once
    instead of twice (once for printing, once for the return string).
    An unknown choice returns None, matching the original fall-through.
    """
    # choice -> (symbol used in the printed equation, symbol used in the
    #            compact history string, operation to call)
    operations = {
        "1": (" + ", "+", add),
        "2": (" - ", "-", substract),
        "3": (" * ", "*", multiplication),
        "4": (" / ", "/", divine),
        "5": ("^", "^", power),
    }
    if choice not in operations:
        return None
    display_op, compact_op, func = operations[choice]
    try:
        result = func(num1, num2)
    except ZeroDivisionError:
        print("Nie dziel przez zero!!!")
        return None
    print(num1, display_op, num2, " = ", result)
    return str(num1) + compact_op + str(num2) + "=" + str(result)
def stop_application():
    """Print the farewell message shown when the user declines to start."""
    farewell = "Dziękujem za skorzystanie z naszego SUPER kalkulatora"
    print(farewell)
    return None
# Entry point: ask whether to start the interactive calculator.
print("No to jak? Zaczynamy zabawę z matematyką?")
start = input("Y/N: ")
# NOTE(review): any answer other than Y/y/N/n silently does nothing.
if start == "Y" or start == "y":
    calculator()
elif start == "N" or start == "n":
    stop_application()
| true |
60634ee271ca7bad1de37381cd511e70e26809f0 | Python | AmineCharko/UPGMA | /Projet_UPGMA.py | UTF-8 | 3,255 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: aminecharko
"""
def trouverDistMin(mat):
    """Return the (row, col) of the smallest non-zero entry of ``mat``.

    ``mat`` is the (symmetric) distance matrix; zero entries are skipped
    because the diagonal holds d(i, i) = 0.  Returns (-1, -1) when no
    non-zero entry exists.

    Uses a proper ``float("inf")`` sentinel instead of the original magic
    constant 999999999, which would have silently ignored any distance
    larger than that value.
    """
    case_min = float("inf")
    x, y = -1, -1
    for i, row in enumerate(mat):
        for j, dist in enumerate(row):
            if dist != 0 and dist < case_min:
                case_min = dist
                x, y = i, j
    return x, y
def calculMatrice(i,j,matDist,A):
    """Merge clusters i and j of the distance matrix in place (UPGMA step).

    Builds the weighted-average distance row ``l`` from the merged node to
    every remaining cluster, removes rows/columns i and j, then inserts the
    new row/column at index 0.  ``A`` is the freshly fused Cl_AB node whose
    left/right subtrees supply the leaf counts used as weights.
    Assumes i < j (the pops below rely on it).
    """
    l = [0]  # leading 0 = distance of the merged cluster to itself
    for k in range(len(matDist[0])):
        if (k != i and k != j):
            nI = float(A.G.nbFeuille())
            nJ = float(A.D.nbFeuille())
            dIk = float(matDist[i][k])
            dJk = float(matDist[j][k])
            # weighted (proportional) average of the two old distances
            l.append(((nI/(nI+nJ))*dIk)+((nJ/(nI+nJ))*dJk))
    matDist.pop(i)
    matDist.pop(j-1)  # j shifted left by one after removing row i
    for k in range(len(matDist)):
        matDist[k].pop(i)
        matDist[k].pop(j-1)
    matDist.insert(0,l)
    # mirror the new row as column 0 to keep the matrix symmetric
    for k in range(1,len(matDist)):
        matDist[k].insert(0,l[k])
def swap_ab(node, ab, x, y):
    """Replace the two merged subtrees at indices ``x`` and ``y`` (x < y) of
    ``ab`` with the single merged ``node``, in place, and return ``ab``.

    The original bound the popped values to unused temporaries; the pops are
    kept purely for their side effect.
    """
    ab.pop(x)
    ab.pop(y - 1)  # y has shifted left by one after removing index x
    ab.insert(x, node)
    return ab
class Cl_AB():
    """Binary tree node used to build and print the UPGMA hierarchy."""
    def __init__(self):
        self.id = ""        # label: leaf name, or comma-joined names after fusion
        self.distG = 0.0    # branch length to the left child
        self.distD = 0.0    # branch length to the right child
        self.G = None       # left child
        self.D = None       # right child (original comment mistakenly said "fils gauche")
        self.pere = None    # parent node
    # True if self is a leaf, False otherwise
    def estFeuille(self):
        return ((self.D is None) and (self.G is None))
    # number of leaves under this node
    def nbFeuille(self):
        if self.estFeuille():
            return 1
        else:
            return self.D.nbFeuille()+self.G.nbFeuille()
    # total distance from this node down to one of its leaves
    def lgBranche(self):
        # Follows only left children; valid because UPGMA trees are
        # ultrametric, so every root-to-leaf path has the same length.
        A = self
        dist = 0
        while (not A.estFeuille()):
            dist += A.distG
            A = A.G
        return dist
    def fusionAb(self,AG,AD,dij):
        """Fuse subtrees AG and AD under self at height dij/1 (dij is half
        the inter-cluster distance, already divided by the caller)."""
        self.G = AG
        self.distG = dij-AG.lgBranche()
        self.D = AD
        self.distD = dij-AD.lgBranche()
        AG.pere = self
        AD.pere = self
        return self
    def UPGMA(self,matDist,ab):
        """Recursively merge the two closest clusters until one tree remains.

        ``matDist`` is mutated in place; ``ab`` is the working list of
        subtrees.  Returns the root of the finished tree.
        """
        if (len(matDist[0]) > 1) :
            x, y = trouverDistMin(matDist)
            node = Cl_AB()
            node.fusionAb(ab[x],ab[y],float(matDist[x][y])/float(2))
            node.id = ab[x].id + ', ' + ab[y].id
            calculMatrice(x,y,matDist,node)
            ab = swap_ab(node,ab,x,y)
            return(self.UPGMA(matDist,ab))
        else :
            return ab[0]
    def newick(self):
        """Return the subtree in Newick format, e.g. (A:0.1,B:0.1)."""
        p = ''
        if not self.estFeuille() :
            p += '('+ self.G.newick() + ":" + str(self.distG) + ','
            p += self.D.newick() + ":" + str(self.distD) + ')'
        else :
            p += self.id
        return(p)
# Pairwise distance matrix between the five primate species below
# (order: Homme, Chimpanze, Gorille, Orang-outan, Gibbon).
H = [[0,0.092,0.106,0.177,0.207],
[0.092,0,0.111,0.193,0.218],
[0.106,0.111,0,0.188,0.218],
[0.177,0.193,0.188,0,0.219],
[0.207,0.218,0.218,0.219,0]]
# One leaf node per species.
A1 = Cl_AB()
A2 = Cl_AB()
A3 = Cl_AB()
A4 = Cl_AB()
A5 = Cl_AB()
A1.id = "Homme"
A2.id = "Chimpanze"
A3.id = "Gorille"
A4.id = "Orang-outan"
A5.id = "Gibbon"
A = [A1,A2,A3,A4,A5]
# Run UPGMA; H and A are consumed/mutated by the algorithm.
P = Cl_AB()
P = P.UPGMA(H,A)
print(P.__dict__)
print(P.newick())
20e4047146af85c0344b41811ca5777025ac6555 | Python | WeeDom/exercism | /python/robot-name/robot_name.py | UTF-8 | 978 | 3 | 3 | [] | no_license | import random
import string
import os
class Robot:
    """A robot with a unique random name: two uppercase letters + three digits.

    Issued names are persisted in ``robot_names.txt`` (in the working
    directory) so uniqueness also holds across processes.
    """

    def generate_random_name(self):
        """Generate, record and return a name not present in robot_names.txt.

        Fixes two bugs in the original: the duplicate check compared bare
        names against lines that still carried their trailing newline (so it
        could never match), and on a collision the retry's result was
        discarded while the duplicate name was appended to the file anyway.
        """
        if not os.path.exists('robot_names.txt'):
            open('robot_names.txt', mode='w').close()
        while True:
            name = ''.join(random.choice(string.ascii_uppercase) for i in range(2)) + \
                ''.join(random.choice(string.digits) for i in range(3))
            with open('robot_names.txt', mode='r') as f:
                # strip the newline so membership tests actually work
                known_names = {line.strip() for line in f}
            if name not in known_names:
                with open('robot_names.txt', mode='a') as f:
                    f.write(f'{name}\n')
                return name

    def reset(self):
        """Discard the current name and claim a fresh unique one."""
        self.name = self.generate_random_name()

    def __init__(self):
        # reset() already assigns self.name; the original generated a second,
        # wasted name here, leaking an extra entry into the file.
        self.reset()
| true |
43f981bd6111dcc005b3873bb9d28da88439494e | Python | Kratharth/1BM17CS035 | /Class Programs/class1.py | UTF-8 | 1,211 | 3.65625 | 4 | [] | no_license | class student:
    def __init__(self):
        # All attributes are populated later via set().
        self.i = None  # student id
        self.m = None  # marks (expected 0-100)
        self.a = None  # age
def set(self,student_id,marks,age):
self.i = student_id
self.m = marks
self.a = age
    def get(self):
        """Print the stored id, marks and age (no return value)."""
        print('Id is :' + str(self.i))
        print('Marks is : ' + str(self.m))
        print('Age is : '+ str(self.a))
def validate_marks(self):
return self.m >=0 and self.m <= 100
def validate_age(self):
return self.a > 20
def checkqualification(self):
if self.validate_marks() and self.validate_age():
if self.m >= 65:
return True
return False
if __name__ == '__main__':
    # Collect the roster size, then details for each student, then report
    # which students qualify (valid marks/age and marks >= 65).
    num_students = int(input('enter the number of students'))
    students = []
    for i in range(num_students):
        students.append(student())
    for i in range(num_students):
        print('enter the details of the student' + str(i+1))
        student_id = int(input('enter the id'))
        marks = int(input('enter the marks'))
        age = int(input('enter the age'))
        students[i].set(student_id,marks,age)
    for i in range(num_students):
        if students[i].checkqualification():
            print('student ' + str(i+1) + ' is qualified')
            print('His/Her details are : ',end = ' ' )
            students[i].get()
        else:
            print('Student ' +str(i+1) + ' is not qualified')
| true |
46985d31dfa315fb38954be9118e719f75521bc5 | Python | feliciahsieh/holbertonschool-webstack_basics | /0x01-python_basics/106-weight_average.py | UTF-8 | 476 | 3.78125 | 4 | [] | no_license | #!/usr/bin/python3
"""106-weight_average.py - calc weighted average of all integer tuples
"""
def weight_average(my_list=None):
    """weight_average() - calc weighted average of all integer tuples

    Arguments:
        my_list: list of tuples; item[0] is the value, item[1] its weight

    Returns: the weighted average, or 0 for an empty (or omitted) list

    The original used a mutable default argument (``my_list=[]``); the None
    sentinel is the standard replacement and remains backward compatible.
    A zero total weight still raises ZeroDivisionError, as before.
    """
    if not my_list:
        return 0
    total = sum(item[0] * item[1] for item in my_list)
    weights = sum(item[1] for item in my_list)
    return total / weights
| true |
c19bfc2a050c0eef23069cb3c3e8b426cccc796e | Python | deepaknalore/IDS-for-Authentication-Services | /Simulation/AttackSimulation/LegitimateUser.py | UTF-8 | 3,177 | 2.859375 | 3 | [] | no_license | import requests
import csv
import json
import time
import threading
import queue
import random
from copy import deepcopy
from helper import passwordTypo
start_time = time.time()
LEGITIMATE_USER_DATA = '../Resources/user.csv'
# Threading related information
q = queue.Queue()
n_thread = 10
# Template request payload; 'metadata' is filled in below.
payload = {'user':'', 'password': '', 'metadata' : ''}
# Attack = -1 marks the traffic as legitimate (non-attack) for the IDS.
metadata = {'IP': '', 'Cookie': 0, 'Redirect' : 0, 'UserAgent' : '', 'Attack' : -1}
userAgent = {'OS': 'MacOSX','Browser': 'Chrm'}
metadata['UserAgent'] = userAgent
payload['metadata'] = metadata
url = "http://127.0.0.1:5000/attack"
headers = {
    'Content-Type': 'application/json'
}
# Shared counters, updated by the worker threads (see ThreadClass.run).
failedAuth = 0
sucessfulBreach = 0
blocked = 0
ip_count = 1
class ThreadClass(threading.Thread):
    """Worker thread: POSTs queued login payloads and tallies the outcomes."""
    def __init__(self, q, ip):
        threading.Thread.__init__(self)
        self.q = q
        # NOTE(review): self.ip is never used -- run() overwrites the
        # payload IP from the shared ip_count counter instead.
        self.ip = "1.1.1." + str(ip)
    def run(self):
        global sucessfulBreach
        global failedAuth
        global blocked
        global ip_count
        while True:
            payload = self.q.get()
            # Rotate the source IP through 1.1.1.1 .. 1.1.1.100.
            # NOTE(review): ip_count and the result counters are mutated by
            # several threads without a lock -- updates may race.
            payload["metadata"]['IP'] = "1.1.1." + str(ip_count)
            ip_count += 1
            if(ip_count) > 100:
                ip_count = 1
            response = requests.request("POST", url, headers=headers, data=json.dumps(payload))
            data = response.json()
            if (data['Authentication'] == True):
                sucessfulBreach += 1
            elif (data['Authentication'] == False):
                failedAuth += 1
            elif (data['Authentication'] == 'Blocked'):
                blocked += 1
            self.q.task_done()
# Start all the threads
for i in range(n_thread):
    t = ThreadClass(q,i)
    t.setDaemon(True)
    #Start thread
    t.start()
# put all the username, password into a list
# NOTE(review): csvfile is never closed; a with-statement would be safer.
legitimate_list = []
csvfile = open(LEGITIMATE_USER_DATA)
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
    payload = {'user': '', 'password': '', 'metadata': ''}
    metadata = {'IP': '', 'Cookie': 0, 'Redirect': 0, 'UserAgent': ''}
    userAgent = {'OS': 'MacOSX', 'Browser': 'Chrm'}
    metadata['UserAgent'] = userAgent
    payload['metadata'] = metadata
    payload['user'] = row[0]
    # 10% of requests simulate a password typo by a legitimate user.
    if(random.choices([0,1], [0.9,0.1])[0]):
        payload['password'] = passwordTypo(row[1])
    else:
        payload['password'] = row[1]
    payload['metadata']['IP'] = '1.1.1.1'
    payload['metadata']['Attack'] = -1
    payload['metadata']['Cookie'] = random.choices([0,1],[0.5,0.5])[0]
    legitimate_list.append(payload)
# Queue ~12000 randomly-chosen requests; deepcopy so each worker can
# mutate its own payload's IP without affecting the template.
starttime = time.time()
request_count = 0
while True:
    request_count += 1
    payload = random.choice(legitimate_list)
    q.put(deepcopy(payload))
    if(request_count > 12000):
        break
q.join()
# Summary statistics (absolute counts, then percentages).
print('Number of blocked IPs: ' + str(blocked))
print('Number of successful breaches: ' + str(sucessfulBreach))
print('Number of failed auths: ' + str(failedAuth))
print('Number of blocked %: ' + str(blocked/request_count * 100))
print('Number of successful breaches: ' + str(sucessfulBreach/request_count * 100))
print('Number of failed auths: ' + str(failedAuth/request_count * 100))
print("Total time : %s seconds" %(time.time() - start_time ))
77e03b7634c4dc2a0b5397a2abe0261dbe40a150 | Python | CJ8664/leetcode | /45-jump-game-ii/45-jump-game-ii.py | UTF-8 | 541 | 3.1875 | 3 | [] | no_license | class Solution:
def jump(self, nums: List[int]) -> int:
# from the current element in the window find the
# farthest index that you can jump. That becomes
# the end point of the next window
# Number of windows is the result
l, r = 0, 0
res = 0
while r < (len(nums) - 1):
maxJump = 0
for i in range(l, r + 1):
maxJump = max(maxJump, i + nums[i])
l = r + 1
r = maxJump
res += 1
return res
| true |
a78b71f5aafc41d5e59e3113d8af2553157c1089 | Python | shentonfreude/rfd | /rfd/views.py | UTF-8 | 2,421 | 2.515625 | 3 | [] | no_license | # Templates get 'context' automatically so we don't need to pass it.
from repoze.bfg.url import model_url
from webob.exc import HTTPFound
import logging
logging.basicConfig(level=logging.INFO)
def _make_name_url(context, request, thing):
    """Build {'name', 'url'} descriptors for *thing*.

    If *thing* is mapping-like, recurse over thing.items(); otherwise return
    a single dict with the object's __name__ and its model URL.

    NOTE(review): recursing over .items() hands (key, value) tuples back to
    this function, and tuples have no __name__ -- confirm whether .values()
    was intended.

    Can I do this differently by having a url() meth on the obj?
    And use __name__ in the template?
    Or maybe we want to display a friendly 'name' instead of __name__
    """
    if hasattr(thing, 'items'):
        return [_make_name_url(context, request, t) for t in thing.items()]
    # The original left an `import pdb; pdb.set_trace()` debugger trap here,
    # which would halt every request in production; removed.
    return {'name': thing.__name__,
            'url': model_url(context, request, thing)}
def filedrop(context, request):
    """Root view: the project title plus name/url links for every drop."""
    drops = context['drops']
    return {
        'project': 'rfd (repoze file drop)',
        'droplinks': _make_name_url(context, request, drops),
    }
def add_drop(context, request):
    """Add a Drop under Drops in the top-level FileDrop.

    On POST, creates the drop named in the form and redirects back to the
    context URL; otherwise renders an empty form context.
    """
    logging.info('add_drop context=%s' % context)
    if request.method == 'POST':
        name = request.POST['name']
        drops = context['drops']
        # add_drop() persists the new drop as a side effect; the returned
        # object is currently unused.
        drop = drops.add_drop(name)
        # TODO: do I need to save() this?
        # drop.attributes...
        url = model_url(context, request)
        return HTTPFound(location=url)
    return {}
def add_file(context, request):
    """Read an uploaded file from the request and describe it.

    Returns a dict with the file's name, mimetype, size (in bytes) and raw
    body; all four values are empty strings when the request is not a POST.

    The original left an `import pdb; pdb.set_trace()` debugger trap as the
    first statement (and a second copy pasted into the docstring), which
    would halt every request; both removed.
    """
    logging.info('add_file context=%s' % context)
    name = mimetype = size = body = ''
    if request.method == 'POST':
        file_ = request.POST['file']
        name = file_.filename
        mimetype = file_.type  # may be ''
        body = file_.file.read()
        size = len(body)
    return {'name' : name,
            'mimetype' : mimetype,
            'size' : size,
            'body' : body,
            }
# def upload(request):
# name = mimetype = size = body = ''
# if request.method == 'POST':
# file_ = request.POST['file']
# name = file_.filename
# mimetype = file_.type # ''
# # NO size ATTR?
# body = file_.file.read() # need binary here?
# size = len(body)
# import pdb; pdb.set_trace()
# return {'name' : name,
# 'mimetype' : mimetype,
# 'size' : size,
# 'body' : body,
# }
| true |
f8b7743b2341cfa7149f4d7bfb87c4b9187655f5 | Python | Sahara241/opencv- | /opencv18.py | UTF-8 | 639 | 3.171875 | 3 | [] | no_license | #Canny Edge Detection in OpenCV
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load both images as grayscale (flag 0) and run Canny edge detection
# with hysteresis thresholds 100 (low) and 200 (high).
img=cv2.imread('ronaldo.jpg',0)
img2=cv2.imread('lena.jpg',0)
canny=cv2.Canny(img,100,200)
lena=cv2.Canny(img2,100,200)
# NOTE(review): titles and images are out of step -- images is
# [img, canny, img2, lena], so the 'image2' title labels img's edges.
titles=['image','image2','canny','canny2']
images=[img,canny,img2,lena]
# 2x2 grid: one subplot per image, axis ticks hidden.
for i in range(4):
    plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
#The canny edge detection algorthm in steps:
# 1.)Noise reduction
# 2.)Gradient calculation
# 3.)Non-maximum supression
# 4.)Double threshold
# 5.)Edge Tracking by Hysteresis | true |
07f02225103dc93909358528f2c960cb1d2579c3 | Python | jimfred/python | /PythonSwigWindows/HurryTutorial/RunMe.py | UTF-8 | 169 | 2.765625 | 3 | [] | no_license | import example
# Exercise the SWIG-generated wrapper module built from the C example:
# a global variable (via cvar), two functions and a time helper.
print(example.cvar.My_variable) # 3.21
print(example.fact(5)) # 120
print(example.my_mod(7,3)) # 1
print(example.get_time()) # '2021-08-30 15:37:08'
| true |
cff6e849ac4f861e31065c785177efa5d2626e2f | Python | ccrain78990s/Python-Exercise | /0413 客服機器人/1-複習/Mylib.py | UTF-8 | 55 | 2.6875 | 3 | [
"MIT"
] | permissive | def ILikeEat(x):
print("我愛吃"+""+x+""+"嗎?")
| true |
bbe3cb24c9b083f1599d6f44e7ae67f2f25ddcf1 | Python | alimahmoudi29/tsdate | /tests/test_cache.py | UTF-8 | 1,950 | 2.640625 | 3 | [
"MIT"
] | permissive | """
Tests for the cache management code.
"""
import os
import pathlib
import unittest
import appdirs
import numpy as np
import tsdate
from tsdate.prior import ConditionalCoalescentTimes
class TestSetCacheDir(unittest.TestCase):
    """
    Tests the set_cache_dir function.
    """

    def test_cache_dir_exists(self):
        """The default cache dir is the per-user appdirs location."""
        cache_dir = pathlib.Path(appdirs.user_cache_dir("tsdate", "tsdate"))
        self.assertEqual(tsdate.get_cache_dir(), cache_dir)

    def test_cached_prior(self):
        """Precalculated priors are cached, reused for larger n, and removable."""
        # Force approx prior with a tiny n
        fn = ConditionalCoalescentTimes.get_precalc_cache(10)
        if os.path.isfile(fn):
            self.skipTest(f"The file {fn} already exists. Delete before testing")
        with self.assertLogs(level="WARNING") as log:
            priors_approx10 = ConditionalCoalescentTimes(10)
        self.assertEqual(len(log.output), 1)
        self.assertIn("user cache", log.output[0])
        priors_approx10.add(10)
        # Check we have created the prior file
        self.assertTrue(os.path.isfile(fn))
        priors_approxNone = ConditionalCoalescentTimes(None)
        priors_approxNone.add(10)
        self.assertTrue(
            np.allclose(priors_approx10[10], priors_approxNone[10], equal_nan=True)
        )
        # Test when using a bigger n that we're using the precalculated version
        # (assertEquals is a deprecated alias; use assertEqual)
        priors_approx10.add(100)
        self.assertEqual(priors_approx10[100].shape[0], 100 + 1)
        priors_approxNone.add(100, approximate=False)
        self.assertEqual(priors_approxNone[100].shape[0], 100 + 1)
        self.assertFalse(
            np.allclose(priors_approx10[100], priors_approxNone[100], equal_nan=True)
        )
        priors_approx10.clear_precalculated_priors()
        self.assertFalse(
            os.path.isfile(fn),
            "The file "
            + fn
            + " should have been "  # original message lacked this space
            + "deleted, but has not been. Please delete it",
        )
| true |
e4b78bd8e1c173cfc6c37b54acc4105142439c0f | Python | gslavine30/slx_projecet | /data_etl/clickhouse/weidu_industry.py | UTF-8 | 2,079 | 2.75 | 3 | [] | no_license | from clickhouse_driver import Client
clickhouse_config = {'host': "39.100.224.138",
'port': '9090',
'database': 'JIANG',
'user': "default",
'password': "slx2021"
}
client = Client(**clickhouse_config)
def query_ep(depth):
    """Return all industry rows at the given tree ``depth``.

    Returns an empty list when the query fails; the original left
    ``industry_list`` unbound after printing the exception, so the final
    ``return`` raised NameError.

    NOTE(review): the SQL is built by string formatting -- with an untrusted
    ``depth`` this is injectable; prefer clickhouse parameter binding.
    """
    query_sql = """SELECT * FROM industry WHERE `depth` ={depth}""".format(depth=depth)
    print(query_sql)
    industry_list = []
    try:
        industry_list = client.execute(query_sql)
    except Exception as e:
        print(e)
    return industry_list
def query_ep_by_code(code):
    """Return the industry rows matching ``code``.

    Returns an empty list when the query fails; the original left
    ``industry_info`` unbound after printing the exception, so the final
    ``return`` raised NameError.

    NOTE(review): string-formatted SQL -- use parameter binding for any
    untrusted ``code``.
    """
    query_sql = """SELECT * FROM industry WHERE `code` = '{code}' """.format(code=code)
    industry_info = []
    try:
        industry_info = client.execute(query_sql)
    except Exception as e:
        print(e)
    return industry_info
def insert_data(industry_weidu):
    """Insert one industry_weidu row (code + three level labels).

    NOTE(review): the INSERT is built by string formatting, so any quote in
    the values breaks the statement and untrusted input is injectable --
    prefer clickhouse_driver's parameterised execute().
    """
    insert_sql = "INSERT INTO industry_weidu VALUES ('{}','{}','{}','{}');".format(industry_weidu['industry_code'],
                                                                                  industry_weidu["industry_level1"],
                                                                                  industry_weidu["industry_level2"],
                                                                                  industry_weidu['industry_level3'])
    print(insert_sql)
    try:
        client.execute(insert_sql)
    except Exception as e:
        # "插入失败" = insert failed (runtime message kept as-is)
        print('插入失败')
        print(e)
if __name__ == "__main__":
    # For every depth-1 industry, build a weidu record from its code and
    # level-1 label and insert it.
    industry3list = query_ep(depth=1)
    for i in range(len(industry3list)):
        industry_weidu = {"industry_code": "",
                          "industry_level1": "",
                          "industry_level2": None,
                          "industry_level3": None
                          }
        # NOTE(review): industry2_info is fetched (row column 4 is presumably
        # the parent code -- confirm) but never used afterwards.
        industry2_info = query_ep_by_code(str(industry3list[i][4]))[0]
        industry_weidu['industry_code'] = industry3list[i][0]
        industry_weidu["industry_level1"] = industry3list[i][1]
        insert_data(industry_weidu)
| true |
2fefa8a116104344610e4dd78c17714f9c0f5aa5 | Python | niterain/digsby | /digsby/src/common/scriptengine.py | UTF-8 | 3,256 | 2.828125 | 3 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from __future__ import with_statement
from types import GeneratorType
import collections
import os.path
import sys
def runfile(filename):
    """Load *filename* (with ``~`` expanded) and execute it via runscript()."""
    path = os.path.expanduser(filename)
    with open(path) as handle:
        source = handle.read()
    runscript(source, path)
def runscript(script, filename):
    """Execute *script* (source text) in a fresh namespace, then call its
    main() function.  *filename* is used only for error messages.
    (Python 2 `exec ... in` statement syntax.)
    """
    script_globals = {}
    exec script in script_globals

    try:
        main = script_globals['main']
    except KeyError:
        raise AssertionError('script %r did not define a main() function' % filename)

    if not hasattr(main, '__call__'):
        # NOTE(review): this message has a %r placeholder but no % argument,
        # so the filename is never interpolated.
        raise AssertionError('script %r main is not callable')

    exec 'main()' in script_globals
class Trampoline(object):
    """Manage communications between coroutines"""
    # thanks PEP 342
    running = False
    def __init__(self):
        # FIFO of zero-argument resume callbacks, one per scheduled step.
        self.queue = collections.deque()
    def add(self, coroutine):
        """Request that a coroutine be executed"""
        self.schedule(coroutine)
    def run(self):
        # Drain the queue until stop() is called or no work remains;
        # returns the value produced by the last resumed step.
        result = None
        self.running = True
        try:
            while self.running and self.queue:
                func = self.queue.popleft()
                result = func()
            return result
        finally:
            self.running = False
    def stop(self):
        self.running = False
    def schedule(self, coroutine, stack=(), value=None, *exc):
        # stack is a linked list of (caller, caller_stack) pairs recording
        # which coroutine to resume when this one yields a value.
        def resume():
            try:
                if exc:
                    val = coroutine.throw(value, *exc)
                else:
                    val = coroutine.send(value)
            except:
                # NOTE(review): bare except; also, when stack is non-empty the
                # code falls through to the `print` below with `val` unbound,
                # raising UnboundLocalError -- a `return` after scheduling the
                # caller was probably intended.
                if stack:
                    # send the error back to the "caller"
                    self.schedule(
                        stack[0], stack[1], *sys.exc_info()
                    )
                else:
                    # Nothing left in this pseudothread to
                    # handle it, let it propagate to the
                    # run loop
                    raise

            print 'val is', val
            if isinstance(val, GeneratorType):
                # Yielded to a specific coroutine, push the
                # current one on the stack, and call the new
                # one with no args
                self.schedule(val, (coroutine, stack))
            elif stack:
                # Yielded a result, pop the stack and send the
                # value to the caller
                self.schedule(stack[0], stack[1], val)
            elif hasattr(val, 'schedule'):
                # Yielded an object that wants control of scheduling (e.g. an
                # async operation): hand it this trampoline, pause the loop,
                # and requeue ourselves for when it finishes.
                print 'stopping', stack
                val.schedule(self)
                self.stop()
                self.schedule(coroutine, stack)
            # else: this pseudothread has ended

        self.queue.append(resume)
def main():
    # Build a trampoline, queue the test script's coroutine, and let the
    # wx event loop drive it from OnInit.
    t = Trampoline()
    #t.add(api.guilogin(t, 'kevin', 'password'))
    #t.run()
    #return
    sys.path.append('src/tests/scripts')
    import open_im_window
    t.add(open_im_window.main())
    import wx
    class MyApp(wx.App):
        def __init__(self, trampoline):
            self.trampoline = trampoline
            wx.App.__init__(self)
        def OnInit(self):
            # NOTE(review): wx expects OnInit to return True to continue;
            # returning None (as here) may abort application startup.
            self.trampoline.run()
            #wx.Frame(None).Show()
    a = MyApp(t)
    a.MainLoop()

if __name__ == '__main__':
    main()
| true |
e48562e4b5497bdb7c8ad6149fa8a5c929bdab51 | Python | gmeader/pybadge | /writefiles/writefile.py | UTF-8 | 360 | 2.640625 | 3 | [] | no_license | # check to see if PyBadge can write to its filesystem
# must have installed boot.py on the PyBadge
# and must hold a button while rebooting the PyBadge
import board
import digitalio
import storage
import time
try:
with open("/test.txt", "a") as fp:
fp.write("hello, world!")
print('Wrote file')
except Exception as exc:
error = exc
print(error)
| true |
d5960c648f42d0aef274ea9cf78057d325e4d660 | Python | MiguelBim/Python_40_c | /Challenge_28.py | UTF-8 | 2,550 | 4.15625 | 4 | [] | no_license | # CHALLENGE NUMBER 28
# TOPIC: While Struct
# Prime Number App
# https://www.udemy.com/course/the-art-of-doing/learn/lecture/17060854#overview
import time
def check_primer_num(number):
    """Print whether *number* is prime.  Always returns None.

    Fixes the original bug where numbers <= 1 printed BOTH
    "is not prime!" and "is prime!" because the early branch fell through
    without returning.  (Function name typo "primer" kept for callers.)
    """
    if number <= 1:
        print("{} is not prime!".format(number))
        return
    # Trial division; the original's `prev_num == number: continue` guard is
    # equivalent to simply stopping the range at number.
    for divisor in range(2, number):
        if number % divisor == 0:
            print("{} is not prime!".format(number))
            return
    print("{} is prime!".format(number))
def display_primer_num(lower_bound, upper_bound):
    """Find and print every prime in [lower_bound, upper_bound].

    Prints the elapsed time, waits for the user to press enter, then lists
    the primes one per line.  A lower bound of 1 is bumped to 2.

    Cleanups over the original: the always-true ``num not in prime_numbers``
    membership test and the no-op ``else: continue`` are gone; the trial
    division loop stops before ``num`` instead of special-casing equality.
    NOTE(review): bounds below 2 other than 1 are not filtered (0 and
    negatives would be reported as "prime") -- behaviour inherited from the
    original; confirm intended input range.
    """
    process_start_time = time.time()
    if lower_bound == 1:
        lower_bound = 2
    prime_numbers = []
    for num in range(lower_bound, upper_bound + 1):
        is_prime = True
        for divisor in range(2, num):
            if num % divisor == 0:
                is_prime = False
                break
        if is_prime:
            prime_numbers.append(num)
    print("\nCalculations took a total of {} seconds.".format(round(time.time() - process_start_time, 4)))
    print("The following numbers between {} and {} are prime:".format(lower_bound, upper_bound))
    input("Press enter to continue:")
    for num in prime_numbers:
        print(num)
    return
if __name__ == '__main__':
    # Menu loop: 1 = test a single number, 2 = list primes in a range.
    # Repeats until the user declines to run again.
    print("Welcome to the Prime Number App")
    run_app = True
    while run_app:
        print("\nEnter 1 to determine if a specific number is prime.")
        print("Enter 2 to determine all prime numbers within a set range")
        user_selection = int(input("Enter your choice 1 or 2: ").strip())
        if user_selection == 1:
            # NOTE(review): "tod" typo lives in a runtime prompt string.
            num = int(input("\nEnter a number tod determine if it is a prime or not: ").strip())
            check_primer_num(num)
        elif user_selection == 2:
            lower_limit = int(input("\nEnter the lower bound of your range: ").strip())
            upper_limit = int(input("Enter the upper bound of your range: ").strip())
            display_primer_num(lower_limit, upper_limit)
        else:
            print("\nThat is not a valid option")
        run_option = input("\nWould you like to run the program again (y/n): ").strip().lower()
        if run_option != 'y':
            print("\nThank you for using the program. Have a nice day.")
            run_app = False
25cadd8183ce0587f019f57b32974c214c859437 | Python | wudc5/Python_Teach | /Spider/getPicure.py | UTF-8 | 882 | 2.796875 | 3 | [] | no_license | #coding=utf-8
import urllib
import re
import urllib2
proxy_info = {'host': 'web-proxy.oa.com', 'port':8080}
proxy_support = urllib2.ProxyHandler({"http": "http://%(host)s:%(port)d" % proxy_info})
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
def getHtml(url):
    """Fetch *url* (through the proxy opener installed above) and return the
    raw response body.  Python 2 urllib API.
    NOTE(review): the response object is never closed.
    """
    page = urllib.urlopen(url)
    htmlcontent = page.read()
    return htmlcontent
def getImg(htmlcontent):
reg = r'src="(.+?\.jpg|png)"'
imgre = re.compile(reg)
imgurllist = re.findall(imgre, htmlcontent)
x = 0
for imgurl in imgurllist:
# if len(imgurl) ==0 or len(imgurl) >100:
# continue
# urllib.urlretrieve(imgurl, 'C:\Users\wdc\Pictures\%s.jpg' % x)
x += 1
print "imgurl: ", imgurl
if __name__ == "__main__":
html = getHtml("http://image.baidu.com")
print html
getImg(html) | true |
75adb7844013a711a39da6132c449c787577aba7 | Python | RUPAK7406/leetCode-python-solutions | /leetCode-349.py | UTF-8 | 220 | 2.890625 | 3 | [] | no_license | class Solution:
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
nums1 = set(nums1)
nums2 = set(nums2)
result = nums1.intersection(nums2)
return result
| true |
c8c1c01e4710c344fe5449f5789f9d7b28ad423f | Python | spiedeman/LeetCode-Note | /html2md.py | UTF-8 | 5,116 | 2.78125 | 3 | [] | no_license | import os
import sys
import re
from functools import partial
LOCAL_PATH=os.path.split(os.path.abspath(sys.argv[0]))[0]
class HTML2MARKDOWN(object):
def __init__(self, info=dict(), output='output.md', solution_path='.'):
self.info = info
self.problem = ''
self.output = solution_path+'/'+output
self.rules = []
def template(self):
with open(LOCAL_PATH+'/template.md', 'r') as f:
page = ''.join(f.readlines())
return page
def rule_apply(self, match):
code = match.group(1)
for rule in self.rules:
pat, repl = rule
code = re.sub(pat, repl, code)
return '</pre>{}<pre>'.format(code)
def convert(self):
"""
根据注册的规则将 html 文本转换为 markdown 格式
"""
level = {1: '简单', 2: '中等', 3: '困难'}
is_ac = {None: '未做', 'ac': '已解答'}
frontend_id = self.info['frontend_id']
url = self.info['url']
status = is_ac[self.info['status']]
accept_ratio = 100 * self.info['accept_ratio']
difficulty = level[self.info['level']]
title = self.info['title_zh'].replace('-', ' ')
content = self.info['content_zh']
if not self.rules:
self.rigister()
# <pre></pre> 结构内的字符串不作处理
content = '</pre>{}<pre>'.format(content)
content = re.sub(re.compile(r'</pre>(.*?)<pre>',flags=re.DOTALL), self.rule_apply, content)
content = re.match(re.compile(r'</pre>(.*)<pre>',flags=re.DOTALL), content).group(1)
content = re.sub(r'(\n{3,})', r'\n\n', content)
form = (frontend_id, title, url, status, accept_ratio, difficulty, content)
self.problem = self.template().format(*form)
def write(self):
"""
转换后的文本写入到硬盘
"""
# 若文件已存在,修改多半因为更新了html转markdown格式的新规则
# 因此题目分析和相应代码部分应做保留
if os.access(self.output, os.F_OK):
with open(self.output, 'r') as f:
content = ''.join(f.readlines())
rule = re.compile(r'(## 题目分析.*$)', flags=re.DOTALL)
reserve = re.search(rule, content).group(1)
self.problem = re.sub(rule, reserve, self.problem)
# print(self.problem)
with open(self.output, 'w') as f:
f.write(self.problem)
def rigister(self):
rules_func = ['self.'+rule+'()' for rule in self.__dir__() if '_rules' in rule]
for rule_func in rules_func:
# print(rule_func)
exec(rule_func)
def common_rules(self):
"""
普通段落、强调、斜体
"""
self.rules.append([re.compile(r'<p>(.*?)</p>', flags=re.DOTALL), r'\1'])
self.rules.append([re.compile(r'<strong>(.*?)</strong>', flags=re.DOTALL), r'**\1**'])
self.rules.append([re.compile(r'<em>(.*?)</em>', flags=re.DOTALL), r'*\1*'])
def code_rules(self):
"""
行内代码和代码块
"""
self.rules.append([re.compile(r'<code>(.*?)</code>', flags=re.DOTALL), r'`\1`'])
# self.rules.append([re.compile(r'<pre>(.*?)</pre>', flags=re.DOTALL), r'''\n\1\n'''])
def list_rules(self):
"""
列表(有序、无序)
"""
def replacement_list(match, order=False):
code = match.group(1)
if order:
# 有序列表
for i, tmp in enumerate(re.finditer(r'<li>(.*?)</li>', code)):
pat = re.sub(r'<li>(.*?)</li>', r'.*?<li>(\1)</li>.*?', re.escape(tmp.group(0)))
code = re.sub(pat, r'{}. \1'.format(i+1), code)
else:
# 无序列表
code = re.sub(r'.*?<li>(.*?)</li>.*?', r'- \1', code)
return code
# 无序列表
self.rules.append([re.compile(r'<ul>(.*?)</ul>', flags=re.DOTALL), replacement_list])
# 有序列表
self.rules.append([re.compile(r'<ol>(.*?)</ol>', flags=re.DOTALL), partial(replacement_list, order=True)])
def punctuation_rules(self):
"""
标点符号(引号等)
"""
# 单、双引号
self.rules.append([re.compile(r''', flags=re.DOTALL), r"'"])
self.rules.append([re.compile(r'"', flags=re.DOTALL), r'"'])
self.rules.append([re.compile(r'“', flags=re.DOTALL), r'“'])
self.rules.append([re.compile(r'”', flags=re.DOTALL), r'”'])
def math_rules(self):
"""
数学符号
"""
# 二元关系
self.rules.append([re.compile(r'<', flags=re.DOTALL), r'<'])
self.rules.append([re.compile(r'>', flags=re.DOTALL), r'>'])
def special_rules(self):
"""
html中的空格
"""
self.rules.append([re.compile(r' ', flags=re.DOTALL), r''])
self.rules.append([re.compile(r'<br />', flags=re.DOTALL), r'\n'])
| true |
b8b5725590710126db4ab8b2fc12f194e55efbd3 | Python | erjillsison/Reddit-Wallpaper-Downloader | /Source Codes/scheduler.py | UTF-8 | 2,042 | 2.828125 | 3 | [] | no_license | import os, subprocess, sys, time
#Get and set the current working directory
# (sys.frozen is set when bundled, e.g. by PyInstaller)
if getattr(sys, 'frozen', False):
    absWorkingDir = os.path.dirname(sys.executable)
elif __file__:
    absWorkingDir = os.path.dirname(__file__)
os.chdir(absWorkingDir)
# The wallpaper-changer script that the scheduled task will launch.
filePath = os.path.join(absWorkingDir,"rwd.pyw")
# schtasks parameters, set later by askInterval():
sc = ''  # /SC schedule unit ('MINUTE' or 'HOURLY')
mo = ''  # /MO modifier (the numeric interval)
def quit():
    """Pause briefly so the console output stays readable before the window
    closes.  (Shadows the builtin ``quit``; name kept because
    createSched()/deleteSched() call it.)"""
    print('Done, closing window...')
    time.sleep(2)
    #input('Done')
def createSched():
    """Create (or replace) the Windows scheduled task that runs rwd.pyw
    every ``mo`` units of ``sc`` (set beforehand by askInterval())."""
    print()
    command=['schtasks.exe','/CREATE','/SC','%s'%sc,'/TN','Reddit Wallpaper Changer','/TR','pyw.exe "%s"'%filePath,'/MO','%s'%mo]
    proc = subprocess.Popen(command)
    proc.wait()
    quit()
def askInterval():
    """Prompt for a wallpaper-change interval and create the scheduled task.

    Accepts input of the form "<number> min" or "<number> hour"; loops until
    it parses, then sets the module-level ``sc`` (schtasks /SC unit) and
    ``mo`` (/MO modifier) and calls createSched().

    The original caught the int() failure with a bare ``except:``, which
    also swallowed KeyboardInterrupt/SystemExit; narrowed to ValueError.
    """
    while True:
        print()
        userInput= input('Enter interval between wallpaper change: (format example: 1 hour, 2 min)\n')
        userInput = userInput.split()
        if len(userInput)==2:
            try:
                global mo
                mo = int(userInput[0])
            except ValueError:
                print('Wrong input')
                continue
            global sc
            if userInput[1] == 'min':
                sc = 'MINUTE'
                createSched()
                break
            elif userInput[1] == 'hour':
                sc = 'HOURLY'
                createSched()
                break
            else:
                print('Wrong input')
                continue
        else:
            print('Wrong input')
            continue
def deleteSched():
    """Remove the 'Reddit Wallpaper Changer' scheduled task via schtasks."""
    print()
    command=['schtasks.exe','/delete','/TN','Reddit Wallpaper Changer']
    p = subprocess.Popen(command)
    p.wait()
    quit()
def mainMenu():
    """Top-level menu: create/update the schedule (1) or delete it (2).
    Loops until a valid choice is entered."""
    while(True):
        userInput = input('MAIN MENU:\n[1]:Create new schedule or change current schedule\n[2]:Delete schedule\nEnter value: ')
        if userInput == '1':
            askInterval()
            break
        elif userInput == '2':
            deleteSched()
            break
        else:
            print('wrong input')
            continue

# Script entry point.
mainMenu()
| true |
cf55afaaab82dc67f842f67c045e8c4e189da463 | Python | kanishkegb/CSCI-6527-projects | /Project-1/crop_funcs.py | UTF-8 | 4,864 | 3.515625 | 4 | [] | no_license | def crop_aligned_image(img, roll_g, roll_r):
'''
Crop the aligned image consideing the outside border and the amount of
pixels the G and R layers were rolled to align them.
Args:
img: array - aligned image
roll_g: tuple - amount of pixels the G layer was rolled to align it
with B layer
roll_r: tuple - amount of pixels the R layer was rolled to align it
with B layer
Returns:
cropped: array - cropped image
'''
h, w, c = img.shape
crop_outer = 0.04
t, b, l, r = crop_limits(h, w, crop_outer)
t_adj, b_adj = crop_limits_gr(roll_g[0], roll_r[0])
l_adj, r_adj = crop_limits_gr(roll_g[1], roll_r[1])
cropped = img[t + t_adj:b + b_adj, l + l_adj:r + r_adj, :]
return cropped
def crop_limits_gr(g, r):
    '''
    Finds the crop limits based on how many pixels the G and R layers were
    rolled in order to align them with the B layer.

    Args:
        g: int - number of pixels G layer was rolled
        r: int - number of pixels R layer was rolled

    Returns:
        crop1, crop2: ints - amount of pixels to be cropped on the top/left
            side and on the bottom/right side
    '''
    # A positive roll requires cropping from the top/left, a negative roll
    # from the bottom/right.  Folding 0 into max/min reproduces all four
    # branches of the original if/elif chain AND covers the previously
    # unhandled g == 0 / r == 0 inputs, which fell through to (0, 0).
    crop1 = max(g, r, 0)
    crop2 = min(g, r, 0)
    return crop1, crop2
def crop_limits(h, w, p):
    '''
    Calculates crop limits in pixels so that an image can be cropped by a
    percent along all borders.

    Args:
        h: int - height of the image
        w: int - width of the image
        p: double - percentage (between 0.0 - 1.0) crop limits

    Returns:
        t, b, l, r: ints - top, bottom, left, right pixel values
    '''
    top, bottom = int(p * h), int((1 - p) * h)
    left, right = int(p * w), int((1 - p) * w)
    return top, bottom, left, right
def detect_edges(img):
    '''
    Detects the border of the image based on the difference between the
    white outer space and the black border.

    Fix: removed the two dead assignments to t_lim/b_lim and l_lim/r_lim
    that were immediately overwritten by the crop_limits(h, w, 0.1) call.

    Args:
        img: array - grayscale image to be cropped (2D, h x w)
    Returns:
        t, b: int - top and bottom crop limits
        l, r: int - left and right crop limits
    '''
    h, w = img.shape
    black_threshold = 50
    # A row is accepted as border once more than 80% of its pixels are black.
    counter_threshold = w * 0.8
    t, l = find_top_left(img, black_threshold, counter_threshold)
    b, r = find_bottom_right(img, black_threshold, counter_threshold)
    # In case a boundary is not detected within a reasonable range (the
    # outer 10% band on each side), do not crop along that edge.
    t_lim, b_lim, l_lim, r_lim = crop_limits(h, w, 0.1)
    t = 0 if t > t_lim else t
    b = h if b < b_lim else b
    l = 0 if l > l_lim else l
    r = w if r < r_lim else r
    return t, b, l, r
def find_top_left(img, black_threshold, counter_threshold):
    '''
    Finds the top-left corner of the border.

    Fixes: removed the no-op `else: t_counter` statement, and defined the
    behaviour when no border row exists - the original raised
    UnboundLocalError on `return t, l` in that case; now (0, 0) ("no crop")
    is returned, which detect_edges treats as nothing to trim.

    Args:
        img: array - image with the border (2D, h x w)
        black_threshold: int - pixel values below this count as black
        counter_threshold: number of black pixels a row must exceed before
            it is accepted as the top border row
    Returns:
        t, l: ints - row of the top border and column of its left-most
            black pixel; (0, 0) when no border row is found
    '''
    h, w = img.shape
    for i in range(h):
        t_counter = 0
        l = None
        for j in range(w):
            if img[i, j] < black_threshold:
                t_counter += 1
                if l is None:
                    # Remember the first (left-most) black pixel of the row.
                    l = j
            if t_counter > counter_threshold:
                # t_counter > 0 guarantees l has been set by now.
                return i, l
    return 0, 0
def find_bottom_right(img, black_threshold, counter_threshold):
    '''
    Finds the bottom-right corner of the border.

    Fixes: removed the no-op `else: b_counter` statement, and defined the
    behaviour when no border row exists - the original raised
    UnboundLocalError on `return b, r` in that case; now (h, w) ("no crop")
    is returned, which detect_edges treats as nothing to trim.

    Args:
        img: array - image with the border (2D, h x w)
        black_threshold: int - pixel values below this count as black
        counter_threshold: number of black pixels a row must exceed before
            it is accepted as the bottom border row
    Returns:
        b, r: ints - row of the bottom border and column of its right-most
            black pixel; (h, w) when no border row is found
    '''
    h, w = img.shape
    # NOTE(review): both ranges stop at 1, so row 0 and column 0 are never
    # inspected - kept as-is to preserve the original scan behaviour.
    for i in range(h - 1, 0, -1):
        b_counter = 0
        r = None
        for j in range(w - 1, 0, -1):
            if img[i, j] < black_threshold:
                b_counter += 1
                if r is None:
                    # Remember the first (right-most) black pixel of the row.
                    r = j
            if b_counter > counter_threshold:
                # b_counter > 0 guarantees r has been set by now.
                return i, r
    return h, w
| true |
50ba3e1aab14a67273936154659cb136c0fe03d4 | Python | markbaas/markstimetracker | /markstimetracker/models.py | UTF-8 | 3,733 | 2.71875 | 3 | [] | no_license | import datetime
import random
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
# NOTE(review): computed once at import time - a long-running process keeps
# using the day the module was imported.
today = datetime.date.today()
# Reporting periods as (start, end) date pairs; the end date is exclusive
# (Task.__new__ builds relationships filtering start <= Event.start < end).
# NOTE(review): in 'last_month', replace(day=1, month=today.month) is a
# no-op month-wise and just yields the first day of the current month.
PERIODS = {'month': (today.replace(day=1),
today.replace(day=1) + relativedelta(months=1)),
'last_month': (today.replace(day=1) - relativedelta(months=1),
today.replace(day=1, month=today.month)),
'week': (today - relativedelta(days=today.weekday()),
today + relativedelta(days=7 - today.weekday())),
'last_and_current_week': (today - relativedelta(days=today.weekday() + 7),
today + relativedelta(days=7 - today.weekday())),
}
class Task(Base):
# ORM model for one tracked task (optionally mirroring a Redmine issue).
__tablename__ = 'Tasks'
id = Column(Integer, primary_key=True)
# Application-level identifier Events join against (Event.task_id);
# distinct from the surrogate primary key `id`.
task_id = Column(Integer)
# task_id of the parent task, if any.
parent = Column(Integer)
name = Column(String)
active = Column(Boolean)
# True when the task comes from Redmine.
redmine = Column(Boolean)
def __new__(cls, *args, **kwargs):
# Lazily attach one relationship per reporting period (events_week,
# events_month, ...), each filtered to events whose start lies in the
# period's [start, end) window.
# NOTE(review): the loop rebinds `args`, shadowing the constructor's
# *args tuple - harmless here since *args is not used afterwards.
for key, args in PERIODS.items():
rel = relationship(
'Event',
primaryjoin='and_(Task.task_id==Event.task_id, '
'Event.start >= "{}", Event.start < "{}")'.format(*args),
order_by='Event.start'
)
if not hasattr(cls, 'events_' + key):
setattr(cls, 'events_' + key, rel)
return super(Task, cls).__new__(cls)
@classmethod
def get_or_create(cls, session, name, task_id=None, parent=None):
# Look the task up by name; create it (added to the session, not yet
# committed) with a random task_id when missing.
# NOTE(review): random.randint gives no uniqueness guarantee - a
# collision with an existing task_id is possible.
if not task_id:
task_id = random.randint(1000000, 9999999)
task = session.query(cls).filter(cls.name == name).first()
if not task:
obj = Task(name=name, task_id=task_id, parent=parent)
session.add(obj)
return obj
else:
return task
def get_spent_time_per_day(self, period='week'):
# Sum this task's event hours per calendar day over the given period,
# then round each daily total to the nearest quarter hour.
d = defaultdict(float)
for event in getattr(self, 'events_' + period):
d[event.start_date.date()] += event.spent_time / 3600.
for key, raw_time in d.items():
d[key] = round(raw_time * 4) / 4 # rounded on quarter hours
return d
@hybrid_property
def description(self):
# Human-readable "<task_id> - <name>" label.
return '{} - {}'.format(self.task_id, self.name)
class Event(Base):
# ORM model for one time-tracking interval that belongs to a Task.
__tablename__ = 'Events'
id = Column(Integer, primary_key=True)
task_id = Column(Integer, ForeignKey('Tasks.task_id'))
task = relationship(Task, backref='events')
comment = Column(String)
# start/end are stored as strings in '%Y-%m-%d %H:%M:%S.%f' format;
# end is empty/None while the event is still running.
start = Column(String)
end = Column(String)
@hybrid_property
def start_date(self):
return datetime.datetime.strptime(self.start, '%Y-%m-%d %H:%M:%S.%f')
@hybrid_property
def end_date(self):
# None while the event has not been stopped yet.
return datetime.datetime.strptime(self.end, '%Y-%m-%d %H:%M:%S.%f') if self.end else None
@hybrid_property
def spent_time(self):
# Elapsed seconds; a still-running event is measured against "now".
# NOTE(review): timedelta.seconds only covers the sub-day remainder, so
# events spanning more than 24h under-report (total_seconds() would not).
if self.end_date is None:
return (datetime.datetime.now() - self.start_date).seconds
else:
return (self.end_date - self.start_date).seconds
@classmethod
def get_spent_time_period(cls, session, start, end):
# Total hours (each event rounded to quarter hours) of all events whose
# start string lies between the two formatted datetimes.
events = session.query(cls).filter(cls.start.between(start.strftime("%Y-%m-%d %H:%M:%S"),
end.strftime("%Y-%m-%d %H:%M:%S")))
return sum([round(x.spent_time / 3600. * 4) / 4 for x in events])
def init_db(engine):
# Create all tables declared on Base (Tasks, Events) if they are missing.
Base.metadata.create_all(engine)
| true |
afa7e5d214a0d413aafcbfb93abdb02edcf214d6 | Python | kurtd5105/AdventOfCode | /Day 14/part2.py | UTF-8 | 2,333 | 3.40625 | 3 | [] | no_license | import sys
def getNextDist(reindeer):
"""Generator yielding the reindeer's total distance after each elapsed
second, up to the 2503-second race limit.

reindeer is [name, speed (km/s), fly duration (s), rest duration (s)].

NOTE(review): the resting branch flips straight back to flying without
advancing `time`, so the rest duration reindeer[3] is never consumed and
every reindeer effectively flies continuously.
"""
limit = 2503
time = 0
prevTime = 0
timeCycle = 0
distance = 0
flying = True
while time < limit:
#If it's flying then it's moving
if flying:
#If the reindeer isn't tired and doesn't need to stop
if timeCycle < reindeer[2]:
#Increase the distance by its speed
distance += reindeer[1]
timeCycle += 1
time += 1
else:
flying = False
timeCycle = 0
else:
#If the reindeer is still tired
flying = True
timeCycle = 0
#If the total time advanced this iteration then yield the distance
if time != prevTime:
yield distance
prevTime = time
# Python 2 script (print statements, xrange, dict.iteritems below).
# Reads the puzzle input file given on the command line.
try:
with open(sys.argv[1]) as fileInput:
reindeerData = [line.strip() for line in fileInput]
except:
# NOTE(review): bare except; on failure reindeerData stays undefined and
# the parsing loop below raises NameError instead of exiting cleanly.
print "File could not be opened."
parsedData = []
#Parse each line so that it contains the reindeer name, speed, cooldowns (2)
for line in reindeerData:
line = line.replace(" can fly", "").replace(" km/s for", "")
line = line.replace(" seconds, but then must rest for", "")
line = line.split(" ")[:4]
line[1] = int(line[1])
line[2] = int(line[2])
line[3] = int(line[3])
parsedData.append(list(line))
distances = {}
scores = {}
#Create an entry for every reindeer and containing a list of its distance every second
for reindeer in parsedData:
distances[reindeer[0]] = [dist for dist in getNextDist(reindeer)]
scores[reindeer[0]] = 0
limit = 2503
#Award one point per second to every reindeer currently in the lead
for i in xrange(limit):
values = {}
#Create a keyword for each reindeer's distance, with a list of reindeer with that value
for reindeer in distances:
#If the value doesn't exist yet then add it with the reindeer as its key
if distances[reindeer][i] not in values:
values[distances[reindeer][i]] = [reindeer]
else:
values[distances[reindeer][i]].append(reindeer)
#Get the max distance travelled
maxDist = max([value for value in values])
leads = values[maxDist]
#Increase the score for every leader
for leader in leads:
scores[leader] += 1
scoreValues = []
#Put all the scores into a list
for key, value in scores.iteritems():
scoreValues.append(value)
#Print the maximum score obtained
print max(scoreValues)
| true |
0548a067fd315bcb2b743ab306ff2e73541a83f4 | Python | hetaov/study | /data/read_rules.py | UTF-8 | 191 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
def read():
    """Open rules.xlsx and return its 6th worksheet (index 5).

    Fix: the original body computed ``sheet`` but never returned it, so the
    caller (``rules = read()``) always received None.

    NOTE(review): ``open_workbook`` is not imported anywhere in this file -
    it needs ``from xlrd import open_workbook`` to actually run.
    """
    book = open_workbook('rules.xlsx')
    sheets = book.sheets()
    sheet = sheets[5]
    return sheet
if __name__ == '__main__':
# Python 2 script: "print rules" below is a print statement.
rules = read()
print rules
| true |
1462a332647a738fd6bfc6252eb91d4677288587 | Python | Prashast07/Python-Projects | /findBob.py | UTF-8 | 191 | 3.15625 | 3 | [] | no_license | s = "bobhdhaboblklbobbob"
# Count the (possibly overlapping) occurrences of "bob" in s, which is
# defined on the preceding line. Python 2 script (print statement below).
count = 0
subString = ""
for i in range(len(s)):
# Only slice a 3-char window when the current char can start a match.
if s[i] == 'b':
subString = s[i:i+3]
if subString == "bob":
count += 1
print count
| true |
e1baea77798df41f9d34f05648d6c783e317a10a | Python | Jingyueshi/MyPython | /mztespro/baidu.py | UTF-8 | 672 | 3.1875 | 3 | [] | no_license | # -*- coding utf-8 -*-
# @author:"zhangJingHua"
# Selenium smoke script: opens Baidu in Chrome and inspects a few elements.
import time
from selenium import webdriver
# driver=webdriver.Firefox()
driver=webdriver.Chrome()
driver.get("https://www.baidu.com")
# Get the size of the search input box
size=driver.find_element_by_id('kw').size
print(size)
# Get the ICP/filing notice text at the bottom of the Baidu page
text=driver.find_element_by_id("cp").text
print(text)
# Get an attribute value of the element; works for id, name, type or any other attribute
attribute=driver.find_element_by_id("kw").get_attribute('type')
print(attribute)
# Whether the element is visible on the page; returns True or False
result=driver.find_element_by_id("kw").is_displayed()
print(result)
# Keep the browser open for 30 seconds before quitting (next line).
time.sleep(30)
driver.quit() | true |
698ca3ec8867ab65094e95c210288e6d1cd78bbd | Python | abhi-laksh/Python | /Adv/Photoshop/a.py | UTF-8 | 52 | 2.59375 | 3 | [] | no_license | lst =[['A','B'],'\n' , ['C' , 'D']]
print(str(lst)) | true |
b1be8efd5df8945dd1861156cafc124e80ad6151 | Python | Valijon21/python-lessons | /numlist.py | UTF-8 | 506 | 2.875 | 3 | [] | no_license | sonlar = [17,7,21,1993,-2,56.2,22.3,2]
print(sonlar)
# Basic arithmetic on elements of the `sonlar` list (names are Uzbek:
# qush=sum, ayir=difference, kupaytir=product, bul=quotient,
# ildiz=square root, kvadrat=square).
qush = sonlar[0]+sonlar[2]
ayir = sonlar[1]-sonlar[7]
kupaytir = sonlar[3]*sonlar[4]
bul = sonlar[4]/sonlar[5]
ildiz = sonlar[1]**(1/2)
# NOTE(review): judging by the printed labels, `butun` ("whole part") and
# `qoldiq` ("remainder") look swapped: % is the remainder, // the quotient.
butun = sonlar[0]%sonlar[7]
qoldiq = sonlar[1]//sonlar[7]
kvadrat = sonlar[1]**2
print(f" yig'indi: {qush} \n ayirma: {ayir} \n kupaytma: {kupaytir}\n bulish:{bul}\n ildiz:{ildiz} \n butun qism:{butun} \n qoldiq{qoldiq}\n kvadrat:{kvadrat}")
# Lists are mutable: replace and update individual elements in place.
sonlar[4] = 2000
sonlar[5] = 100
sonlar[6] = sonlar[6]+12.7
print(sonlar)
| true |
54481ddc69663a918e744c120511fede8ccd0a5c | Python | EverlastingBugstopper/tiny-projects | /calc3_scripts/vectors/inputmenu.py | UTF-8 | 992 | 3.484375 | 3 | [] | no_license | class Menu:
def __init__(self, title="Menu", options=None):
    """Create a menu with a title and a list of option labels.

    Fix: the original signature used a mutable default
    (options=["Option 1", "Option 2"]). That single list object was shared
    by every Menu created without explicit options, and since __str__
    temporarily appends/removes the title on self.options, instances could
    corrupt each other's (and future defaults') option lists. Callers that
    pass their own list, or rely on the default labels, are unaffected.
    """
    self.options = ["Option 1", "Option 2"] if options is None else options
    self.result = -1
    self.title = title
def __str__(self):
# Render the menu: a dashed header line containing the title, the
# numbered options, and a closing dashed line.
result = ""
# Temporarily add the title so max() considers it when sizing the box.
# NOTE(review): mutating self.options means instances that share one
# list (e.g. via the mutable default in __init__) interfere with each
# other while rendering.
self.options.append(self.title)
maxLength = int(len(max(self.options, key=len)) + len(str(len(self.options))))
dashes = int((maxLength - (len(self.title) - 2)) / 2)
self.options.remove(self.title)
for dash in range(dashes):
result += "-"
result += " " + str(self.title) + " "
for dash in range(dashes):
result += "-"
result += "\n"
# 1-based numbering for display; get_input converts back to 0-based.
for index, option in enumerate(self.options):
result += str(index + 1) + ": " + option + "\n"
for dash in range((dashes * 2) + len(self.title) + 2):
result += "-"
result += "\n"
return result
def get_input(self):
# Show the rendered menu and read the user's 1-based choice, storing it
# 0-based in self.result.
# NOTE(review): int() raises ValueError on non-numeric input, and
# too-large choices are not validated here (see get_result).
print(self.__str__())
self.result = int(input("Enter the number of your choice: ")) - 1
def get_result(self):
# Return the chosen option label; prints an error and implicitly returns
# None when no valid choice was stored.
# NOTE(review): only negative indices are rejected - an index past the
# end of self.options still raises IndexError.
if (self.result < 0):
print("Invalid choice: " + str(self.result))
else:
return self.options[self.result] | true |
9718b912d2d30c8e68dc4ec939a8003892b60cd3 | Python | Eric-Hsieh97/2019ChallengeEntries | /DoctorWho/sepsis_challenge_s6/src/mgp/mgp.py | UTF-8 | 8,646 | 2.78125 | 3 | [
"BSD-2-Clause"
] | permissive | '''
MGP Module (code for the most part copied from Futoma et al. 2017 ICML)
'''
import tensorflow as tf
import numpy as np
from .mgp_utils import OU_kernel,CG,Lanczos,block_CG,block_Lanczos
#------------------------------------------------
##### Convinience classes for managing parameters
class DecompositionMethod():
    """Selects how the MGP observation covariance is decomposed.

    methodname must be 'chol' (Cholesky factorisation) or 'cg' (conjugate
    gradients / Lanczos); add_diag is the jitter added to the covariance
    diagonal for numerical stability.
    """

    valid_methods = ['chol', 'cg']

    def __init__(self, methodname, add_diag=1e-3):
        if methodname in self.valid_methods:
            self.methodname = methodname
            self.add_diag = add_diag
        else:
            raise ValueError(
                '{} is not a valid methodname. Must be one of {}'.format(
                    methodname, self.valid_methods))
class GPParameters():
# Container for the trainable multi-task GP hyperparameters.
def __init__(self, input_dim, n_mc_smps, decomp_method, pad_before):
# input_dim: number of labs/tasks M; n_mc_smps: Monte Carlo draws per
# encounter; decomp_method: a DecompositionMethod instance.
self.input_dim = input_dim
# Length-scale is optimised in log space to keep it positive.
self.log_length = tf.Variable(tf.random_normal([1],mean=1,stddev=0.1),name="GP-log-length")
self.length = tf.exp(self.log_length)
#different noise level of each lab
self.log_noises = tf.Variable(tf.random_normal([input_dim],mean=-2,stddev=0.1),name="GP-log-noises")
self.noises = tf.exp(self.log_noises)
#init cov between labs
self.L_f_init = tf.Variable(tf.eye(input_dim),name="GP-Lf")
# Keep only the lower triangle so Kf = Lf Lf^T is positive semi-definite.
self.Lf = tf.matrix_band_part(self.L_f_init,-1,0)
self.Kf = tf.matmul(self.Lf,tf.transpose(self.Lf))
self.n_mc_smps = n_mc_smps
#which decomposition method of Covariance matrix to use:
self.method = decomp_method
#boolean if GP draws should be padded before or afterwards (in time axis) before clf
self.pad_before = pad_before
#------------------------------------------------
##### Tensorflow functions to draw samples from MGP
def draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti, gp_params):
"""
given GP hyperparams and data values at observation times, draw from
conditional GP
inputs:
length,noises,Lf,Kf: GP params (taken from gp_params)
Yi: observation values
Ti: observation times
Xi: grid points (new times for tcn)
ind_kfi,ind_kti: indices into Y (task index and time index per value)
returns:
draws from the GP at the evenly spaced grid times Xi, given hyperparams and data
"""
n_mc_smps, length, noises, Lf, Kf, method = gp_params.n_mc_smps, gp_params.length, gp_params.noises, gp_params.Lf, gp_params.Kf, gp_params.method
M = gp_params.input_dim
ny = tf.shape(Yi)[0]
# Temporal OU kernel between the observation times.
K_tt = OU_kernel(length,Ti,Ti)
D = tf.diag(noises)
grid_f = tf.meshgrid(ind_kfi,ind_kfi) #same as np.meshgrid
Kf_big = tf.gather_nd(Kf,tf.stack((grid_f[0],grid_f[1]),-1))
grid_t = tf.meshgrid(ind_kti,ind_kti)
Kt_big = tf.gather_nd(K_tt,tf.stack((grid_t[0],grid_t[1]),-1))
# Elementwise product of task covariance and time covariance gathered at
# the observed (task, time) index pairs.
Kf_Ktt = tf.multiply(Kf_big,Kt_big)
DI_big = tf.gather_nd(D,tf.stack((grid_f[0],grid_f[1]),-1))
DI = tf.diag(tf.diag_part(DI_big)) #D kron I
#data covariance.
#Either need to take Cholesky of this or use CG / block CG for matrix-vector products
Ky = Kf_Ktt + DI + method.add_diag*tf.eye(ny)
### build out cross-covariances and covariance at grid
nx = tf.shape(Xi)[0]
K_xx = OU_kernel(length,Xi,Xi)
K_xt = OU_kernel(length,Xi,Ti)
ind = tf.concat([tf.tile([i],[nx]) for i in range(M)],0)
grid = tf.meshgrid(ind,ind)
Kf_big = tf.gather_nd(Kf,tf.stack((grid[0],grid[1]),-1))
ind2 = tf.tile(tf.range(nx),[M])
grid2 = tf.meshgrid(ind2,ind2)
Kxx_big = tf.gather_nd(K_xx,tf.stack((grid2[0],grid2[1]),-1))
K_ff = tf.multiply(Kf_big,Kxx_big) #cov at grid points
full_f = tf.concat([tf.tile([i],[nx]) for i in range(M)],0)
grid_1 = tf.meshgrid(full_f,ind_kfi,indexing='ij')
Kf_big = tf.gather_nd(Kf,tf.stack((grid_1[0],grid_1[1]),-1))
full_x = tf.tile(tf.range(nx),[M])
grid_2 = tf.meshgrid(full_x,ind_kti,indexing='ij')
Kxt_big = tf.gather_nd(K_xt,tf.stack((grid_2[0],grid_2[1]),-1))
# Cross-covariance between the (task, grid-time) points and the data.
K_fy = tf.multiply(Kf_big,Kxt_big)
#now get draws!
y_ = tf.reshape(Yi,[-1,1])
# Standard-normal noise, one column per Monte Carlo sample.
xi = tf.random_normal((nx*M, n_mc_smps))
#print('xi shape:')
#print(xi.shape)
if method.methodname == 'chol':
# Exact draw: posterior mean Mu and covariance Sigma via Cholesky.
Ly = tf.cholesky(Ky)
Mu = tf.matmul(K_fy,tf.cholesky_solve(Ly,y_))
Sigma = K_ff - tf.matmul(K_fy,tf.cholesky_solve(Ly,tf.transpose(K_fy))) + method.add_diag*tf.eye(tf.shape(K_ff)[0])
#Exp2: increase noise on Sigma 1e-6 to 1e-3, to 1e-1?
#Sigma = tf.cast(Sigma, tf.float64) ## Experiment: is chol instable and needs float64? Will this crash Memory?
#draw = Mu + tf.matmul(tf.cast(tf.cholesky(Sigma),tf.float32),xi)
draw = Mu + tf.matmul(tf.cholesky(Sigma),xi)
draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])
#print('cholesky draw:')
#print(sess.run(draw_reshape))
elif method.methodname == 'cg':
# Matrix-free draw: CG for the mean, block Lanczos for Sigma^(1/2) xi;
# Sigma is never formed explicitly, only Sigma-vector products.
Mu = tf.matmul(K_fy,CG(Ky,y_)) #May be faster with CG for large problems
#Never need to explicitly compute Sigma! Just need matrix products with Sigma in Lanczos algorithm
def Sigma_mul(vec):
# vec must be a 2d tensor, shape (?,?)
return tf.matmul(K_ff,vec) - tf.matmul(K_fy,block_CG(Ky,tf.matmul(tf.transpose(K_fy),vec)))
def large_draw():
return Mu + block_Lanczos(Sigma_mul,xi,n_mc_smps) #no need to explicitly reshape Mu
#draw = tf.cond(tf.less(nx*M,BLOCK_LANC_THRESH),small_draw,large_draw)
draw = large_draw()
draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])
#print('cg draw shape:')
#print(draw_reshape.shape)
#TODO: it's worth testing to see at what point computation speedup of Lanczos algorithm is useful & needed.
# For smaller examples, using Cholesky will probably be faster than this unoptimized Lanczos implementation.
# Likewise for CG and BCG vs just taking the Cholesky of Ky once
#draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])
return draw_reshape
def get_GP_samples(minibatch, gp_params): ##,med_cov_grid
"""
returns samples from GP at evenly-spaced gridpoints

Loops (via tf.while_loop, since encounters have variable lengths) over
the N encounters of the minibatch, drawing n_mc_smps GP samples per
encounter and zero-padding each to the common grid length.
"""
#Unravel minibatch object
Y = minibatch.Y
T = minibatch.T
X = minibatch.X
ind_kf = minibatch.ind_kf
ind_kt = minibatch.ind_kt
num_obs_times = minibatch.num_obs_times
num_obs_values = minibatch.num_obs_values
num_tcn_grid_times = minibatch.num_tcn_grid_times
#cov_grid = minibatch.cov_grid
n_mc_smps, M, pad_before = gp_params.n_mc_smps, gp_params.input_dim, gp_params.pad_before
grid_max = tf.shape(X)[1]
# Z accumulates the padded draws; starts empty along the batch axis.
Z = tf.zeros([0,grid_max, M])
N = tf.shape(T)[0] #number of observations
#setup tf while loop (have to use this bc loop size is variable)
def cond(i,Z):
return i<N
def body(i,Z):
# Slice out encounter i's values/times/indices, trimmed to its true
# lengths (the batch tensors are padded to the batch maximum).
Yi = tf.reshape(tf.slice(Y,[i,0],[1,num_obs_values[i]]),[-1]) #MM: tf.reshape(x, [-1]) flattens tensor x (e.g. [2,3,1] to [6]), slice cuts out all Y data of one patient
Ti = tf.reshape(tf.slice(T,[i,0],[1,num_obs_times[i]]),[-1])
ind_kfi = tf.reshape(tf.slice(ind_kf,[i,0],[1,num_obs_values[i]]),[-1])
ind_kti = tf.reshape(tf.slice(ind_kt,[i,0],[1,num_obs_values[i]]),[-1])
Xi = tf.reshape(tf.slice(X,[i,0],[1,num_tcn_grid_times[i]]),[-1])
X_len = num_tcn_grid_times[i]
GP_draws = draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti, gp_params=gp_params)
pad_len = grid_max-X_len #pad by this much
#padding direction:
if pad_before:
print('Padding GP_draws before observed data..')
padded_GP_draws = tf.concat([tf.zeros((n_mc_smps,pad_len,M)), GP_draws],1)
else:
padded_GP_draws = tf.concat([GP_draws,tf.zeros((n_mc_smps,pad_len,M))],1)
#if lab_vitals_only:
Z = tf.concat([Z,padded_GP_draws],0) #without covs
#if covs are used:
# medcovs = tf.slice(cov_grid,[i,0,0],[1,-1,-1])
# tiled_medcovs = tf.tile(medcovs,[n_mc_smps,1,1])
# padded_GPdraws_medcovs = tf.concat([padded_GP_draws,tiled_medcovs],2)
# Z = tf.concat([Z,padded_GPdraws_medcovs],0) #with covs
return i+1,Z
i = tf.constant(0)
#with tf.control_dependencies([tf.Print(tf.shape(ind_kf), [tf.shape(ind_kf), tf.shape(ind_kt), num_obs_values], 'ind_kf & ind_kt & num_obs_values')]):
i,Z = tf.while_loop(cond,body,loop_vars=[i,Z],
shape_invariants=[i.get_shape(),tf.TensorShape([None,None,None])])
return Z
| true |
fcbdcab5f7d72840aca3b3c719a93f1edb6148e6 | Python | nanduzz/python_backup | /DBBackup.py | UTF-8 | 1,120 | 2.703125 | 3 | [] | no_license | import os
import subprocess
from subprocess import Popen, PIPE, STDOUT
class DBBackup(object):
# Dumps a MySQL database to a local .bkp file via mysqldump.
def __init__(self, endereco, database, login, senha, porta=3306):
# endereco = host, login/senha = credentials, porta = port.
self.endereco = endereco
self.database = database
self.login = login
self.senha = senha
self.porta = porta
self.filename = '{}.bkp'.format(self.database)
def print_dados(self):
# Debug helper: prints the connection settings (including the password!).
print('endereco:{}, database:{}, login:{}, senha:{}'.format(self.endereco, self.database, self.login, self.senha))
def backup(self):
#subprocess.Popen('mysqldump -h localhost -P 3306 -u -root mydb | mysql -h localhost -P 3306 -u root mydb2', shell=True)
# SECURITY(review): shell=True with interpolated host/user/password/db
# allows shell injection if any field is untrusted, and the password is
# visible in the process list (-p<senha>). Prefer an argument list with
# shell=False and a MySQL option/credentials file.
p1 = subprocess.Popen('mysqldump -h {} -P {} -u {} -p{} {}'.format(
self.endereco,
self.porta,
self.login,
self.senha,
self.database
), stdout=PIPE, shell=True)
out, err = p1.communicate()
# err is always None here: stderr was not redirected to PIPE.
fo = open(self.filename, 'w')
fo.write(out.decode("utf-8"))
fo.close()
print("backup terminado do banco de dados")
return self.filename | true |
8bc406061a502f2a0a22d65bf9425b70de4b3ef9 | Python | Haestad/datatek | /oving3/ciphers_test.py | UTF-8 | 2,039 | 2.609375 | 3 | [] | no_license | from multiplication import Multiplication
from affine import Affine
from oving3.cipher import Cipher
from rsa import RSA
from unbreakable import Unbreakable
from sender import Sender
from receiver import Receiver
from caesar import Caesar
if __name__ == '__main__':
# Smoke test: for each cipher implementation, round-trip MESSAGE through
# one Sender/Receiver pair and print a confirmation when decrypting
# reproduces the original message.
c1 = Caesar()
c2 = Multiplication()
c3 = Affine()
c4 = Unbreakable()
c5 = RSA()
s1 = Sender()
r1 = Receiver()
MESSAGE = "what if i change the message?"
# test Caesar
key = c1.generate_keys()
s1.set_cipher(c1)
s1.set_key(key)
r1.set_cipher(c1)
r1.set_key(key)
enc_melding = s1.operate_cipher(MESSAGE)
dec_melding = r1.operate_cipher(enc_melding)
if Cipher.verify(MESSAGE, dec_melding):
print("verified melding 1")
# test Multiplication
key = c2.generate_keys()
s1.set_cipher(c2)
s1.set_key(key)
r1.set_cipher(c2)
r1.set_key(key)
enc_melding = s1.operate_cipher(MESSAGE)
dec_melding = r1.operate_cipher(enc_melding)
if Cipher.verify(MESSAGE, dec_melding):
print("verified melding 2")
# test Affine
key = c3.generate_keys()
s1.set_cipher(c3)
s1.set_key(key)
r1.set_cipher(c3)
r1.set_key(key)
enc_melding = s1.operate_cipher(MESSAGE)
dec_melding = r1.operate_cipher(enc_melding)
if Cipher.verify(MESSAGE, dec_melding):
print("verified melding 3")
# test Unbreakable
key = c4.generate_keys()
s1.set_cipher(c4)
s1.set_key(key)
r1.set_cipher(c4)
r1.set_key(key)
enc_melding = s1.operate_cipher(MESSAGE)
dec_melding = r1.operate_cipher(enc_melding)
if Cipher.verify(MESSAGE, dec_melding):
print("verified melding 4")
# test RSA
# Unlike the symmetric ciphers above, sender and receiver get different
# key halves: (key[0], key[1]) vs (key[0], key[2]) - presumably
# (modulus, encryption exp) and (modulus, decryption exp); confirm
# against RSA.generate_keys.
key = c5.generate_keys()
s1.set_cipher(c5)
s1.set_key((key[0], key[1]))
r1.set_cipher(c5)
r1.set_key((key[0], key[2]))
enc_melding = s1.operate_cipher(MESSAGE)
dec_melding = r1.operate_cipher(enc_melding)
if Cipher.verify(MESSAGE, dec_melding):
print("verified melding 5")
| true |
e759de37b517d32716894db3720ec78a58981aa5 | Python | samgmorrone/IsochroneGenerator | /Script.py | UTF-8 | 1,584 | 2.578125 | 3 | [] | no_license | import arcpy #Here is our Python ArcGIS package
from arcpy import env #Env class contains all geoprocessing environments
from arcpy.sa import * #Importing spatial analysis package
import sys #sys module (information ab constants, functions, + methods
import os #os module (functions for editing directories)
# Build 1-5 minute drive-time service areas (isochrones) around fire
# stations with the ArcGIS Network Analyst toolbox.
#Check out the Network Analyst extension license
arcpy.CheckOutExtension("Network")
#Set environment settings
env.workspace = "C:/data/NewPaltz.gdb"
env.overwriteOutput = True
#tools will execute and overwrite the output dataset
#Set local variables
network_data_source = "openStreet_nd" #name of our street layer
layer_name = "serviceArea" #setting name of service area
travel_mode = "DriveTime" #Setting drive/time variable
travel_direction = "TRAVEL_FROM" #Indicates leaving from facility
break_values = "1, 2, 3, 4, 5" #Setting 1-5 min intervals
# NOTE(review): "\S" happens not to be a Python escape so this works, but a
# raw string (r"SRFD.gdb\Stations") or a forward slash would be safer.
inFacilities = "SRFD.gdb\Stations" #Grabbing station locations
#Create the Service Area Layer
sa = arcpy.na.MakeServiceAreaLayer(network_data_source,layer_name, travel_mode, travel_direction, break_values)
outNALayer = sa.getOutput(0)
#Here we create a service area, using the values we just defined.
#Get the names of all the sublayers within the service area layer.
subLayerNames = arcpy.na.GetNAClassNames(outNALayer)
#Stores the layer names that we will use later
facilitiesLayerName = subLayerNames["Facilities"]
#Load the fire stations as facilities
arcpy.na.AddLocations(outNALayer, facilitiesLayerName, inFacilities, "", "")
#Solve the Service Area Layer
arcpy.na.Solve(outNALayer)
| true |
74c583552970781c19d93d660ceeb2da79926fdf | Python | Tskatom/company_market | /code/util/extractDailyInteraction.py | UTF-8 | 4,595 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
extract the interaction between users in the daily tweet network,
we extract following actions:
tweets sent
mentions
replies
retweents
for each day, we will output a file in which each line is for a user
"""
__author__ = "Wei Wang"
__email__ = "tskatom@vt.edu"
import sys
import os
from multiprocessing import Process, Queue, freeze_support
import argparse
import logging
from datetime import datetime, timedelta
import json
from dateutil import parser
def parse_args():
    """Build and evaluate the command-line interface.

    Flags: --fold (tweets folder), --core (worker process count),
    --out (output folder), --start (minimum timestamp of files to handle).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--fold', type=str, help='the tweets folder')
    cli.add_argument('--core', type=int, help='the core num')
    cli.add_argument('--out', type=str, help='output folder')
    cli.add_argument('--start', type=str, help='the mini time to handle the files')
    return cli.parse_args()
def worker(task_queue, result_queue):
    """Worker-process loop: pull tasks off task_queue, run extract_actions
    on each, and push the result onto result_queue. A 'STOP' sentinel on
    the task queue terminates the loop."""
    while True:
        task = task_queue.get()
        if task == 'STOP':
            break
        result_queue.put(extract_actions(task))
def extract_actions(task):
# Aggregate one user's tweet files (JSON lines) into per-day activity.
# task: {"user": screen_name, "files": [paths]}.
# Returns {"user": ..., "actions": {day: {"tweets": count, "mentions":
# [names], "replies": [names], "retweets": [names]}}}.
user = task["user"]
files = task["files"]
actions = {}
for f in files:
with open(f) as tf:
for line in tf:
try:
tweet = json.loads(line)
day = parser.parse(tweet['created_at']).strftime("%Y-%m-%d")
actions.setdefault(day,{"tweets":0,
"mentions":[],
"replies":[],
"retweets":[]})
actions[day]["tweets"] += 1
#extract mention info
mention = [m["screen_name"] for m in tweet["entities"]["user_mentions"]]
actions[day]["mentions"].extend(mention)
#add reply information
if tweet["in_reply_to_screen_name"]:
actions[day]['replies'].append(tweet['in_reply_to_screen_name'])
#check retweet information
if "retweeted_status" in tweet:
ori_user = tweet['retweeted_status']['user']['screen_name']
actions[day]['retweets'].append(ori_user)
except:
# NOTE(review): bare except (with a Python 2 print statement)
# swallows every parsing error, logging only the exception type.
print 'error[%s]' % sys.exc_info()[0]
return {"user": user, "actions": actions}
def read_folder(folder):
    """Recursively yield the paths of all regular files under *folder*."""
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if not os.path.isdir(path):
            yield path
        else:
            for nested in read_folder(path):
                yield nested
def merge_files(files):
    """Sort *files* in place, then yield (name, [paths]) groups of
    consecutive files sharing the same key, where the key is the basename
    with its trailing 30 characters (a timestamp suffix) removed.

    Note: an empty input still yields a single (None, []) pair, matching
    the historical behaviour.
    """
    files.sort()
    current_name = None
    current_set = []
    for path in files:
        key = path.split('/')[-1][:-30]
        if current_name is not None and key != current_name:
            yield (current_name, current_set)
            current_set = []
        current_name = key
        current_set.append(path)
    yield (current_name, current_set)
def create_task(folder, task_queue, start=None):
# Walk each sub-folder of *folder*, group the contained files per user
# (via merge_files) and enqueue one {"user", "files"} task per group;
# returns the number of tasks queued. When *start* is given, only files
# whose path contains that timestamp string are included.
# Python 2: the two prints below are print statements.
task_count = 0
print folder
subfolders = [os.path.join(folder, f)
for f in os.listdir(folder) if os.path.isdir(os.path.join(folder,f))]
print subfolders
# NOTE(review): this loop rebinds the *folder* parameter.
for folder in subfolders:
if start is None:
files = [f for f in read_folder(folder)]
else:
files = [f for f in read_folder(folder) if f.find(start) != -1]
for name, f_set in merge_files(files):
task = {"user": name, "files":f_set}
task_queue.put(task)
task_count += 1
return task_count
def main():
# Orchestrates the run: parse CLI args, enqueue per-user tasks, fan out
# to *core* worker processes, then write one JSON line per (user, day)
# into a per-day output file. Python 2 script (print statement below).
arg = parse_args()
folder = arg.fold
core = arg.core
output = arg.out
start = arg.start
if start:
# Normalise 'YYYY-MM-DD' to the 'YYYYMMDD000000' filename timestamp.
start = start.replace('-', '') + '000000'
task_queue = Queue()
result_queue = Queue()
task_count = create_task(folder, task_queue, start)
print task_count
for i in range(core):
Process(target=worker, args=(task_queue, result_queue)).start()
#send stop signal (one sentinel per worker)
for i in range(core):
task_queue.put('STOP')
#collect one result per queued task and append to the per-day files
out_files = {}
for i in range(task_count):
actions = result_queue.get()
user = actions["user"]
for day in actions["actions"]:
if day not in out_files:
out_files[day] = open(os.path.join(output, day), "w")
out_files[day].write(json.dumps({"user": user, "actions": actions["actions"][day]}) + "\n")
for day in out_files:
out_files[day].flush()
out_files[day].close()
if __name__ == "__main__":
main()
| true |
8bec4ab7ed8a2ae6f6c482c537a74d4e624839ed | Python | Martin-Ruggeri-Bio/Desarrollo_Personal | /python/manejo_de_archivos/manejo_de archivos.py | UTF-8 | 143 | 2.609375 | 3 | [] | no_license | from io import open
# Write a short Spanish phrase ("it's a wonderful day") to archivo.txt,
# truncating any previous content ("w" mode).
archivo_texto = open("archivo.txt", "w")
frase = "es un estupendodo dia"
archivo_texto.write(frase)
# NOTE(review): a with-statement would guarantee the close on errors.
archivo_texto.close()
| true |
4703fe716c0cabfd9e3d4d61dd882b725a4fb8a8 | Python | uniqueness001/Machine_learning | /YOLO/distance_to_camera.py | UTF-8 | 1,762 | 3.015625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import numpy as np
import cv2
def find_marker(image):
# Convert the image to grayscale, blur it, and detect edges.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 35, 125)
(_,cnts,_) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Take the largest contour by area (assumed to be the marker/object).
c = max(cnts, key = cv2.contourArea)
# Compute the minimum-area bounding box of that region and return it.
return cv2.minAreaRect(c)
def distance_to_camera(knownWidth, focalLength, perWidth):
    """Compute and return the distance from the object to the camera,
    using the similar-triangles relation D = (W * F) / P, where P is the
    object's apparent width in pixels."""
    scaled_width = knownWidth * focalLength
    return scaled_width / perWidth
# Initialise the known distance from the object to the camera.
# in this case is 24 inches
# NOTE(review): the value below is 30, so the "24 inches" note looks stale.
KNOWN_DISTANCE = 30.0
# Initialise the known real-world width of the object.
KNOWN_WIDTH = 15
# Initialise the list of images (two or more).
IMAGE_PATHS = ["D:/test/pig01.jpg","D:/test/pig02.jpg","D:/test/pig03.jpg"]
# Calibrate the focal length from the first image: F = (P * D) / W,
# where P = marker width in pixels (marker[1][0]).
image = cv2.imread(IMAGE_PATHS[0])
marker = find_marker(image)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
# 遍历图像
if __name__ == "__main__":
# Iterate over the images.
for imagePath in IMAGE_PATHS:
# Load the image, find the marker, and compute the object-to-camera distance.
image = cv2.imread(imagePath)
marker = find_marker(image)
inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
# Draw the marker's bounding box on the frame and annotate the distance in feet.
box = np.int0(cv2.boxPoints(marker))
cv2.drawContours(image, [box], -1, (0, 255, 0), 2)
cv2.putText(image, "%.3fft" % (inches / 12),
(image.shape[1] - 200, image.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
2.0, (0, 255, 0), 3)
cv2.imshow("image", image)
cv2.waitKey(0) | true |
dd435e65dda81b58286cc8b26d456383aa2e474f | Python | jesugq/algorithms-v1 | /leetcode/970.py | UTF-8 | 2,121 | 3.484375 | 3 | [] | no_license | from typing import List
class Solution:
    def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:
        """Return, in ascending order, every value x**i + y**j <= bound
        (i, j >= 0), without duplicates (LeetCode 970)."""
        results = set()
        xp = 1  # current power of x
        # xp + yp >= xp + 1, so xp must stay strictly below bound.
        while xp < bound:
            yp = 1  # current power of y
            while xp + yp <= bound:
                results.add(xp + yp)
                if y == 1:
                    # 1**j never grows; avoid an infinite loop.
                    break
                yp *= y
            if x == 1:
                # Same guard for the outer base.
                break
            xp *= x
        return sorted(results)
class Main:
# Ad-hoc test harness: runs Solution.powerfulIntegers against the paired
# tests()/answers() fixtures and prints pass/fail per case.
def execute(self):
solution = Solution()
tests = self.tests()
answers = self.answers()
# Bail out silently when the fixtures are out of sync.
if len(tests) != len(answers):
return
for index in range(len(tests)):
answer = solution.powerfulIntegers(tests[index][0], tests[index][1], tests[index][2])
print(answer == answers[index], answer)
def tests(self):
# Each entry is (x, y, bound).
return [
[2, 3, 10],
[3, 5, 15],
[2, 1, 10],
[1, 2, 1000000],
[1, 1, 1],
[1, 1, 9999999],
]
def answers(self):
# Expected sorted outputs matching tests() by index.
return [
[2,3,4,5,7,9,10],
[2,4,6,8,10,14],
[2,3,5,9],
[2,3,5,9,17,33,65,129,257,513,1025,2049,4097,8193,16385,32769,65537,131073,262145,524289],
[],
[2],
]
main = Main()
main.execute() | true |
185e02eac55ec9bc20367e1b7e9a720497ae1605 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2567/60810/254734.py | UTF-8 | 501 | 3.21875 | 3 | [] | no_license | inp = input()
# Count range sums in [lower, upper] via an O(n^2) prefix-sum scan.
# Input: a bracketed comma-separated list (inp, read on the previous line),
# then the lower and upper bounds on separate lines.
nums = inp[1:len(inp)-1].split(",")
lower = int(input())
upper = int(input())
# NOTE(review): 'sum' shadows the builtin.
sum = 0
add = []
result = 0
# Build the prefix sums add[i] = nums[0] + ... + nums[i].
for i in range(0, len(nums)):
sum = sum + int(nums[i])
add.append(sum)
add = list(map(int, add))
for i in range(0, len(add)):
# Count the prefix sum itself when it lies in range.
if add[i] >= lower:
if add[i] <= upper:
result = result + 1
# Count subarray sums (j, i] as prefix differences add[i] - add[j].
for j in range(0, i):
temp = add[i] - add[j]
if temp >= lower:
if temp <= upper:
result = result + 1
print(result) | true |
f98eff32046ed4e4f1cd938bad666202aa2bb9af | Python | MrScrith/kidsGames | /main.py | UTF-8 | 3,029 | 2.8125 | 3 | [
"MIT"
] | permissive | import pygame
import time
from utils import *
import colordraw
import gamemenu
import colorfill
pygame.init()
# Current list of games, more to be added later.
# NOTE(review): gameList is not referenced anywhere in this file - the menu
# dispatches on the GAMELIST constants from utils instead.
gameList = ["Draw Colors", "Color Fill"]
def Main():
    """Main game loop: create the window, track joysticks, and delegate
    per-frame work to the active game object (menu or game).

    Returns when the window is closed or the menu selects QUIT.
    """
    js1 = None       # first detected joystick (or None)
    js2 = None       # second detected joystick (or None)
    jscount = 0      # joystick count last seen (pygame 1.x polling path)
    screen = pygame.display.set_mode((900, 500), pygame.DOUBLEBUF, 32)
    #screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN & pygame.DOUBLEBUF, 32)
    clock = pygame.time.Clock()

    gameObj = gamemenu.GameMenu(screen, js1, js2)
    gameMode = GAMELIST.MENU
    buttonPressTime = 0  # timestamp of the last "start" button press

    run = True
    while run:
        eventList = pygame.event.get()
        pygame.display.update()
        clock.tick(60)  # cap the loop at 60 FPS
        for event in eventList:
            if event.type == pygame.QUIT:
                # Fix: only stop the loop here.  The original also called
                # pygame.quit() immediately, after which the gameObj.run()
                # call below ran against a torn-down display.
                run = False
            if event.type == pygame.JOYBUTTONUP:
                # If the "start" button is pressed for more than 2 seconds quit game.
                if event.button == 8:
                    if time.time() - buttonPressTime > 2:
                        gameMode = GAMELIST.MENU
                        del gameObj
                        gameObj = gamemenu.GameMenu(screen, js1, js2)
            if event.type == pygame.JOYBUTTONDOWN:
                print(str(event))
                if event.button == 8:
                    buttonPressTime = time.time()
            if pygame.version.vernum[0] > 1:
                # pygame 2.x announces hot-plugged joysticks via events.
                if event.type == pygame.JOYDEVICEADDED:
                    if js1 is None:
                        js1 = pygame.joystick.Joystick(int(event.device_index))
                    elif js2 is None:
                        js2 = pygame.joystick.Joystick(int(event.device_index))
                    # Restart at the menu so the new joystick is wired in.
                    gameMode = GAMELIST.MENU
                    del gameObj
                    gameObj = gamemenu.GameMenu(screen, js1, js2)
            else:
                # pygame 1.x has no hot-plug events; poll the count instead.
                if pygame.joystick.get_count() > jscount:
                    jscount = pygame.joystick.get_count()
                    if js1 is None:
                        js1 = pygame.joystick.Joystick(0)
                    elif js2 is None and jscount > 1:
                        js2 = pygame.joystick.Joystick(1)
                    gameMode = GAMELIST.MENU
                    del gameObj
                    gameObj = gamemenu.GameMenu(screen, js1, js2)
        if not run:
            break  # window closed; skip the frame update below
        # Give the active game its per-frame slice; it may request a mode switch.
        newMode = gameObj.run(eventList)
        if newMode != gameMode:
            del gameObj
            gameMode = newMode
            screen.fill(COLORS.GRAY) # blank(ish) the screen for the new game.
            if gameMode == GAMELIST.MENU:
                gameObj = gamemenu.GameMenu(screen, js1, js2)
            elif gameMode == GAMELIST.DRAW:
                gameObj = colordraw.ColorDraw(screen, js1, js2)
            elif gameMode == GAMELIST.FILL:
                gameObj = colorfill.ColorFill(screen, js1, js2)
            elif gameMode == GAMELIST.QUIT:
                run = False
    # Shut pygame down exactly once, after the loop has finished.
    pygame.quit()
if __name__ == '__main__':
    # Start the game loop only when run as a script.
    Main()
| true |
ebf5f17fb3fe694d76da12121149623fe599ffe1 | Python | isym444/Competitive-Programming-Solved-Problems | /CP1/Codewars/Practice/USACO_Bronze/MilkFactoryFAILEDLOGIC.py | UTF-8 | 668 | 3.21875 | 3 | [] | no_license | import sys
def find_reachable_station(n, walkways):
    """Return the station that is the destination of n - 1 walkways, or -1.

    ``walkways`` is an iterable of (a, b) pairs meaning milk moves a -> b;
    stations are numbered 1..n.  This reproduces the original heuristic:
    a station with in-degree n - 1 is reported.  (NOTE(review): this only
    checks in-degree, not true reachability -- hence the FAILEDLOGIC name.)
    """
    indegree = {station: 0 for station in range(1, n + 1)}
    for _, dest in walkways:
        indegree[dest] += 1
    for station in range(1, n + 1):
        if indegree[station] == n - 1:
            return station
    return -1


if __name__ == '__main__':
    # Redirect stdin/stdout to the local test files only when run as a
    # script, so importing this module has no side effects (the original
    # redirected at module level with hard-coded absolute paths).
    sys.stdin = open("/Users/isym444/Desktop/PythonCP/CP1/Codewars/Practice/input.txt", "r")
    sys.stdout = open("/Users/isym444/Desktop/PythonCP/CP1/Codewars/Practice/output.txt", "w")
    n = int(input())
    walkways = [tuple(map(int, line.split())) for line in sys.stdin]
    print(find_reachable_station(n, walkways))
def sum_of_absolutes(values):
    """Return the sum of absolute values of *values*."""
    return sum(abs(value) for value in values)


if __name__ == '__main__':
    count = int(input())        # how many numbers follow
    tokens = input().split()
    values = [int(tokens[i]) for i in range(count)]
    # The original shadowed the built-in ``sum`` and re-implemented abs()
    # with an if/else; both are replaced with the standard built-ins.
    print(sum_of_absolutes(values))
126be290b656e2a2e504ee8303354a9f3b0d43a8 | Python | gameguyr/personal_repo | /kitty/GridLayout.py | UTF-8 | 433 | 3.09375 | 3 | [] | no_license | #! /usr/bin/python2.7
#####################
# PURPOSE: to learn how to write grids using 2 for loops
#
# DATE: 7/3/2013
#
# AUTHOR: Russell Lego
####################
import numpy as np
def print_grid(values, width=3):
    """Print *values* as rows of *width* space-separated entries.

    Only complete rows are emitted, so a length that is not a multiple of
    *width* no longer overruns the sequence (the original indexed past the
    end on its final row -- 64 items in rows of 3 raised IndexError at
    indices 64 and 65).
    """
    for start in range(0, len(values) - width + 1, width):
        row = values[start:start + width]
        print(' '.join(str(item) for item in row))


# Renamed from ``list``: the original shadowed the built-in.
grid_values = np.arange(0, 64)
print_grid(grid_values)
aead45ea7d204e9335f7007b9c06939d5a41e871 | Python | lebuingockhang123/L-p-tr-nh-python | /LeBuiNgocKhang_53157_CH01/Exercise/page_05_exercise_04.py | UTF-8 | 513 | 2.890625 | 3 | [] | no_license | """
Author: Le Bui Ngoc Khang
Date: 12/07/1997
Problem: Describe an instruction that is not well defined and thus could not be included as a
step in an algorithm. Give an example of such an instruction.
Solution:
The process that cannot be carried out by any computing agent should not be included as a step in an algorithm.
The statement of'Dividing a number by zero' is not well defined as division by zero cannot be performed by the
processor and if not handled correctly then it would crash the program.
""" | true |
def fuel_for_mass(mass):
    """Fuel required to launch one module: floor(mass / 3) - 2."""
    return mass // 3 - 2


def total_fuel(masses):
    """Total fuel for an iterable of module masses."""
    return sum(fuel_for_mass(mass) for mass in masses)


if __name__ == '__main__':
    # Read one module mass per line.  The original leaked the file handle
    # (no ``with``) and shadowed the built-in ``sum``.
    with open('input.txt') as fh:
        masses = [int(line) for line in fh if line.strip()]
    print(total_fuel(masses))
4bcef54ce3e713d23b4526f6c65b2bb4c16189aa | Python | demonlife/DataBaseLea | /codesegment/python_use_redis_oplua.py | UTF-8 | 467 | 2.59375 | 3 | [] | no_license | #encoding: utf8
# Demonstrates evaluating server-side Lua scripts on a local Redis instance.
import redis, time

r = redis.Redis('localhost', db=0)

# Lua: SET keys 0..KEYS[1] to incrementing values, then return the limit.
script1 = '''
local i=0
local b=0
local res
local limit = tonumber(KEYS[1])
while (i <= limit) do
res = redis.call('set', i, b)
i = i + 1
b = b + 1
end
return KEYS[1]
'''
#r.eval(script1, 1, 200)

# Lua: delete every key in the current database.
script2 = '''
local list = redis.call('keys', '*')
for x in pairs(list) do
redis.call('del', x)
end
return 1
'''

try:
    # Fix: ``print x`` and ``except Exception, e`` are Python-2-only syntax
    # (a SyntaxError on Python 3); the forms below work on both 2 and 3.
    print(r.eval(script2, 0))
except Exception as e:
    print(e)
d8820dba8084d09f3f84e8bd7705f8a5fb6781e8 | Python | jesusveca/chordDiagram_boroCD | /preprocessNYBorough/preprocess.py | UTF-8 | 3,546 | 2.96875 | 3 | [] | no_license | import json
from pprint import pprint
import csv
import sys
def point_in_poly(x, y, poly):
    """Ray-casting point-in-polygon test.

    *poly* is a sequence of (x, y) vertex pairs.  Returns True when the
    point (x, y) lies inside the polygon: parity is toggled for each edge
    crossed by a horizontal ray extending left from the point.
    """
    vertex_count = len(poly)
    inside = False
    prev_x, prev_y = poly[0]
    for step in range(vertex_count + 1):
        cur_x, cur_y = poly[step % vertex_count]
        # An edge can only be crossed when y lies within its vertical span
        # and the point is not strictly right of both endpoints.
        if min(prev_y, cur_y) < y <= max(prev_y, cur_y) and x <= max(prev_x, cur_x):
            if prev_y != cur_y:
                # x-coordinate where the horizontal ray meets this edge.
                x_cross = (y - prev_y) * (cur_x - prev_x) / (cur_y - prev_y) + prev_x
            if prev_x == cur_x or x <= x_cross:
                inside = not inside
        prev_x, prev_y = cur_x, cur_y
    return inside
# Load the NYC borough outlines.  boroughs.json looks like GeoJSON: each
# feature is one borough with (multi-)polygon "coordinates" and a
# "BoroName" property -- TODO confirm against the actual file.
data = json.load(open('boroughs.json'))
size_boroughs = len(data["features"])
borough_list = []
# for x in range(size_boroughs):
# name_borough = data["features"][x]["properties"]["BoroName"]
# borough_list.append(str(name_borough))
# print borough_list
# for x in range (size_boroughs):
# for y in range(len(data["features"][x]["geometry"]["coordinates"])): # number of polygons presents in each borough # each_polygon_on_borough = data["features"][x]["geometry"]["coordinates"][y] # each polygon on a borough
# polygon_of_each_borough = data["features"][x]["geometry"]["coordinates"][y][0]
# point_x = -73.985130
# point_y = 40.758896
# # print point_in_poly(point_x,point_y,polygon_of_each_borough)
#
# Per-borough counters; the trailing comment is the borough's index in
# data["features"] ("contador" is Spanish for "counter").
contador_staten = 0 # 0
contador_queens = 0 # 1
contador_brooklyn = 0 # 2
contador_manhattan =0 #3
contador_bronx = 0 # 4
# Origin -> destination pair counters intended for the chord diagram.
# NOTE(review): none of these counters are updated anywhere below yet.
from_staten_to_queens=0
from_staten_to_brooklyn=0
from_staten_to_manhattan=0
from_staten_to_bronx=0
from_queens_to_staten=0
from_queens_to_brooklyn=0
from_queens_to_manhattan=0
from_queens_to_bronx=0
from_brooklyn_to_queens=0
from_brooklyn_to_staten=0
from_brooklyn_to_manhattan=0
from_brooklyn_to_bronx=0
from_manhattan_to_staten=0
from_manhattan_to_brooklyn=0
from_manhattan_to_queens=0
from_manhattan_to_bronx=0
from_bronx_to_staten=0
from_bronx_to_brooklyn=0
from_bronx_to_manhattan=0
from_bronx_to_queens=0
list_to_chord=[]
# Parse a numeric token: evaluates strings made only of digit/sign/exponent
# characters, otherwise returns None.
# NOTE(review): this calls eval() on CSV fields -- safe only for trusted input.
num = lambda s: eval(s) if not set(s).difference('0123456789. *+-/e') else None
# Output CSV of trips annotated with the origin borough.
f = open("test.csv", 'wt')
writer = csv.writer(f)
writer.writerow( ('Origin', 'Orig Long' , 'Orig Lat','Dest Long', 'Dest Lat', 'Times') )
with open('sample_merged_1.csv') as File:
    reader = csv.reader(File)
    for row in reader:
        # Columns 2-5 hold pickup/drop-off longitude and latitude.
        pickup_long = row[2] # x
        pickup_long = num(pickup_long)
        pickup_lat = row[3] # y
        pickup_lat = num(pickup_lat)
        dropoff_long = row[4] # x
        dropoff_long = num(dropoff_long)
        dropoff_lat = row[5] # y
        dropoff_lat = num(dropoff_lat)
        # Test the pickup point against every polygon of every borough.
        for x in range (size_boroughs):
            for y in range(len(data["features"][x]["geometry"]["coordinates"])): # number of polygons presents in each borough # each_polygon_on_borough = data["features"][x]["geometry"]["coordinates"][y] # each polygon on a borough
                polygon_of_each_borough = data["features"][x]["geometry"]["coordinates"][y][0]
                pprint (polygon_of_each_borough)
                point_x = pickup_long
                point_y = pickup_lat
                point_x2 = dropoff_long
                point_y2 = dropoff_lat
                if point_in_poly(point_x,point_y,polygon_of_each_borough):
                    # Pickup is inside this borough: report origin and endpoints.
                    nombre_from= data["features"][x]["properties"]["BoroName"]
                    print nombre_from+ " "+ str(point_x) + " " + str(point_y)+" to "+ str(point_x2) + " " + str(point_y2)
                    # writer.writerow( (nombre_from,point_x,point_y, point_x2, point_y2, 0))
pprint (list_to_chord)
| true |
a08a3637a3d5b0af1eab659d82b0cd8a58195782 | Python | Akus0ni/bug-free-fiesta | /exe5.py | UTF-8 | 598 | 3.21875 | 3 | [] | no_license | my_name = 'Aku Soni'
my_age = 24 # no lying
my_height = 74 # inches
my_weight = 75 #kg
my_eyes = 'Brown'
my_teeth = 'Yellowish White'
my_hair = 'Brown'
print "Lets talk about %s." %my_name
print "He's %d inches tall." %my_height
print "He's %d Kg heavy." %my_weight
print "Actually thats not too heavy."
print "He's got %s eyes and %s hair." %(my_eyes,my_hair)
print "his teeth are usually %s depending on the coffee." %my_teeth
# %r is a case which can print whatever it may be string, num, etc
print "if I add %d, %r and %d I get %d." %(
my_age, my_height, my_weight, my_age+my_height+my_weight) | true |
dce7d3cd1b7c65e100447778bb14d44edcea591b | Python | Ehsan-Nirjhar/Person_Re-Identification_Project_Fall18 | /custom/testset.py | UTF-8 | 2,557 | 3.03125 | 3 | [] | no_license | #################################################################################
######################## CSCE 625 : AI PROJECT : TEAM 17 ########################
## Prepares the validation dataset givan in the class
## Copy the 'testSet' folder to the '/data' folder
## Must contain:
## /data/testSet/gallery/*.png (all gallery images in .png format)
## /data/testSet/query/*.png (all query images in .png format)
#################################################################################
#################################################################################
import os.path as osp
from os import listdir
class TestSetCSCE625(object):
    """CSCE625 AI Project test-set dataset.

    Expects this layout under *root*:
        <root>/testSet/query/*.png
        <root>/testSet/gallery/*.png

    After construction, ``self.query`` and ``self.gallery`` are lists of
    (img_path, img_id, 0) tuples, where img_id is the file name without
    its ``.png`` extension.
    """
    dataset_dir = 'testSet'

    def __init__(self, root='data', **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.que_dir = osp.join(self.dataset_dir, 'query')
        self.gal_dir = osp.join(self.dataset_dir, 'gallery')
        self._check_before_run()

        que, num_que_imgs = self._process_dir(self.que_dir)
        gal, num_gal_imgs = self._process_dir(self.gal_dir)
        num_tot_imgs = num_que_imgs + num_gal_imgs

        print("=> Test set for CSCE625 loaded")
        print("Dataset statistics:")
        print("  ------------------------------")
        print("  subset   | # images")
        print("  ------------------------------")
        print("  query    | {:8d}".format(num_que_imgs))
        print("  gallery  | {:8d}".format(num_gal_imgs))
        print("  ------------------------------")
        print("  total    | {:8d}".format(num_tot_imgs))
        print("  ------------------------------")

        self.query = que
        self.gallery = gal

    def _check_before_run(self):
        """Raise RuntimeError if any expected directory is missing.

        Consolidates the three copy-pasted existence checks of the
        original into one loop (same messages, same order).
        """
        for path in (self.dataset_dir, self.que_dir, self.gal_dir):
            if not osp.exists(path):
                raise RuntimeError("'{}' is not available".format(path))

    def _process_dir(self, dir_imgs):
        """Return ([(img_path, img_id, 0), ...], count) for *.png in *dir_imgs*."""
        dataset = [(osp.join(dir_imgs, name), name[:-4], 0)
                   for name in listdir(dir_imgs)
                   if name.endswith('.png')]
        return dataset, len(dataset)
e2ced1bebdc2ef22c917f58ffc1a1b4dfff0aca7 | Python | akoshdev/Weather | /ob_havo/templates/views.py | UTF-8 | 1,220 | 2.515625 | 3 | [] | no_license | from django.shortcuts import render
from django.http import HttpResponse
import requests
import json
# city = "London"
# country_code = "UK"
# location = city+','+country_code
# APIKEY = '53dc894d6fe5612c69a7eaf3b13d2059' #get an api key from openweathermap.org
# url = "http://api.openweathermap.org/data/2.5/find?q=%s&units=metric&APPID=%s" %(location,APIKEY)\
# response = requests.get(url)
# response_dict = json.loads(response.text)
def home(request):
    """Render the landing page containing the city/country search form."""
    return render(request, 'index.html')
def obhavo(request):
    """Fetch current weather for the submitted location and render it.

    Reads ``usertext_city`` and ``usertext_country`` from the query string,
    calls the OpenWeatherMap "find" endpoint in metric units, and renders
    temperature (temp), pressure (davlenie) and humidity (vlajnost).
    """
    user_city = request.GET['usertext_city']
    user_country = request.GET['usertext_country']
    location = user_city + ',' + user_country
    # NOTE(review): the API key should live in settings / environment
    # variables, not hard-coded in source.
    APIKEY = '53dc894d6fe5612c69a7eaf3b13d2059'  # get an api key from openweathermap.org
    # Let requests build and percent-encode the query string instead of
    # interpolating it by hand with "%s" formatting.
    response = requests.get(
        "http://api.openweathermap.org/data/2.5/find",
        params={'q': location, 'units': 'metric', 'APPID': APIKEY},
    )
    response_dict = response.json()  # equivalent to json.loads(response.text)
    # Traverse the nested payload once instead of three times.
    weather = response_dict["list"][0]["main"]
    temp = weather["temp"]
    davlenie = weather["pressure"]
    vlajnost = weather["humidity"]
    return render(request, 'obhavo.html', {'temp': temp, 'user_city': user_city, 'davlenie': davlenie, 'vlajnost': vlajnost})
28426e4330e796e482e8405510f6e0ddfbe84b16 | Python | R0fM1a/python-related-file-Decoder | /pyscript_decode.py | UTF-8 | 3,852 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
'''
This script can help you when analysing Python-related PE files or PYTHONSCRIPT-format files.
Created by rofmia.
Other features, such as de-obfuscation, will be supported later.
'''
import os, os.path
import pefile
import marshal
import zipfile
import StringIO
import argparse
from uncompyle6.main import decompile
"""
flag returns a value that stands for the detected file format:
0 not PE file
1 Py2exe file
2 PyInstaller file
3 pyc file
4 PYTHONSCRIPT binary stream"""
class PythonExecutable_parse(object):
    """Opens a candidate file and classifies/unpacks Python-related binaries.

    Only the py2exe PE resource and the raw PYTHONSCRIPT stream formats
    are handled (see executable_check); PyInstaller and .pyc detection
    are stubbed out below.
    """
    def __init__(self, filename, check_option = False):
        # Version digits read from the embedded pythonXY DLL resource name
        # (e.g. 27); callers divide by 10.0 to get 2.7.
        self.python_version = 0.0
        if os.path.exists(filename):
            self.filename = filename
            self.check = check_option
            self.fptr = open(filename, "rb")
        else:
            raise Exception("File not found")
            # NOTE(review): unreachable after the raise above, and `sys`
            # is never imported in this module.
            sys.exit(1)
    def executable_check(self):
        """Classify self.filename and return a (kind, payload) tuple.

        ("PYTHONSCRIPT", bytes) - file starts with the 12 34 56 78 magic
        ("NOT_PE", None)        - not a PE and no PYTHONSCRIPT magic
        ("PY2EXE", bytes)       - PE containing a PYTHONSCRIPT resource
        Falls through (implicitly returns None) for a PE without that resource.
        """
        try:
            pe_file = pefile.PE(self.filename)
        except:
            #check for PYTHONSCRIPT format
            file_data = self.fptr.read()
            if b'\x12\x34\x56\x78' == file_data[:4]:
                return ("PYTHONSCRIPT", file_data)
            #can not support for python 3.x currently
            else:
                return ("NOT_PE", None)
        #check for py2exe foamrt file
        if hasattr(pe_file, "DIRECTORY_ENTRY_RESOURCE"):
            for entry in pe_file.DIRECTORY_ENTRY_RESOURCE.entries:
                #get python script version from pythonx.x.dll
                if str(entry.name).endswith(".DLL"):
                    self.python_version = int(str(entry.name)[6:8])
                if str(entry.name) == "PYTHONSCRIPT":
                    script_resourse = entry.directory.entries[0].directory.entries[0]
                    if script_resourse != None:
                        pythonscript = pe_file.get_data(script_resourse.data.struct.OffsetToData, script_resourse.data.struct.Size)
                        return ("PY2EXE", pythonscript)
        #check for pyinstaller encode PE file
        #pass
        #check for pyc file
        #pass
    def fclose(self):
        """Close the handle opened in __init__, ignoring any error."""
        try:
            self.fptr.close()
        except Exception as e:
            pass
def Decompilepy2exe(rsrc_data, version = 2.7):
    """Decompile a py2exe PYTHONSCRIPT resource back to Python source.

    rsrc_data: raw resource bytes; after a 16-byte header there is a
    NUL-terminated section, then the marshalled code objects.
    version: Python version passed to uncompyle6's decompiler.
    Returns the decompiled source text of the last code object, or None
    when no NUL terminator is found.
    """
    offset = rsrc_data[16:].find("\x00")
    if offset == -1:
        return
    pythoncode = marshal.loads(rsrc_data[16 + offset + 1:])
    oStringIO = StringIO.StringIO()
    decompile(version, pythoncode[-1], oStringIO)
    return oStringIO.getvalue()
if __name__ =="__main__":
    # Show where we are running from (useful when relative paths are passed).
    print os.getcwd()
    parser = argparse.ArgumentParser(description = "this script can help you detect or unpack python-binary file such as py2exe and pyinstaller.")
    parser.add_argument("-i", "--input", dest="input", required=True, action="store", help="pyscript file or binary file packed with py2exe or pyinstaller")
    parser.add_argument("-o", "--output", dest="output", required=False, action="store", help="folder you pointed to store decompiled script.")
    parser.add_argument("-c", "--check", dest="check", required=False, action="store_true", default=False, help="that helps you to check what your binary file encoded with")
    args = parser.parse_args()
    file_name = args.input
    output_dir_name = args.output
    check_option = args.check
    if file_name is not None:
        # Classify the input file, then release the handle.
        a = PythonExecutable_parse(file_name)
        py_metadata = a.executable_check()
        a.fclose()
        if check_option == True:
            print "%s is a %s file"%(file_name, py_metadata[0])
        if output_dir_name is not None:
            # Decompile the embedded byte-code; the py2exe path carries the
            # detected Python version, PYTHONSCRIPT defaults to 2.7.
            if py_metadata[0] == "PYTHONSCRIPT":
                #not support for pythonscript version check currently
                pyscript = Decompilepy2exe(py_metadata[1], 2.7)
                #print pyscript
            elif py_metadata[0] == "PY2EXE":
                pyscript = Decompilepy2exe(py_metadata[1], a.python_version/ 10.0)
                #print pyscript
            # Write the recovered source, creating the folder when missing.
            if os.path.exists(output_dir_name):
                open(os.path.join(output_dir_name, "decode_script.py"), "w").write(pyscript)
            else:
                os.system("mkdir %s"%output_dir_name)
                open(os.path.join(output_dir_name, "decode_script.py"), "w").write(pyscript)
| true |
78a64e7b447d0ae3e48bd8ee7aa027e019ac7073 | Python | gitter-lab/prmf | /script/prepare_nodelist.py | UTF-8 | 2,241 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import sys, argparse
import os, os.path
import networkx as nx
import prmf
from prmf import string_db as sdb
def main():
  """Build the union of the STRING PPI network and all pathway graphs.

  Writes a sorted nodelist (one identifier per line) and the union graph
  as GraphML.  Exits with a distinct status code for each usage/data error.
  """
  parser = argparse.ArgumentParser(description="""
  Construct a nodelist containing all nodes from STRING and all nodes from all networks in <graphml-dir>.
""")
  parser.add_argument("--stringdb", type=argparse.FileType('r'), required=True)
  parser.add_argument("--graphml-dir", type=str)
  parser.add_argument("--graphmls", type=str, nargs='+')
  parser.add_argument("--node-attribute", type=str, help='Node attribute which contains protein identifier; if not provided, the node identifier is used as a protein identifier')
  parser.add_argument("--out-nodelist", type=argparse.FileType('w'), required=True)
  parser.add_argument("--out-graph", type=argparse.FileType('wb'), required=True)
  args = parser.parse_args()

  # Exactly one of the two pathway sources must be provided.
  if args.graphml_dir is None and args.graphmls is None:
    sys.stderr.write("Exactly one of --graphml-dir or --graphmls is required.\n")
    sys.exit(21)
  if args.graphml_dir is not None and args.graphmls is not None:
    sys.stderr.write("Exactly one of --graphml-dir or --graphmls is required.\n")
    sys.exit(22)

  # TODO use prmf.prepare_nodelist for consistency
  Gs = []
  if args.graphmls is not None:
    Gs = prmf.parse_pathways(args.graphmls)
  if args.graphml_dir is not None:
    Gs = prmf.parse_pathways_dir(args.graphml_dir)
  # relabel nodes if needed
  Gs = list(map(lambda G: prmf.relabel_nodes(G, args.node_attribute), Gs))

  G_ppi = sdb.parse_string_fh(args.stringdb)

  # Sanity check: at least one pathway node must exist in the PPI network.
  # (Replaces the original's manual all_missing flag loop with any().)
  node_ids = set()
  for G in Gs:
    node_ids.update(G.nodes())
  if not any(node in G_ppi for node in node_ids):
    sys.stderr.write("No node identifiers in the provided pathways overlap with the background protein-protein interaction network. Exiting.\n")
    sys.exit(23)

  Gs = [G_ppi] + list(Gs)
  G_union = prmf.weighted_union(Gs)
  nodes = sorted(G_union.nodes())
  for node in nodes:
    args.out_nodelist.write(node + "\n")
  nx.write_graphml(G_union, args.out_graph)
main()
| true |