blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
71dd7be659cfb5c46903c4e76feade537f9cdb09 | Python | RyanDraves/buffham | /buffham/buffham_gen.py | UTF-8 | 630 | 2.625 | 3 | [] | no_license | #!/usr/bin/python3
import argparse
import glob
import pathlib
import buffham.parse as bh
def main(dir: pathlib.Path):
    """Recursively find every ``*.bh`` file under ``dir`` and run the
    BuffHam parser and generator on each one.

    Note: the parameter name ``dir`` shadows the builtin dir().
    """
    # recursive=True makes the '**' component match nested directories.
    for bh_file in glob.glob(str(dir.absolute() / '**/*.bh'), recursive=True):
        bh_file = pathlib.Path(bh_file)
        messages = bh.Parser.parse_file(bh_file)
        bh.Generator.generate(bh_file, messages)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate BuffHam definitions')
    # NOTE(review): a default on a positional argument only takes effect with
    # nargs='?'; as written, the argument is required on the command line.
    parser.add_argument('dir', help='Directory to recursively generate through', default=str(pathlib.Path.cwd()))
    args = parser.parse_args()
    main(pathlib.Path(args.dir))
| true |
8d47ee7607bcbb94e417505f5a2325e8e6dbac4c | Python | regalk13/Tkinter-projects | /file.py | UTF-8 | 647 | 2.6875 | 3 | [] | no_license | from tkinter import *
from PIL import Image, ImageTk
from tkinter import filedialog
root = Tk()
root.title('File')
img = PhotoImage(file='images/icon.png')
root.tk.call('wm', 'iconphoto', root._w, img)
# Renamed from `open` so the builtin open() is no longer shadowed.
def open_file():
    """Ask the user for an image file, then display its path and the image.

    Stores the chosen path on root.filename and keeps a module-level
    reference to the PhotoImage (via `global my_image`) so Tk does not
    garbage-collect it while it is displayed.
    """
    global my_image
    root.filename = filedialog.askopenfilename(initialdir="tkinter-projects/images", title="Select a file", filetypes=(("png files", "*.png"),("all files", "*.*")))
    # Label(...).pack() returns None; only the side effect (widget shown)
    # matters, so the original unused `my_label`/`label_image` bindings were dropped.
    Label(root, text=root.filename).pack()
    my_image = ImageTk.PhotoImage(Image.open(root.filename))
    Label(root, image=my_image).pack()
Button(root, text="Open file", command=open_file).pack()
root.mainloop() | true |
172fdc750f08b7474275a639cb28798a7468c787 | Python | a-pavco/ant_simulation | /other/ant_sim_sdf/ant_sim/scripts/moveTripods.py | UTF-8 | 582 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
def talker():
    """Publish a Float32 joint position on /ant_topic at 1 Hz, flipping its
    sign every two published messages so the tripod joints oscillate."""
    pub = rospy.Publisher('/ant_topic', Float32, queue_size=1)
    rospy.init_node('ant_rosnode_py', anonymous=True)
    rate = rospy.Rate(1)  # 1 Hz publishing rate
    position = Float32()
    tmp = 0;
    position.data = -100
    while not rospy.is_shutdown():
        #rospy.loginfo('Moving ant joints to position == '+str(position))
        pub.publish(position)
        tmp += 1;
        rospy.loginfo(tmp)
        rate.sleep()
        if tmp == 2:
            position.data *= -1  # reverse direction every 2 cycles
            tmp = 0
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts rate.sleep().
        pass
| true |
0a25b23358de3792e35b6e9f5f4b51465e3fe6d5 | Python | NoahNacho/python-solving-problems-examples | /Chapter4/Exercise8.py | UTF-8 | 139 | 3.765625 | 4 | [
"MIT"
] | permissive | def area_of_circle(r):
    # Area of a circle: A = pi * r**2 (the literal is math.pi to full precision).
    A = 3.141592653589793*r**2
    print(A)  # NOTE(review): prints instead of returning; consider `return A`
radius = int(input('What shouldthe radius be?'))
area_of_circle(radius)
9006d2c20fa935aedd9f2d951aaca192b5393718 | Python | Nirperm/Design-Pattern | /Observer/number_generator.py | UTF-8 | 489 | 3.046875 | 3 | [] | no_license | from abc import ABCMeta, abstractmethod
class NumberGenerator(metaclass=ABCMeta):
    """Observable number source (the "subject" of the Observer pattern).

    Concrete subclasses implement get_number()/execute() and call
    notify_observers() whenever a new number is produced.
    """

    def __init__(self):
        # Per-instance observer list.  The original kept this as a CLASS
        # attribute, so every generator instance shared one list of
        # observers; subclasses defining their own __init__ must now call
        # super().__init__().
        self.__observers = []

    def add_observer(self, observer):
        """Register *observer*; it must expose update(generator)."""
        self.__observers.append(observer)

    def delete_observer(self, observer):
        """Unregister a previously added observer (ValueError if absent)."""
        self.__observers.remove(observer)

    def notify_observers(self):
        """Call update(self) on every observer, in registration order."""
        for observer in self.__observers:
            observer.update(self)

    @abstractmethod
    def get_number(self):
        """Return the current number (implemented by subclasses)."""

    @abstractmethod
    def execute(self):
        """Produce numbers and notify observers (implemented by subclasses)."""
| true |
71fe6dc9d54e15846439ae9a8265a0e36dbc3ce7 | Python | kuplo/OstryKodzing | /Model.py | UTF-8 | 1,529 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# external imports
# our imports
from Stale import *
#===================================== Test fixtures (temporary, for tests)
# Hard-coded player record used while there is no real backend.
Gracz = {"nick" :"user",
         "haslo" :"123",
         "ip" :"212.191.227.106",
         "nazwaSerwera" :"LocalHome"}
# Fake in-memory file tree: directory name -> {entry name: Type or None}.
Pliki = {"/" :None}
Pliki["/"] = {"bin" :None,
              "hello.txt" :Type["txt"]}
Pliki["bin"] = {"exit" :Type["exec"]}
class Model:
    """Model of an MVC trio: holds login state and the (fake) file tree."""
    def __init__(self, nowyKontroler = 0, nowyWidok = 0) :
        self.kontroler = nowyKontroler
        self.widok = nowyWidok
        self.error = 0
        self.gracz = None
    def dodajKontroler(self, nowyKontroler) :
        # Attach the controller only once (0 means "not set yet").
        if self.kontroler == 0 :
            self.kontroler = nowyKontroler
    def dodajWidok(self, nowyWidok) :
        # Attach the view only once (0 means "not set yet").
        if self.widok == 0 :
            self.widok = nowyWidok
    def pobierzDane(self, idZadania, args=None) :
        # Dispatch on the requested task id (only Login is handled so far).
        if idZadania == Zadania["Login"] :
            self.zaloguj(args)
    def zaloguj(self, args) : # expects args == [nick, password]
        #=================================== Test-only implementation
        if args[0] != Gracz["nick"] :
            self.error = Error["zlyNick"]
            return
        if args[1] != Gracz["haslo"] :
            self.error = Error["zleHaslo"]
            return
        self.gracz = Gracz
        self.pliki = Pliki
        #=================================================#
        self.error = Error["brakBledow"]
23504d1ac21b653538cdb1922efe499d071254d2 | Python | akuchibotla/subway | /generate_all_solutions.py | UTF-8 | 429 | 2.5625 | 3 | [] | no_license | from subway import solver
import json
import os
# Python 2 script: pre-compute solver solutions for every 4-digit code.
for i in xrange(10000):
    num_str = str(i)
    num_str = '0' * (4 - len(num_str)) + num_str  # zero-pad to width 4
    file_name = 'solutions/' + num_str + '.json'
    if os.path.isfile(file_name):
        # Resumable: skip codes whose solution file already exists.
        print num_str, 'has already been computed, so skipping'
        continue
    solutions = solver(num_str)._getvalue()
    with open(file_name, 'w') as outfile:
        json.dump(solutions, outfile)
    print 'finished with', num_str
fac55ceb275d5e7680255d19ea8dff958728348c | Python | Kashish24/hackerEarthCodingSolutions | /Length of a valley.py | UTF-8 | 2,550 | 3.46875 | 3 | [] | no_license | Link to Problem:- https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/hill-150045b2/
# Brute Force Approach.
'''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
# Brute force: for each index, walk left and right while values strictly
# decrease toward position i, and report the resulting "valley" length.
for t in range(int(input())):
    n = int(input()) ; arr = list(map(int, input().split()))
    l = 0;
    r = 0;
    res = []
    for i in range(len(arr)):
        l = i+1;  # 1-based left boundary of the valley around i
        r = i+1;  # 1-based right boundary of the valley around i
        for j in range(i-1,-1,-1):
            if arr[j] > arr[j+1]:
                l -=1;
            else:
                break;
        for j in range(i+1,len(arr)):
            if arr[j] > arr[j-1]:
                r +=1;
            else:
                break;
        res.append(r-l+1);
    print(*res)
# Better RunTime Complexity using Stack;
'''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
def r_area(arr,n):
    """Sweep left-to-right keeping a strictly increasing stack of indices.

    Whenever the next element fails to extend the increase, every pending
    index is assigned the current position; indices still pending at the
    end receive n.  Returns the list of assigned positions.
    """
    pending = []
    right_limit = [0] * n
    pos = 0
    while pos < n:
        if not pending or arr[pending[-1]] < arr[pos]:
            pending.append(pos)
            pos += 1
        else:
            # Increase broken: flush everything collected so far.
            while pending:
                right_limit[pending.pop()] = pos
    # Indices never broken get the sentinel position n.
    while pending:
        right_limit[pending.pop()] = pos
    return right_limit
def l_area(arr,n):
    """Stack-based sweep identical in logic to r_area.

    Kept as a separate function to match the caller, which feeds it the
    reversed array and mirrors the result back.
    """
    pending = []
    left_limit = [0] * n
    pos = 0
    while pos < n:
        if not pending or arr[pending[-1]] < arr[pos]:
            pending.append(pos)
            pos += 1
        else:
            # Increase broken: flush everything collected so far.
            while pending:
                left_limit[pending.pop()] = pos
    # Indices never broken get the sentinel position n.
    while pending:
        left_limit[pending.pop()] = pos
    return left_limit
# Stack-based solution: combine the left/right sweeps into valley lengths.
for t in range(int(input())):
    n = int(input()) ; arr = list(map(int, input().split()))
    l_index = [0]*n;
    r_index = [0]*n;
    res = [0]*n;
    # Left boundaries come from running the right-sweep on the reversed
    # array, mirroring the indices back, and converting with (n - i + 1).
    l_index = l_area(arr[::-1],n);
    l_index = l_index[::-1];
    l_index = [(n-i+1) for i in l_index];
    r_index = r_area(arr,n);
    for i in range(n):
        res[i] = r_index[i] - l_index[i] +1;
    print(*res)
| true |
dbe60c419b0f917fb280db21ffd40d6b4eb55b6f | Python | jsmack/learn | /old/language/python/udemy/ds/184/185.py | UTF-8 | 1,691 | 2.859375 | 3 | [] | no_license | import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,format='%(threadName)s: %(message)s')
#def work1(x,y=2):
#def work1(d, lock):
def work1(lock):
    """Increment the module-level d['x'] while re-acquiring *lock*.

    The same thread takes the lock three times (outer with-block, explicit
    acquire(), nested with-block), so this only works when *lock* is an
    RLock -- a plain Lock would deadlock on the second acquisition.
    Note: reads the module-level dict `d`, not a parameter.
    """
    logging.debug('start')
    with lock:
        lock.acquire()
        i = d['x']
        time.sleep(1)  # widen the race window for demonstration purposes
        d['x'] = i + 1
        logging.debug(d)
        with lock:
            d['x'] = i + 1  # rewrites the same value (i + 1, not i + 2)
        lock.release()
    logging.debug('end')
#def work2(x,y=2):
#def work2():
def work2(d, lock):
    """Increment d['x'] exactly once while holding *lock*, logging progress."""
    logging.debug('start')
    with lock:
        current = d['x']
        d['x'] = current + 1
        logging.debug(d)
    logging.debug('end')
if __name__ == '__main__':
    # (The commented-out lesson experiments with daemon threads,
    # threading.enumerate() and Timer were removed for readability.)
    d = {'x': 0}
    #lock = threading.Lock()
    # RLock is required: work1 re-acquires the lock it already holds.
    lock = threading.RLock()
    # BUG FIX: work1(lock) takes a single argument; the original passed
    # args=(d, lock), which made the thread die with a TypeError on start.
    # work1 reads the module-level dict `d` directly.
    t1 = threading.Thread(target=work1, args=(lock,))
    t2 = threading.Thread(target=work2, args=(d, lock))
    t1.start()
    t2.start()
    # Wait for both workers so output is complete before the script exits.
    t1.join()
    t2.join()
| true |
35a0849d21e224be5a4b6b358da770c26104acf2 | Python | suneilr/readingspeed | /readingspeed.py | UTF-8 | 2,355 | 3.265625 | 3 | [] | no_license | filename='[file name with path]'
import datetime
def timenow():
    """Return the current local wall-clock time as fractional minutes
    since midnight (hours*60 + minutes + seconds/60)."""
    now = datetime.datetime.now()
    return float(now.hour * 60 + now.minute + now.second / 60)
# Interactive reading-speed tracker: shows one paragraph at a time, times
# the user between Enter presses, and reports wpm / remaining-time estimates.
paragraph = ''
f = open(filename,'r',encoding="ISO-8859-1")
#f = open(filename,'r',encoding="utf8")
data = f.read()
totalwords = len(data.split())  # NOTE(review): `f` is never closed
A=0  # sum of paragraph start times (minutes)
B=0  # sum of paragraph end times (minutes)
C=0  # running average words-per-minute
R=0  # total words read so far
count=0  # paragraph counter
with open(filename, encoding="utf8", errors='ignore') as f:
    lines = f.readlines()
    for line in lines:
        if line.isspace(): # is it an empty line?
            #print ("hello")
            if paragraph:
                #print ("hello there")
                count=count+1
                print ("Paragraph: ", count)
                a=timenow()
                A=A+a
                print (paragraph)
                res = len(paragraph.split())
                input("Press Enter to continue....")
                b=timenow()
                B=B+b
                c=res/(b-a)  # wpm for this paragraph
                R=R+res
                C=R/(B-A)  # average wpm over all paragraphs so far
                balancetimeestimate=(totalwords-R)/C
                totaltimeestimate=B-A+balancetimeestimate
                print()
                print (res ,"words in",'{:.2f}'.format(b-a),"minutes =",'{:.0f}'.format(c)," wpm")
                print ("Average ",'{:.0f}'.format(C),"wpm")
                print ("Remaining words: ", (totalwords-R))
                print ("Remaining time estimate: ",'{:.1f}'.format(balancetimeestimate),"min")
                print ("Total time estimate: ",'{:.1f}'.format(totaltimeestimate),"min")
                #input("Press Enter to continue2...")
                print(".....................................................")
                print()
                paragraph = ''
            else:
                continue
        else:
            paragraph += ' ' + line.strip()
    # Final (possibly unterminated) paragraph handled after the loop.
    count=count+1
    print ("Paragraph: ", count)
    a=timenow()
    input("Press Enter to continue...")
    A=A+a
    print (paragraph)
    res = len(paragraph.split())
    input("Press Enter to finish...")
    b=timenow()
    B=B+b
    c=res/(b-a)
    R=R+res
    C=R/(B-A)
    balancetimeestimate=(totalwords-R)/C
    totaltimeestimate=B-A+balancetimeestimate
    print()
    print (res ,"words in",'{:.2f}'.format(b-a),"minutes =",'{:.0f}'.format(c)," wpm")
    print ("Average ",'{:.0f}'.format(C),"wpm")
    print ("Remaining time estimate: ",'{:.1f}'.format(balancetimeestimate),"min")
    print ("Total time estimate: ",'{:.1f}'.format(totaltimeestimate),"min")
    print(".........Page Over...............................................")
| true |
475737ad472b5b01dac2c2317a83ebea9cbf6e25 | Python | jgazal/DSA_Python-FAD | /Python_FAD/Capitulo2_TiposEstruturasDados/Dicionarios.py | UTF-8 | 1,973 | 4.34375 | 4 | [] | no_license | print("Dicionários")
print("-----------")
# Isso é uma lista
estudantes_lst = ["Pedro", 24, "Fernando", 22, "Tania", 26, "Cris", 25]
print(estudantes_lst)
# Isso é um dicionário
estudantes_dict = {"Pedro":24, "Fernando":22, "Tania":26, "Cris":25}
print(estudantes_dict)
print(estudantes_dict["Pedro"])
estudantes_dict["Pedro"] = 23
print(estudantes_dict["Pedro"])
print(estudantes_dict["Tania"])
estudantes_dict.clear()
print(estudantes_dict)
del estudantes_dict
# print(estudantes_dict)
estudantes = {"Pedro":24, "Fernando":22, "Tania":26, "Cris":25}
print(estudantes)
print(len(estudantes))
print(estudantes.keys())
print(estudantes.values())
print(estudantes.items())
estudantes2 = {"Maria":27, "Erika":28, "Milton":26}
print(estudantes2)
estudantes.update(estudantes2)
print(estudantes)
dic1 = {}
print(dic1)
dic1["key_one"] = 2
print(dic1)
dic1[10] = 5
print(dic1)
dic1[8.2] = "Olá"
print(dic1)
dic1["teste"] = 5
print(dic1)
dict1 = {}
print(dict1)
dict1["teste"] = 10
dict1["key"] = "teste"
# Atenção, pois chave e valor podem ser iguais, mas representam coisas diferentes.
print(dict1)
dict2 = {}
dict2["key1"] = "Big Data"
dict2["key2"] = 10
dict2["key3"] = 5.6
print(dict2)
a = dict2["key1"]
b = dict2["key2"]
c = dict2["key3"]
print(a, b, c)
# Dicionário de listas
dict3 = {'key1':1230,'key2':[22,453,73.4],'key3':['leite','maça','batata']}
print(dict3)
print(dict3['key2'])
# Acessando um item da lista, dentro do dicionário
print(dict3['key3'][0].upper())
# Operações com itens da lista, dentro do dicionário
var1 = dict3['key2'][0] - 2
print(var1)
# Duas operações no mesmo comando, para atualizar um item dentro da lista
dict3['key2'][0] -= 2
print(dict3)
print("\nCriando dicionários aninhados")
print("---------------------------")
# Criando dicionários aninhados
dict_aninhado = {'key1':{'key2_aninhada':{'key3_aninhada':'Dict aninhado em Python'}}}
print(dict_aninhado)
print(dict_aninhado['key1']['key2_aninhada']['key3_aninhada']) | true |
6b9cc71b2c2d5763b43294e2599e5a9888f01a3a | Python | gregoryhooks/FarmersMarketRegister | /discountFunctions/BXGD.py | UTF-8 | 1,312 | 3.8125 | 4 | [] | no_license | # Buy X Products, get them all at any percentage off
# Written by Gregory Hooks
# Expected input: List of items in basket, the rule to define the discount
# Expected output: The total number of times the discount needs to be applied
# Verify that the requirements for product x is satisfied and return the appropriate number of discounts
def validate(basket, rule):
    """Return how many items of rule['RequiredItem'] in *basket* qualify
    for the discount, honouring rule['Limit'] (0 = unlimited); return 0
    when the rule is invalid or the required quantity is not reached.
    (Python 2 code.)"""
    # Get the information required to calculate the discount
    try:
        numberofx = basket.count(rule['RequiredItem'])
        requiredx = int(rule['RequiredQuantity'])
        limit = int(rule['Limit'])
    except (KeyError):
        # NOTE(review): rule['Description'] may itself be missing here,
        # which would raise a second KeyError inside the handler.
        print "Warning: Ignoring invalid discount \"" + rule['Description'] + "\""
        return 0
    except ValueError:
        print "Warning: Ignoring invalid discount \"" + rule['Description'] + "\""
        return 0
    # Validate that the rules are valid, if not, print error and return 0
    # ('&' is bitwise AND, but it behaves like 'and' for these bool operands)
    if not ((requiredx > 0) & (limit >= 0)):
        print "Warning: Ignoring invalid discount \"" + rule['Description'] + "\""
        return 0
    # If the requirement is satisfied, return the number of items as the amount to discount or the limit
    if numberofx >= requiredx:
        if limit:
            return min(numberofx, limit)
        else:
            return numberofx
    else:
        return 0
| true |
5d5460734f4e454babdf73bff329e89a06b7b17a | Python | carlosgaleano/samsung | /contar.py | UTF-8 | 184 | 2.859375 | 3 | [] | no_license | # -*- coding: iso-8859-15
import sys
import os
# Reports the number of characters of the first CLI argument (Python 2).
if len(sys.argv) >= 2:
    print "El textos '%s' tiene %s caracteres" % (sys.argv[1],len(sys.argv[1]))
else:
    print "Necesito un parámetro"
a3f869af7c2ff622b0c585785100eab28dcf597c | Python | kolakows/ART2 | /test_net.py | UTF-8 | 6,390 | 2.75 | 3 | [] | no_license | from mnist import MNIST
from art2 import Art2Network
import matplotlib.pyplot as plt
from utils import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def simple_test_cube(directory_path, train_split = 0.9):
    """Cluster the 3-D cube dataset with ART2 and report accuracies.

    Shuffles the CSV rows, splits train/test by *train_split*, prints
    cluster accuracies, and shows a confusion matrix plus a 3-D plot.
    """
    data = pd.read_csv(f'{directory_path}/cube.csv').to_numpy()
    np.random.shuffle(data)
    # NOTE(review): despite its name, test_count is the number of TRAIN rows.
    test_count = int(train_split * len(data))
    x_train = data[:test_count, :-1]
    y_train = data[:test_count, -1]   # last column holds the true label
    x_test = data[test_count:, :-1]
    y_test = data[test_count:, -1]
    L1_size = 3   # input dimensionality (x, y, z)
    L2_size = 8   # number of clusters (cube corners)
    # art2 = Art2(L1_size, L2_size, d = 0.9, c = 0.1, rho = 0.9)
    # art2.train(img[:10000], epochs = 1)
    net = Art2Network(L1_size, L2_size, 0.9)
    y_train_pred = net.process_points(x_train, True)
    y_test_pred = net.process_points(x_test, False)
    print(f'Train accuracy: {cluster_acc(y_train, y_train_pred)}')
    print(f'Test accuracy: {cluster_acc(y_test, y_test_pred)}')
    show_confusion_matrix(y_test, y_test_pred)
    Plot3dData(x_train, y_train_pred, net, y_train, 'Simple cube')
def simple_test_cube_not_matching(directory_path):
    """Train ART2 on the cube dataset and evaluate on a deliberately
    non-matching dataset (cube-notmatching.csv)."""
    data = pd.read_csv(f'{directory_path}/cube.csv').to_numpy()
    data_not_matching = pd.read_csv(f'{directory_path}/cube-notmatching.csv').to_numpy()
    x_train = data[:, :-1]
    y_train = data[:, -1]   # last column holds the true label
    x_test = data_not_matching[:, :-1]
    y_test = data_not_matching[:, -1]
    L1_size = 3   # input dimensionality
    L2_size = 8   # number of clusters
    # art2 = Art2(L1_size, L2_size, d = 0.9, c = 0.1, rho = 0.9)
    # art2.train(img[:10000], epochs = 1)
    net = Art2Network(L1_size, L2_size, 0.9)
    y_train_pred = net.process_points(x_train, True)
    y_test_pred = net.process_points(x_test, False)
    print(f'Train accuracy: {cluster_acc(y_train, y_train_pred)}')
    print(f'Test accuracy: {cluster_acc(y_test, y_test_pred)}')
    show_confusion_matrix(y_test, y_test_pred)
    Plot3dData(x_test, y_test_pred, net, y_test, 'cube-notmaching')
def simple_test_hexagon(directory_path, train_split = 0.9):
    """Cluster the 2-D hexagon dataset with ART2 (vigilance 0.99) and
    report accuracies plus a 2-D plot."""
    data = pd.read_csv(f'{directory_path}/hexagon.csv').to_numpy()
    np.random.shuffle(data)
    # NOTE(review): despite its name, test_count is the number of TRAIN rows.
    test_count = int(train_split * len(data))
    x_train = data[:test_count, :-1]
    y_train = data[:test_count, -1]
    x_test = data[test_count:, :-1]
    y_test = data[test_count:, -1]
    L1_size = 2   # input dimensionality (x, y)
    L2_size = 6   # number of clusters (hexagon vertices)
    # art2 = Art2(L1_size, L2_size, d = 0.9, c = 0.1, rho = 0.9)
    # art2.train(img[:10000], epochs = 1)
    net = Art2Network(L1_size, L2_size, 0.99)
    y_train_pred = net.process_points(x_train, True)
    y_test_pred = net.process_points(x_test, False)
    print(f'Train accuracy: {cluster_acc(y_train, y_train_pred)}')
    print(f'Test accuracy: {cluster_acc(y_test, y_test_pred)}')
    show_confusion_matrix(y_test, y_test_pred)
    plot_hex(x_train, y_train_pred, net, y_train)
def test_mnist(directory_path, train_split = 0.9):
    """Cluster MNIST digits with ART2 and display one exemplar per cluster."""
    mndata = MNIST(directory_path)
    img, labels_true = mndata.load_training()
    # NOTE(review): despite its name, test_count is the number of TRAIN rows.
    test_count = int(train_split * len(img))
    x_train = img[:test_count]
    y_train = labels_true[:test_count]
    x_test = img[test_count:]
    y_test = labels_true[test_count:]
    L1_size = 784   # 28x28 pixels
    L2_size = 10    # one cluster per digit
    # art2 = Art2(L1_size, L2_size, d = 0.9, c = 0.1, rho = 0.9)
    # art2.train(img[:10000], epochs = 1)
    net = Art2Network(L1_size, L2_size, 0.8)
    y_train_pred = net.process_points(x_train, True)
    y_test_pred = net.process_points(x_test, False)
    print(f'Train accuracy: {cluster_acc(y_train, y_train_pred)}')
    print(f'Test accuracy: {cluster_acc(y_test, y_test_pred)}')
    show_confusion_matrix(y_test, y_test_pred)
    # Render each cluster's exemplar as a 28x28 image in a grid.
    clusters = []
    f, axarr = plt.subplots(L2_size//5,5)
    axarr = axarr.flatten()
    for i in range(L2_size):
        #cluster = art2.get_cluster_exemplar(i)
        cluster = net.get_cluster_exemplar(i)
        clusters.append(cluster)
        axarr[i].imshow(cluster.reshape(28,28))
    plt.show()
def test_mnist_subset(directory_path, train_split = 0.9, example_count = 'all'):
    """Train ART2 on MNIST digits 0-7 only, then evaluate on a test split
    containing all ten digits; display one exemplar per cluster."""
    mndata = MNIST(directory_path)
    img, labels_true = mndata.load_training()
    if example_count != 'all':
        img = img[:example_count]
        labels_true = labels_true[:example_count]
    # NOTE(review): despite its name, test_count is the number of TRAIN rows.
    test_count = int(train_split * len(img))
    x_train = np.array(img[:test_count])
    y_train = np.array(labels_true[:test_count])
    #data for only 8 labels
    x_train = x_train[y_train < 8]
    y_train = y_train[y_train < 8]
    #test data with all labels
    x_test = img[test_count:]
    y_test = labels_true[test_count:]
    L1_size = 784   # 28x28 pixels
    L2_size = 10    # clusters (more than the 8 trained digit classes)
    # art2 = Art2(L1_size, L2_size, d = 0.9, c = 0.1, rho = 0.9)
    # art2.train(img[:10000], epochs = 1)
    net = Art2Network(L1_size, L2_size, 0.8)
    y_train_pred = net.process_points(x_train, True)
    y_test_pred = net.process_points(x_test, False)
    show_cms(y_train, y_train_pred, y_test, y_test_pred)
    print(f'Train accuracy: {cluster_acc(y_train, y_train_pred)}')
    print(f'Test accuracy: {cluster_acc(y_test, y_test_pred)}')
    # show_confusion_matrix(y_train, y_train_pred)
    # show_confusion_matrix(y_test, y_test_pred)
    # Render each cluster's exemplar as a 28x28 image in a grid.
    clusters = []
    f, axarr = plt.subplots(L2_size//5,5)
    axarr = axarr.flatten()
    for i in range(L2_size):
        #cluster = art2.get_cluster_exemplar(i)
        cluster = net.get_cluster_exemplar(i)
        clusters.append(cluster)
        axarr[i].imshow(cluster.reshape(28,28))
    plt.show()
def test_human(directory_path):
    """Cluster the UCI Human Activity Recognition (HAR) dataset with ART2
    and report train/test cluster accuracy plus a confusion matrix."""
    x_train = pd.read_csv(f'{directory_path}/train/X_train.txt', delim_whitespace=True, header=None).to_numpy()
    y_train = pd.read_csv(f'{directory_path}/train/y_train.txt', delim_whitespace=True, header=None).to_numpy()
    x_test = pd.read_csv(f'{directory_path}/test/X_test.txt', delim_whitespace=True, header=None).to_numpy()
    y_test = pd.read_csv(f'{directory_path}/test/y_test.txt', delim_whitespace=True, header=None).to_numpy()
    L1_size = 561   # number of HAR feature columns
    L2_size = 6     # number of activity classes
    net = Art2Network(L1_size, L2_size, 0.95)
    y_train_pred = net.process_points(x_train, True)
    y_test_pred = net.process_points(x_test, False)
    # BUG FIX: the labels were swapped (train metrics printed as "Test" and
    # vice versa), inconsistent with every sibling test function above.
    print(f'Train accuracy: {cluster_acc(y_train, y_train_pred)}')
    print(f'Test accuracy: {cluster_acc(y_test, y_test_pred)}')
    show_confusion_matrix(y_test, y_test_pred)
89bcc430250d365378d1c5696e0f953145517a11 | Python | FinnHB/Python.GDAL | /Scripts/reduce_dataset.py | UTF-8 | 3,972 | 2.96875 | 3 | [] | no_license | '''
____________________________________________________________________
|TITLE : Reading Raster Data with GDAL |
|DATE : 08 May, 2021 |
|====================================================================|
|DESCRIPTION: |
| |
|This script goes through some basic function in GDAL and OGR, using |
|landcover data and a digital elevation model for the United Kingdom.|
|More specifically, the script covers: |
| 1. Reading & writing shapefiles/raster files |
| 2. Spatial transformations and projections |
| 3. Extracting polygons from larger shapefiles |
| 4. Clipping raster files based on polygon geometry |
| 5. Masks raster data by values |
| 6. Converting rasters to polygons |
| 7. Draw new polygons based on coordinates |
|____________________________________________________________________|
'''
#----------------------------#
#== PACKAGES & DIRECTORIES ==#
#----------------------------#
#-- Packages --#
import numpy as np
import pandas as pd
from osgeo import gdal, ogr, osr
import matplotlib.pyplot as plt
import os
from itertools import compress
#-- Directories --#
#Set main directory
dirname = os.path.dirname(os.path.abspath("__file__"))
os.listdir(dirname)
#Relative paths (each ends with a trailing separator via the final "")
paths = dict()
paths["shp_in"] = os.path.join(dirname, "..", "Data", "Inputs", "Shapefiles", "")
paths["raster_in"] = os.path.join(dirname, "..", "Data", "Inputs", "Rasters", "")
paths["shp_out"] = os.path.join(dirname, "..", "Data", "Outputs", "Shapefiles", "")
paths["raster_out"] = os.path.join(dirname, "..", "Data", "Outputs", "Rasters", "")
#----------------#
#== PARAMETERS ==#
#----------------#
#-- Switches --#
overwrite = False #If False, will not overwrite data if it already exists
#-----------------------------------#
#== READING & PRE-PROCESSING DATA ==#
#-----------------------------------#
#-- Reading Data --#
#Reading corine land cover
corine = gdal.Open(paths["raster_in"] + "CLC_2018.tif")
#UK border & counties (opened in update mode, flag 1)
uk = ogr.Open(paths["shp_in"] + "uk_poly.shp", 1)
#Combine all the DEMs for North Yorkshire into a single DEM
ny_dem = gdal.Open(paths["raster_in"] + "nyorkshire_dem.tif")
#-- Reproject to WGS84 --#
#Corine land cover
fname = "corine_wgs84.tif"
#NOTE(review): this existence check looks in raster_in, but the file is
#written to raster_out (the corine_uk block below checks raster_out) -- confirm.
if fname not in os.listdir(paths["raster_in"]) or overwrite == True:
    file = paths["raster_out"] + fname
    corine_wgs84 = gdal.Warp(file, corine, dstSRS = "EPSG:4326")
#North Yorkshire DEM
fname = "nyorkshire_dem_wgs84.tif"
#NOTE(review): same raster_in / raster_out mismatch as above -- confirm.
if fname not in os.listdir(paths["raster_in"]) or overwrite == True:
    file = paths["raster_out"] + fname
    dem_wgs84 = gdal.Warp(file, ny_dem, dstSRS = "EPSG:4326")
#-- Reduce resolution --#
#Corine (resampled to 0.01 degree cells, keeping the minimum value)
file = paths["raster_out"] + "corine_lowres.tif"
infile = paths["raster_out"] + "corine_wgs84.tif"
corine_lr = gdal.Warp(file, infile, xRes = 0.01, yRes = 0.01,
                      resampleAlg = "min")
#Clip landcover to UK
fname = "corine_uk.tif"
if fname not in os.listdir(paths["raster_out"]) or overwrite == True:
    #Reads in Corine data if not already loaded
    file = paths["raster_out"] + fname
    crop_poly = paths["shp_in"] + "uk_poly.shp"
    corine_uk = gdal.Warp(file, corine_lr, cutlineDSName = crop_poly,
                          cropToCutline = True, dstNodata = np.nan)
#DEM (same 0.01 degree down-sampling)
file = paths["raster_out"] + "dem_lowres.tif"
infile = paths["raster_out"] + "nyorkshire_dem_wgs84.tif"
dem_lr = gdal.Warp(file, infile, xRes = 0.01, yRes = 0.01,
                   resampleAlg = "min")
| true |
d03e922baef9aa9bb2d58195aaf2822acd7bc5c4 | Python | BrindaSahoo2020/Python-Programs | /SmallestNumer.py | UTF-8 | 307 | 4.125 | 4 | [] | no_license | #Python program to find smallest number in a list
#Sample Input
'''list1 = [12,1,3,4,5,5,12,13,12,12,12,1,2,0,-1]'''
#Sample Output
'''Smallest number is -1'''
# Find the smallest element: the builtin min() replaces the manual scan.
list1 = [12,1,3,4,5,5,12,13,12,12,12,1,2,0,-1]
# min() raises ValueError on an empty list; list1 is always non-empty here.
sma = min(list1)
print("Smallest number is :",sma)
0dfc0dff7af289e05f5cf8276c31a1a7193b24d7 | Python | tcloudb/Biological-Databases-Assignment-1 | /src/getSeq.py | UTF-8 | 478 | 3.40625 | 3 | [] | no_license | sequence = open("data.seq")
locater = open("data.in")
query = raw_input("Please enter your query: ")
file_contents = sequence.read()
index = file_contents.find(query)
if index != -1:
for line in reversed(locater.readlines()):
start = line.find(" ")
value = int(line[start+1:])
if index > value:
#Printing as a form of returning the gi number
print line[:start]
break
else:
print "The query is not present." | true |
2877016e5ed59892e070bcac3cf11a34e38d9c61 | Python | ahmedkamel97/CS142_assignment3 | /Parser2.py | UTF-8 | 2,364 | 3.203125 | 3 | [] | no_license | import typing
from typing import List
from CNF import read_grammar, convert_grammar
import random
END = None
debug = 1
"""
This is the main parser function
I used some online resources to build this parser function, the resources are cited within the references section
"""
def parse(grammar_file_path: str, sentence: str) -> bool:
    """CYK membership test: return True iff *sentence* (space-separated
    tokens) is derivable from the CNF grammar loaded from
    *grammar_file_path*; also prints the verdict.

    matrix[span-1][start] holds the non-terminals deriving the substring of
    length `span` beginning at `start`.
    """
    grammar = convert_grammar(grammar_file_path)
    new_string = sentence.split()
    length = len(new_string)
    matrix = [[[] for x in range(length)] for y in range(length)]
    # Base row: terminal rules A -> 'word' seed the length-1 spans.
    for word in range(length):
        for rule in grammar:
            if rule[1] == "\'%s\'" % new_string[word]:
                matrix[0][word].append(rule[0])
    # Combine adjacent spans with binary rules A -> B C.
    for words_to_consider in range(2, length + 1):
        for start_cell in range(0, length - words_to_consider + 1):
            for left_size in range(1, words_to_consider):
                right_size = words_to_consider - left_size
                for rule in grammar:
                    if [x for x in matrix[left_size - 1][start_cell] if x == rule[1]]:
                        if [x for x in matrix[right_size - 1][start_cell + left_size] if x == rule[2]]:
                            matrix[words_to_consider - 1][start_cell].append(rule[0])
    # The sentence belongs to the grammar iff the start symbol (grammar[0][0])
    # derives the full-length span.
    sentence = grammar[0][0]
    if [n for n in matrix[-1][0] if n == sentence]:
        print("Belongs to the grammar")
        return True
    else:
        print("Does not belong to the grammar")
        return False
# Sample inputs: well-formed function definitions plus some that should be
# rejected by the math grammar.
functions = ['f ( x ) = x * 5',
             'f ( x , z ) = sin ( x * z )',
             'f ( x , z ) = ( x + z ) / 2',
             'x + y + z',
             'f ( a ) = a / 2',
             'g ( x ) = f ( z )',
             ]
for i in functions:
    print(i,':')
    parse('./math_grammar.txt', i)
    print('')
#------------------------------------------------------------------------------FUNCTION_Generator-----------------------------------------------------------------------
def fun_gen(function_generator: str) -> str:
    """Emit Python source for *function_generator* when it parses under the
    math grammar; otherwise report that no code can be generated."""
    accepted = parse('./math_grammar.txt', function_generator)
    if not accepted:
        print('Therefore, code cannot be generated')
    else:
        # Strip spaces and split the definition into its two sides.
        compact = function_generator.replace(' ', '')
        left, right = compact.split('=')
        print('def {}: \n y = {} \n return y'.format(left, right))
# Generate code for each sample (invalid ones print a rejection message).
for i in functions:
    print(i,':')
    fun_gen(i)
    print('')
| true |
7c7eef3b16f5f09809e29b8a59a54374fcc8a825 | Python | robstall/pi-play | /Servo.py | UTF-8 | 815 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
class Servo:
    """Drive a hobby servo via software PWM on a Raspberry Pi GPIO pin.
    (Python 2 code.)"""
    def __init__(self, pin, cycle_ms, pulse_ms_list):
        # pin: board-numbered GPIO pin; cycle_ms: PWM period in ms;
        # pulse_ms_list: selectable pulse widths in ms (chosen later by index).
        self.pin = pin
        self.frequency = 1000 / cycle_ms  # NOTE: integer division under Python 2
        self.dutycycles= []
        for p in pulse_ms_list:
            self.dutycycles.append(p / cycle_ms * 100)  # pulse width as % of period
        print "f=" + str(self.frequency) + " dc=" + str(self.dutycycles)
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.pin, GPIO.OUT)
        self.pwm = GPIO.PWM(self.pin, self.frequency)
    def start(self, idx):
        # Select the idx-th pulse width and (re)start the PWM output.
        self.pwm.ChangeDutyCycle(self.dutycycles[idx])
        self.pwm.start(self.dutycycles[idx])
    def stop(self):
        self.pwm.stop()
# Demo: 20 ms cycle with 1.3/1.5/1.7 ms pulses on board pin 7.
servo = Servo(7, 20, [1.3, 1.5, 1.7])
servo.start(0)
time.sleep(2)
servo.start(1)
time.sleep(1)
servo.start(2)
time.sleep(2)
servo.stop()
GPIO.cleanup()
| true |
b8be4bdb76e7984b8c8b1c0c457aa46965c52abe | Python | dqyi11/SVNBackup | /Proj/UR/GeneratePaths/WorldViz.py | UTF-8 | 4,188 | 2.859375 | 3 | [] | no_license | '''
Created on Jul 30, 2015
@author: daqing_yi
'''
import pygame, sys
from pygame.locals import *
import numpy as np
from Path import *
# RGB color constants used when drawing markers and paths.
BLUE = (0,0,255)
RED = (255,0,0)
BLACK = (0,0,0)
GREEN = (0,255,0)
class WorldViz(object):
    """Pygame visualisation of a world: draws objects, lets the user pick
    init (left click) and goal (any other button) positions, and renders
    paths to image files.  (Python 2 code.)"""
    def __init__(self, world):
        self.world = world
        pygame.init()
        self.screen = pygame.display.set_mode((int(self.world.width),int(self.world.height)))
        pygame.display.set_caption(self.world.name)
        self.screen.fill((255,255,255))
        self.myfont = pygame.font.SysFont("monospace", 15)
        # One random RGB color per world object.
        self.colors = []
        for obj in self.world.objects:
            color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
            self.colors.append(color)
    def update(self):
        """Process events and redraw one frame; return False on window close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    # Left click sets the init position.
                    pos = pygame.mouse.get_pos()
                    print "LEFT " + str(pos)
                    self.world.init = pos
                else:
                    # Any other button sets the goal position.
                    pos = pygame.mouse.get_pos()
                    print "RIGHT " + str(pos)
                    self.world.goal = pos
        self.screen.fill((255,255,255))
        RADIUS = 10
        RECT_WIDTH = 16
        # Robots are circles; every other object type is a square, each
        # labelled "type(name)" just below its center.
        for i in range(len(self.world.objects)):
            obj = self.world.objects[i]
            if obj.type == "robot":
                pygame.draw.circle(self.screen, self.colors[i], obj.center, RADIUS)
            else:
                pygame.draw.rect(self.screen, self.colors[i], (obj.center[0]-RECT_WIDTH/2, obj.center[1]-RECT_WIDTH/2, RECT_WIDTH, RECT_WIDTH))
            label = self.myfont.render(obj.type+"("+obj.name+")", 1, (0,0,0))
            self.screen.blit(label, (obj.center[0], obj.center[1]+15))
            #pygame.draw.line(self.screen, GREEN, [int(obj.bounding[0]), int(obj.center.y)], [int(obj.bounding[2]),int(obj.center.y)], 2)
            #pygame.draw.line(self.screen, GREEN, [int(obj.center.x), int(obj.bounding[1])], [int(obj.center.x), int(obj.bounding[3])], 2)
        if self.world.init != None:
            pygame.draw.circle(self.screen, BLUE, self.world.init, 10, 0)
        if self.world.goal != None:
            pygame.draw.circle(self.screen, RED, self.world.goal, 10, 0)
        pygame.display.flip()
        pygame.time.delay(100)
        return True
    def close(self):
        pygame.quit()
    def drawPath(self, path, filename, background=""):
        """Render the world plus *path* to *filename*, optionally over a
        background image."""
        surface = pygame.Surface((self.world.width, self.world.height))
        if background == "":
            surface.fill((255,255,255))
        else:
            #surface.fill((255,255,255))
            img = pygame.image.load(background)
            surface.blit( img, (0,0) )
        RADIUS = 10
        RECT_WIDTH = 16
        for i in range(len(self.world.objects)):
            obj = self.world.objects[i]
            if obj.type == "robot":
                pygame.draw.circle(surface, self.colors[i], obj.center, RADIUS)
            else:
                pygame.draw.rect(surface, self.colors[i], (obj.center[0]-RECT_WIDTH/2, obj.center[1]-RECT_WIDTH/2, RECT_WIDTH, RECT_WIDTH))
            label = self.myfont.render(obj.type+"("+obj.name+")", 1, (0,0,0))
            surface.blit(label, (obj.center[0], obj.center[1]+15))
        # Draw the path as thick black segments between consecutive waypoints.
        pathLen = len(path.waypoints)
        #print path.waypoints
        for i in range(pathLen-1):
            pygame.draw.line(surface, (0,0,0), path.waypoints[i], path.waypoints[i+1], 6)
        if self.world.init != None:
            pygame.draw.circle(surface, BLUE, self.world.init, 10, 0)
        if self.world.goal != None:
            pygame.draw.circle(surface, RED, self.world.goal, 10, 0)
        pygame.image.save(surface, filename)
| true |
1a02aa73a4a4c8b1fe503f6873915ff693051b25 | Python | ayushagarwal2419/common_scripts | /extract_text/krutidev_unicode_converter.py | UTF-8 | 11,691 | 2.859375 | 3 | [
"MIT"
] | permissive | def krutidev_to_unicode(krutidev_substring):
modified_substring = krutidev_substring
array_one = ["ñ", "Q+Z", "sas", "aa", ")Z", "ZZ", "‘", "’", "“", "”",
"å", "ƒ", "„", "…", "†", "‡", "ˆ", "‰", "Š", "‹",
"¶+", "d+", "[+k", "[+", "x+", "T+", "t+", "M+", "<+", "Q+", ";+", "j+", "u+",
"Ùk", "Ù", "Dr", "–", "—", "é", "™", "=kk", "f=k",
"à", "á", "â", "ã", "ºz", "º", "í", "{k", "{", "=", "«",
"Nî", "Vî", "Bî", "Mî", "<î", "|", "K", "}",
"J", "Vª", "Mª", "<ªª", "Nª", "Ø", "Ý", "nzZ", "æ", "ç", "Á", "xz", "#", ":",
"v‚", "vks", "vkS", "vk", "v", "b±", "Ã", "bZ", "b", "m", "Å", ",s", ",", "_",
"ô", "d", "Dk", "D", "[k", "[", "x", "Xk", "X", "Ä", "?k", "?", "³",
"pkS", "p", "Pk", "P", "N", "t", "Tk", "T", ">", "÷", "¥",
"ê", "ë", "V", "B", "ì", "ï", "M+", "<+", "M", "<", ".k", ".",
"r", "Rk", "R", "Fk", "F", ")", "n", "/k", "èk", "/", "Ë", "è", "u", "Uk", "U",
"i", "Ik", "I", "Q", "¶", "c", "Ck", "C", "Hk", "H", "e", "Ek", "E",
";", "¸", "j", "y", "Yk", "Y", "G", "o", "Ok", "O",
"'k", "'", "\"k", "\"", "l", "Lk", "L", "g",
"È", "z",
"Ì", "Í", "Î", "Ï", "Ñ", "Ò", "Ó", "Ô", "Ö", "Ø", "Ù", "Ük", "Ü",
"‚", "ks", "kS", "k", "h", "q", "w", "`", "s", "S",
"a", "¡", "%", "W", "•", "·", "∙", "·", "~j", "~", "\\", "+", " ः",
"^", "*", "Þ", "ß", "(", "¼", "½", "¿", "À", "¾", "A", "-", "&", "&", "Œ", "]", "~ ", "@"]
array_two = ["॰", "QZ+", "sa", "a", "र्द्ध", "Z", "\"", "\"", "'", "'",
"०", "१", "२", "३", "४", "५", "६", "७", "८", "९",
"फ़्", "क़", "ख़", "ख़्", "ग़", "ज़्", "ज़", "ड़", "ढ़", "फ़", "य़", "ऱ", "ऩ",
"त्त", "त्त्", "क्त", "दृ", "कृ", "न्न", "न्न्", "=k", "f=",
"ह्न", "ह्य", "हृ", "ह्म", "ह्र", "ह्", "द्द", "क्ष", "क्ष्", "त्र", "त्र्",
"छ्य", "ट्य", "ठ्य", "ड्य", "ढ्य", "द्य", "ज्ञ", "द्व",
"श्र", "ट्र", "ड्र", "ढ्र", "छ्र", "क्र", "फ्र", "र्द्र", "द्र", "प्र", "प्र", "ग्र", "रु", "रू",
"ऑ", "ओ", "औ", "आ", "अ", "ईं", "ई", "ई", "इ", "उ", "ऊ", "ऐ", "ए", "ऋ",
"क्क", "क", "क", "क्", "ख", "ख्", "ग", "ग", "ग्", "घ", "घ", "घ्", "ङ",
"चै", "च", "च", "च्", "छ", "ज", "ज", "ज्", "झ", "झ्", "ञ",
"ट्ट", "ट्ठ", "ट", "ठ", "ड्ड", "ड्ढ", "ड़", "ढ़", "ड", "ढ", "ण", "ण्",
"त", "त", "त्", "थ", "थ्", "द्ध", "द", "ध", "ध", "ध्", "ध्", "ध्", "न", "न", "न्",
"प", "प", "प्", "फ", "फ्", "ब", "ब", "ब्", "भ", "भ्", "म", "म", "म्",
"य", "य्", "र", "ल", "ल", "ल्", "ळ", "व", "व", "व्",
"श", "श्", "ष", "ष्", "स", "स", "स्", "ह",
"ीं", "्र",
"द्द", "ट्ट", "ट्ठ", "ड्ड", "कृ", "भ", "्य", "ड्ढ", "झ्", "क्र", "त्त्", "श", "श्",
"ॉ", "ो", "ौ", "ा", "ी", "ु", "ू", "ृ", "े", "ै",
"ं", "ँ", "ः", "ॅ", "ऽ", "ऽ", "ऽ", "ऽ", "्र", "्", "?", "़", ":",
"‘", "’", "“", "”", ";", "(", ")", "{", "}", "=", "।", ".", "-", "µ", "॰", ",", "् ", "/"]
array_one_length = len(array_one)
# Specialty characters
# Move "f" to correct position and replace
modified_substring = " " + modified_substring + " "
position_of_f = modified_substring.rfind("f")
while position_of_f != -1:
modified_substring = modified_substring[:position_of_f] + modified_substring[position_of_f + 1] + \
modified_substring[position_of_f] + modified_substring[position_of_f + 2:]
position_of_f = modified_substring.rfind("f", 0,
position_of_f - 1) # search for f ahead of the current position.
modified_substring = modified_substring.replace("f", "ि")
modified_substring = modified_substring.strip()
# Move "half R" to correct position and replace
modified_substring = " " + modified_substring + " "
position_of_r = modified_substring.find("Z")
set_of_matras = ["‚", "ks", "kS", "k", "h", "q", "w", "`", "s", "S", "a", "¡", "%", "W", "·", "~ ", "~"]
while (position_of_r != -1):
modified_substring = modified_substring.replace("Z", "", 1)
if modified_substring[position_of_r - 1] in set_of_matras:
modified_substring = modified_substring[:position_of_r - 2] + "j~" + modified_substring[position_of_r - 2:]
else:
modified_substring = modified_substring[:position_of_r - 1] + "j~" + modified_substring[position_of_r - 1:]
position_of_r = modified_substring.find("Z")
modified_substring = modified_substring.strip()
# Replace ASCII with Unicode
for input_symbol_idx in range(0, array_one_length):
modified_substring = modified_substring.replace(array_one[input_symbol_idx], array_two[input_symbol_idx])
return modified_substring
# Unicode to KrutiDev function
def unicode_to_krutidev(unicode_substring):
modified_substring = unicode_substring
array_one = ["‘", "’", "“", "”", "(", ")", "{", "}", "=", "।", "?", "-", "µ", "॰", ",", ".", "् ",
"०", "१", "२", "३", "४", "५", "६", "७", "८", "९", "x",
"फ़्", "क़", "ख़", "ग़", "ज़्", "ज़", "ड़", "ढ़", "फ़", "य़", "ऱ", "ऩ",
"त्त्", "त्त", "क्त", "दृ", "कृ",
"ह्न", "ह्य", "हृ", "ह्म", "ह्र", "ह्", "द्द", "क्ष्", "क्ष", "त्र्", "त्र", "ज्ञ",
"छ्य", "ट्य", "ठ्य", "ड्य", "ढ्य", "द्य", "द्व",
"श्र", "ट्र", "ड्र", "ढ्र", "छ्र", "क्र", "फ्र", "द्र", "प्र", "ग्र", "रु", "रू",
"्र",
"ओ", "औ", "आ", "अ", "ई", "इ", "उ", "ऊ", "ऐ", "ए", "ऋ",
"क्", "क", "क्क", "ख्", "ख", "ग्", "ग", "घ्", "घ", "ङ",
"चै", "च्", "च", "छ", "ज्", "ज", "झ्", "झ", "ञ",
"ट्ट", "ट्ठ", "ट", "ठ", "ड्ड", "ड्ढ", "ड", "ढ", "ण्", "ण",
"त्", "त", "थ्", "थ", "द्ध", "द", "ध्", "ध", "न्", "न",
"प्", "प", "फ्", "फ", "ब्", "ब", "भ्", "भ", "म्", "म",
"य्", "य", "र", "ल्", "ल", "ळ", "व्", "व",
"श्", "श", "ष्", "ष", "स्", "स", "ह",
"ऑ", "ॉ", "ो", "ौ", "ा", "ी", "ु", "ू", "ृ", "े", "ै",
"ं", "ँ", "ः", "ॅ", "ऽ", "् ", "्"]
array_two = ["^", "*", "Þ", "ß", "¼", "½", "¿", "À", "¾", "A", "\\", "&", "&", "Œ", "]", "-", "~ ",
"å", "ƒ", "„", "…", "†", "‡", "ˆ", "‰", "Š", "‹", "Û",
"¶", "d", "[k", "x", "T", "t", "M+", "<+", "Q", ";", "j", "u",
"Ù", "Ùk", "Dr", "–", "—",
"à", "á", "â", "ã", "ºz", "º", "í", "{", "{k", "«", "=", "K",
"Nî", "Vî", "Bî", "Mî", "<î", "|", "}",
"J", "Vª", "Mª", "<ªª", "Nª", "Ø", "Ý", "æ", "ç", "xz", "#", ":",
"z",
"vks", "vkS", "vk", "v", "bZ", "b", "m", "Å", ",s", ",", "_",
"D", "d", "ô", "[", "[k", "X", "x", "?", "?k", "³",
"pkS", "P", "p", "N", "T", "t", "÷", ">", "¥",
"ê", "ë", "V", "B", "ì", "ï", "M", "<", ".", ".k",
"R", "r", "F", "Fk", ")", "n", "/", "/k", "U", "u",
"I", "i", "¶", "Q", "C", "c", "H", "Hk", "E", "e",
"¸", ";", "j", "Y", "y", "G", "O", "o",
"'", "'k", "\"", "\"k", "L", "l", "g",
"v‚", "‚", "ks", "kS", "k", "h", "q", "w", "`", "s", "S",
"a", "¡", "%", "W", "·", "~ ", "~"]
array_one_length = len(array_one)
# Specialty characters
modified_substring = modified_substring.replace("क़", "क़")
modified_substring = modified_substring.replace("ख़", "ख़")
modified_substring = modified_substring.replace("ग़", "ग़")
modified_substring = modified_substring.replace("ज़", "ज़")
modified_substring = modified_substring.replace("ड़", "ड़")
modified_substring = modified_substring.replace("ढ़", "ढ़")
modified_substring = modified_substring.replace("ऩ", "ऩ")
modified_substring = modified_substring.replace("फ़", "फ़")
modified_substring = modified_substring.replace("य़", "य़")
modified_substring = modified_substring.replace("ऱ", "ऱ")
modified_substring = modified_substring.replace("ि", "f")
# Replace Unicode with ASCII
for input_symbol_idx in range(0, array_one_length):
modified_substring = modified_substring.replace(array_one[input_symbol_idx], array_two[input_symbol_idx])
# Move "f" to correct position
modified_substring = " " + modified_substring + " "
position_of_f = modified_substring.find("f")
while position_of_f != -1:
modified_substring = modified_substring[:position_of_f - 1] + modified_substring[position_of_f] + \
modified_substring[position_of_f - 1] + modified_substring[position_of_f + 1:]
position_of_f = modified_substring.find("f", position_of_f + 1) # search for f ahead of the current position.
modified_substring = modified_substring.strip()
# Move "half R" to correct position and replace
modified_substring = " " + modified_substring + " "
position_of_r = modified_substring.find("j~")
set_of_matras = ["‚", "ks", "kS", "k", "h", "q", "w", "`", "s", "S", "a", "¡", "%", "W", "·", "~ ", "~"]
while position_of_r != -1:
modified_substring = modified_substring.replace("j~", "", 1)
if modified_substring[position_of_r + 1] in set_of_matras:
modified_substring = modified_substring[:position_of_r + 2] + "Z" + modified_substring[position_of_r + 2:]
else:
modified_substring = modified_substring[:position_of_r + 1] + "Z" + modified_substring[position_of_r + 1:]
position_of_r = modified_substring.find("j~")
modified_substring = modified_substring.strip()
return modified_substring
if __name__ == '__main__':
print(krutidev_to_unicode('ikVhZd'))
| true |
a6d1bcf4312d70bac6103850a49c949e88931e09 | Python | ZoranPandovski/al-go-rithms | /sort/BozoSort/bozo_sort.py | UTF-8 | 1,162 | 4.34375 | 4 | [
"CC0-1.0"
] | permissive | """
Bozo sort consists out of checking if the input sequence is sorted and if not swapping randomly two elements.
This is repeated until eventually the sequence is sorted.
Takes in an array of numbers and provides a sorted array.
It may take O(infinity) at worst case as upper limit is not defined and best case will be O(1) when the array is sorted.
Since on average, it may take n! permutations, its average complexity is considered O(n!)
"""
import random
def sort_check(array):
for i in range(0,len(array)-1):
if array[i]>array[i+1]:
return False
return True
def bozo_sort_algo(array):
while not sort_check(array):
i,j=random.randint(0,len(array)-1),random.randint(0,len(array)-1)
array[i],array[j]=array[j],array[i]
return array
n=int(input('Enter Size of Array: '))
arr=[input('Enter Element %d : '%(i+1)) for i in range(n)]
print ('Original Array : ',arr)
print ('Sorted Array : ',bozo_sort_algo(arr))
def test():
arr=[54,78,95,63,12,221,1,0,-7,8,35,15,7,66,421,798]
print ('Original Array : ',arr)
print ('Sorted Array : ',bozo_sort_algo(arr))
test()
| true |
b01ad886b7d2ef87359a509b221500d894d23b4b | Python | kerighan/differential | /tests/lotka_volterra.py | UTF-8 | 475 | 3.375 | 3 | [] | no_license | from differential import Differential, Unknown
import numpy as np
# initial condition
alpha = 2 / 3
beta = 4 / 3
gamma = 1
delta = 1
# create unknowns
x = Unknown('x', 2, label='proie') # proie
y = Unknown('y', 1.5, label='prédateur') # prédateur
# write down equation
x.dt = alpha * x - beta * x * y
y.dt = delta * x * y - gamma * y
# time points
t = np.linspace(0, 20, 1000)
# solve ODE
diff = Differential(x, y)
diff.solve(t)
diff.plot(t)
| true |
95e23b0d54d064365cb4d3a4d093804ca3ec257a | Python | adityachoudary54/covid19 | /WorldIndiaDjangoproject/src/worldStatistics/trendsScript.py | UTF-8 | 2,592 | 2.65625 | 3 | [] | no_license | import os
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import wget
from bs4 import BeautifulSoup
import urllib.request
url = "https://pomber.github.io/covid19/timeseries.json"
def download():
reqPath=os.path.abspath("./worldStatistics/trendsData")
if "timeseries.json" not in os.listdir(reqPath):
wget.download(url, os.path.join(reqPath,'timeseries.json'))
def printData():
timeSeriesDf=pd.read_json('./worldStatistics/trendsData/timeseries.json')
print(timeSeriesDf.head())
download()
timeSeriesDf=pd.read_json('./worldStatistics/trendsData/timeseries.json')
def cleanedData(timeSeriesDf):
data={}
for x,y in timeSeriesDf.items():
dataCountry=pd.DataFrame.from_dict(y[0],orient='index')
for index,item in enumerate(y[1:]):
s2=pd.DataFrame.from_dict(item,orient='index')
dataCountry=pd.concat([dataCountry,s2],axis=1)
dataCountry=dataCountry.T
data[x]=dataCountry
return data
data=cleanedData(timeSeriesDf)
def countryList():
return list(data.keys())
def countryPlotLy(countryName,casetype):
temp=os.path.abspath("./worldStatistics/trendsData")
path=os.path.join(temp,'{} {} trends.html'.format(countryName,casetype))
fig = go.Figure()
fig.add_trace(go.Scatter(x=data[countryName]['date'], y=data[countryName][casetype],
mode='lines+markers',
name='{} cases'.format(casetype)))
# fig.add_trace(go.Scatter(x=data[countryName]['date'], y=data[countryName]['deaths'],
# mode='lines',
# name='Deaths'))
# fig.add_trace(go.Scatter(x=data[countryName]['date'], y=data[countryName]['recovered'],
# mode='lines', name='Recovered'))
# fig.add_trace(go.Scatter(x=data[countryName]['date'], y=data[countryName]['confirmed']-data[countryName]['deaths']-data[countryName]['recovered'],
# mode='lines+markers', name='Actual cases remaining'))
fig.update_layout(
height=600,
width=800,
title='{} covid19 {} cases statistics'.format(countryName,casetype),
xaxis_title="Date",
yaxis_title="Number of cases",
font=dict(
family="Arial",
size=12,
color='black'
),
)
fig.write_html(path)
f=open(path, "r")
contents=f.read()
soup=BeautifulSoup(contents,'html.parser')
div=str(soup.find('div'))
return div
# countryPlotLy('India',data) | true |
713b81246531095ca417e82bfa9e253ca0afee4c | Python | Blogg9ggg/Project-Euler-Solution | /P16.py | UTF-8 | 174 | 2.78125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 00:37:55 2020
@author: blogg9ggg
"""
a = 2**1000
ans = 0
while a > 0:
ans = ans + a%10
a = a//10
print(ans)
| true |
22ca833edc9733b83e3f69fb794668ed824893f0 | Python | xu-robert/Leetcode | /Get Maximum in Generated Array.py | UTF-8 | 1,850 | 3.96875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 09:51:40 2021
Get Maximum in Generated Array
You are given an integer n. An array nums of length n + 1 is generated in the following way:
nums[0] = 0
nums[1] = 1
nums[2 * i] = nums[i] when 2 <= 2 * i <= n
nums[2 * i + 1] = nums[i] + nums[i + 1] when 2 <= 2 * i + 1 <= n
Return the maximum integer in the array nums.
Example 1:
Input: n = 7
Output: 3
Explanation: According to the given rules:
nums[0] = 0
nums[1] = 1
nums[(1 * 2) = 2] = nums[1] = 1
nums[(1 * 2) + 1 = 3] = nums[1] + nums[2] = 1 + 1 = 2
nums[(2 * 2) = 4] = nums[2] = 1
nums[(2 * 2) + 1 = 5] = nums[2] + nums[3] = 1 + 2 = 3
nums[(3 * 2) = 6] = nums[3] = 2
nums[(3 * 2) + 1 = 7] = nums[3] + nums[4] = 2 + 1 = 3
Hence, nums = [0,1,1,2,1,3,2,3], and the maximum is 3.
Example 2:
Input: n = 2
Output: 1
Explanation: According to the given rules, the maximum between nums[0], nums[1], and nums[2] is 1.
Example 3:
Input: n = 3
Output: 2
Explanation: According to the given rules, the maximum between nums[0], nums[1], nums[2], and nums[3] is 2.
Constraints:
0 <= n <= 100
Solution:
if n < 1, just return n
otherwise just follow the rules: if n even: arr[n] = arr[n//2], if n odd: arr[n] = arr[n//2] + arr[n-n//2] and
keep track of the max element max(ans, arr[-1]).
@author: Robert Xu
"""
class Solution(object):
def getMaximumGenerated(self, n):
"""
:type n: int
:rtype: int
"""
arr = [0,1]
if n <= 1:
return arr[n]
ans = 0
for i in range(2, n+1):
if i%2 == 0:
arr.append(arr[i//2])
else:
arr.append(arr[i//2] + arr[i-i//2])
ans = max(ans, arr[-1])
return ans
a = Solution()
b = a.getMaximumGenerated(100) | true |
385c23f290dd44a7c0bcb06fb1279007f2d043fc | Python | afcarl/time_series_anomaly_detect_keras | /src/data/data_transform.py | UTF-8 | 989 | 2.59375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from scipy import signal
import numpy as np
from scipy import signal
class DataTransform(object):
def __init__(self, data: np.array, sampling_frequency: int=10e3):
self.data = data
self.sampling_frequency = sampling_frequency
def data_transform(self, transform_option: str='spectrogram') -> np.array:
if transform_option == 'spectrogram':
return self.__spectrogram_transform()
if transform_option == 'wavelet':
return self.__wavelet_transform()
def __wavelet_transform(self):
f, t, Sxx = signal.spectrogram(self.data, self.sampling_frequency)
f = np.arange(1, max(f))
cwtmatr = signal.cwt(self.data, signal.ricker, f)
return cwtmatr, max(f)
def __spectrogram_transform(self):
f, t, Sxx = signal.spectrogram(self.data, self.sampling_frequency)
return f, t, Sxx
| true |
e09c63dfbb5358787edd98eca49dabf78da0805a | Python | shahbagdadi/py-algo-n-ds | /misc/ramdomWeights/Solution.py | UTF-8 | 537 | 3.515625 | 4 | [
"MIT"
] | permissive |
from typing import List
import random
import bisect
class Solution:
def __init__(self, w: List[int]):
self.weights , self.idx = [0], 0
for i in range(len(w)):
self.weights.append(self.weights[-1] + w[i])
def pickIndex(self) -> int:
rand = random.randint(1, self.weights[-1])
idx = bisect.bisect_left(self.weights, rand)
return idx - 1
# Your Solution object will be instantiated and called as such:
w = [1,3]
obj = Solution(w)
print(obj.pickIndex())
print(obj.pickIndex()) | true |
30d04e959d43715220b5cfdeead61b0dc528827b | Python | mnvx/neonka | /examples/network_example.py | UTF-8 | 1,028 | 2.78125 | 3 | [] | no_license | import numpy
from back_propagation.back_propagation import back_propagation
from network.layered import LayeredNetwork
from neuron.neuron import Neuron
def activation(x):
return max(x, 0)
def activation_derivative(x):
return 1 if x > 0 else 0
net = LayeredNetwork([3, 2, 1])
# print(net)
# Custom weights
net.set_neuron(0, 0, Neuron(numpy.array([[0, 1]]).T))
net.set_neuron(0, 1, Neuron(numpy.array([[0, 1]]).T))
net.set_neuron(0, 2, Neuron(numpy.array([[0, 1]]).T))
net.set_neuron(1, 0, Neuron(
numpy.array([[0, 0.7, 0.2, 0.7]]).T,
activation_function=activation,
activation_function_derivative=activation_derivative,
))
net.set_neuron(1, 1, Neuron(numpy.array([[0, 0.8, 0.3, 0.6]]).T))
net.set_neuron(2, 0, Neuron(numpy.array([[0, 0.2, 0.4]]).T))
# print(net)
examples = numpy.array([[0, 1, 1]])
print("examples")
print(examples)
expected_results = numpy.array([[1]]).T
print("expected_results")
print(expected_results)
print('')
back_propagation(net, examples, expected_results)
# print(net)
| true |
88862d0ba97e5b4a97f67ab299e030b1b2156374 | Python | Elzei/show-off | /Python/Skrypty/Python - Szkolenie_11-2015/przyklady_rec_python/PythonExample/generator.py | UTF-8 | 281 | 3.328125 | 3 | [
"MIT"
] | permissive | def lamYield(x):
yield x
def dzielniki(liczba):
dzielnik = []
dzielnik = filter(lambda x: not liczba % x, range(2,liczba // 2 + 1))
dzielnik = dzielnik if len(dzielnik) else [0]
for i in dzielnik:
yield i
for i in dzielniki(35):
print i
| true |
751352b1b5dad5f730e4a716f8add63d8e58b512 | Python | stephentannersmith/flask_mysql | /semi_restful_users/server.py | UTF-8 | 2,737 | 2.859375 | 3 | [] | no_license | from flask import Flask, redirect, render_template, request
from mysqlconnection import connectToMySQL
app = Flask(__name__)
#renders template for web app
@app.route('/')
def index():
return render_template("add_user.html")
#route that saves user info to DB
@app.route('/add_user', methods=["POST"])
def add_user():
query= "INSERT INTO users (first_name, last_name, email, created_at, updated_at) VALUES (%(fn)s, %(ln)s, %(em)s, NOW(), NOW());"
data = {
"fn": request.form["first_name"],
"ln": request.form["last_name"],
"em": request.form["email"],
}
mysql = connectToMySQL("users")
user_id = mysql.query_db(query,data) # pylint: disable=unused-variable
return redirect('/users/{}'.format(user_id))
#this route displays stored user info and is redirected from the add_user route
@app.route("/users/<user_id>")
def view_user(user_id):
query = "SELECT * FROM users WHERE id = %(id)s"
data = {
'id': user_id
}
mysql = connectToMySQL('users')
user = mysql.query_db(query, data)
print(user)
return render_template("user_details.html", user=user[0])
#route deletes user from DB and redirects to all users
@app.route("/delete/<user_id>")
def delete_user(user_id):
query = "DELETE FROM users WHERE id = %(id)s"
data = {
'id': user_id
}
mysql = connectToMySQL('users')
user = mysql.query_db(query, data) # pylint: disable=unused-variable
return redirect("/users")
#this route GETS the requested data and displays it on a new page with form
@app.route('/edit_user/<user_id>')
def edit_user(user_id):
query = "SELECT * FROM users WHERE id = %(id)s"
data = {
'id': user_id
}
mysql = connectToMySQL('users')
user = mysql.query_db(query,data)
return render_template("edit_user.html", user=user[0])
#this updates user information and redirects back to view_user
@app.route('/update_user/<user_id>', methods=["POST"])
def update_user(user_id):
query = "UPDATE users SET first_name=%(fn)s, last_name=%(ln)s, email=%(em)s, updated_at=NOW()"
data = {
"fn": request.form["first_name"],
"ln": request.form["last_name"],
"em": request.form["email"],
}
mysql = connectToMySQL('users')
mysql.query_db(query,data)
return redirect('/users/{}'.format(user_id))
#this route is used for selecting and logging console information for every page load and action
@app.route("/users")
def all_users():
query = "SELECT * FROM users"
mysql = connectToMySQL('users')
all_users = mysql.query_db(query)
print(all_users)
return render_template("all_users.html", all_users=all_users)
if __name__ == "__main__":
app.run(debug=True) | true |
4cc45e2816c930eb04ce355ddf554dcd80e148e8 | Python | RobertKoehlmoos/threading_with_python | /problems_test.py | UTF-8 | 213 | 2.671875 | 3 | [] | no_license | from problems import chars_in_sonnets
def test_chars_in_sonnets():
num_chars = chars_in_sonnets()
assert num_chars == 94084, f"Expected 94084 total characters, but chars_in_sonnets returned {num_chars}"
| true |
bd521509848ef5bed73d321aa963fc61c495c16c | Python | onekr-billy/django-ordered-model | /tests/models.py | UTF-8 | 3,932 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | from django.db import models
from ordered_model.models import OrderedModel, OrderedModelBase
# test simple automatic ordering
class Item(OrderedModel):
name = models.CharField(max_length=100)
# test Answer.order_with_respect_to being a tuple
class Question(models.Model):
pass
class TestUser(models.Model):
pass
class Answer(OrderedModel):
question = models.ForeignKey(
Question, on_delete=models.CASCADE, related_name="answers"
)
user = models.ForeignKey(TestUser, on_delete=models.CASCADE, related_name="answers")
order_with_respect_to = ("question", "user")
class Meta:
ordering = ("question", "user", "order")
def __unicode__(self):
return "Answer #{0:d} of question #{1:d} for user #{2:d}".format(
self.order, self.question_id, self.user_id
)
# test ordering whilst overriding the automatic primary key (ie. not models.Model.id)
class CustomItem(OrderedModel):
pkid = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
modified = models.DateTimeField(null=True, blank=True)
# test ordering over custom ordering field (ie. not OrderedModel.order)
class CustomOrderFieldModel(OrderedModelBase):
sort_order = models.PositiveIntegerField(editable=False, db_index=True)
name = models.CharField(max_length=100)
order_field_name = "sort_order"
class Meta:
ordering = ("sort_order",)
# test ThroughModel ordering with Pizzas/Topping
class Topping(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Pizza(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping, through="PizzaToppingsThroughModel")
def __str__(self):
return self.name
class PizzaToppingsThroughModel(OrderedModel):
pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)
topping = models.ForeignKey(Topping, on_delete=models.CASCADE)
order_with_respect_to = "pizza"
class Meta:
ordering = ("pizza", "order")
# Admin only allows each model class to be registered once. However you can register a proxy class,
# and (for presentation purposes only) rename it to match the existing in Admin
class PizzaProxy(Pizza):
class Meta:
proxy = True
verbose_name = "Pizza"
verbose_name_plural = "Pizzas"
# test many-one where the item has custom PK
class CustomPKGroup(models.Model):
name = models.CharField(max_length=100)
class CustomPKGroupItem(OrderedModel):
group = models.ForeignKey(CustomPKGroup, on_delete=models.CASCADE)
name = models.CharField(max_length=100, primary_key=True)
order_with_respect_to = "group"
# test ordering on a base class (with order_class_path)
# ie. OpenQuestion and GroupedItem can be ordered wrt each other
class BaseQuestion(OrderedModel):
order_class_path = __module__ + ".BaseQuestion"
question = models.TextField(max_length=100)
class Meta:
ordering = ("order",)
class MultipleChoiceQuestion(BaseQuestion):
good_answer = models.TextField(max_length=100)
wrong_answer1 = models.TextField(max_length=100)
wrong_answer2 = models.TextField(max_length=100)
wrong_answer3 = models.TextField(max_length=100)
class OpenQuestion(BaseQuestion):
answer = models.TextField(max_length=100)
# test grouping by a foreign model field (group__user)
class ItemGroup(models.Model):
user = models.ForeignKey(
TestUser, on_delete=models.CASCADE, related_name="item_groups"
)
class GroupedItem(OrderedModel):
group = models.ForeignKey(ItemGroup, on_delete=models.CASCADE, related_name="items")
order_with_respect_to = "group__user"
class CascadedParentModel(models.Model):
pass
class CascadedOrderedModel(OrderedModel):
parent = models.ForeignKey(to=CascadedParentModel, on_delete=models.CASCADE)
| true |
2f893c54567519c4ef493bb43c5c0e76db5c9427 | Python | prayushtatke/python | /common_utils/utils/alerts.py | UTF-8 | 3,073 | 2.78125 | 3 | [] | no_license | from datetime import datetime
from os import getenv
from common_utils.utils.commons import is_not_empty
from json import dumps
from common_utils.utils.logger import Logger
ENABLE_ALERTS_HANDLING = getenv('ENABLE_ALERTS_HANDLING', False)
class AlertsHandler:
"""
Usage:
from utils.alerts import AlertsHandler
ah = AlertsHandler(stream_name=<name>, stream_client=<Firehose Client>)
ah.handle(err_code=<code>, err_msg=<msg>, exception=<>, kwarg1=val1, kwarg2=val2)
"""
logger = Logger('AlertsHandler')
def __init__(self,stream_name=None, stream_client=None):
assert is_not_empty(stream_name), \
"Firehose delivery stream name cannot be empty."
assert is_not_empty(stream_client), \
"Stream Client not provided."
self.stream_name = stream_name
self.stream_client = stream_client
def handle(self, err_code=None, err_msg=None, exception=None, **kwargs ):
"""
This function takes either (err_code,err_msg) or exception as argument
It creates a payload in the form of dict:
{ 'err_code' : <>,
'err_msg' : <>,
'err_ts': <>,
// appends other named arguments.
}
this way application can handle what fields are required to be part of payload.
A json string is created out of this payload and sent to firehose.
Note: if ENABLE_ALERTS_HANDLING is false, the function call will be no-op.
:param err_code:
:param err_msg:
:param exception:
:param kwargs:
:return:
"""
if not ENABLE_ALERTS_HANDLING:
return
if exception:
self.__handle_excptn(exception, **kwargs)
elif err_code and err_msg:
self.__handle_err(err_code, err_msg, **kwargs)
def __handle_err(self, err_code, err_msg, **kwargs):
if not ENABLE_ALERTS_HANDLING:
return
assert is_not_empty(err_code) and is_not_empty(err_msg), \
"'err_code', 'err_msg' can not be empty."
alert_payload = self.create_alert_payload(err_code, err_msg, **kwargs)
alert_payload_str = dumps(alert_payload) + '\n'
self.stream_client.write_record(stream_name=self.stream_name, record=alert_payload_str.encode())
def __handle_excptn(self, exception, **kwargs):
excptn_name = type(exception).__name__
if excptn_name in ['InvalidInputException', 'UDMException', 'UnknownAppException']:
return self.__handle_err(err_code=exception.err_code.value, err_msg=exception.err_msg, **kwargs)
else:
return self.__handle_err(err_code=excptn_name, err_msg=str(exception), **kwargs)
@classmethod
def create_alert_payload(cls, err_code, err_msg, **kwargs):
utc_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
alert_payload = {
'err_code': err_code,
'err_msg': err_msg,
'err_ts': utc_time
}
alert_payload.update(kwargs)
return alert_payload
| true |
c3fc86ed84f500af2c2414a60447a219ce84bbb3 | Python | madman-bob/python-custom-imports | /custom_imports/file_module/ext_finder.py | UTF-8 | 1,288 | 2.8125 | 3 | [
"MIT"
] | permissive | import sys
from dataclasses import dataclass
from pathlib import Path
from types import ModuleType
from typing import Iterable, Optional
from custom_imports.importer import Finder
__all__ = ["FileModuleExtensionFinder"]
@dataclass(frozen=True)
class FileModuleExtensionFinder(Finder[Path]):
"""
Finder for file based modules by file extensions.
FileModuleExtensionFinder(ext)
This Finder interprets a module's name as a filename, with extension ext.
Parent modules are interpreted as directories.
This provides a relative path, which is searched for on the standard module
search path. If a file with that relative path is found, then the absolute
Path of that file is returned as its module locator.
"""
extension: str
def find_path(self, fullname: str, search_paths: Iterable[str]) -> Optional[Path]:
rel_file_path = Path(fullname.replace(".", "/") + "." + self.extension)
for path in search_paths:
abs_file_path = path / rel_file_path
if abs_file_path.is_file():
return abs_file_path
def find_module_locator(
self, fullname: str, path: Iterable[str], target: Optional[ModuleType] = None
) -> Optional[Path]:
return self.find_path(fullname, sys.path)
| true |
2511dbeba80d9772c3908ec9f4bc8ad8b687fa6b | Python | vinodbellamkonda06/myPython | /project/project@2.3.py | UTF-8 | 383 | 3.21875 | 3 | [] | no_license | print("largest palindrome number multiple of two 3 digit prime numbers")
pal=[]
for num in range(900,1000):
for num1 in range(900,1000):
k=num*num1
if k>10:
kstring=str(k)
if (kstring==kstring[::-1]):
h = num * num1
pal.append(h)
print(pal)
print(max(list(set(pal))))
#u=sorted(list(set(pal)))
#print(u[-1]) | true |
822687dbf6ad285f5baeed123f1301c9dd51232f | Python | kousikr26/tgdh | /graph.py | UTF-8 | 2,103 | 2.984375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline, BSpline
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline
from scipy import arange, array, exp
import pickle
# 300 represents number of points to make between T.min and T.max
file=open("insertion_time_gdh","rb")
data=pickle.load(file)
file.close()
x,y=data
extrap = 2500
xnew = np.linspace(min(x), max(x), 30)
# y_smooth = f(xnew)
spl = make_interp_spline(x, y, k=1) # type: BSpline
y_smooth = spl(xnew)
xnewnew = np.linspace(min(x), max(x)+extrap, 30)
f = InterpolatedUnivariateSpline(xnew,y_smooth,k=1)
plt.plot(xnewnew,f(xnewnew),label="GDH")
# plt.plot(x,y)
plt.xlabel("Number of users")
plt.ylabel("Time for insertion(sec)")
file=open("insertion_time","rb")
data=pickle.load(file)
file.close()
x,y=data
lastx = x[-1]
lasty = y[-1]
x += [i for i in range(lastx+5, lastx+extrap,5)]
y+= [lasty for i in range(extrap//5 - 1)]
xnew = np.linspace(min(x), max(x), 300)
spl = make_interp_spline(x, y, k=3) # type: BSpline
y_smooth = spl(xnew)
plt.plot(xnew, y_smooth,label="TGDH")
plt.legend()
plt.savefig("./insertion_time_plot.png")
plt.show()
file=open("insertion_time_gdh","rb")
data=pickle.load(file)
file.close()
x,y=data
extrap = 2500
xnew = np.linspace(min(x), max(x), 30)
# y_smooth = f(xnew)
spl = make_interp_spline(x, y, k=1) # type: BSpline
y_smooth = spl(xnew)
xnewnew = np.linspace(min(x), max(x)+extrap, 15)
f = InterpolatedUnivariateSpline(xnew,y_smooth,k=1)
plt.plot(xnewnew,f(xnewnew),label="GDH")
# plt.plot(x,y)
plt.xlabel("Number of users")
plt.ylabel("Time for insertion(sec)")
file=open("deletion_time","rb")
data=pickle.load(file)
file.close()
x,y=data
x=x[::-1]
y=y[::-1]
lastx = x[-1]
lasty = y[-1]
print(x)
x += [i for i in range(lastx+5, lastx+extrap,5)]
y+= [lasty for i in range(extrap//5 - 1)]
xnew = np.linspace(min(x), max(x), 300)
print(x)
spl = make_interp_spline(x, y, k=3) # type: BSpline
y_smooth = spl(xnew)
plt.plot(xnew, y_smooth,label="TGDH")
plt.legend()
plt.savefig("./deletion_time_plot.png")
plt.show() | true |
227acff9099d6e655732442ffbd6725bb136d0fe | Python | WaiLynnZaw/project_euler_python | /Problem15.py | UTF-8 | 288 | 3.84375 | 4 | [] | no_license | def number_combinations(n, k):
numberOfcombinations = factorial(n) / (factorial(k) * factorial(n - k))
return numberOfcombinations
def factorial(n):
    """Return n! for positive n (and 1 for n <= 1, matching the original loop)."""
    product = 1
    for term in range(2, n + 1):
        product *= term
    return product
print(number_combinations(40,20))
| true |
5ebcf38799522df125d5172de91190fb35699b07 | Python | wuxu1019/leetcode_sophia | /medium/twodimensionalarray/test_79_Word_Search.py | UTF-8 | 2,601 | 3.859375 | 4 | [] | no_license | """
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
Example:
board =
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
Given word = "ABCCED", return true.
Given word = "SEE", return true.
Given word = "ABCB", return false.
"""
class Solution(object):
    """Word Search (LeetCode 79): DFS from every cell matching the first letter.

    Two variants are kept: one with an explicit `visited` matrix (O(R*C) extra
    space) and one that marks cells in the board itself (O(1) extra space).
    Fixes vs. the original: a stray bare `x` statement raised NameError at
    call time, an empty board crashed with IndexError, and the in-place
    variant left a '#' in the board when the word was found.
    """

    def exist_n2_space(self, board, word):
        """Return True if `word` can be traced through 4-adjacent cells,
        using each cell at most once.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not word:
            return True
        if not board or not board[0]:
            return False
        R, C = len(board), len(board[0])
        visited = [[False] * C for _ in range(R)]

        def dfs(i, j, p):
            # reject cells already on the current path or with the wrong letter
            if visited[i][j] or board[i][j] != word[p]:
                return False
            if p == len(word) - 1:
                return True
            visited[i][j] = True
            for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                ni, nj = i + di, j + dj
                if 0 <= ni < R and 0 <= nj < C and dfs(ni, nj, p + 1):
                    return True
            visited[i][j] = False  # backtrack
            return False

        return any(
            dfs(i, j, 0)
            for i in range(R)
            for j in range(C)
            if board[i][j] == word[0]
        )

    def exist_no_space(self, board, word):
        """Same search, but marks visited cells in place with '#'.

        The board is always restored before returning (the original left the
        '#' markers behind on the success path).

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not word:
            return True
        if not board or not board[0]:
            return False
        R, C = len(board), len(board[0])

        def dfs(i, j, p):
            if board[i][j] != word[p]:
                return False
            if p == len(word) - 1:
                return True
            saved = board[i][j]
            board[i][j] = '#'  # '#' can never match a letter of `word`
            found = False
            for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                ni, nj = i + di, j + dj
                if 0 <= ni < R and 0 <= nj < C and dfs(ni, nj, p + 1):
                    found = True
                    break
            board[i][j] = saved  # restore before unwinding
            return found

        return any(
            dfs(i, j, 0)
            for i in range(R)
            for j in range(C)
            if board[i][j] == word[0]
        )
76b1a50305163aa560091b358e92250c58be4383 | Python | VBakhila/project-euler-python | /043.py | UTF-8 | 491 | 3.265625 | 3 | [] | no_license | from lib import num
def gen(nums, primes, pos=0, prefix=0):
    """Yield every arrangement of the digits in `nums` (appended to `prefix`)
    that satisfies the Project Euler 43 divisibility property.

    `pos` digits have been placed so far; once at least 4 are placed, the
    3-digit window ending at the current digit (prefix % 1000) must be
    divisible by primes[pos - 4], which prunes the search drastically.
    """
    if pos >= 4 and prefix % 10 ** 3 % primes[pos - 4] != 0:
        return
    if not nums:
        yield prefix
        return
    # Iterate over a snapshot: the original looped over `nums` itself while
    # removing/re-adding elements, which only avoids a RuntimeError by a
    # CPython implementation accident.
    for n in tuple(nums):
        nums.remove(n)
        yield from gen(nums, primes, pos + 1, 10 * prefix + n)
        nums.add(n)
def main():
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    # trick: generate all permutations of 0..9 by extending valid prefixes
    divisors = (2, 3, 5, 7, 11, 13, 17)
    return sum(gen(set(range(10)), divisors))


if __name__ == '__main__':
    print(main())  # 16695334890
| true |
f62c001f0f9626e0c8f151f9c348b76e248334d9 | Python | tadvepio/Object-Oriented-Programming | /Exercise8/Codefiles/flat.py | UTF-8 | 1,311 | 3.421875 | 3 | [] | no_license | # OOP exercise 8
# Author: Tapio Koskinen
# 8.3.2021
class Flat:
    """State of a flat's chores: each area starts in its worst state and is
    updated through the set_*/get_* accessor pairs."""
    def __init__(self):
        # initial (worst-case) state for every area of the flat
        self.__floors = "Dirty"
        self.__windows = "Dirty"
        self.__surfaces = "Dirty"
        self.__bed = "Unmade"
        self.__toilet_paper = "Running out"
        self.__fridge = "Empty"
    # --- accessor pairs: one setter/getter per area ---
    def set_floors(self,state):
        self.__floors = state
    def get_floors(self):
        return self.__floors
    def set_windows(self,state):
        self.__windows = state
    def get_windows(self):
        return self.__windows
    # note the singular method name: set_surface updates __surfaces
    def set_surface(self,state):
        self.__surfaces = state
    def get_surface(self):
        return self.__surfaces
    def set_bed(self,state):
        self.__bed = state
    def get_bed(self):
        return self.__bed
    def set_toilet_paper(self,state):
        self.__toilet_paper = state
    def get_toilet_paper(self):
        return self.__toilet_paper
    def set_fridge(self,state):
        self.__fridge = state
    def get_fridge(self):
        return self.__fridge
def __str__(self):
return f"Floors: {self.get_floors()}\
\nWindows: {self.get_windows()}\
\nSurfaces: {self.get_surface()}\
\nBed: {self.get_bed()}\
\nToilet paper: {self.get_toilet_paper()}\
\nFridge: {self.get_fridge()}" | true |
64fd738e69b94071a3469065bc21815bffe16095 | Python | Javed-Akhtar/nlp-dl-prework | /The-Lego-Collector's-Dilemma/code.py | UTF-8 | 1,997 | 3.421875 | 3 | [
"MIT"
] | permissive | # --------------
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
# code starts here
# NOTE(review): `path` is not defined in this file -- presumably injected by
# the hosting platform before this block runs; confirm.
#Reading dataset
df = pd.read_csv(path)
#display
print(df.head())
#Storing all independent features in X
X = df.copy()
X.drop(['list_price'],axis=1,inplace=True)
#Storing target variable in y
y = df['list_price']
#Split dataframe into X_train,X_test,y_train,y_test
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)
#print(X_train)
#print(X_test)
#print(y_train)
#print(y_test)
# code ends here
# --------------
import matplotlib.pyplot as plt
# code starts here
# Visual sanity check: scatter each of the first nine features against price.
#Storing all The X_train columns in cols
cols = X_train.columns
#Create 3*3 subplot
fig,axes = plt.subplots(nrows=3,ncols=3)
#To subplot scatter plots for loops are used
for i in range(3):
    for j in range(3):
        col=cols[i*3+j]
        axes[i][j].scatter(X[col],y)
# code ends here
# --------------
# Reduce feature redundancies
#Creating correlation table of X_train
corr = X_train.corr()
print(corr)
#dropping columns from X_train and X_test which are having a correlation higher than (+/-)0.75
# NOTE(review): 'val_star_rating' appears twice in this drop list.
X_train.drop(['play_star_rating','val_star_rating','val_star_rating'],axis=1,inplace=True)
X_test.drop(['play_star_rating','val_star_rating','val_star_rating'],axis=1,inplace=True)
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
#Price Prediction
#Instantiate linear regression model
regressor = LinearRegression()
#Fitting model on the X_train and y_train
regressor.fit(X_train,y_train)
#Making predictions on the X_test
y_pred = regressor.predict(X_test)
#Finding MSE
mse = mean_squared_error(y_test,y_pred)
print(mse)
#Finding r^2 score
r2 = r2_score(y_test,y_pred)
print(r2)
# --------------
#Residual Check
#calculate residual
residual = y_test - y_pred
#print(residual)
#Histogram making
plt.hist(residual)
#display histogram
plt.show()
| true |
f076019cab85c1d7f8729a6eb7ef3276ce97155d | Python | LimonJohn/pythonProjectED | /Exercises_python/Phase_1/3.7.py | UTF-8 | 155 | 3.515625 | 4 | [] | no_license | # 3.7 Задача «Стоимость покупки»
# Read a unit price of `a` whole units and `b` subunits (presumably
# rubles/kopecks given the task title), and a quantity `n`.
a = int(input())
b = int(input())
n = int(input())
# Total cost in subunits (100 subunits = 1 whole unit).
s = (a * 100 + b) * n
# Print as "<whole units> <subunits>".
print(s // 100, s % 100)
| true |
8dea31956039f1c8903fea655df716a9eab69445 | Python | alysivji/talks | /data-science-workflows-using-docker-containers/workflow3-data-driven-app/plot_timeseries.py | UTF-8 | 1,922 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''Create live updating graph
'''
# standard library
from collections import namedtuple
import io
import os
# plot.ly modules
import dash
from dash.dependencies import Output, Event
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
# Container pairing the x and y series extracted from the data file.
TwoDPlot = namedtuple('TwoDPlot', ['x', 'y'])
def tail_file(filename, nlines):
    '''Return last nlines of file
    Adapted from https://gist.github.com/amitsaha/5990310

    Scans backwards one character at a time counting newlines (the file's
    final newline is ignored); once nlines have been seen, the file pointer
    sits just past that newline, so read() yields exactly the last nlines
    lines. NOTE(review): seeking to arbitrary offsets in text mode assumes a
    single-byte encoding -- confirm for the data files used here.
    '''
    with open(filename) as qfile:
        qfile.seek(0, os.SEEK_END)
        endf = position = qfile.tell()
        linecnt = 0
        while position >= 0:
            qfile.seek(position)
            next_char = qfile.read(1)
            # count newlines, but not the one terminating the file
            if next_char == "\n" and position != endf-1:
                linecnt += 1
            if linecnt == nlines:
                break
            position -= 1
        if position < 0:
            # fewer than nlines lines in the file: rewind and return it all
            qfile.seek(0)
        return qfile.read()
def get_data(filename, nlines=20):
    '''Parse the last `nlines` of a "x, y" data file into a TwoDPlot.'''
    raw = tail_file(filename, nlines)
    xs = []
    ys = []
    with io.StringIO(raw) as buffer:
        for row in buffer:
            fields = row.strip().split(', ')
            xs.append(fields[0])
            ys.append(fields[1])
    return TwoDPlot(xs, ys)
# Dash app: a single live-updating graph plus a 1-second interval timer that
# drives the update callback below.
app = dash.Dash()
app.layout = html.Div([
    dcc.Graph(
        id='live-graph',
    ),
    dcc.Interval(
        id='interval-component',
        interval=1*1000 # in milliseconds
    )
])
# Re-plot the tail of data/data.csv every time the interval fires.
@app.callback(Output('live-graph', 'figure'),
              events=[Event('interval-component', 'interval')])
def update_graph():
    """Build a fresh Plotly figure from the last 200 lines of the data file."""
    result = get_data('data/data.csv', nlines=200)
    return {
        'data': [
            go.Scatter(
                x=result.x,
                y=result.y
            )
        ]
    }
if __name__ == '__main__':
    # 0.0.0.0 so the dev server is reachable from other hosts/containers
    app.run_server(host='0.0.0.0')
| true |
3c2543578592eb60a4002907c3c7820875aa10ed | Python | videogamerm/mini.games | /riskTakr/main2.py | UTF-8 | 1,167 | 3.40625 | 3 | [
"CC0-1.0",
"MIT"
] | permissive | import random
import time
import os
def printnm():
    """Run the external timer binary and greet the player by their stored name."""
    os.system("./Time.out")
    # 'with' guarantees the handle is closed (the original leaked it)
    with open("name.txt", "r") as file:
        print("Hello " + file.read())
# Greet the player, then ask whether to start a game.
printnm()
play = input("Play y/n ")
def newround():
    """Play one betting round: three hidden numbers, wager on closeness to box 2.

    Fixes vs. the original: `diff in range(10, -10)` is always False because
    that range is empty (so every bet lost), and the losing message crashed
    concatenating a float to a string.
    """
    os.system("./main.out")
    # hidden numbers for boxes 1-3, and the box the player must bet on
    n = random.randint(1, 100)
    n1 = random.randint(1, 100)
    n2 = random.randint(1, 100)
    n3 = random.randint(1, 3)
    m = 250
    m1 = m / 5  # stake: one fifth of the (round-local) bankroll
    bet = input("Do you want to bet " + str(m1) + " Coins that box " + str(n3) + " has a number inside that is at most 10 away from box 2? y/n ")
    if bet == "y":
        # NOTE(review): comparisons kept as the original wrote them: box 1 is
        # checked with n-n1, while boxes 2 and 3 both compare n1-n2 -- confirm.
        if n3 == 1 and abs(n - n1) <= 10:
            m += m1
            print("Good Job")
        elif n3 == 2 and abs(n1 - n2) <= 10:
            m += m1
            print("Good Job")
        elif n3 == 3 and abs(n1 - n2) <= 10:
            m += m1
            print("Good Job")
        else:
            m -= m1
            print("You lost " + str(m1) + "coins")
    # NOTE(review): `m` is local, so winnings never persist between rounds --
    # preserved as-is because persisting it would change the interface.
# Main entry: ask how many rounds to play, then run them.
if play == "y":
    m3=input("1(a),5(b),or 10(c) rounds")
    if m3 == "a":
        nrds = 1
    if m3 == "b":
        nrds = 5
    if m3 == "c":
        nrds = 10
    # NOTE(review): any answer other than a/b/c leaves `nrds` unassigned and
    # the while below raises NameError -- add a default or re-prompt.
    while nrds > 0:
        newround()
        nrds -=1
| true |
44364313190f32e076fd899ceabb9f0072b4b196 | Python | EBISPOT/ICBO2017 | /tools/simple_zooma_query.py | UTF-8 | 5,332 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | """
Takes a tsv file as input, with terms to be mapped in the first column
Generates a file called mapped_terms.tsv that has in the first column the term
that was mapped, and in the second column the ontology term mappings that Zooma found.
You can specify:
-confidence
HIGH, GOOD, or MEDIUM
will restrict the mappings with the desired confidence found in Zooma
if the confidence isn't met, the term is left unmapped
default confidence is HIGH
-type
you can specify a type for the terms that are searched
and zooma will try and find a mapping under that category
e.g. -type: 'organism part' for searching the term 'liver'
-ontologies
the ontologies that the search will be restriced to if it hits OLS
enter as many as you want, but comma separated
e.g. -ontologies efo,uberon,omim
can be: -ontologies none - and no ontologies will be searched
(it will hit OLS if no mapping is found in the datasources or
if we specify filter=required:[none], i.e. ignore the datasources)
-datasources
the datasources we want to restrict the search to
enter comma separated like ontologies above
can also be none
"""
import requests
import argparse
import urllib
import csv
def main():
    """Read terms from a TSV (first column), query Zooma for unmapped ones and
    write mapped_terms.tsv. Python 2 source (print statements)."""
    parser = argparse.ArgumentParser()
    # parser.add_argument('-value', help='zooma propertyValue to search for')
    parser.add_argument("-f", help='file with list of terms to be mapped', required = True)
    parser.add_argument('-type', help='zooma propertyType to restrict the proprtyValue search to')
    parser.add_argument('-ontologies', help='ontologies to restrict search to, comma separated')
    parser.add_argument('-datasources', help='datasources to restrict search to, comma separated')
    parser.add_argument('-confidence', help='can be: HIGH, GOOD, MEDIUM, GOOD|MEDIUM')
    parser.add_argument('-tutorial', help='for the icbo tutorial, query all datasources except gwas')
    args = parser.parse_args()
    if args.tutorial is not None:
        # tutorial mode: fixed datasource list (everything except gwas)
        datasources = "cbi, eva, sysmicro, atlas, uniprot, ebisc, clinvar-xrefs, cttv"
        args.datasources = datasources
    confidence = get_confidence(args.confidence)
    with open(args.f, 'r') as csvfile:
        with open('mapped_terms.tsv', 'w') as mapped:
            # reader =
            for line in csv.reader(csvfile.read().splitlines(), delimiter='\t'):
                value = line[0]
                semantic_tags = None
                # only query Zooma for rows without an existing mapping column
                if len(line) < 2:
                    try:
                        semantic_tags = get_semantic_tags_for_high_confidence(value, args.ontologies, args.datasources, confidence, args.type)
                        print "Queried zooma for value:", value
                    except ValueError:
                        print "Could not query zooma for value:", value
                if semantic_tags is not None:
                    for st in semantic_tags:
                        stid = st.split("/")
                        # columns: term, full tag IRI, confidence, IRI's last path segment
                        mapped.write(value + "\t" + st + "\t" + confidence + "\t" + stid[len(stid) - 1] + "\n")
                else:
                    # NOTE(review): rows that already had a mapping also land
                    # here and are written with their first column only, i.e.
                    # their existing mapping is dropped -- confirm.
                    mapped.write(value + "\n")
            else:
                # NOTE(review): this is a for/else, so it runs exactly once
                # after the loop and rewrites only the *last* row tab-joined.
                # It was probably meant as the else of `if len(line) < 2` --
                # confirm the intended indentation.
                mapped.write(str(line).replace("'","").replace("[","").replace("]","").replace(",","\t") + "\n")
def get_confidence(confidence):
    """Validate the -confidence flag: default to HIGH when absent, pass
    HIGH/GOOD/MEDIUM through, and exit(1) on anything else."""
    if confidence is None:
        return "HIGH"
    elif confidence == "GOOD" or confidence == "MEDIUM" or confidence == "HIGH":
        return confidence
    else:
        print "Wrong confidence input"
        exit(1)
def get_semantic_tags_for_high_confidence(value, ontologies, datasources, confidence, value_type):
    """Query the Zooma annotate endpoint for `value` and return the semantic
    tags of a mapping at the requested confidence.

    `ontologies`/`datasources` are comma-separated strings used to build the
    optional `filter=` query parameter; `value_type` is currently unused (see
    the commented-out propertyType code below).
    """
    url_base = "http://www.ebi.ac.uk/spot/zooma/v2/api/services/annotate?"
    params = {'propertyValue' : value}
    datafilter = None
    restrict_to_datasources = []
    if datasources is not None:
        for dsource in datasources.split(","):
            restrict_to_datasources.append(dsource)
        # "required:[a,b,...]" -- str() of the list with quotes/spaces stripped
        datafilter = "required:{}".format(str(restrict_to_datasources).replace("'","").replace(" ",""))
    restrict_to_ontologies = []
    if ontologies is not None:
        for ontology in ontologies.split(","):
            restrict_to_ontologies.append(ontology)
        if datafilter is not None:
            datafilter = "{},{}".format(datafilter, "ontologies:{}".format(str(restrict_to_ontologies).replace("'","").replace(" ","")))
        else:
            datafilter = "ontologies:{}".format(str(restrict_to_ontologies).replace("'","").replace(" ",""))
    # if value_type is not None:
    # params['propertyType'] = value_type
    # url = "{}propertyValue={}".format(url_base, value)
    url = "{}{}".format(url_base, urllib.urlencode(params))
    if datafilter is not None:
        url = "{}&filter={}".format(url, datafilter)
    response = requests.get(url)
    reply = response.json()
    for mapping in reply:
        # NOTE(review): `st` is reset to None on every iteration, so the
        # append branch below is unreachable and only the *last* matching
        # mapping's tags survive; `st = None` was probably meant to sit before
        # the loop. Also, `return st` raises NameError when `reply` is empty.
        st = None
        if mapping['confidence'] == confidence:
            if st is None:
                st = mapping['semanticTags']
            else:
                st.append(mapping['semanticTags'])
    return st
if __name__ == '__main__':
main()
| true |
ab0b2c5b6e413fd9aab3550f2fea60346d49f39b | Python | 44taka/matching | /batch/app/util.py | UTF-8 | 664 | 3.0625 | 3 | [] | no_license | import datetime
import slackweb
import config
def now():
    """
    Get the current local time.
    :return: the current datetime (naive, local timezone)
    """
    return datetime.datetime.now()
def notify_slack(message=None):
    """
    Send a notification to Slack via an incoming webhook.
    url: https://github.com/satoshi03/slack-python-webhook
    :param message: message text; nothing is sent when None
    :return: None
    """
    if message is not None:
        # webhook URL comes from the project-level config module
        slack = slackweb.Slack(url=config.SLACK["url"])
        slack.notify(text=message)
def num2alpha(num):
    """Convert a positive integer to spreadsheet-style letters (1->A, 26->Z, 27->AA)."""
    if num <= 26:
        return chr(64 + num)
    quotient, remainder = divmod(num, 26)
    if remainder == 0:
        # exact multiples of 26 end in 'Z' and borrow one from the next digit
        return num2alpha(quotient - 1) + chr(90)
    return num2alpha(quotient) + chr(64 + remainder)
| true |
e5a858d44367f6eff642b4726cac5636df5c2a7d | Python | ak-lucas/trabalho1-IA | /regressao_logistica_regularizado.py | UTF-8 | 3,758 | 3.359375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import csv
from scipy.special import expit
# Hyperparameters:
# -Lambda: fator de regularização
# -learning_rate: taxa de aprendizado
# -epochs: número de iterações
class RegularizedLogisticRegression():
    """L2-regularised logistic regression trained with batch gradient descent.

    Python 2 source (print statements, xrange). Weights are a column vector
    theta_n plus a separate bias theta_0; Lambda is the regularisation factor.
    """
    def __init__(self):
        self.theta_n = []
        self.theta_0 = 0.
        self.loss = []
        self.train_error = []
        self.val_error = []
    def load_dataset(self, filename, header=True):
        """Load a CSV whose last column is the 0/1 label; returns (X, Y)."""
        X = []
        Y = []
        with open(filename, 'r') as f:
            reader = csv.reader(f)
            if header:
                next(reader)
            for r in reader:
                x = r[:-1]
                X.append([float(a) for a in x])
                Y.append(int(r[-1]))
        X = np.array(X)
        Y = np.array(Y)
        return X, Y
    def sigmoid(self, x):
        # expit is SciPy's numerically stable 1/(1+e^-x)
        #return (1/(1+np.exp(-x)))
        return expit(x)
    # initialise weights with samples from the standard normal distribution
    def init_weights(self, dim):
        return np.random.randn(dim).reshape(dim,1)
    # cost function: cross-entropy plus the L2 penalty term
    def loss_function(self, Y, sigmoid_z, Lambda, m):
        # clip probabilities to avoid log(0)
        eps = 1e-15
        sigmoid_z = np.clip(sigmoid_z, eps, 1 - eps)
        loss = -np.sum(np.multiply(Y,np.log(sigmoid_z)) + np.multiply(1-Y,np.log(1-sigmoid_z)))/m + np.multiply(np.sum(np.power(self.theta_n,2)), Lambda)/(2*m)
        return loss
    def prints(self, epoch):
        # debug output for one epoch (Python 2 print statements)
        print "--epoca %s: " % epoch
        print "loss: ", self.loss[epoch]
        print "theta: ", self.theta_0.reshape(self.theta_0.shape[0]), self.theta_n.reshape(self.theta_n.shape[0])
    def gradient_descent(self, epochs, X, Y, X_val, Y_val, Lambda, learning_rate, m, print_results):
        """Run `epochs` full-batch updates; here X is (n features, m samples).

        Tracks loss plus train/validation binary error per epoch.
        """
        self.train_error = []
        self.val_error = []
        for i in xrange(epochs):
            # forward pass
            Z = np.dot(self.theta_n.T, X) + self.theta_0
            # gradients
            sigmoid_z = self.sigmoid(Z) # activation function
            gZ = sigmoid_z - Y
            gTheta_n = np.dot(X, gZ.T)/m
            gTheta_0 = np.sum(gZ)/m
            # cost
            loss = self.loss_function(Y, sigmoid_z, Lambda, m)
            self.loss.append(loss)
            # weight update; theta_n also shrinks by the L2 weight-decay factor
            self.theta_0 -= learning_rate*gTheta_0
            self.theta_n = self.theta_n*(1.-(float(learning_rate*Lambda)/m)) - learning_rate*gTheta_n
            if print_results:
                self.prints(i)
            self.train_error.append(self.binary_error(X.T,Y))
            self.val_error.append(self.binary_error(X_val,Y_val))
        # final cost after the last update
        Z = np.dot(self.theta_n.T, X) + self.theta_0
        # activation function
        sigmoid_z = self.sigmoid(Z)
        loss = self.loss_function(Y, sigmoid_z, Lambda, m)
        self.train_error.append(self.binary_error(X.T,Y))
        self.val_error.append(self.binary_error(X_val,Y_val))
        self.loss.append(loss)
    def fit(self, X, Y, X_val=[], Y_val=[], epochs=3, learning_rate=0.01, Lambda=0.001, print_results=False):
        """Train on X (m samples, n features) with labels Y; returns self.

        NOTE(review): the mutable default arguments X_val=[]/Y_val=[] are
        shared across calls -- harmless here only because they are never
        mutated.
        """
        self.loss = []
        # data dimensions
        m = X.shape[0]
        n = X.shape[1]
        # small random initialisation
        self.theta_n = self.init_weights(n)*0.01
        self.theta_0 = self.init_weights(1)*0.01
        X = X.T
        Y = Y.reshape(1,m)
        self.gradient_descent(epochs, X, Y, X_val, Y_val, float(Lambda), float(learning_rate), float(m), print_results)
        return self
    def accuracy_score(self, X, Y):
        """Fraction of correct 0/1 predictions on X (m samples, n features)."""
        m = X.shape[0]
        Y_pred = self.predict(X)
        # (number of examples minus number of errors) / number of examples
        accuracy = float(m - np.sum(np.logical_xor(Y_pred, Y)))/m
        return accuracy
    def binary_error(self, X, Y):
        return 1 - self.accuracy_score(X,Y)
    def predict(self, X):
        """Predict 0/1 labels for X (m samples, n features)."""
        X = X.T
        Z = np.dot(self.theta_n.T, X) + self.theta_0
        sigmoid_z = self.sigmoid(Z) # activation function
        # class 1 when the predicted probability is >= 0.5
        Y_predict = np.greater_equal(sigmoid_z, 0.5)
        return Y_predict.astype(int).flatten()
| true |
4da74b20cf22a43ec59d27d80c68b455913a919a | Python | vaideheebhise/100DaysOfCode-1 | /Day026/Exercises/ex03.py | UTF-8 | 212 | 3.421875 | 3 | [] | no_license | with open('file1.txt') as file:
numbers1 = file.readlines()
with open('file2.txt') as file:
    numbers2 = file.readlines()
# Keep the entries of file1 that also appear in file2.
# NOTE(review): lines are compared raw (including the trailing "\n"), so a
# final line without a newline may fail to match -- confirm the file format.
result = [int(number) for number in numbers1 if number in numbers2]
print(result)
| true |
10520aa4e78499ffe3a9f35564982a28473d4550 | Python | yerassyldanay/leetcode | /pyland/solutions/word_and_pattern.py | UTF-8 | 1,096 | 3.359375 | 3 | [] | no_license | from typing import List
class Solution:
    """Find-and-Replace Pattern, generator variant (debug prints removed)."""

    def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
        """Yield the words whose letter structure matches `pattern`.

        Implemented as a generator, so callers must materialise the result
        (e.g. with list(...)) despite the List[str] annotation.
        """
        def convert(word: str) -> List[int]:
            # canonical form: first-seen index of each letter, e.g. "mee" -> [0, 1, 1]
            seen = {}
            return [seen.setdefault(letter, len(seen)) for letter in word]

        target = convert(pattern)
        for word in words:
            if convert(word) == target:
                yield word
class Solution2:
    """Find-and-Replace Pattern, eager list variant (debug print removed)."""

    def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
        """Return the words whose letter structure matches `pattern`."""
        target = self.create_pattern(pattern)
        return [word for word in words if target == self.create_pattern(word)]

    def create_pattern(self, word) -> list:
        """Map a word to its canonical first-occurrence-index pattern."""
        d = {}
        return [d.setdefault(letter, len(d)) for letter in word]
# Quick manual check: "aaabcc" structure matches the first two words.
s = Solution2()
a = s.findAndReplacePattern(["mmmeqq", "qqqlrr", "aaaaajeeeee"], "aaabcc")
# Solution2 already returns a list; list() also covers the generator variant.
print(list(a))
| true |
2754bba0be4d841e9b71906a1f4af446eb6e05e8 | Python | PREMIEREHELL/Amulet-NBT | /amulet_nbt/amulet_nbt_py/nbt_types/numeric.py | UTF-8 | 4,329 | 2.65625 | 3 | [] | no_license | from __future__ import annotations
from abc import ABC
from typing import (
ClassVar,
BinaryIO,
Union,
)
from struct import Struct
import numpy as np
from amulet_nbt.amulet_nbt_py.const import SNBTType
from .value import BaseImmutableTag
class BaseNumericTag(BaseImmutableTag, ABC):
    """Abstract base for the scalar numeric NBT tags.

    Provides binary (de)serialisation through the struct formats below and a
    full set of arithmetic operators that unwrap to plain Python numbers.
    """
    # the wrapped scalar value
    _value: np.number
    _data_type: ClassVar = np.number
    # big-/little-endian struct formats; None here -- presumably overridden by
    # each concrete tag subclass (TODO confirm)
    tag_format_be: ClassVar[Struct] = None
    tag_format_le: ClassVar[Struct] = None
    # format string used to render the SNBT text form of the value
    fstring: str = None
    def __init__(
        self, value: Union[int, float, np.number, BaseNumericTag, None] = None
    ):
        super().__init__(value)
    @classmethod
    def load_from(cls, context: BinaryIO, little_endian: bool):
        """Read one value of this tag's size from `context` and wrap it."""
        if little_endian:
            data = context.read(cls.tag_format_le.size)
            tag = cls(cls.tag_format_le.unpack_from(data)[0])
        else:
            data = context.read(cls.tag_format_be.size)
            tag = cls(cls.tag_format_be.unpack_from(data)[0])
        return tag
    def write_value(self, buffer: BinaryIO, little_endian=False):
        """Pack the wrapped value into `buffer` with the requested endianness."""
        if little_endian:
            buffer.write(self.tag_format_le.pack(self._value))
        else:
            buffer.write(self.tag_format_be.pack(self._value))
    def _to_snbt(self) -> SNBTType:
        # textual (SNBT) representation via the subclass-provided format string
        return self.fstring.format(self._value)
    def _to_python(self, value):
        """Convert numpy data types to their python equivalent."""
        if isinstance(value, np.floating):
            return float(value)
        elif isinstance(value, np.integer):
            return int(value)
        elif isinstance(value, np.generic):
            raise ValueError(f"Unexpected numpy type {type(value)}")
        else:
            return value
    # --- arithmetic operators ---
    # Binary operators unwrap the result to a plain Python number via
    # _to_python; the augmented (i*) variants re-wrap it in the tag class.
    def __add__(self, other):
        return self._to_python(self.value + other)
    def __radd__(self, other):
        return self._to_python(other + self.value)
    def __iadd__(self, other):
        return self.__class__(self + other)
    def __sub__(self, other):
        return self._to_python(self.value - other)
    def __rsub__(self, other):
        return self._to_python(other - self.value)
    def __isub__(self, other):
        return self.__class__(self - other)
    def __mul__(self, other):
        return self._to_python(self.value * other)
    def __rmul__(self, other):
        return self._to_python(other * self.value)
    def __imul__(self, other):
        return self.__class__(self * other)
    def __truediv__(self, other):
        return self._to_python(self.value / other)
    def __rtruediv__(self, other):
        return self._to_python(other / self.value)
    def __itruediv__(self, other):
        # NOTE(review): the result is wrapped twice here (unlike the other
        # augmented operators) -- harmless only if the constructor accepts its
        # own type; probably a typo worth confirming.
        return self.__class__(self.__class__(self / other))
    def __floordiv__(self, other):
        return self._to_python(self.value // other)
    def __rfloordiv__(self, other):
        return self._to_python(other // self.value)
    def __ifloordiv__(self, other):
        return self.__class__(self // other)
    def __mod__(self, other):
        return self._to_python(self._value % other)
    def __rmod__(self, other):
        return self._to_python(other % self._value)
    def __imod__(self, other):
        return self.__class__(self % other)
    def __divmod__(self, other):
        return self._to_python(divmod(self.value, other))
    def __rdivmod__(self, other):
        return self._to_python(divmod(other, self.value))
    def __pow__(self, other, modulo=None):
        # NOTE(review): the `modulo` argument is accepted but ignored.
        return self._to_python(self.value ** other)
    def __rpow__(self, other):
        return self._to_python(other ** self.value)
    def __ipow__(self, other):
        return self.__class__(self ** other)
    # --- unary operators and conversions, delegated to the wrapped value ---
    def __neg__(self):
        return self._to_python(self._value.__neg__())
    def __pos__(self):
        return self._to_python(self._value.__pos__())
    def __abs__(self):
        return self._to_python(self._value.__abs__())
    def __int__(self):
        return self._value.__int__()
    def __float__(self):
        return self._value.__float__()
    def __round__(self, n=None):
        return self._value.__round__(n)
    def __trunc__(self):
        return self._value.__trunc__()
    def __floor__(self):
        return self._value.__floor__()
    def __ceil__(self):
        return self._value.__ceil__()
    def __bool__(self):
        return self._value.__bool__()
| true |
fc05b1db7407cb11eead71ed6712b4207695c2c6 | Python | Leeyp/DHS-work | /practical 1/q5_upper_to_lower.py | UTF-8 | 167 | 3.546875 | 4 | [] | no_license | __author__ = 'dhs'
# NOTE(review): `string` is imported but never used.
import string
# Read a line (presumably uppercase) and echo it back lowercased below.
uppercase = input("Write something in uppercase and I convert to lowercase for you")
lowercase = uppercase.lower()
print(lowercase) | true |
f364c32a98389661c64f81fafd1b9591d2a1e14e | Python | DmitryDruzhinin/pvo-lab | /exam/1.py | UTF-8 | 220 | 3.578125 | 4 | [] | no_license | import math
# NOTE(review): `len` shadows the built-in -- rename (e.g. `length`).
len = float(input("enter yarn length: "))
diam = float(input("enter yarn diameter: "))
# Cylinder volume of the thread. NOTE(review): pi*(diam*2)**2*len uses twice
# the diameter where the radius (diam/2) is expected -- confirm the formula.
vol = math.pi * math.pow(diam * 2, 2) * len
# Radius of a sphere holding that volume. NOTE(review): solving
# V = 4/3*pi*r^3 for r requires a cube root, not sqrt -- confirm.
rad = math.sqrt(3 * vol / (4 * math.pi))
print("coil diameter: ", rad / 2) | true |
86539228573f570672bf141c39dd44fd41780a7c | Python | ceciliegl/LiquidCrystals | /plottingcodes/energycLL.py | UTF-8 | 894 | 2.75 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
import sys
def get_data(filename, variables):
    """Read a whitespace-delimited data file into a DataFrame.

    :param filename: path to the data file
    :param variables: column names to assign to the parsed columns
    :return: pandas DataFrame with the given column names
    """
    # sep=r"\s+" replaces the deprecated delim_whitespace=True flag; the
    # redundant backslash line-continuations inside the call are dropped.
    df = pd.read_csv(filename,
                     sep=r"\s+",
                     engine='python',
                     names=variables)
    return df
#using pandas to read the data files
data = get_data("../data3D.nosync" + "/test_final.txt", ["c", "L", "D", "b", "E", "EE"])
# Keep only rows where column b is at its maximum, collecting E and c*L^2.
f = []
cLLD = []
maxb = np.max(data["b"])
for i in range(len(data["c"])):
    if data["b"][i] == maxb:
        f.append(data["E"][i])
        cLLD.append(data["c"][i]*data["L"][i]**2)
# Sort both series by c*L^2 so the line plot is monotonic in x.
cLLD = np.array(cLLD)
f = np.array(f)
indices = np.argsort(cLLD)
cLLD = cLLD[indices]
f = f[indices]
#plt.plot(1/cLL, cLL/np.pi)
#plt.plot(cLLD, cLLD*3*np.pi/16) #+ np.log(cLLD)
plt.plot(cLLD, f, 'o-')
plt.xlabel(r"$cL^2D$")
plt.ylabel(r"$f$")
plt.show()
| true |
533beaba56219bc50b3a97a24b1e6fc8f34745a1 | Python | Jeremy-Fang/LeetCode | /Prefix-sum/930better.py | UTF-8 | 546 | 2.625 | 3 | [] | no_license | class Solution:
def numSubarraysWithSum(self, A, S):
res = 0
ones = [-1] + [i for i in range(len(A)) if A[i] == 1] + [len(A)]
print(ones)
if S == 0:
for i in range(1, len(ones)):
z = ones[i] - ones[i-1] - 1
res += z*(z+1)/2
else:
for i in range(1, len(ones)-S):
l = ones[i] - ones[i-1] - 1
r = ones[i+S] - ones[i+S-1] - 1
res += (l+1) * (r+1)
return int(res)
| true |
a3e3ae7a1e4aa2bb2b6e975222622d4adc9789a4 | Python | ChloeAust/birds-project | /bird color filter.py | UTF-8 | 2,020 | 2.546875 | 3 | [] | no_license | birds = [
{'common_name': 'Grey Heron', 'latin_name': 'Ardea cinerea', 'size': 'large', 'main_colour': 'grey', 'secondary_colour_1': 'white', 'secondary_colour_2': 'black'},
{'common_name': 'Sparrowhawk', 'latin_name': 'Accipiter nisus', 'size': 'medium', 'main_colour': 'brown', 'secondary_colour_1': 'beige', 'secondary_colour_2': 'white'},
{'common_name': 'Ring-necked Parakeet', 'latin_name': 'Psittacula krameri', 'size': 'medium', 'main_colour': 'green', 'secondary_colour_1': 'orange', 'secondary_colour_2': 'none'},
{'common_name': 'Woodpigeon', 'latin_name': 'Columba palumbus', 'size': 'medium', 'main_colour': 'grey', 'secondary_colour_1': 'white', 'secondary_colour_2': 'black'},
{'common_name': 'Green Woodpecker', 'latin_name': 'Picus viridis', 'size': 'medium', 'main_colour': 'green', 'secondary_colour_1': 'red', 'secondary_colour_2': 'black'},
{'common_name': 'Wren', 'latin_name': 'Troglodytes troglodytes', 'size': 'very small', 'main_colour': 'brown', 'secondary_colour_1': 'white', 'secondary_colour_2': 'none'},
{'common_name': 'Robin', 'latin_name': 'Erithacus rubecula', 'size': 'small', 'main_colour': 'brown', 'secondary_colour_1': 'orange', 'secondary_colour_2': 'red'},
{'common_name': 'Moorhen', 'latin_name': 'Gallinula chloropus', 'size': 'medium', 'main_colour': 'black', 'secondary_colour_1': 'red', 'secondary_colour_2': 'white'},
{'common_name': 'Goldcrest', 'latin_name': 'Regulus regulus', 'size': 'very small', 'main_colour': 'brown', 'secondary_colour_1': 'yellow', 'secondary_colour_2': 'black'},
{'common_name': 'Blue Tit', 'latin_name': 'Cyanistes caeruleus', 'size': 'very small', 'main_colour': 'yellow', 'secondary_colour_1': 'blue', 'secondary_colour_2': 'white'},
]
# NOTE(review): `app` is never defined in this file (and Flask is not
# imported), so this line raises NameError -- looks like a leftover from a
# Flask version of the script; confirm and remove or re-add the app setup.
app.run(debug=True)
colour = input('What is the main colour of the bird? ')
# NOTE(review): this compares the user's answer with the literal string
# 'main_colour', so the branch almost never runs; it was presumably meant to
# print birds whose bird['main_colour'] == colour -- confirm.
if colour == 'main_colour':
    for bird in birds:
        print(bird['common_name'])
        print(bird['latin_name'])
        print(bird['size'])
# NOTE(review): dead generator expression over an undefined name
# (`for_patient_type`) using Python 2's .iteritems(); evaluating this line
# raises NameError -- almost certainly pasted in by accident.
(k for k,v in for_patient_type.iteritems() if v == 'Real')
| true |
0af3db954cf1eb9e3798daad1ff0c40b32e21317 | Python | ctyfang/vision_algorithms_eth | /Exercise 3 - Simple Keypoint Tracker/code/selectKeypoints.py | UTF-8 | 934 | 2.71875 | 3 | [] | no_license | import numpy as np
from scipy.signal import convolve2d
from copy import deepcopy
def selectKeypoints(scores, num, r):
    """Select the `num` highest-scoring keypoints with non-maximum suppression.

    After each pick, the (2r+1)x(2r+1) box around it is zeroed so nearby
    maxima are suppressed. Returns an array of (row, col) pairs; the caller's
    array is not modified.

    Fixes vs. the original: np.argmax replaces the roundabout
    argpartition(..., -2)[-1] lookup, and the suppression slice no longer
    clamps its end to shape-1 (an off-by-one that excluded the last
    row/column near the border; slice ends past the array are clipped by
    numpy anyway).
    """
    scores = np.array(scores, copy=True)  # work on a copy
    keypoints = []
    for _ in range(num):
        # position of the current global maximum
        row, col = np.unravel_index(np.argmax(scores), scores.shape)
        keypoints.append([row, col])
        # NMS: zero the (2r+1)^2 neighbourhood around the pick
        r0 = max(row - r, 0)
        c0 = max(col - r, 0)
        scores[r0:row + r + 1, c0:col + r + 1] = 0
    return np.asarray(keypoints)
| true |
e76dd2e20e4fce5a6461e061437db348e234d958 | Python | solofruad/traffic-accidents | /model/classes/DatabaseToCSV.py | UTF-8 | 2,921 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 16:39:16 2019
@author: hat
"""
import pandas as pd
from datetime import timedelta
import re
class DatabaseToCSV:
    """Export a MongoDB tweet collection to a tab-separated file.

    Workflow: build_df() materialises a Mongo cursor into self.df, the
    first_preprocessing*() methods normalise dates and strip tab/CR/LF so the
    rows are TSV-safe, and df_to_csv() writes the result.
    """
    def __init__(self):
        # DataFrame under construction and the output directory/path prefix
        self.df = pd.DataFrame()
        self.dir_save = ''
    def get_cursor(self,collection, start, end):
        """Return a cursor over `collection`, filtered to created_at in
        [start, end) unless both bounds are 0."""
        if start == 0 and end == 0:
            cursor = collection.find({}, no_cursor_timeout=True)
            print("No date")
        else:
            cursor = collection.find({ 'created_at': {'$gte': start, '$lt': end} }, no_cursor_timeout=True)
            print("Yes date")
        return cursor
    def build_df(self, collection, start, end):
        """Materialise the (optionally date-filtered) collection into self.df."""
        cursor = self.get_cursor(collection, start,end)
        self.df = pd.DataFrame(list(cursor))
    def config_collection(self,dir_save):
        # path prefix for the per-thread TSV files written later
        self.dir_save = dir_save
    def first_preprocessing(self,loader=0):
        """Normalise every row in place: shift created_at by -5h (presumably
        UTC -> UTC-5 local time; confirm) and strip tab/CR/LF from the text
        fields. `loader` > 0 prints progress every `loader` rows.

        NOTE(review): assumes a default 0..n-1 RangeIndex, since .at[i, ...]
        and .iloc[i] are used interchangeably -- confirm.
        """
        for i in range(0,len(self.df)):
            date = self.df.iloc[i]['created_at'] - timedelta(hours=5)
            self.df.at[i,'created_at'] = date.strftime("%Y-%m-%d %H:%M:%S")
            self.df.at[i, 'text'] = re.sub("[\t\n\r]",'',self.df.iloc[i]['text'])
            self.df.at[i, 'place_name'] = re.sub("[\t\n\r]",'',self.df.iloc[i]['place_name'])
            if self.df.iloc[i]['user_location'] != None :
                self.df.at[i, 'user_location'] = re.sub("[\t\n\r]",'',self.df.iloc[i]['user_location'])
            #i = i +1 # This is only visualize the progress
            if loader:
                if i%loader == 0:
                    print(i)
    def first_preprocessing_paralleling(self,batch_size,threadId,collection_size,loader=0):
        """Same normalisation, restricted to this thread's slice of rows
        [batch_size*threadId, min(batch_size*(threadId+1), collection_size)),
        then writes that thread's TSV file.
        """
        init = batch_size * threadId
        end = init + (batch_size)
        end = end if end <= collection_size else collection_size
        for i in range(init,end):
            date = self.df.iloc[i]['created_at'] - timedelta(hours=5)
            self.df.at[i,'created_at'] = date.strftime("%Y-%m-%d %H:%M:%S")
            self.df.at[i, 'text'] = re.sub("[\t\n\r]",'',self.df.iloc[i]['text'])
            self.df.at[i, 'place_name'] = re.sub("[\t\n\r]",'',self.df.iloc[i]['place_name'])
            if self.df.iloc[i]['user_location'] != None :
                self.df.at[i, 'user_location'] = re.sub("[\t\n\r]",'',self.df.iloc[i]['user_location'])
            #i = i +1 # This is only visualize the progress
            if loader:
                if i%loader == 0:
                    print("%s : thread [%s]" %(i,threadId))
        filename = self.dir_save + str(threadId)+".tsv"
        self.df_to_csv(filename)
    def df_to_csv(self,filename):
        """Write self.df (sorted by created_at) as a tab-separated file.

        NOTE(review): writes the whole DataFrame, not just the calling
        thread's slice -- confirm that is intended.
        """
        #filename = "data/database/server_token_user.tsv"
        self.df.sort_values('created_at').to_csv(filename,sep='\t',index=False)
| true |
26fec854f79aeecc676112eeae9f9fbe7a2e2775 | Python | johnjdailey/DS-Unit-3-Sprint-2-SQL-and-Databases | /module3-nosql-and-document-oriented-databases/mongo_prep.py | UTF-8 | 2,309 | 2.859375 | 3 | [
"MIT"
] | permissive | # mongo_prep.py
import os
import pymongo
from dotenv import load_dotenv
from datetime import datetime
load_dotenv()
DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority"
print("----------------")
print("URI:", connection_uri)
client = pymongo.MongoClient(connection_uri)
print("----------------")
print("CLIENT:", type(client), client)
db = client.test_database
print("----------------")
print("DB:", type(db), db)
print("----------------")
print("COLLECTIONS:")
print(db.list_collection_names())
collection = db.pokemon
print("----------------")
print("COLLECTION:", type(collection), collection)
print("----------------")
print("COLLECTIONS:")
print(db.list_collection_names())
print(collection.count_documents({}))
print(collection.count_documents({"name": "Pikachu"}))
#
# INSERTS
#
mewtwo = {
"name": "Mewtwo",
"level": 100,
"exp": 76000000000,
"hp": 450,
"strength": 550,
"intelligence": 450,
"dexterity": 300,
"wisdom": 575
}
pikachu = {
"name": "Pikachu",
"level": 30,
"exp": 76000000000,
"hp": 400,
}
blastoise = {
"name": "Blastoise",
"lvl": 70,
}
characters = [mewtwo, pikachu, blastoise]
print("INSERT ONE AT A TIME...")
for character in characters:
print(character["name"])
collection.insert_one(character)
print(collection.count_documents({}), "DOCS")
print(collection.count_documents({"level": {"$gte": 50}}), "ABOVE 50")
print(collection.count_documents({"name": "Pikachu"}))
pikas_cursor = collection.find({"name": "Pikachu"})
pikas = list(pikas_cursor)
print(len(pikas), "PIKAS")
print("INSERT MANY...")
db.things.insert_one({"thing":"one"})
db.things.insert_many([{"thing":"one"}, {"thing": "two"}])
print(db.things.count_documents({"thing": "one"}))
#try:
# collection.insert_many(characters)
#except Exception as err:
# print(err)
# print("...")
#
# for char in characters:
# char["caught_at"] = str(datetime.now())
# print(characters[0])
# collection.insert_many(characters)
#
#print(collection.count_documents({"name": "Pikachu"})) | true |
76f11d1dac89d88d624982134b7d696d492d7b20 | Python | HuipengXu/leetcode | /intToRoman.py | UTF-8 | 1,957 | 3.65625 | 4 | [] | no_license | # @Time : 2019/6/28 17:31
# @Author : Xu Huipeng
# @Blog : https://brycexxx.github.io/
class Solution:
    """Convert a non-negative integer to its Roman-numeral representation.

    Two equivalent strategies are provided: a digit-by-digit decomposition
    (intToRoman) and a greedy subtraction over a value table (intToRoman1).
    """

    def intToRoman(self, num: int) -> str:
        """Build the numeral one decimal place at a time, least significant first."""
        symbols = {1: 'I', 4: 'IV', 5: 'V', 9: 'IX', 10: 'X',
                   40: 'XL', 50: 'L', 90: 'XC', 100: 'C', 400: 'CD',
                   500: 'D', 900: 'CM', 1000: 'M'}
        chunks = []
        place = 1
        while num > 0:
            # Split off the lowest `place` digits, e.g. 2344 -> 4, 40, 300, 2000.
            remaining, part = divmod(num, 10 ** place)
            num = remaining * 10 ** place
            if part in symbols:
                piece = symbols[part]
            elif 1 <= part < 4:
                piece = symbols[1] * part
            elif 5 < part < 9:
                piece = symbols[5] + symbols[1] * (part - 5)
            elif 10 < part < 40:
                piece = symbols[10] * (part // 10)
            elif 50 < part < 90:
                piece = symbols[50] + symbols[10] * ((part - 50) // 10)
            elif 100 < part < 400:
                piece = symbols[100] * (part // 100)
            elif 500 < part < 900:
                piece = symbols[500] + symbols[100] * ((part - 500) // 100)
            else:
                piece = symbols[1000] * (part // 1000)
            chunks.append(piece)
            place += 1
        chunks.reverse()  # pieces were collected least-significant first
        return ''.join(chunks)

    def intToRoman1(self, num: int) -> str:
        """Greedy conversion: repeatedly take the largest value that still fits."""
        table = [
            (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
            (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
            (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
        ]
        out = []
        for value, symbol in table:
            count, num = divmod(num, value)
            out.append(symbol * count)
        return ''.join(out)
if __name__ == '__main__':
    # Quick manual check: 2344 -> MMCCCXLIV.
    solver = Solution()
    print(solver.intToRoman1(2344))
| true |
b485313ccebe4e62039cdc5cdbec2e2d0358ad00 | Python | sondongmin0419/study | /python/b_15650 N과M (2).py | UTF-8 | 233 | 2.71875 | 3 | [] | no_license | N, M = map(int, input().split())
def b_15650(n):
    """Print every M-combination of 1..N in increasing order (Baekjoon 15650).

    Backtracks over the shared module-level list `li`: each level appends one
    value greater than everything chosen so far, so combinations come out in
    lexicographic order. Relies on module globals N, M and li.

    :param n: smallest candidate value allowed at this recursion depth.
    """
    if len(li) == M:
        print(*li)
        # Bug fix: a complete combination was printed but recursion kept
        # going (never printing anything deeper) -- return early instead
        # of doing exponentially much wasted work.
        return
    if n > N:
        return
    for i in range(n, N + 1):
        li.append(i)
        b_15650(i + 1)
        li.pop()
    return
li = []  # shared buffer holding the current partial combination
b_15650(1)  # start choosing candidates from 1
1eda0ea0fa19ffd831251f332f86969a846d8bd6 | Python | dcsm8/Udemy-Python-Course | /9._Advanced_Built_in_Functions/class_119.py | UTF-8 | 211 | 3.9375 | 4 | [] | no_license | # Generators
def hundred_numbers():
    """Lazily yield the integers 0 through 99, one per next() call."""
    yield from range(100)
gen = hundred_numbers()
# The generator is consumed incrementally: three explicit next() calls...
for _ in range(3):
    print(next(gen))  # prints 0, then 1, then 2
# ...and list() drains whatever is left, resuming where next() stopped.
print(list(gen))  # [3, 4, ..., 99]
| true |
9f9139731c59918d4c30c1f4ff8152d2a1a88571 | Python | tfaieta/CoreML_test1 | /lists_arrays_tuples.py | UTF-8 | 587 | 4.09375 | 4 | [] | no_license | # Lists/Arrays
array = ['Tony', 'Faieta']
array[0] = 'Bob'  # lists are mutable: replace the first element
print(array[0] + " " + array[1])

groceryArray = ['eggs', 'milk', 'carton', 'juice', 'malk', 'chicken']
listSlice = groceryArray[3:5]              # elements 3 and 4
slice_up_to_three = groceryArray[:3]       # first three elements
slice_from_three_on = groceryArray[3:]     # element 3 through the end
slice_second_from_last = groceryArray[-2]  # negative indices count from the end

print(listSlice)
print(slice_up_to_three)
# Bug fix: this line previously re-printed slice_up_to_three, so
# slice_from_three_on was computed but never shown.
print(slice_from_three_on)
print(slice_second_from_last)

# Tuples
(x_values, y_values) = ([1, 2, 3], [-1, -2, -3])
print('PLOT THIS POINT: ', x_values[2], ", ", y_values[0])
# Renamed from `tuple`, which shadowed the built-in tuple type.
point_tuple = (x_values, y_values)
print(point_tuple[0][1])
| true |
b06036dbc5505e82fda13c98ca409c7c443de360 | Python | cesium12/solver-tools | /solvertools/regex.py | UTF-8 | 11,281 | 3.109375 | 3 | [] | no_license | """
Wacky tools for slicing and dicing regexes.
"""
from sre_parse import parse, CATEGORIES, SPECIAL_CHARS, SubPattern
from sre_constants import MAXREPEAT
# Sentinel regexes used by the slicing/indexing helpers below:
# UNKNOWN stands for "could be anything", INVALID for "no possible match".
UNKNOWN = u'/.*/'
INVALID = u'#'
REVERSE_CATEGORIES = {}
# Invert sre's CATEGORIES table so a parsed category structure (stringified)
# can be mapped back to the escape sequence (e.g. r'\d') that produced it.
for key, value in CATEGORIES.items():
    REVERSE_CATEGORIES[str(value)] = key
def is_regex(text):
    """
    In solvertools, regex inputs are represented as strings that begin and end
    with slashes.

    >>> is_regex(u'/.../')
    True
    >>> is_regex(u'...')
    False
    >>> is_regex(u'//')
    True
    >>> is_regex(u'/')
    False
    """
    if not isinstance(text, basestring):
        return False
    return len(text) >= 2 and text[:1] == u'/' and text[-1:] == u'/'
def bare_regex(text):
    """
    Strip the surrounding slash markers from a regex, if present. The result
    works with standard regex functions but will not pass `is_regex`.

    >>> bare_regex(u'/test/')
    u'test'
    >>> bare_regex(u'word')
    u'word'
    >>> bare_regex(u'http://')
    u'http://'
    """
    return text[1:-1] if is_regex(text) else text
def regex_sequence(strings):
    """
    Concatenate regexes and/or plain strings into a single expression. This
    operation undoes :func:`regex_pieces`.

    >>> regex_sequence(['/foo|bar/', 'baz'])
    u'/(foo|bar)baz/'
    >>> regex_sequence(['a', 'b'])
    u'ab'
    """
    if not any(is_regex(s) for s in strings):
        # No regex anywhere: plain string concatenation suffices.
        return u''.join(strings)
    combined = []
    for s in strings:
        combined.extend(_wrap_branches(parse(bare_regex(s))))
    return u'/' + unparse(combined) + u'/'
def _wrap_branches(struct):
result = []
for op, data in struct:
if op == 'branch':
result.append( ('subpattern', (1, [(op, data)])) )
else:
result.append( (op, data) )
return result
def regex_len(regex):
    """
    Return a (minimum, maximum) tuple of lengths of strings the expression
    can match. MAXREPEAT (65535) stands for "very long or unbounded".

    >>> regex_len(u'test')
    (4, 4)
    >>> regex_len(u'/t.st/')
    (4, 4)
    >>> regex_len(u'/.*/')
    (0, 65535)
    >>> regex_len(u'.*') # not treated as a regex
    (2, 2)
    >>> regex_len(u'/fo?o/')
    (2, 3)
    >>> regex_len(u'/mo{2,7}/')
    (3, 8)
    >>> regex_len(u'/(foo)+/')
    (3, 65535)
    >>> regex_len(u'/s?e?q?u?e?n?c?e?/')
    (0, 8)
    """
    if is_regex(regex):
        return _regex_len_pattern(parse(bare_regex(regex)))
    n = len(regex)
    return n, n
def regex_pieces(regex):
    """
    Split a regex into independently-matchable pieces.

    >>> regex_pieces('/[abc]de+/')
    [u'/[abc]/', u'd', u'/e+/']
    """
    if not is_regex(regex):
        return list(regex)
    pieces = []
    for item in parse(bare_regex(regex)):
        text = unparse([item])
        # Bare literals stay plain; anything structural keeps regex markers.
        pieces.append(text if item[0] == 'literal' else '/' + text + '/')
    return pieces
def _regex_len_pattern(pattern):
"Returns the minimum and maximum length of a parsed regex pattern."
lo = hi = 0
for op, data in pattern:
if op in ('literal', 'in', 'category', 'any'):
sub_lo = sub_hi = 1
elif op == 'subpattern':
sub_lo, sub_hi = _regex_len_pattern(data[1])
elif op == 'branch':
sub_lo, sub_hi = _regex_len_branch(data[1])
elif op == 'max_repeat':
sub_lo, sub_hi = _regex_len_repeat(data)
elif op == 'at':
sub_lo = sub_hi = 0
else:
raise ValueError("I don't know what to do with this regex: "
+ str(struct))
lo += sub_lo
hi += sub_hi
return lo, min(MAXREPEAT, hi)
def _regex_len_branch(branches):
    """
    Return the (min, max) match length over a set of alternatives.

    This does not account for some in-between lengths being impossible.
    """
    lo, hi = MAXREPEAT, 0
    for alternative in branches:
        alt_lo, alt_hi = _regex_len_pattern(alternative)
        if alt_lo < lo:
            lo = alt_lo
        if alt_hi > hi:
            hi = alt_hi
    return lo, hi
def _regex_len_repeat(data):
    """Return the (min, max) match length of a 'max_repeat' node."""
    least, most, inner = data
    inner_lo, inner_hi = _regex_len_pattern(inner)
    return least * inner_lo, min(MAXREPEAT, most * inner_hi)
def round_trip(regex):
    """
    Parse and re-serialize a regex, which may simplify it.
    """
    stripped = bare_regex(regex)
    return unparse(parse(stripped))
def regex_index(regex, index):
    """
    Index into a regex, returning a smaller regex of the things that match
    in that position.

    The index can be given as a string, in which case it will be converted
    to an int. If the index is itself a regex, this will give up and return
    the uninformative answer /.*/.
    """
    if is_regex(index):
        # An uncertain index could pick out anything at all.
        return UNKNOWN
    elif isinstance(index, basestring):
        try:
            index = int(index)
        except ValueError:
            return INVALID
    elif not is_regex(regex):
        # Plain string with a usable index: ordinary indexing.
        return regex[index]
    # Collect every single-character node that could occupy this position.
    choices = _regex_index_pattern(parse(bare_regex(regex)), index)
    if len(choices) == 0:
        # not exactly sure how this would happen
        return INVALID
    elif len(choices) == 1:
        regex = unparse(choices[0])
        if choices[0][0][0] == 'literal':
            # We know our choices are length-1 regexes. If we have one choice,
            # and its one character has the op of 'literal', we can just return
            # the bare literal.
            return regex
        else:
            return u'/%s/' % (regex,)
    else:
        # Several possibilities: present them as a simplified alternation.
        regex = round_trip(unparse(('branch', (None, choices))))
        return u'/%s/' % (regex,)
def _regex_index(struct, index):
    """Dispatch regex indexing over one parsed node (or a whole pattern)."""
    if isinstance(struct, (list, SubPattern)):
        return _regex_index_pattern(struct, index)
    opcode, data = struct
    if opcode in ('literal', 'in', 'category', 'any'):
        # Single-character node: it only occupies position 0.
        return [[struct]] if index == 0 else []
    if opcode == 'subpattern':
        return _regex_index_pattern(data[1], index)
    if opcode == 'branch':
        return _regex_index_branch(data[1], index)
    if opcode == 'max_repeat':
        return _regex_index_repeat(data, index)
    raise ValueError("I don't know what to do with this regex: "
                     + str(struct))
def regex_slice(expr, start, end):
    """
    Get a slice of a string, which may be an uncertain regex, by calling
    regex_index on each index.

    Note that this can return expressions that are overly general: for example,
    it can mix characters from both branches of a regex. Being more specific
    than that would take more work.
    """
    if not is_regex(expr):
        return expr[slice(start, end)]
    if start < 0 or end < 0:
        raise NotImplementedError("Can't take negative slices of a regex yet")
    result = u''
    # Track whether any position needed more than a bare literal; only then
    # does the result need to stay marked as a regex.
    nonliteral_found = False
    for index in xrange(start, end):
        # Each position is resolved independently via the pattern indexer.
        choices = _regex_index_pattern(parse(bare_regex(expr)), index)
        if choices == INVALID or len(choices) == 0:
            return INVALID
        elif len(choices) == 1:
            regex = unparse(choices[0])
            if choices[0][0][0] != 'literal':
                nonliteral_found = True
            result += regex
        else:
            # Multiple possibilities: alternation, parenthesized if needed.
            regex = round_trip(unparse(('branch', (None, choices))))
            if u'|' in regex:
                result += u'(%s)' % (regex,)
            else:
                result += regex
    if nonliteral_found:
        return u'/%s/' % result
    else:
        return result
def _regex_index_branch(branches, index):
    """Collect the position-`index` possibilities from every alternative."""
    return [choice
            for branch in branches
            for choice in _regex_index_pattern(branch, index)]
def _regex_index_repeat(data, index):
    """Index into a repeated subpattern by unrolling just enough copies."""
    min_repeat, max_repeat, inner = data
    lo, _hi = _regex_len_pattern(inner)
    # A zero-length repetition cannot occupy a position, so treat lo as >= 1.
    lo = max(lo, 1)
    copies = min(index // lo + 1, max_repeat)
    return _regex_index_pattern(list(inner) * copies, index)
def _regex_index_pattern(pattern, index):
    """Walk a parsed pattern, tracking the range of positions each node can
    cover, and return the single-node choices that could sit at `index`.
    A wildcard ('any') choice absorbs every other possibility."""
    if isinstance(index, slice):
        # we might come up with a clever way to do this
        raise NotImplementedError
    if index < 0:
        # This is an easier case that's still not done yet
        raise NotImplementedError
    lo_counter = hi_counter = 0
    choices = []
    for sub in pattern:
        lo, hi = _regex_len_pattern([sub])
        next_lo = lo_counter + lo
        next_hi = hi_counter + hi
        if index < lo_counter:
            # Every later node starts past `index`; nothing more can match.
            break
        elif lo_counter <= index < next_hi:
            # `sub` may cover position `index` for some alignment: try each
            # offset at which `sub` could start given earlier variable lengths.
            for offset in xrange(lo_counter, hi_counter+1):
                sub_index = index - offset
                if sub_index >= 0:
                    choices.extend(_regex_index(sub, sub_index))
        lo_counter, hi_counter = next_lo, next_hi
    # if any of the choices is 'any', it overrules everything else.
    for choice in choices:
        # make sure our choices are single characters
        assert len(choice) == 1
        op, data = choice[0]
        if op == 'any':
            return [choice]
    return choices
def unparse(struct):
    """Serialize a parsed regex structure back into regex text.

    Sequences are rendered item by item; single (opcode, data) nodes are
    either looked up in REVERSE_CATEGORIES or dispatched to the matching
    _unparse_<opcode> helper defined in this module.
    """
    if isinstance(struct, (list, SubPattern)):
        return u''.join(unparse(item) for item in struct)
    if not isinstance(struct, tuple):
        raise TypeError("%s doesn't belong in a regex structure" % struct)
    opcode, data = struct
    if str(struct) in REVERSE_CATEGORIES:
        return REVERSE_CATEGORIES[str(struct)]
    unparser = globals().get('_unparse_%s' % opcode)
    if unparser is None:
        raise ValueError("I don't know what to do with this regex: "
                         + str(struct))
    return unparser(data)
def _unparse_literal(data):
    """Render a literal character, backslash-escaping regex metacharacters."""
    char = unichr(data)
    return u'\\' + char if char in SPECIAL_CHARS else char
def _unparse_any(data):
return u'.'
def _unparse_range(data):
    """Render a character range such as a-z (used inside character classes)."""
    start, end = data
    return u'%s-%s' % (unichr(start), unichr(end))
def _unparse_in(data):
    """Render a character class by bracketing its rendered members."""
    return u'[%s]' % unparse(data)
def _unparse_category(data):
    """Render a category node as its escape sequence via REVERSE_CATEGORIES.

    NOTE(review): REVERSE_CATEGORIES is keyed by str() of full parsed
    structures, while `data` here is a bare category tag -- confirm this
    path is actually reachable (unparse intercepts categories earlier).
    """
    # Removed dead code: a `fake_value` local was built here and never used.
    return REVERSE_CATEGORIES[data]
def _unparse_subpattern(data):
    """Render a group: parentheses around its rendered contents."""
    return u'(%s)' % unparse(data[1])
def _unparse_branch(data):
    """Render alternatives separated by '|'."""
    rendered = [unparse(alternative) for alternative in data[1]]
    return u'|'.join(rendered)
def _unparse_max_repeat(data):
    """Render a repetition, preferring the shorthand *, ? or + when one fits."""
    lo, hi, value = data
    if (lo, hi) == (0, MAXREPEAT):
        symbol = u'*'
    elif (lo, hi) == (0, 1):
        symbol = u'?'
    elif (lo, hi) == (1, MAXREPEAT):
        symbol = u'+'
    else:
        symbol = u'{%d,%d}' % (lo, hi)
    return unparse(value) + symbol
def _unparse_at(data):
if data == 'at_beginning':
return u'^'
elif data == 'at_end':
return u'$'
else:
raise ValueError
| true |
86e90b2d56e654cf63fc4664cd0c5adcbd53f4ff | Python | derpston/fruitfly | /example/basic/mod_example.py | UTF-8 | 484 | 3.15625 | 3 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """
A basic fruitfly module. Sends an event once per second, and receives the same event.
"""
import fruitfly
class example(fruitfly.Module):
    """Minimal fruitfly module: sends a 'foo.omg' event once per second and
    logs every 'foo.*' event it receives (including its own)."""
    def init(self):
        # Call the handler once up front -- presumably so the first event
        # fires immediately rather than after the first 1-second tick;
        # confirm against fruitfly's @repeat semantics.
        self.onesecond()

    @fruitfly.repeat(1)
    def onesecond(self):
        self.logger.info("Sending an event")
        self.send_event("foo.omg", ['test'])

    @fruitfly.event('foo.*')
    def foohandler(self, event, payload):
        self.logger.info("event handler for foo got %s, %s", event, repr(payload))
| true |
e68ab169d2f558a21805b4101d90cb85957b57c0 | Python | M5FGN/CodeClanWork | /week_01/day_02/conditionals/ternary_operator.py | UTF-8 | 155 | 3.25 | 3 | [] | no_license | score = 3
# Long-hand if/else that the lesson replaces:
# if score > 5:
#     result = 'pass'
# else:
#     result = 'fail'
# print(result)
# Ternary (conditional expression) form -- one line, same behavior:
result = "pass" if score > 5 else "fail"
print(result)
ee9c25e84912ee87cec5694516bae1da926b266b | Python | pynbody/pynbody | /pynbody/family.py | UTF-8 | 2,974 | 3.15625 | 3 | [] | no_license | """
family
======
This module defines the Family class which represents
families of particles (e.g. dm, gas, star particles).
New Family objects are automatically registered so that
snapshots can use them in the normal syntax (snap.dm,
snap.star, etc).
In practice the easiest way to make use of the flexibility
this module provides is through adding more families of
particles in your config.ini.
"""
import functools
import sys
from . import config_parser
_registry = []
def family_names(with_aliases=False):
    """Returns a list of the names of all particle families.

    If with_aliases is True, each family's aliases follow its primary name.
    """
    # Note: the previous version declared ``global _registry`` needlessly --
    # the registry is only read here, never rebound.
    names = []
    for fam in _registry:
        names.append(fam.name)
        if with_aliases:
            names.extend(fam.aliases)
    return names
def get_family(name, create=False):
    """Returns the family matching `name` (a Family instance, a family
    name, or an alias; matching is case-insensitive). If no such family
    exists: raise ValueError when `create` is False, otherwise register
    and return a brand-new Family."""
    if isinstance(name, Family):
        return name
    name = name.lower()
    # or should it check and raise rather than just convert?
    # Not sure.
    for candidate in _registry:
        if candidate.name == name or name in candidate.aliases:
            return candidate
    if create:
        return Family(name)
    raise ValueError(name +
                     " not a family")  # is ValueError the right thing here?
class Family:
    """A named family of particles (e.g. dm, gas, star).

    Instantiating a Family registers it in the module-level ``_registry``,
    making it visible to :func:`family_names` and :func:`get_family`.
    Names and aliases must be lower case, and names must be unique among
    all existing names and aliases.
    """
    def __init__(self, name, aliases=[]):
        # NOTE(review): the mutable default ``aliases=[]`` is shared between
        # calls; it is only stored/iterated here, but callers should not
        # mutate a Family's alias list in place.
        if name != name.lower():
            raise ValueError("Family names must be lower case")
        if name in family_names(with_aliases=True):
            raise ValueError("Family name " + name + " is not unique")
        for a in aliases:
            if a != a.lower():
                raise ValueError("Aliases must be lower case")
        self.name = name
        self.aliases = aliases
        _registry.append(self)

    def __repr__(self):
        return "<Family " + self.name + ">"

    def __reduce__(self):
        # Pickle support: on load, re-resolve (or re-create) the family by
        # name via get_family(..., create=True), then restore the aliases.
        return get_family, (self.name, True), {"aliases": self.aliases}

    def __iter__(self):
        # Provided so a single family can be treated as a list of families
        yield self

    def __str__(self):
        return self.name

    def __cmp__(self, other):
        # for python 2.x
        return cmp(str(self), str(other))

    def __eq__(self, other):
        return str(self) == str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __hash__(self):
        return hash(str(self))
if sys.version_info[0] >= 3:
    # Python 3 dropped __cmp__; synthesize the remaining rich comparisons
    # from __eq__ and __lt__.
    Family = functools.total_ordering(Family)

# Instantiate the default families as specified
# by the configuration file
g = globals()
for f in config_parser.options('families'):
    aliases = config_parser.get('families', f)
    # Expose each configured family as a module attribute (e.g. family.dm).
    g[f] = Family(f, list(map(str.strip, aliases.split(","))))
| true |
76ffc779c2b8a5d2a5a1f23b50a6075770c1a988 | Python | Ritvik19/CodeBook | /data/Algorithms/Graph Adjacency List.py | UTF-8 | 2,343 | 3.671875 | 4 | [] | no_license | class Vertex():
    def __init__(self, node):
        """Create a vertex identified by `node`, initialised for path
        searches: unvisited, infinite tentative distance, no predecessor."""
        self.id = node
        self.adjacent = {}  # neighbouring Vertex -> edge weight
        self.distance = float("inf")  # tentative distance (no consumer visible here)
        self.visited = False
        self.previous = None  # predecessor on the best-known path
    def addNeighbour(self, neighbour, weight=0):
        """Record an edge from this vertex to `neighbour` with the given weight."""
        self.adjacent[neighbour] = weight
    def getConnections(self):
        """Return a view of the neighbouring Vertex objects."""
        return self.adjacent.keys()
    def getVertexID(self):
        """Return this vertex's identifier."""
        return self.id
    def getWeight(self, neighbour):
        """Return the weight of the edge to `neighbour` (KeyError if absent)."""
        return self.adjacent[neighbour]
    def setDistance(self, dist):
        """Set the tentative distance used during a path search."""
        self.distance = dist
    def getDistance(self):
        """Return the current tentative distance."""
        return self.distance
    def setPrevious(self, prev):
        """Record `prev` as the predecessor on the best-known path."""
        self.previous = prev
    def setVisited(self):
        """Mark this vertex as processed."""
        self.visited = True
    def __str__(self) :
        """Return the vertex id formatted as a string."""
        return f"{self.id}"
class Graph():
    """Adjacency-list graph of Vertex objects, keyed by vertex id.

    Edges added with addEdge are undirected: both endpoints learn about
    each other.
    """

    def __init__(self):
        self.vertDictionary = {}  # vertex id -> Vertex
        self.numVertices = 0

    def __iter__(self):
        """Iterate over the Vertex objects in the graph."""
        return iter(self.vertDictionary.values())

    def addVertex(self, node):
        """Create, register and return a new Vertex with id `node`."""
        self.numVertices += 1
        newVertex = Vertex(node)
        self.vertDictionary[node] = newVertex
        return newVertex

    def getVertex(self, n):
        """Return the Vertex with id `n`, or None if it is unknown."""
        # Membership test on the dict itself instead of .keys() -- same
        # result, idiomatic and avoids building a view.
        if n in self.vertDictionary:
            return self.vertDictionary[n]
        return None

    def getVertices(self):
        """Return a view of all vertex ids."""
        return self.vertDictionary.keys()

    def addEdge(self, frm, to, cost=0):
        """Add an undirected edge between `frm` and `to` with weight `cost`,
        creating either endpoint if it does not exist yet."""
        if frm not in self.vertDictionary:
            self.addVertex(frm)
        if to not in self.vertDictionary:
            self.addVertex(to)
        self.vertDictionary[frm].addNeighbour(self.vertDictionary[to], cost)
        self.vertDictionary[to].addNeighbour(self.vertDictionary[frm], cost)

    def getEdges(self):
        """Return every edge as a (from_id, to_id, weight) tuple.

        Undirected edges appear twice, once per direction.
        """
        edges = []
        # Bug fix: this previously iterated the module-global demo graph `G`
        # instead of this instance (`for v in G`), so it only worked for
        # the demo object and raised NameError otherwise.
        for v in self:
            vid = v.getVertexID()
            for w in v.getConnections():
                wid = w.getVertexID()
                edges.append((vid, wid, v.getWeight(w)))
        return edges
if __name__ == "__main__":
    # Build a small demo graph and print its edge list (each undirected
    # edge is reported in both directions).
    G = Graph()
    for a in ['a', 'b', 'c', 'd', 'e']:
        G.addVertex(a)
    G.addEdge('a', 'b', 4)
    G.addEdge('a', 'c', 1)
    G.addEdge('c', 'b', 2)
    G.addEdge('b', 'e', 4)
    G.addEdge('c', 'd', 3)
    G.addEdge('d', 'e', 5)
    print('Edges:')
    print(*G.getEdges(), sep="\n")
0941a3ca53d0b08c36d0b0e06bb5c2d26aea80f2 | Python | PauliKarl/pktool | /tools/dataset/sdc/shipdet/hrsc/visBar.py | UTF-8 | 2,325 | 3.171875 | 3 | [] | no_license | #{'航母': 305, '军舰': 1403, '商船': 540, '船': 624, '潜艇': 104}
# Ship class counts (from the fused header comment): aircraft carrier 305,
# warship 1403, merchant ship 540, other ship 624, submarine 104.
h = 305   # aircraft carriers
j = 1403  # warships
s = 540   # merchant ships
c = 624   # other ships
q = 104   # submarines
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# SimHei font so the CJK title below renders (Windows font path).
font = FontProperties(fname=r"C:\Windows\Fonts\simhei.ttf", size=14)
plt.bar(1, 305, label='graph 1')
plt.bar(2, 1403, label='graph 2')
# params
# Earlier HRSC multi-class ship chart, kept commented out for reference:
# #{'航母': 305, '军舰': 1403, '商船': 540, '船': 624, '潜艇': 104}
# #(Aircraft carrier)、军舰(Warcraft)、商船(Merchant Ship)、潜艇(submarine)和其他(other ship)
# import matplotlib.pyplot as plt
# from matplotlib.font_manager import FontProperties
# font = FontProperties(fname=r"C:\Windows\Fonts\simhei.ttf", size=14)
# plt.bar(1, 305, label='Aircraft carrier')
# plt.bar(2, 1403, label='Warcraft')
# plt.bar(3, 540, label='Merchant Ship')
# plt.bar(4, 104, label='Submarine')
# plt.bar(5, 624, label='Other Ship')
# # params
# # x: the x position(s) of the bars
# # y: the bar height(s)
# # width: bar width, default 0.8
# # bottom: y coordinate of the bar base, default 0
# # align: center / edge -- bars centred on, or flush with, the x coordinate
# plt.legend()
# plt.xlabel('class')
# plt.ylabel('number')
# plt.title(u'HRSC舰船多类别统计图', FontProperties=font)
# plt.show()
#####xview
# xView ship-category counts chart:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
font = FontProperties(fname=r"C:\Windows\Fonts\simhei.ttf", size=14)
#plt.bar(1, 305, label='Maritime Vessel')  # parent "ship" super-category
plt.bar(1, 2835, label='Motorboat')
plt.bar(2, 1109, label='Sailboat')
plt.bar(3, 462, label='Tugboat')
plt.bar(4, 355, label='Barge')
plt.bar(5, 1751, label='Fishing Vessel')
plt.bar(6, 400, label='Ferry')
plt.bar(7, 917, label='Yacht')
plt.bar(8, 618, label='Container Ship')
plt.bar(9, 162, label='Oil Tanker')
# params
##{'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship','Oil Tanker'}
# x: the x position(s) of the bars
# y: the bar height(s)
# width: bar width, default 0.8
# bottom: y coordinate of the bar base, default 0
# align: center / edge -- bars centred on, or flush with, the x coordinate
plt.legend()
plt.xlabel('number')
plt.ylabel('value')
plt.title(u'测试例子——条形图', FontProperties=font)  # "Test example -- bar chart"
plt.show()
| true |
4b7716c9e80bbf48db092c72d2efde78a9ba1ba7 | Python | zubairabid/WikiSearchEngine-Mini | /20171076/wikitextparser.py | UTF-8 | 1,338 | 3.0625 | 3 | [] | no_license | import re
# Matches one {{cite ...}} / {{Cite ...}} template, capturing its body
# (non-greedy, so adjacent templates are matched separately).
citematch = re.compile(r'{{[Cc]ite(.+?)}}')
def isComment(line):
    """Return True when the wikitext line opens an HTML comment."""
    return line[:4] == '<!--'
def getCategory(line):
    """Extract the category name from a '[[Category:...]]' line,
    dropping the trailing ']]' and appending a single space."""
    prefix = len('[[Category:')  # 11 characters
    return line[prefix:-2] + ' '
def getCitations(line):
    """Flatten every cite template on the line into plain words.

    Each '{{cite ...}}' body is split on '|'; for 'key=value' fields only
    the value (after the first '=') is kept, stripped of whitespace. Words
    are space-separated and each template contributes a trailing space.
    """
    collected = ''
    for ref in citematch.findall(line):
        for field in ref.split('|'):
            value = field.split('=', 1)[1] if '=' in field else field
            collected += value.strip() + ' '
        collected += ' '
    return collected
def getInfobox(line):
    """Extract the value portion of one infobox line.

    '|' lines yield everything after the first '='; '{' lines yield
    everything after the first space; any other line contributes nothing.
    Every non-empty result carries a trailing space.
    """
    if line.startswith('|'):
        return line[line.find('=') + 1:].strip() + ' '
    if line.startswith('{'):
        return line[line.find(' '):].strip() + ' '
    return ''
def getLinks(line):
    """Keep body lines (with a trailing space); drop '==' section headings."""
    return '' if line.startswith('==') else line + ' '
def getPlaintext(line):
    """Return the line unchanged (plain text needs no extraction)."""
    return line
| true |
f60a34ae5279501eb9338e241d87ed74c124bb82 | Python | Dinesh-Sivanandam/Data-Structures | /Merge Sort/mergesort-implementation.py | UTF-8 | 1,797 | 4.625 | 5 | [] | no_license | #function for merge sort
"""
we are using recursion for separating the left and right halves of the array for sorting;
after separating the left and right halves we are merging the two sorted lists
"""
def merge_sort(arr):
    """Sort `arr` in place by recursive merge sort.

    Lists of length 0 or 1 are already sorted; otherwise the two halves
    are sorted independently and merged back into `arr`.
    """
    if len(arr) <= 1:
        return
    middle = len(arr) // 2
    left_half = arr[:middle]
    right_half = arr[middle:]
    merge_sort(left_half)
    merge_sort(right_half)
    merge_two_sorted_lists(left_half, right_half, arr)
# merge step used by merge_sort
def merge_two_sorted_lists(a, b, arr):
    """Merge sorted lists `a` and `b` into `arr` in place.

    `arr` must have room for len(a) + len(b) elements; ties take the
    element from `a` first, keeping the merge stable.
    """
    len_a, len_b = len(a), len(b)
    i = j = 0
    for k in range(len_a + len_b):
        # Take from `a` when `b` is exhausted, or while a[i] <= b[j].
        if j >= len_b or (i < len_a and a[i] <= b[j]):
            arr[k] = a[i]
            i += 1
        else:
            arr[k] = b[j]
            j += 1
if __name__ == '__main__':
    # Exercise the sort on edge cases: general, empty, singleton,
    # reverse-sorted and already-sorted inputs.
    test_cases = [
        [10, 3, 15, 7, 8, 23, 98, 29],
        [],
        [3],
        [9,8,7,2],
        [1,2,3,4,5]
    ]
    for arr in test_cases:
        merge_sort(arr)
        print(arr)
| true |
48a969f9d715f9854c5831922f190bbfdc2c3ade | Python | AlexisGfly/python_stack | /python/fundamentals/For_Loop_Basic_II.py | UTF-8 | 4,099 | 4.40625 | 4 | [] | no_license | # For Loop Basic II
# 1. biggie_size: given a list, change every positive number to "big".
# Example: biggie_size([-1, 3, 5, -5]) returns the same list, now holding
# [-1, "big", "big", -5].
def biggie_size(a):
    """Replace each strictly positive element of `a` with "big" (in place)
    and return the same list."""
    for index, value in enumerate(a):
        if value > 0:
            a[index] = "big"
    return a
print(biggie_size([-1, 3, 5, -5]))
# 2. count_positives: replace the last value of the list with the number
# of positive values (zero does not count as positive).
# Examples: [-1, 1, 1, 1] becomes [-1, 1, 1, 3];
#           [1, 6, -4, -2, -7, -2] becomes [1, 6, -4, -2, -7, 2].
def count_positives(a):
    """Overwrite the last element of `a` with the count of strictly
    positive elements (the original last element is counted first) and
    return `a`."""
    positives = sum(1 for value in a if value > 0)
    a[-1] = positives
    return a
print(count_positives([-1, 1, 1, 1]))
print(count_positives([1, 6, -4, -2, -7, -2]))
# 3. sum_total: return the sum of all values in the list.
# Examples: sum_total([1,2,3,4]) -> 10; sum_total([6,3,-2]) -> 7.
def sum_total(a):
    """Return the sum of the values in `a` (0 for an empty list)."""
    total = 0
    for value in a:
        total += value
    return total
print(sum_total([1, 2, 3, 4]))
print(sum_total([6, 3, -2]))
# 4. promedio (average): return the average of all values.
# Example: promedio([1,2,3,4]) -> 2.5.
def promedio(a):
    """Return the arithmetic mean of `a` (ZeroDivisionError when empty,
    as in the original exercise)."""
    total = 0
    for value in a:
        total += value
    return total / len(a)
print(promedio([1, 2, 3, 4]))
# 5. longitud (length): return the length of the list.
# Examples: longitud([37,2,1,-9]) -> 4; longitud([]) -> 0.
def longitud(a):
    """Return how many elements `a` contains."""
    return len(a)
print(longitud([37, 2, 1, -9]))
print(longitud([]))
# 6. minimo (minimum): return the smallest value in the list, or False
# when the list is empty.
# Examples: minimo([37,2,1,-9]) -> -9; minimo([]) -> False.
def minimo(a):
    """Return the minimum element of `a`, or False when `a` is empty."""
    if not a:
        return False
    smallest = a[0]
    for value in a:
        if value < smallest:
            smallest = value
    return smallest
print(minimo([37, 2, 1, -9]))
print(minimo([]))
# 7. maximo (maximum): return the largest value in the list, or False
# when the list is empty.
# Examples: maximo([37,2,1,-9]) -> 37; maximo([]) -> False.
def maximo(a):
    """Return the maximum element of `a`, or False when `a` is empty."""
    if not a:
        return False
    largest = a[0]
    for value in a:
        if value > largest:
            largest = value
    return largest
print(maximo([37, 2, 1, -9]))
print(maximo([]))
# 8. ultimate_analysis: return a dictionary holding the total, average,
# minimum, maximum and length of the list.
# Example: ultimate_analysis([37,2,1,-9]) ->
# {'total': 31, 'promedio': 7.75, 'minimo': -9, 'maximo': 37, 'longitud': 4}
def ultimate_analysis(a):
    """Return {'total', 'promedio', 'minimo', 'maximo', 'longitud'} for `a`
    (empty input raises IndexError on a[0], as in the original)."""
    total = 0
    highest = lowest = a[0]
    for value in a:
        total += value
        if value > highest:
            highest = value
        if value < lowest:
            lowest = value
    return {
        'total': total,
        'promedio': total / len(a),
        'minimo': lowest,
        'maximo': highest,
        'longitud': len(a),
    }
print(ultimate_analysis([37, 2, 1, -9]))
# 9. reverse_list: return the list with its values in reverse order.
# Example: reverse_list([37,2,1,-9]) -> [-9,1,2,37].
def reverse_list(a):
    """Return a new list containing the elements of `a` reversed."""
    return list(reversed(a))
print(reverse_list([37, 2, 1, -9]))
| true |
ca6a89dc1a96bd4fe1e9edf819a927f36b3e7b20 | Python | JMU-CS354-S19/numpy-sentry-robo-boiz-1 | /scripts/sentry.py | UTF-8 | 1,813 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
"""
SentryBot lets us know if an intruder walks past.
Author:
Version:
"""
import rospy
from kobuki_msgs.msg import Sound
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
class SentryNode(object):
    """Monitor a vertical scan through the depth map and create an
    audible signal if the change exceeds a threshold.

    Subscribes:
        /camera/depth_registered/image

    Publishes:
        /mobile_base/commands/sound
    """
    def __init__(self):
        """ Set up the Sentry node. """
        rospy.init_node('sentry')
        self.cv_bridge = CvBridge()
        rospy.Subscriber('/camera/depth_registered/image',
                         Image, self.depth_callback, queue_size=1)
        self.c = None  # current central depth column
        self.p = None  # previous central depth column
        self.avg = 1  # running average of frame-to-frame change
        self.alpha = .7  # smoothing factor for the running average
        self.threshold = 2  # change/avg ratio that triggers the sound
        rospy.spin()

    def depth_callback(self, depth_msg):
        """ Handle depth callbacks. """
        # Convert the depth message to a numpy array
        depth = self.cv_bridge.imgmsg_to_cv2(depth_msg)
        x, y = depth.shape
        if self.c is not None:
            self.p = self.c
        # NOTE(review): x is the ROW count here, yet it is halved to pick a
        # COLUMN -- presumably the image is assumed square (or x should be
        # y); confirm on the robot.
        self.c = depth[:, x/2] # extract central column
        if self.p is not None and self.c is not None:
            diff_arr = self.c - self.p
            # Total absolute change, ignoring NaN (unknown-depth) pixels.
            d = np.nansum(np.absolute(diff_arr))
            # Exponential moving average of the typical change.
            self.avg = self.avg * self.alpha + d * (1-self.alpha)
            #rospy.loginfo(d/self.avg)
            if d/self.avg > self.threshold:
                # A sudden change relative to the recent average: beep.
                pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=10)
                sound = Sound(0)
                pub.publish(sound)
if __name__ == "__main__":
    # Constructing the node blocks inside rospy.spin() until shutdown.
    SentryNode()
| true |
a2ff05ada6378df0f9c30ea9de83eddff2dce84d | Python | ervanalb/cesspool | /cesspool/downloadmanager.py | UTF-8 | 419 | 2.546875 | 3 | [] | no_license | class DownloadManager:
def __init__(self,downloaders=[]):
self.downloaders=downloaders
self.uid=0
def get_uid(self):
u=self.uid
self.uid+=1
return u
def instantiate(self,pool,type,args):
if 'pool' in args or 'uid' in args:
raise Exception('arg list cannot contain pool or uid')
uid=self.get_uid()
return dict([(dlr.TYPE_STRING,dlr) for dlr in self.downloaders])[type].instantiate(pool,uid,args)
| true |
417e0cee7e5df2b55e9cac2e306d2fe95058ae06 | Python | dborowy/pp1 | /10-SoftwareTesting/probne2.py | UTF-8 | 220 | 3.109375 | 3 | [] | no_license | class Miasto:
    def __init__(self,name,population):
        """Store the city's name and population size."""
        self.name = name
        self.population = population
    def __str__(self):
        """Return a Polish sentence: '<name> has a population of <population>'."""
        return f'{self.name} posiada populację o wielkości {self.population}'
| true |
3e5b8bf6bfcb1707b7fcdbb49aff9713601cfe9c | Python | glorysdj/BigDL | /python/chronos/src/bigdl/chronos/metric/forecast_metrics.py | UTF-8 | 4,048 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from numpy import ndarray
from functools import partial
from torchmetrics.functional import mean_squared_error, mean_absolute_error,\
mean_absolute_percentage_error, r2_score, symmetric_mean_absolute_percentage_error
# Mapping from lower-case metric name to the torchmetrics functional that
# computes it.  'rmse' reuses mean_squared_error with squared=False.
TORCHMETRICS_REGRESSION_MAP = {
    'mae': mean_absolute_error,
    'mse': mean_squared_error,
    'rmse': partial(mean_squared_error, squared=False),
    'mape': mean_absolute_percentage_error,
    'smape': symmetric_mean_absolute_percentage_error,
    'r2': r2_score,
}
def _standard_input(metrics, y_true, y_pred):
    """
    Standardize inputs for metric evaluation.

    Lower-cases and validates ``metrics``, checks that ``y_true``/``y_pred``
    are numpy arrays of identical shape, converts them to torch tensors and
    normalises 1-dim data to (n, 1) and 3-dim data to 2-dim.

    :return: (metrics, y_true, y_pred, original_shape); ``original_shape``
             is recorded *after* the 1-dim promotion but *before* the
             3-dim flattening, matching the expectations of ``Evaluator``.
    :raises ValueError: for an unknown metric name or mismatched shapes.
    :raises TypeError: if the inputs are not numpy arrays of the same type.
    """
    if isinstance(metrics, str):
        metrics = [metrics]
    metrics = [metric.lower() for metric in metrics]
    # Explicit raises instead of ``assert`` so validation survives `python -O`.
    if not all(metric in TORCHMETRICS_REGRESSION_MAP for metric in metrics):
        raise ValueError(f"metric should be one of {TORCHMETRICS_REGRESSION_MAP.keys()}, "
                         f"but get {metrics}.")
    if not (type(y_true) is type(y_pred) and isinstance(y_pred, ndarray)):
        raise TypeError("y_pred and y_true type must be numpy.ndarray, "
                        f"but found {type(y_pred)} and {type(y_true)}.")
    y_true, y_pred = torch.from_numpy(y_true), torch.from_numpy(y_pred)
    if y_true.shape != y_pred.shape:
        raise ValueError("y_true and y_pred should have the same shape, "
                         f"but get {y_true.shape} and {y_pred.shape}.")
    if y_true.ndim == 1:
        # Promote vectors to single-target 2-dim data.
        y_true = y_true.reshape(-1, 1)
        y_pred = y_pred.reshape(-1, 1)
        original_shape = y_true.shape
    elif y_true.ndim == 3:
        # Flatten (samples, horizon, targets) for the metric call, keeping
        # the 3-dim shape so per-target results can be restored later.
        original_shape = y_true.shape
        y_true = y_true.reshape(y_true.shape[0], y_true.shape[1] * y_true.shape[2])
        y_pred = y_pred.reshape(y_pred.shape[0], y_pred.shape[1] * y_pred.shape[2])
    else:
        original_shape = y_true.shape
    return metrics, y_true, y_pred, original_shape
class Evaluator(object):
    """
    Evaluate metrics for y_true and y_pred.
    """
    @staticmethod
    def evaluate(metrics, y_true, y_pred, aggregate='mean'):
        """
        Evaluate a specific metrics for y_true and y_pred.

        :param metrics: String or list in ['mae', 'mse', 'rmse', 'r2', 'mape', 'smape']
        :param y_true: Array-like of shape = (n_samples, \*). Ground truth (correct) target values.
        :param y_pred: Array-like of shape = (n_samples, \*). Estimated target values.
        :param aggregate: aggregation method. Currently, "mean" and None are supported,
               'mean' represents aggregating by mean, while None will return the element-wise
               result. The value defaults to 'mean'.

        :return: List with one entry per metric: a scalar when aggregating,
                 or an array with one value per individual target when
                 ``aggregate`` is None.
        """
        # _standard_input lower-cases/validates metrics, converts the arrays
        # to torch tensors and flattens 3-dim data; original_shape keeps the
        # pre-flatten shape so element-wise results can be restored below.
        metrics, y_true, y_pred, original_shape = _standard_input(metrics, y_true, y_pred)
        res_list = []
        for metric in metrics:
            if len(original_shape) in [2, 3] and aggregate is None:
                # Element-wise mode: compute the metric per target column,
                # then reshape back to the per-sample target layout.
                res = torch.zeros(y_true.shape[-1])
                for i in range(y_true.shape[-1]):
                    res[i] = TORCHMETRICS_REGRESSION_MAP[metric](y_pred[..., i], y_true[..., i])
                res = res.reshape(original_shape[1:])
            else:
                # Aggregated mode: one scalar over all elements.
                res = TORCHMETRICS_REGRESSION_MAP[metric](y_pred, y_true)
            res_list.append(res.numpy())
        return res_list
| true |
a3823a0644e6be45d63b0309f0a494461e74bd0b | Python | kuzovkov/geoservice_python | /doc/OSM/start_filter_nodes.py | UTF-8 | 1,149 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
# NOTE: Python 2 script (print statements, py2 map/filter semantics).
# Batch launcher: runs ./filter_nodes.py over every SQLite file in a folder.
# help1 is a *runtime* usage string (printed below), kept verbatim.
help1="""
Скрипт для для запуска скриптов проверки связности
графа дорожной сети преставленного как база SQLite.
Принимает имя каталога с файлами баз данных
"""
import os
import sys
import getopt
need_update = False
try:
    # -s: suppress help, -u: pass update flag through, -d <dir>: input folder.
    optlist, args = getopt.getopt(sys.argv[1:],'sud:')
    # py2: filter() returns a list, so [0] picks the first -d option pair.
    dirname = filter(lambda item: item[0]=='-d',optlist)[0][1]
except:
    print 'Usage: %s [-s] [-u] -d <dir_name> ' % sys.argv[0]
    exit(1)
if '-s' not in map(lambda item: item[0],optlist):
    print help1
if '-u' in map(lambda item: item[0],optlist):
    need_update = True
count = 0
print 'Processing file in %s...' % dirname
sqlite_files = os.listdir(dirname)
print 'Found '+str(len(sqlite_files))+' files:'
for filename in sqlite_files:
    count += 1
    print 'Processing file %s (%d / %d)' % (filename,count,len(sqlite_files))
    # NOTE(review): os.system() interpolates dirname/filename into a shell
    # command unquoted -- a file name with spaces or shell metacharacters
    # would break or be executed; consider subprocess with an arg list.
    if need_update:
        command = './filter_nodes.py -s -u -f ' + dirname+'/'+filename
    else:
        command = './filter_nodes.py -s -f ' + dirname+'/'+filename
    os.system(command)
    #print command
69919780a7dcb50f0223e64b021f46b7b1071c75 | Python | sudheesha93/Python_InClassExercise | /ICE3/Source/sortlist.py | UTF-8 | 134 | 3.28125 | 3 | [] | no_license | items=input("enter the words seperated by commas")
words=[word for word in items.split(",")]
print(",".join(sorted(list(set(words))))) | true |
3e80f9e0b3662fec5d93fa6a653e0779a89b9479 | Python | antares681/Python | /PythonFundamentals/REAL MIDEXAM RETAKE/Problem_3_x.py | UTF-8 | 1,199 | 3.46875 | 3 | [] | no_license | #SOLUTION 2
# Cupid jumps around the neighbourhood shooting two arrows per visit at each
# house, until the "Love!" command arrives.  Input: hearts needed per house,
# '@'-separated, then one "Jump <n>" command per line.
neighbrhd = [int(house) for house in input().split('@')]
cur_loc_idx = 0
neighbrhd_size = len(neighbrhd)


def _visit(idx):
    """Shoot two arrows at house ``idx`` and report its state."""
    if neighbrhd[idx] > 0:
        neighbrhd[idx] -= 2
        if neighbrhd[idx] == 0:
            print(f"Place {idx} has Valentine's day.")
    else:
        print(f"Place {idx} already had Valentine's day.")


while True:
    command = input().split(" ")
    if command[0] == "Love!":
        break
    if command[0] == "Jump":
        cur_loc_idx += int(command[1])
        # An out-of-range jump wraps back to the first house, index 0.
        if not 0 <= cur_loc_idx < neighbrhd_size:
            cur_loc_idx = 0
        # Single visit routine replaces the previously duplicated
        # in-range / wrapped-around branches.
        _visit(cur_loc_idx)

print(f"Cupid's last position was {cur_loc_idx}.")
if not sum(neighbrhd) == 0:
    print(f'Cupid has failed {neighbrhd_size - neighbrhd.count(0)} places.')
else:
    print(f'Mission was successful.')
1c12a638df8502a025d96ea2fd65fedd390de788 | Python | srafanz/loto | /tiragesExistants.py | UTF-8 | 3,491 | 3.03125 | 3 | [] | no_license | # coding=UTF-8
from fonction import *
import os
#########################
## GENERAL DEFINITIONS ##
#########################
# The different grids: hot, cold, random, constant, and the real draw.
grilleChaud = []
grilleFroid = []
grilleHasard = []
grilleConstante = []
grilleFinale = []
# The different histograms (ball frequencies and per-strategy gains).
histoBoules = CREATION_histoBoules()
histoBoulesTrie = []
histoGainsChaud = CREATION_histoGains()
histoGainsFroid = CREATION_histoGains()
histoGainsHasard = CREATION_histoGains()
histoGainsConstante = CREATION_histoGains()
# Running winnings per strategy, total stake spent, and current draw date.
gainsChauds = 0
gainsFroids = 0
gainsHasards = 0
gainsConstants = 0
coutTotal = 0
date = ""
# Load every draw already made from the draws file.
listeTirage = CREATION_donneesDepuisFichier(NOM_FICHIER_TIRAGES)
positionTirage = 0
##################################
##################################
### REPLAY OF EXISTING DRAWS #####
##################################
##################################
# Skip playing the very first draw so the histograms have data to start from.
grilleFinale = listeTirage[positionTirage][0]
histoBoules = MAJ_histoBoules(grilleFinale, histoBoules)
grilleConstante = CREATION_grilleHasard() # Generate the constant grid once
positionTirage += 1
while(positionTirage < len(listeTirage)):
    # Build this round's grids from the accumulated ball statistics.
    histoBoulesTrie = TRIE_histoBoules(histoBoules)
    grilleChaud, grilleFroid = CREATION_grillesChaudFroid(histoBoulesTrie)
    grilleHasard = CREATION_grilleHasard()
    grilleFinale = listeTirage[positionTirage][0]
    date = listeTirage[positionTirage][1]
    # Score every strategy's grid against the real draw.
    histoGainsChaud, gainsChauds = MAJ_histoGain_GAIN(
        grilleChaud, grilleFinale, histoGainsChaud, gainsChauds)
    histoGainsFroid, gainsFroids = MAJ_histoGain_GAIN(
        grilleFroid, grilleFinale, histoGainsFroid, gainsFroids)
    histoGainsHasard, gainsHasards = MAJ_histoGain_GAIN(
        grilleHasard, grilleFinale, histoGainsHasard, gainsHasards)
    histoGainsConstante, gainsConstants = MAJ_histoGain_GAIN(
        grilleConstante, grilleFinale, histoGainsConstante, gainsConstants)
    coutTotal += MISE
    # Optional per-draw display, kept for debugging:
    # os.system("clear")
    # print("## Tirage du {} ##".format(date))
    # AFFICHE_grille(grilleFinale)
    # print("\n\n# Grille Chaude")
    # AFFICHE_grille(grilleChaud)
    # print("Gain : {} €".format(round(gainsChauds-coutTotal, 1)))
    # print("\n# Grille Froide")
    # AFFICHE_grille(grilleFroid)
    # print("Gain : {} €".format(round(gainsFroids-coutTotal, 1)))
    # print("\n# Grille Constantes")
    # AFFICHE_grille(grilleConstante)
    # print("Gain : {} €".format(round(gainsConstants-coutTotal, 1)))
    # print("\n# Grille Aléatoire")
    # AFFICHE_grille(grilleHasard)
    # print("Gain : {} €".format(round(gainsHasards-coutTotal, 1)))
    # Fold the real draw into the ball statistics and advance.
    histoBoules = MAJ_histoBoules(grilleFinale, histoBoules)
    positionTirage += 1
# Final summary: net gain (winnings minus stake) per strategy.
os.system("clear")
print("## Simulation avec tirages existants ##")
print("{} grilles de jouées pour un total de {}€\n".format(
    len(listeTirage)-1, coutTotal))
print("\tChaud -> {}€ ".format(round(gainsChauds-coutTotal, 1)))
AFFICHE_histogrammeGains(histoGainsChaud, debut="\t\t")
print("\n\tFroid -> {}€ ".format(round(gainsFroids-coutTotal, 1)))
AFFICHE_histogrammeGains(histoGainsFroid, debut="\t\t")
print("\n\tHasard -> {}€ ".format(round(gainsHasards-coutTotal, 1)))
AFFICHE_histogrammeGains(histoGainsHasard, debut="\t\t")
print("\n\tConstant -> {}€ ".format(round(gainsConstants-coutTotal, 1)))
AFFICHE_histogrammeGains(histoGainsConstante, debut="\t\t")
| true |
4eeab421a31099a831a438943c05d8a24a613333 | Python | alexfoglia1/PhysicsSimulations | /rocket/rocket.py | UTF-8 | 2,310 | 2.8125 | 3 | [] | no_license | from time import time, sleep
from math import log
SCREEN_WIDTH = 1200
import sys
if sys.version_info > (3, 0):
from tkinter import *
import tkinter.ttk as ttk
else:
from Tkinter import *
import ttk
from threading import Thread
callback_args = []
def mthread(positions, speeds, accs, env, dt_s):
    """Animate the rocket on the canvas: redraw one frame per position.

    positions/speeds/accs are parallel lists from the simulation; ``env`` is
    the Tk canvas to draw the rocket on; ``dt_s`` is the frame delay.
    Runs on a worker thread (started from keydown).
    """
    y0 = 150  # fixed vertical position of the rocket line
    i = 0
    for x in positions:
        env.delete("rocket")
        # Wrap the rocket horizontally so it stays on screen.
        x0_screen = (x + 20) % SCREEN_WIDTH
        xf_screen = x0_screen + 20
        env.create_line(x0_screen, y0, xf_screen, y0, tag="rocket", fill="blue", width=5)
        # NOTE(review): text is drawn on the module-level canvas ``w``, not
        # on ``env`` -- presumably the same object; confirm.
        w.create_text(100, 10, text="Speed {} m/s".format(round(speeds[i],4)), tag="rocket", fill="black", font=("Courier", 14))
        w.create_text(400, 10, text="Acc {} m/s^2".format(round(accs[i],4)), tag="rocket", fill="black", font=("Courier", 14))
        #print("{}".format(x))
        sleep(dt_s)
        i += 1
def keydown(e):
    """<Return> key handler: launch the animation on a daemon thread.

    Unpacks the module-level callback_args list (positions, speeds, accs,
    canvas, dt) populated at the bottom of the script.
    """
    positions = callback_args[0]
    speeds = callback_args[1]
    accs = callback_args[2]
    env = callback_args[3]
    dt_s = callback_args[4]
    t = Thread(target=mthread, args=[positions, speeds, accs, env, dt_s])
    # Daemon thread so the animation does not keep the process alive
    # after the Tk window is closed.
    t.daemon = True
    t.start()
# Physical parameters: dry mass, fuel mass, exhaust speed, time step and
# fuel burn rate.
m_mec_kg = 1e4
m_c_kg = 1e3
u_kms = 1
dt_s = 0.01
dm_dt = 10
# Tsiolkovsky rocket equation: final speed from initial speed, exhaust
# speed u and the mass ratio mi/mf.
vf = lambda vi,u,mi,mf : vi + u*log(mi/mf)
vi_ms = 200
u_ms = u_kms * 1000
mi_kg = m_mec_kg + m_c_kg
vmax = vf(vi_ms, u_ms, mi_kg, m_mec_kg)
# Integrate the burn: step the mass down by dm_dt*dt each tick and apply
# the rocket equation incrementally until the fuel (act_ref) runs out.
actv_ms = vi_ms
actm_kg = mi_kg
act_ref = actm_kg - m_mec_kg
act_t_s = 0
speeds = [vi_ms]
accs = [0]
while act_ref > 0:
    nextm_kg = actm_kg - (dm_dt * dt_s)
    nextv_ms = vf(actv_ms, u_ms, actm_kg, nextm_kg)
    speeds.append(nextv_ms)
    accs.append((nextv_ms - actv_ms) / dt_s)
    actm_kg = nextm_kg
    actv_ms = nextv_ms
    act_ref = actm_kg - m_mec_kg
    act_t_s += dt_s
# Integrate speed once more to obtain positions for the animation.
positions_m = [0]
for v_ms in speeds:
    next_pos = positions_m[-1] + dt_s * v_ms
    positions_m.append(next_pos)
# Tk window with a canvas; <Return> starts the animation (see keydown).
master = Tk()
master.title("Rocket Motion")
master.geometry("{}x{}".format(SCREEN_WIDTH,400))
w = Canvas(master, width=SCREEN_WIDTH, height=300)
master.bind("<Return>", keydown)
w.place(x=0, y=0)
w.create_line(0, 150, 20, 150, tag="rocket", fill="blue", width=5)
# Hand the precomputed trajectories to the key handler.
callback_args.append(positions_m)
callback_args.append(speeds)
callback_args.append(accs)
callback_args.append(w)
callback_args.append(dt_s)
master.mainloop()
| true |
311a9e92d00635fe858ac1e4779b374182afcd24 | Python | Minitour/file_printer | /main.py | UTF-8 | 3,060 | 3.171875 | 3 | [
"MIT"
] | permissive | from six.moves import urllib
from subprocess import Popen
import os
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# A test pdf to test printer
print_sample_url = 'http://unec.edu.az/application/uploads/2014/12/pdf-sample.pdf'
# Fallback text written to a local temp file when the sample URL is down.
print_sample_text = 'The quick brown fox jumps over the lazy dog.'
# The supported formats (file extensions accepted by validate_file).
formats = ['pdf',
           'txt',
           'csv']
def print_file(file, remove):
    """
    Prints a file at a certain path.

    :param file: The path of the file.
    :param remove: Should the file be deleted after printing.
    """
    # Call the system's lpr command with an argument list and shell=False:
    # the previous 'lpr -#1 ' + file with shell=True let a file name
    # containing spaces, quotes or ';' be interpreted as extra shell
    # commands (command injection), and also mis-split the arguments.
    p = Popen(["lpr", "-#1", file])
    # Without stdout=PIPE communicate() returns (None, None); kept as-is
    # to preserve the original console output.
    output = p.communicate()[0]
    print('Console Output: ')
    print(output)
    if remove:
        # delete file
        os.remove(file)
def get_file(path_or_url, type_, is_test):
    """
    Resolve a printable file from a local path or a URL.

    :param path_or_url: The path of the file or the url.
    :param type_: The file format (extension without the dot).
    :param is_test: Whether to fall back to a generated sample file when
        the URL is unreachable.
    :return: tuple (path, delete_after): ``delete_after`` tells the caller
        whether the file is temporary and should be removed after printing.
    """
    # Local paths: resolve to an absolute path, nothing to clean up.
    if not path_or_url.startswith('http'):
        return os.path.abspath(path_or_url), False
    # URL: download into a temporary file when the host is reachable.
    if connection_test(path_or_url):
        temp_name = 'temp.' + type_
        urllib.request.urlretrieve(path_or_url, temp_name)
        return temp_name, True
    # Unreachable: in test mode fall back to the generated sample file.
    if is_test:
        return make_def_file(), True
    return None, False
def validate_file(file):
    """
    Check whether ``file`` ends in one of the supported extensions.

    :param file: The file name which we are validating.
    :return: (is_valid, format) -- ``format`` is the matching extension
        from the module-level ``formats`` list, or None.
    """
    lowered = file.lower()
    match = next((fmt for fmt in formats if lowered.endswith('.' + fmt)), None)
    return (match is not None), match
def connection_test(url):
    """
    Check if we can reach a url.

    :param url: The url to test.
    :return: True if the url answered within one second, False otherwise.
    """
    try:
        # Bug fix: urlopen lives in urllib.request (both in the stdlib and
        # in six.moves.urllib); the old ``urllib.urlopen`` attribute does
        # not exist and raised AttributeError before the check could run.
        urllib.request.urlopen(url, timeout=1)
        return True
    except urllib.error.URLError:
        # Likewise URLError is defined in urllib.error, not on the package.
        return False
def make_def_file():
    """Write the sample text to temp.txt and return that file name."""
    name = 'temp.txt'
    # ``with`` guarantees the handle is closed even if the write raises;
    # the original open/write/close leaked the handle on error.
    with open(name, 'w') as file:
        file.write(print_sample_text)
    return name
def main():
    """Parse argv, validate the target, fetch it if remote, and print it."""
    is_test = False
    # check if there was an argument passed down
    if len(sys.argv) >= 2:
        arg = sys.argv[1]
        # Bug fix: the original compared with ``arg is 'test'`` -- an
        # identity check that is effectively always False for strings
        # read from sys.argv, so test mode could never trigger.
        if arg == 'test':
            url_to_pdf = print_sample_url
            is_test = True
        else:
            url_to_pdf = arg
    # else do nothing
    else:
        url_to_pdf = ""
        print('Nothing to print!')
        sys.exit(0)
    # validate file format
    validation = validate_file(url_to_pdf)
    if validation[0]:
        data = get_file(url_to_pdf, validation[1], is_test)
        if data[0] is not None:
            print_file(data[0], data[1])
        else:
            print('Failed to connect to ' + url_to_pdf)
    else:
        print('Invalid file!')
# Run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| true |
62b39ff6149c1f2eb9525f538608586c09997c0f | Python | Nmazil-Dev/Learning | /eels.py | UTF-8 | 2,174 | 4.34375 | 4 | [] | no_license | # This is a practice exercise for Codecademy on the Rock Paper Scissor Module
# NOTE: Python 2 script (print statements, raw_input).
# this imports randint from random
from random import randint
# The two possible outcomes; index 0 = EELS (down), index 1 = ESCALATORS (up).
options = ["EELS", "ESCALATORS"]
# ``messages`` maps outcome keys to the announcement strings shown to the
# user: "up"/"down" for the player, "up1"/"down1" for the computer.
import time
messages = {
    "down": "Looks like you got %s!" % options[0].lower() + """
Down
Down
Down!""",
    "up": "Looks like you got %s!" % options[1].lower() + """
Up
UP
AND AWAY!""",
    "down1": "Looks like the computer got %s!" % options[0].lower() + """
Down
Down
Down!""",
    "up1": "Looks like the computer got %s!" % options[1].lower() + """
Up
UP
AND AWAY!""",
}
# Announce the player's and the computer's draws, report the result, and
# offer a replay (which recurses back into play()).
def choice(user_choice, computer_choice):
    print "You chose %s!" % user_choice.lower()
    # Player's announcement: ESCALATORS -> "up", anything else -> "down".
    if user_choice == options[1]:
        print messages["up"]
        time.sleep(1)
        print
    else:
        print messages["down"]
        time.sleep(1)
        print
    print "The computer chose %s" % computer_choice.lower()
    # Computer's announcement, same rule with the "...1" keys.
    if computer_choice == options[1]:
        print messages["up1"]
        time.sleep(1)
        print
    else:
        print messages["down1"]
        time.sleep(1)
        print
    # Summary line: tie vs. differing draws.
    if user_choice == computer_choice:
        print "You both got %s!" % user_choice.lower()
    elif user_choice != computer_choice:
        print "You got %s!" % user_choice + " The computer got %s!" % computer_choice.lower()
    print
    print "Play again?"
    yn = raw_input("Enter yes or no: ").upper()
    if yn == "YES":
        print "Excellent!"
        # Mutual recursion: play() will call choice() again.
        return play()
    else:
        print "Thanks for playing!"
        time.sleep(1)
# This function will play the game: read the player's pick, draw a random
# one for the computer, and hand both to choice() for announcement.
def play():
    user_choice = raw_input("Enter eels or escalators!: ").upper()
    computer_choice = options[randint(0, 1)]
    choice(user_choice, computer_choice)
name = raw_input("Enter name: ")
print "Starting......"
time.sleep(2)
print
print
print "~Welcome to Eels and Escalators, %s!~" % name
print
play()
| true |
18aa31f3c22d6f46d2463756a18616a074014107 | Python | Luccifer/PythonCourseraHSE | /w03/e12.py | UTF-8 | 513 | 3.828125 | 4 | [
"Unlicense"
] | permissive | # Система линейных уравнений - 1
def system_of_linear_equations_1(a, b, c, d, e, f):
    """Solve  a*x + b*y = e,  c*x + d*y = f  by Cramer's rule.

    Returns the tuple (x, y).  Raises ZeroDivisionError when the main
    determinant a*d - c*b is zero (no unique solution).
    """
    det_main = a * d - c * b
    det_x = e * d - f * b
    det_y = a * f - c * e
    return det_x / det_main, det_y / det_main
# Script entry point: read the six coefficients from stdin, print x and y.
if __name__ == '__main__':
    a = float(input())
    b = float(input())
    c = float(input())
    d = float(input())
    e = float(input())
    f = float(input())
    print(*system_of_linear_equations_1(a, b, c, d, e, f))
| true |
ca1f785583c2b3a37ba6caaadd549e5466681ae8 | Python | edsonw/myHogwild | /myHogwildsgd/src/test.py | UTF-8 | 2,146 | 3.09375 | 3 | [] | no_license | import unittest
from hogwildsgd import HogWildRegressor
import scipy.sparse
import numpy as np
class TestHogwild(unittest.TestCase):
    """
    loadData: load the training data from input/w8a.txt
    test_work: set the parameters and test hogwildsgd! first fit the X,y then predict the X and caculate Accuracy
    """
    def loadData(self):
        """Parse the libsvm-style w8a file into a dense 0/1 feature matrix
        X (59245 x 300) and a label column y (+1/-1)."""
        X = np.zeros((59245, 300), int)
        y = np.zeros((59245, 1), int)
        index = 0
        with open('../input/w8a.txt','r') as file:
            for line in file:
                # Each line: "<label> <idx:val> <idx:val> ... "
                item = line.split(" ")
                y[index] = int(item[0])
                # NOTE(review): range stops at len(item)-1, deliberately
                # skipping the final split token -- presumably a trailing
                # space/newline artefact of the file; confirm against w8a.txt.
                for i in range(1, len(item)-1):
                    splitedPair = item[i].split(":")
                    # libsvm indices are 1-based; features are set to 1.
                    X[index][int(splitedPair[0]) - 1] = 1
                index += 1
        return X, y
    def test_work(self):
        """Interactively (optionally) override hyperparameters, fit the
        regressor, and print the sign-agreement accuracy on the train set."""
        # Defaults used unless the user opts in to custom values below.
        iterations = 100
        step = 0.005
        decay = (step / 2) / iterations
        batch_size = 500
        n_jobs = 4
        custom = input("Enter 'y/n' to choose whether to change the default parameters\n")
        if custom == 'y':
            iterations = int(input("Enter Iterations: "))
            print()
            step = float(input("Enter Step size: "))
            print()
            decay = float(input("Enter Decay: "))
            print()
            n_jobs = int(input("Enter Max Threads: "))
            print()
            batch_size = int(input("Enter Batch: "))
            print()
        X, y = self.loadData()
        # chunk_size 14812 ~= 59245 / 4 rows per worker chunk.
        hw = HogWildRegressor(n_jobs = n_jobs,
                              iterations = iterations,
                              batch_size = batch_size,
                              step_size = step,
                              decay = decay,
                              chunk_size = 14812)
        hw = hw.fit(X,y)
        y_hat = hw.predict(X)
        y = y.reshape((len(y),))
        # Accuracy = fraction of predictions whose sign matches the label.
        count = 0
        for i in range(len(y)):
            if y_hat[i] < 0 and y[i] < 0:
                count += 1
            elif y_hat[i] > 0 and y[i] > 0:
                count += 1
        print(count / len(y))
# Let unittest discover and run TestHogwild when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
b6b1b70d9cf6f2f7b2396449f939b737e9af6173 | Python | langenhagen/experiments-and-tutorials | /Python/attrs-hello/attrs-hello.py | UTF-8 | 780 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env python
"""Showcase basic usage of the 3rd party library `attrs`.
@author: andreasl
@version: 2021-02-08
"""
import attr
@attr.s()
class MyAttrClass:
"""Similar to dataclass but with extra features."""
my_field = attr.ib()
my_other_field = attr.ib(default=None)
print("--- 1 simple usage ---")
o = MyAttrClass(42, "answer to wh0t")
print(f"{o=}")
print(f"{o.my_field=}")
print(f"{o.my_other_field=}")
p = MyAttrClass(32)
print(f"{p=}")
# q = MyAttrClass() # doesn't work
print("--- 2 convert attr class to dict ---")
d = attr.asdict(o)
print(d)
print("--- 3 copy an object from another object ---")
o2 = attr.evolve(o)
print(f"{o2=}")
print(f"{o2.my_field=}")
print(f"{o2.my_other_field=}")
assert o is not o2 # o and o2 are different objects
| true |
584c3ba2500585e7b06a94a4d34f871a1c99e640 | Python | 981377660LMT/algorithm-study | /14_并查集/经典题/公因数并查集/1722. 执行交换操作后的最小汉明距离.py | UTF-8 | 1,297 | 3.671875 | 4 | [] | no_license | # https://leetcode.cn/problems/minimize-hamming-distance-after-swap-operations/
# 每个 allowedSwaps[i] = [ai, bi] 表示你可以交换数组 source 中下标为 ai 和 bi.
# 相同长度的两个数组 source 和 target 间的 汉明距离 是元素不同的下标数量.
# !在对数组 source 执行 任意 数量的交换操作后,返回 source 和 target 间的 最小汉明距离 。
# 1. 并查集获取帮派邻接表.
# 2. 计算每个连通块对应的source元素与target的交集.
from collections import Counter
from typing import List
from 埃氏筛和并查集 import UnionFindArray
class Solution:
    def minimumHammingDistance(
        self, source: List[int], target: List[int], allowedSwaps: List[List[int]]
    ) -> int:
        """Minimum Hamming distance after freely swapping within the
        connected components induced by allowedSwaps.

        Indices linked through allowedSwaps form a group whose source
        values may be permuted arbitrarily, so the best achievable match
        per group is the multiset intersection with target.
        """
        size = len(source)
        dsu = UnionFindArray(size)
        for left, right in allowedSwaps:
            dsu.union(left, right)
        matched = 0
        for indices in dsu.getGroups().values():
            have = Counter(source[idx] for idx in indices)
            want = Counter(target[idx] for idx in indices)
            matched += sum((have & want).values())
        return size - matched
# source = [1,2,3,4], target = [2,1,4,5], allowedSwaps = [[0,1],[2,3]]
# Smoke test; with groups {0,1} and {2,3} the expected output is 1.
print(Solution().minimumHammingDistance([1, 2, 3, 4], [2, 1, 4, 5], [[0, 1], [2, 3]]))
| true |
3f7f4312a4e72740b1918ae60b956747b407a748 | Python | c-yan/atcoder | /abc/abc048/abc048a.py | UTF-8 | 45 | 2.5625 | 3 | [
"MIT"
] | permissive | s = input().split()[1]
# Surround the first character of the middle word with 'A' and 'C'.
print('A' + s[0] + 'C')
| true |
99cc825753c58e2b8f91b3ea193662a04e6c8b88 | Python | giosermon/holberton-system_engineering-devops | /0x15-api/1-export_to_CSV.py | UTF-8 | 901 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python3
"""using a REST API, for a given employee ID, returns information about
his/her TODO list progress"""
import csv
import requests
from sys import argv
if __name__ == "__main__":
"""Your code should not be executed when imported"""
user_id = argv[1]
todos = requests.get(
"http://jsonplaceholder.typicode.com/todos?userId={}".format(
user_id))
user = requests.get(
"http://jsonplaceholder.typicode.com/users/{}".format(
user_id))
with open('{}.csv'.format(user_id), "w") as output:
writer = csv.writer(output, delimiter=',', quoting=csv.QUOTE_ALL)
for tarea in todos.json():
data = [
user.json().get('id'),
user.json().get('username'),
tarea.get('completed'),
tarea.get('title')
]
writer.writerow(data)
| true |
6fb009d0f94786ff5150d038028856d2859686ee | Python | ledinhtri97/machine-learning-python | /GradientDescent/GD_multi.py | UTF-8 | 3,200 | 3.296875 | 3 | [] | no_license | import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Draw axis tick marks pointing outward on both axes.
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
# Define cost fundtion
def cost(w):
    """Objective f(x, y) = (x^2 + y - 7)^2 + (x - y + 1)^2 for w = (x, y)."""
    x, y = w[0], w[1]
    term_a = x ** 2 + y - 7
    term_b = x - y + 1
    return term_a ** 2 + term_b ** 2
# Defin grad function
def grad(w):
    """Analytic gradient of ``cost`` at w = (x, y)."""
    x, y = w[0], w[1]
    residual = x ** 2 + y - 7  # shared inner term of the first square
    g = np.zeros_like(w)
    # d/dx of (x^2 + y - 7)^2 by the chain rule, plus d/dx of (x - y + 1)^2.
    g[0] = 2 * residual * 2 * x + 2 * (x - y + 1)
    # d/dy of both squared terms.
    g[1] = 2 * residual + 2 * (y - x - 1)
    return g
# Caculate grad using numerical method
def numerical_grad(w, cost):
    """Central-difference approximation of the gradient of ``cost`` at w."""
    eps = 1e-6
    g = np.zeros_like(w)
    for i in range(len(w)):
        bumped_up = w.copy()
        bumped_down = w.copy()
        bumped_up[i] += eps
        bumped_down[i] -= eps
        # Symmetric difference quotient in coordinate i.
        g[i] = (cost(bumped_up) - cost(bumped_down)) / (2 * eps)
    return g
# Compare numerical method with formular
def check_grad(w, cost, grad):
    """Compare analytic vs. numerical gradient at a random point.

    Only the *shape* of ``w`` is used; the comparison itself runs at a
    fresh uniform-random point of the same shape.
    """
    probe = np.random.rand(w.shape[0], w.shape[1])
    analytic = grad(probe)
    numeric = numerical_grad(probe, cost)
    return bool(np.linalg.norm(analytic - numeric) < 1e-4)
# Check result
# Sanity-check the analytic gradient against the numerical one at a
# random point of the same (2, 1) shape.
w = np.random.randn(2, 1)
# w_init = np.random.randn(2, 1)
print('Checking gradient...', check_grad(w, cost, grad))
# GD
def gd(w_init, grad, eta):
    """Plain gradient descent with fixed step size ``eta``.

    Runs at most 100 iterations, stopping early once the mean gradient
    norm at the candidate point drops below 1e-3.  Returns
    (trajectory, last_iteration_index); note the converged candidate is
    *not* appended to the trajectory.
    """
    trajectory = [w_init]
    it = 0
    for it in range(100):
        candidate = trajectory[-1] - eta * grad(trajectory[-1])
        if np.linalg.norm(grad(candidate)) / len(candidate) < 1e-3:
            break
        trajectory.append(candidate)
        # print('iter %d: ' % it, trajectory[-1].T)
    return trajectory, it
# Define a eta
eta = 0.016
# Get GD result
w_init = np.array([[-5], [-5]])
# w_init = np.random.randn(2, 1)
w1, it1 = gd(w_init, grad, eta)
print(w1[-1])
# Create point
delta = 0.025
x = np.arange(-6.0, 5.0, delta)
y = np.arange(-20.0, 15.0, delta)
X, Y = np.meshgrid(x, y)
Z = (X ** 2 + Y - 7) ** 2 + (X - Y + 1) ** 2
# Create w0
# Caculate w using gd
w_init = np.array([[-5], [-5]])
w, it = gd(w_init, grad, eta)
# Another w0
w_init = np.array([[0], [6]])
w2, it = gd(w_init, grad, eta)
# Create plot view
fig, ax = plt.subplots(figsize=(8, 5))
plt.cla()
plt.axis([1.5, 6, 0.5, 4.5])
# x0 = np.linspace(0, 1, 2, endpoint=True)
title = '$f(x, y) = (x^2 + y -7)^2 + (x - y + 1)^2$'
# animation
def update(ii):
    """FuncAnimation callback: draw the contour background on frame 0 and
    then trace both GD trajectories (module globals w / w2) frame by frame."""
    if ii == 0:
        plt.cla()
        # Contour levels: dense near the minimum, sparser further out.
        CS = plt.contour(X, Y, Z, np.concatenate((np.arange(0.1, 50, 5), np.arange(60, 200, 10))))
        manual_locations = [(-4, 15), (-2, 0), (1, .25)]
        animlist = plt.clabel(CS, inline=.1, fontsize=10, manual=manual_locations)
        animlist = plt.title('$f(x, y) = (x^2 + y -7)^2 + (x - y + 1)^2$')
        # NOTE(review): these green dots look like intended minima markers;
        # confirm the coordinates against the actual stationary points.
        plt.plot([-3, 2], [-2, 3], 'go')
    else:
        # Red segments connecting consecutive iterates of each trajectory.
        animlist = plt.plot([w[ii - 1][0], w[ii][0]], [w[ii - 1][1], w[ii][1]], 'r-')
        animlist = plt.plot([w2[ii - 1][0], w2[ii][0]], [w2[ii - 1][1], w2[ii][1]], 'r-')
    # Connect 2 point with a line
    animlist = plt.plot(w[ii][0], w[ii][1], 'ro')
    animlist = plt.plot(w2[ii][0], w2[ii][1], 'ro')
    # Retext label
    xlabel = '$\eta =$ ' + str(eta) + '; iter = %d/%d' % (ii, it)
    ax.set_xlabel(xlabel)
    return animlist, ax
# One animation frame per GD iteration, then hand control to the GUI loop.
anim1 = FuncAnimation(fig, update, frames=np.arange(0, it), interval=200)
plt.show()
| true |
e98e70c0b5a0252535dd3399e699efb0f68e3106 | Python | Cprocc/Week_test | /DyP/动态子结构.py | UTF-8 | 457 | 3.125 | 3 | [] | no_license | s1 = 'AB12N3B4V5A6'
s2 = '123456AA'
m, n = len(s1), len(s2)
# DP table: matrixR[i][j] is intended as the longest common subsequence
# length of s1[..i] and s2[..j].
matrixR = [[0]*n for i in range(m)]
print(matrixR)
for i in range(0, m):
    for j in range(0, n):
        if i == 0 or j == 0:
            # NOTE(review): boundary cells only record a match at the cell
            # itself; an earlier match in row 0 / column 0 is not carried
            # forward, which can undercount the LCS for some inputs --
            # the standard recurrence takes the max with the neighbour here.
            matrixR[i][j] = 1 if (s1[i] == s2[j]) else 0
        else:
            # Match: diagonal + 1 (max'ed with left/up); otherwise the
            # best of the left and upper neighbours.
            matrixR[i][j] = max(matrixR[i-1][j-1] + 1, matrixR[i-1][j], matrixR[i][j-1]) if s1[i] == s2[j] else max(matrixR[i-1][j], matrixR[i][j-1])
            print(i, j)  # debug trace of visited inner cells
print(matrixR[m-1][n-1])
| true |
65d903a8144f769617feb2144892e53252820838 | Python | abhishek0220/library-management | /Library/Models/publisher.py | UTF-8 | 1,089 | 2.640625 | 3 | [] | no_license | from Library import db
from sqlalchemy.orm import relationship
class PublisherModel(db.Model):
    """SQLAlchemy model for a book publisher."""

    __tablename__ = "publishers"

    p_id = db.Column(db.Integer, primary_key=True)
    p_name = db.Column(db.String(120), nullable=False)
    address = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(13), nullable=False)
    # One-to-many link to books; BooksModel.publishers is the other side.
    books = relationship("BooksModel", back_populates="publishers")

    def __init__(self, name, address, phone):
        self.p_name = name
        self.address = address
        self.phone = phone

    def __repr__(self):
        return '<Publisher name %r>' % self.p_name

    def save_to_db(self):
        """Add this publisher to the session and commit."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def all_publisher(cls):
        """Return all publishers as {'publishers': [...]}, or None when the
        query fails."""
        def to_json(x):
            return {
                'p_id': x.p_id,
                'p_name': x.p_name,
                'address': x.address,
                'phone': x.phone
            }
        try:
            return {'publishers': [to_json(x) for x in cls.query.all()]}
        # Bug fix: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors.
        except Exception:
            return None
39949aed501ea632e00560aeb3dd3b1fb34b2ed8 | Python | Jarvis7923/raibert-hopper-sim | /src/sim.py | UTF-8 | 3,485 | 2.71875 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Slider, Button, RadioButtons
import numpy as np
from scipy.integrate import odeint
from enum import Enum
import threading
import time
from src.vis import vis
class msg_type(Enum):
    """Severity levels for console log messages (see show_loginfo)."""
    error = 0
    system = 1
    info = 2
def show_loginfo(msgtype, msg, end='\n'):
    """Print ``msg`` with an ANSI-coloured severity tag chosen by ``msgtype``."""
    reset = '\033[0m'
    if msgtype is msg_type.error:
        tag = '\033[1;31m[ERROR]' + reset       # bold red tag
        body = ' \033[1m' + msg + reset          # bold message
    elif msgtype is msg_type.system:
        tag = '\033[1;35m[SYSTEM]' + reset      # bold magenta tag
        body = ' \033[4m' + msg + reset          # underlined message
    elif msgtype is msg_type.info:
        tag = '\033[1;32m[INFO]' + reset        # bold green tag
        body = ' ' + msg + reset
    print(tag + body, end=end)
class sim:
    """Fixed-step simulation driver: integrates a robot's dynamics with
    odeint and (optionally) runs the matplotlib-based visualiser on the
    main thread while dynamics run on a worker thread."""

    def __init__(self,
                 dt=0.001,
                 g=9.81,
                 damping=1e-5,
                 vis=True
                 ):
        """Store physics parameters; ``vis`` toggles the GUI front-end."""
        show_loginfo(msg_type.system, "Simulation initializing ... ")
        self._init_physics(dt, g, damping)
        self._stop = False
        self._vis_flag = vis

    def __del__(self):
        show_loginfo(msg_type.system, "Simulation Ends... ")

    def _init_physics(self, dt, g, damping):
        # Fixed integration step, gravity and body damping.
        self._dt, self._g, self._damping = dt, g, damping
        show_loginfo(
            msg_type.info, "Physics Parameters: \n\t fixed time step: {0} sec\n\t gravity: {1} m/s^2\n\t body damping: {2} N/(m/s)".format(dt, g, damping))

    def _init_graphics(self):
        """Create the figure and the project-local ``vis`` front-end."""
        show_loginfo(msg_type.system, "Graphics Initializing... ")
        self._fig = plt.figure()
        self._vis = vis(self._fig, self._rd)
        show_loginfo(msg_type.system, "Graphics Ready... ")

    def spawn(self, rd, pos):
        """Attach robot ``rd``, push physics parameters into it, set its
        initial state ``pos`` and, when enabled, build the graphics."""
        rd.g, rd.damping = self._g, self._damping
        self._rd = rd
        self._rd.set_state(pos)
        self._rd.dt = self._dt
        if self._vis_flag:
            self._init_graphics()
        show_loginfo(msg_type.info, "Robot spawn at:\n\t{0}".format(pos))

    def run(self):
        """Run the simulation.  With graphics: dynamics on a worker thread,
        the blocking GUI loop on this thread; closing the window sets
        _stop and joins the worker.  Without graphics: run inline."""
        show_loginfo(msg_type.system, "Dynamics loop initializing ... ")
        if self._vis_flag:
            self._thread = threading.Thread(target=self._run_dynamics, args=())
            self._thread.start()
            self._vis.show()
            self._stop = True
            self._thread.join()
            show_loginfo(msg_type.system, "Simulation terminating ... ")
        else:
            self._run_dynamics()
            show_loginfo(msg_type.system, "Simulation terminating ... ")

    def _run_dynamics(self):
        """Real-time integration loop: one odeint step of length dt per
        tick, sleeping off any time left in the tick."""
        time.sleep(2)  # give the GUI a moment to come up first
        show_loginfo(msg_type.system, "Dynamics start")
        dt = self._dt
        while not(self._stop):
            t0 = time.time()
            tspan = [0, dt]
            tau = self._rd.controller_func()
            s0 = self._rd.state
            # NOTE(review): s0[1] < 0 aborts the run -- presumably the
            # vertical coordinate going below ground; confirm state layout.
            if s0[1] < 0:
                show_loginfo(msg_type.error, "Dynamics error!")
                break
            # s += dt*np.array(self._rd.model(s, tau=tau))
            sol = odeint(self._rd.model, s0, tspan, args=(tau,))
            self._rd.set_state(sol[-1])
            self._rd.time_elapsed += dt
            # show_loginfo(msg_type.info, "curent state: {0}".format(self._rd.state), end='\r')
            t1 = time.time()
            # Sleep the remainder of the tick to track wall-clock time.
            interval = dt - (t1 - t0)
            if interval > 0 :
                time.sleep(interval)
        print()
        show_loginfo(msg_type.system, "Dynamics loop terminating... ")
| true |
1d6bea3a4bf9a4cfb578739bf84c7386c845f937 | Python | joeyqlim/cs50 | /week7/houses/import.py | UTF-8 | 1,377 | 3.171875 | 3 | [] | no_license | from cs50 import SQL
from csv import DictReader
from sys import argv, exit
# Check command line args and file type
if len(argv) < 2:
    print("Usage: python import.py characters.csv")
    exit()
else:
    # NOTE(review): .tsv is accepted here but DictReader below parses with
    # the default comma delimiter -- confirm .tsv support is intended.
    if not (argv[1].endswith(".csv") or argv[1].endswith(".tsv")):
        print("Usage: python import.py characters.csv")
        exit()
# Create database by opening and closing an empty file first
open(f"students.db", "w").close()
db = SQL("sqlite:///students.db")
# Create table called students and specify columns
db.execute("CREATE TABLE students (first TEXT, middle TEXT, last TEXT, house TEXT, birth NUMERIC)")
# Open csv file
with open(argv[1], "r") as students:
    # Create DictReader
    reader = DictReader(students)
    # Iterate over csv file
    for row in reader:
        # Split "First [Middle] Last" into its parts.
        fullName = row["name"].split()
        # Insert student by substituting values into each ? placeholder
        # Insert None as middle name if non-existent
        if len(fullName) < 3:
            db.execute("INSERT INTO students (first, middle, last, house, birth) VALUES(?, ?, ?, ?, ?)",
                       fullName[0], None, fullName[1], row["house"], row["birth"])
        else:
            db.execute("INSERT INTO students (first, middle, last, house, birth) VALUES(?, ?, ?, ?, ?)",
                       fullName[0], fullName[1], fullName[2], row["house"], row["birth"])
| true |
609075cc5f5e0f3955ffaeb2771594566092cd3a | Python | coloed3/loganalysis | /query.py | UTF-8 | 4,114 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env python3
import psycopg2 # needed to create connection to database
import pprint
"""https://docs.python.org/3/library/pprint.html
allows to format the
object we are printing"""
"""
3 queries
1. What are the most popular three articles of all time?
2. Who are the most popular article authors of all time ?
3. On Which days did more than 1% of the request lead to errors
(think about adding views to the queries)
"""
# following code reference pynative.com
# using class to connect to database using try/catch for errors
class DatabaseConnection:
    """Wrapper around a psycopg2 connection to the 'news' database with
    one reporting method per required log-analysis query."""

    def __init__(self):
        """Open the connection and create a reusable cursor."""
        try:
            dbnews = 'news'
            self.connection = psycopg2.connect(database=dbnews)
            self.cursor = self.connection.cursor()
        # Bug fixes: the bare ``except:`` also hid SystemExit and
        # KeyboardInterrupt, and ``pprint(...)`` called the pprint *module*
        # itself (a TypeError) instead of printing the message.
        except Exception:
            print("Cannot Connect to database")

    # method below will be used to get back the most
    # popular three articles of all time
    def most_popular_article(self):
        """Print the three most viewed articles."""
        # The redundant extra psycopg2.connect()/close() pair was removed:
        # every query runs on the cursor opened once in __init__.
        query_article = """
        Select title, count(*) as poparticle
        from articles join log ON articles.slug = substring(log.path, 10)
        GROUP BY title ORDER BY poparticle desc limit 3;
        """
        self.cursor.execute(query_article)
        articles = self.cursor.fetchall()
        print(articles)

    def most_popular_authors(self):
        """Print the three authors whose articles drew the most views."""
        query_authors = """SELECT authors.name, count(*) as mostpopauthor
        FROM articles
        JOIN authors
        ON articles.author = authors.id
        JOIN log
        ON articles.slug = substring(log.path, 10)
        WHERE log.status = '200 OK'
        GROUP BY authors.name ORDER BY mostpopauthor DESC LIMIT 3;"""
        self.cursor.execute(query_authors)
        author = self.cursor.fetchall()
        print(author)

    # method below will query for the days where the errors were greater than 1%
    def days_greater_than_1p(self):
        """Print the days on which more than 1% of requests failed.

        References used to build the CTE query:
        https://stackoverflow.com/questions/38136854/how-to-use-multiple-with-statements-in-one-postgresql-query
        http://www.postgresqltutorial.com/postgresql-recursive-query/
        https://www.tutorialspoint.com/postgresql/postgresql_with_clause.html
        https://stackoverflow.com/questions/17363023/how-to-get-the-date-and-time-from-timestamp-in-postgresql-select-query
        ``time::date`` converts the timestamp column to a plain date.
        """
        query_gt1p = """
        WITH total_request AS (
        SELECT time::date AS day, count(*)
        FROM log
        GROUP BY time::date
        ORDER BY time::date
        ), total_errors AS (
        SELECT time::date AS day, count(*)
        FROM log
        WHERE status != '200 OK'
        GROUP BY time::date
        ORDER BY time::date
        ), total_failures AS (
        SELECT total_request.day,
        total_errors.count::float / total_request.count::float * 100
        AS total_error_count
        FROM total_request, total_errors
        WHERE total_request.day = total_errors.day
        )
        SELECT * FROM total_failures WHERE total_error_count > 1;
        """
        self.cursor.execute(query_gt1p)
        one_percent = self.cursor.fetchall()
        print(one_percent)
        return
if __name__ == '__main__':
    # Run the three report queries in sequence.
    reporter = DatabaseConnection()
    reporter.most_popular_article()
    reporter.most_popular_authors()
    reporter.days_greater_than_1p()
| true |
05545bbb6bfa793133f12dc5d7d0c21475a0ab52 | Python | whyando/lol-train | /train.py | UTF-8 | 672 | 3.078125 | 3 | [] | no_license | import numpy
from sklearn.linear_model import LogisticRegression
# Column 0 of the CSV is the 0/1 match outcome; the rest are features.
# Renamed 'file' -> 'data': 'file' shadows a builtin name and is vague.
data = numpy.loadtxt('input_full.txt', delimiter=',')
X = data[:, 1:]
y = data[:, 0]
print('Begin Train')
# 'sag' suits large dense inputs; the fixed seed keeps runs reproducible.
clf = LogisticRegression(random_state=3, solver='sag', max_iter=10000).fit(X, y)
print('End Train')
# Spot-check the first ten rows, then the mean training accuracy.
print(clf.predict(X[:10, :]))
print(clf.predict_proba(X[:10, :]))
print(clf.score(X, y))
# Thresholded accuracy count, vectorized (replaces the per-element Python
# loop); semantics preserved: a value of exactly 0.5 matches neither class.
y_pred = clf.predict(X)
hits = ((y < 0.5) & (y_pred < 0.5)) | ((y > 0.5) & (y_pred > 0.5))
correct = int(numpy.count_nonzero(hits))
total = y.size
print('Correct:', correct)
print('Total:', total)
| true |
cffe9b6af71f10a35a9fc160588518e82fdd7e10 | Python | jingchenw/Procura-to-ClientCare | /Procura_to_ClientCare_WithMiddleFile_WithoutAutoPushToResiAdmin.py | UTF-8 | 5,499 | 2.609375 | 3 | [] | no_license | import csv
# Python 2 batch script (print statements / raw_input): reads a Procura
# billing export, sums amounts per billing key, and writes two CSV files
# (the summed billing file and an 'AR_' copy of the raw invoice rows).
import time
import sys
import shutil
import openpyxl
import os
from operator import itemgetter
# NOTE(review): shutil, openpyxl and os are imported but never used below.
# Prompt to type in file directory
print 'Welcome!\n'
print 'Please confirm the raw data files have been put in the "input_files" folder.(Press Enter to continue)\n'
sys.stdin.readline()
# Open and read ServiceTypeDesc
# Lookup table: column 0 is a billing service-type code, column 1 its
# description.  The file is read twice -- once to size the list, once to
# fill it.  (Files are opened without close(); the OS reclaims them when
# the script exits.)
csv_file = open('res/ServiceTypeDesc.csv')
csv_reader = csv.reader(csv_file)
data = list(csv_reader)
row_count = len(data)
service_type_desc = [['' for x in range(2)] for y in range(row_count)]
csv_file = open('res/ServiceTypeDesc.csv')
csv_reader = csv.reader(csv_file)
row_number = 0
for row in csv_reader:
    service_type_desc[row_number][0] = row[0]
    service_type_desc[row_number][1] = row[1]
    row_number += 1
# Open file for row counting
print "Opening source file..."
file_name = raw_input('Please type in the raw data file name, without the file extension: ')
file_dir = 'input_files/'+str(file_name)+'.csv'
csv_file = open(file_dir)
csv_reader = csv.reader(csv_file)
# Count the INVOICE row
# Only rows whose first column is exactly 'INVOICE' are processed.
print "Counting the rows..."
inv_count = 0
for row in csv_reader:
    if row[0] == 'INVOICE':
        inv_count += 1
# Create 2-D List
print "Creating index..."
inv_data = [['' for x in range(27)] for y in range(inv_count)]
# Open file for data mapping
print "Mapping data..."
csv_file = open(file_dir)
csv_reader = csv.reader(csv_file)
# Map all INVOICE data to list
# Copies the first 27 columns of every INVOICE row verbatim.
row_number = 0
for row in csv_reader:
    if row[0] == 'INVOICE':
        for column_count in range(27):
            inv_data[row_number][column_count] = row[column_count]
        row_number += 1
# Change date format from YYYYMMDD to DD/MM/YYYY
# Column 1 holds the record date.
print "Changing date format..."
for row_number in range(inv_count):
    from_date = inv_data[row_number][1]
    conv_date = time.strptime(from_date, "%Y%m%d")
    target_date = time.strftime("%d/%m/%Y", conv_date)
    inv_data[row_number][1] = target_date
# Transfer data into BillingSum
# billing_sum columns: 0 URN, 1 cost centre, 2 master account, 3 record
# date, 4 amount, 5 invoice number, 6 notes.
print "Transferring data..."
billing_sum = [['' for x in range(7)] for y in range(inv_count)]
row_number = 0
for row_number in range(inv_count):
    billing_sum[row_number][0] = int(inv_data[row_number][8]) # URN
    billing_sum[row_number][1] = inv_data[row_number][20] # COST CENTRE
    # Mobility-expense lines are re-coded: private payers to RESCONP, all
    # others to RESCON; any other master account passes through unchanged.
    if inv_data[row_number][22] == 'MOBILITYEXP' and inv_data[row_number][21] == 'PRIVATE':
        billing_sum[row_number][2] = 'RESCONP' # MASTER ACCT
    elif inv_data[row_number][22] == 'MOBILITYEXP':
        billing_sum[row_number][2] = 'RESCON'
    else: billing_sum[row_number][2] = inv_data[row_number][22]
    billing_sum[row_number][3] = inv_data[row_number][1] # RECORD DATE
    if inv_data[row_number][12] == '': # AMOUNT
        inv_data[row_number][12] = 0
    billing_sum[row_number][4] = float(inv_data[row_number][12])
    billing_sum[row_number][5] = int(inv_data[row_number][11]) # INVOICE NUMBER
    # Notes: for 'CC' billing lines (column 15) the description comes from
    # the lookup table keyed on column 16; flag_count counts lookup rows
    # that did NOT match, so reaching the table length means no match.
    if inv_data[row_number][15] == 'CC': # NOTES
        flag_count = 0
        for row in service_type_desc:
            if inv_data[row_number][16] == row[0]:
                if inv_data[row_number][16] == 'CC':
                    # Service type 'CC' itself also appends the day count
                    # taken from column 18.
                    billing_sum[row_number][6] = row[1] + " " + inv_data[row_number][18] + " day/s"
                else: billing_sum[row_number][6] = row[1]
            else: flag_count += 1
        if flag_count == len(service_type_desc):
            billing_sum[row_number][6] = "Please update the Billing Service Type definition lookup table"
    elif inv_data[row_number][16] == 'HCPADJCR':
        for row in service_type_desc:
            if row[0] == inv_data[row_number][16]:
                billing_sum[row_number][6] = row[1]
    elif inv_data[row_number][16] == 'HCPADJDB':
        for row in service_type_desc:
            if row[0] == inv_data[row_number][16]:
                billing_sum[row_number][6] = row[1]
    else: billing_sum[row_number][6] = inv_data[row_number][4] + " Service Fee"
# Sum-up (Excel Macro Function)
# STEP 1 - Create a dictionary
# STEP 2 - Use URN + COST CENTRE + MASTER ACCT + RECORD DATE + INVOICE NUMBER + NOTES as key, and put AMOUNT as value
# STEP 3 - If the key has not appeared in the dictionary, add the key in, as well as the entire line of data
# If the key has already existed in the dictionary, update the value as a sum of existing value and the current AMOUNT
# STEP 4 - Repeat STEP 2 & 3, and a final output contains the sum up of client billing amount would be generated
# STEP 5 - Transfer the final data from the dictionary into a list for further data processing
sum_up_dict = dict()
for row in billing_sum:
    # Everything except the amount (index 4) forms the grouping key; equal
    # keys accumulate their amounts, rounded to two decimals.
    key = str(row[0]) + str(row[1]) + str(row[2]) + str(row[3]) + str(row[5]) + str(row[6])
    if key in sum_up_dict.keys():
        sum_up_dict[key][4] = float("{0:.2f}".format(float(sum_up_dict[key][4]) + float(row[4])))
    else: sum_up_dict[key] = row
output_list = sum_up_dict.values()
output_list.sort(key=itemgetter(0))
# Export
# Two outputs: the summed billing rows (sorted by URN) and an 'AR_' copy of
# the raw invoice rows.  (The 'Exproting' typo is runtime output text and
# is deliberately left unchanged here.)
print "Exproting..."
output_name = raw_input('Enter the output file name, without file extension: ') + '.csv'
output_dir = 'output_files/'+str(output_name)
csv_file = open(output_dir, 'wb')
csv_file_writerow = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_NONE)
for item in output_list:
    csv_file_writerow.writerow(item)
inv_dir = 'output_files/AR_'+str(output_name)
csv_file = open(inv_dir, 'wb')
csv_file_writerow = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_NONE)
for item in inv_data:
    csv_file_writerow.writerow(item)
37b38ce4378af68857054ace3e3b48c1e3e74470 | Python | AlexNedyalkov/Python-OOP-2020 | /polymorphism_06/exercises/groups_02.py | UTF-8 | 1,764 | 4.5 | 4 | [] | no_license | '''
Create a class called Person. Upon initialization it will receive a name (str) and a surname (str).
Create another class called Group. Upon initialization it should receive a name (str) and people (list of Person instances).
Implement the needed magic methods, so the test code below works
'''
class Person:
    """A person with a first name and a surname."""

    def __init__(self, name: str, surname: str):
        self.name = name
        self.surname = surname

    def __add__(self, other):
        # Combining keeps the left operand's name and the right's surname.
        return Person(self.name, other.surname)

    def __str__(self):
        return '{} {}'.format(self.name, self.surname)
class Group:
    """A named collection of people, mergeable with '+'."""

    def __init__(self, name: str, people_list: list):
        self.name = name
        self.people = people_list

    def __str__(self):
        roster = ', '.join(str(person) for person in self.people)
        return 'Group {} with member {}'.format(self.name, roster)

    def __len__(self):
        return len(self.people)

    def __add__(self, other):
        # Merged group: concatenated names, fresh list of both memberships.
        return Group(self.name + other.name,
                     list(self.people) + list(other.people))

    def __getitem__(self, item):
        # Also drives iteration: raises IndexError past the last member.
        return 'Person {}: {}'.format(item, self.people[item])
if __name__ == '__main__':
    # Demo exercising the magic methods: Person.__add__ merges two people;
    # Group supports len(), str(), '+', indexing, and (via __getitem__)
    # iteration.
    p0 = Person('Aliko', 'Dangote')
    p1 = Person('Bill', 'Gates')
    p2 = Person('Warren', 'Buffet')
    p3 = Person('Elon', 'Musk')
    p4 = p2 + p3  # left name + right surname -> 'Warren Musk'
    first_group = Group('__VIP__', [p0, p1, p2])
    second_group = Group('Special', [p3, p4])
    third_group = first_group + second_group
    print(len(first_group))
    print(second_group)
    print(third_group[0])
    for person in third_group:
        print(person)
| true |
ebbe1a4277a3336ad580c1c7614e79c9d758a25d | Python | jackyluo-learning/NAS_test | /abc_tutorials/abc_base.py | UTF-8 | 563 | 2.765625 | 3 | [] | no_license | import abc
import tensorflow as tf
class PluginBase(object, metaclass=abc.ABCMeta):
    """Abstract plugin interface.

    The ABCMeta metaclass blocks direct instantiation until a concrete
    subclass overrides both abstract methods.
    """

    @abc.abstractmethod
    def load(self, input):
        """Retrieve data from the input source and return an object."""

    @abc.abstractmethod
    def save(self, output, data):
        """Save the data object to the output."""
# Scratch demo: a rank-3 (2, 2, 2) variable, padded along its middle axis.
# NOTE(review): tf.Variable's second positional argument is 'trainable',
# not 'dtype' -- the tf.float32 here was probably meant as dtype=tf.float32;
# confirm intent before relying on the resulting dtype.
x=tf.Variable([[[1,1],
[2,2]],
[[3,3],
[4,4]]],tf.float32)
print(x.shape)
# Padding spec [[0,0],[1,0],[0,0]] inserts one zero row before axis 1,
# taking the shape from (2, 2, 2) to (2, 3, 2).
x=tf.pad(x,[[0,0],[1,0],[0,0]])
print(x)
| true |
1efe81c4b1bf76b7f5540666611d78ad12793649 | Python | Hi5Austin/SimpleMovieSearcher | /app.py | UTF-8 | 836 | 2.53125 | 3 | [] | no_license | from flask import Flask
from flask import render_template
import requests
import json
# Flask application; DEBUG enables the interactive debugger and auto-reload.
app = Flask(__name__)
app.config["DEBUG"] = True
# Base URL of the OMDb movie API; query parameters are appended per request.
apiURL = "http://www.omdbapi.com/?"
@app.route("/")
def start():
    """Render the static landing page."""
    return render_template('hello.html')
@app.route("/movie/<title>")
def get_movie(title):
    """Look up *title* on the OMDb API and render its plot and poster.

    Fix: OMDb answers unknown titles with {"Response": "False", ...} and no
    'Plot'/'Poster' keys, so the original direct indexing raised KeyError
    (an HTTP 500); .get() with fallbacks keeps the page rendering.  The
    leftover debug print of the raw payload was removed.
    """
    parameters = {'t': title}
    r = requests.get(apiURL, params=parameters)
    results = json.loads(r.text)
    plot = results.get('Plot', 'Plot not available')
    poster = results.get('Poster', '')
    return render_template('movie.html', movie=title, plot=plot, poster=poster)
if __name__ == "__main__":
    # Start the Flask development server (not suitable for production).
    app.run()
# parameters = {'access_token': token,"message":"I sent this using the Facebook API, I hope it works!!!"}
#
# r = requests.get('https://graph.facebook.com/v2.5/me/inbox', params=parameters)
# result = json.loads(r.text)
# print result
| true |