blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3f214bf68dad4e09b209464c408542c7c390d85a | Python | bk-ikram/Data-Wrangling-with-MongoDB | /audit.py | UTF-8 | 4,830 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 13 15:09:13 2017
@author: IKRAM
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import json
import codecs
FILENAME="doha_qatar.osm"
problem_words=["school","district","compound","office","schule","mall","mart","center","parking",
"clinic","station","kindergarten","centre","complex","lounge","restaurant","grocery",
"supermarket","cafeteria","group","village","villa","villas","hotel","nursery"]
test_string="Rohingya madrasa"
pattern = re.compile('|'.join(r'\b{}\b'.format(word) for word in problem_words))
alt_street_types=["avenue","av","ave","av.","ave.","boulevard","bvd","bvd.","way"]
st_pattern = re.compile('|'.join(r'\b{}\b'.format(word) for word in alt_street_types))
def sorted_dict_by_val(input_dict):
    """Return the dict's (key, value) pairs as a list sorted by value, descending."""
    pairs = list(input_dict.items())
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    return pairs
def get_street_names(filename):
    """Collect the v-attribute of every <tag k="name"> child of each <way>
    element in the OSM file, in document order."""
    names = []
    for _, element in ET.iterparse(filename):
        if element.tag != "way":
            continue
        names.extend(child.get("v") for child in element if child.get("k") == "name")
    return names
def check_problem_streets(names):
    """Return the street names whose lower-cased form matches the module-level
    `pattern` of problem words while not containing "street"."""
    return [
        street
        for street in names
        if pattern.search(street.lower()) is not None
        and "street" not in street.lower()
    ]
def check_alt_streets(names):
    """Return the street names whose lower-cased form matches the module-level
    `st_pattern` of alternative street types ("avenue", "bvd", ...)."""
    return [street for street in names if st_pattern.search(street.lower()) is not None]
def audit_nodes(filename, key):
    """Tally every value seen for tag `key` across <node> elements of the OSM
    file and return (value, count) pairs sorted by count, descending."""
    counts = defaultdict(int)
    for _, element in ET.iterparse(filename):
        if element.tag != "node":
            continue
        for child in element:
            if child.get("k") == key:
                counts[child.get("v")] += 1
    return sorted_dict_by_val(counts)
CREATED = ["version", "changeset", "timestamp", "user", "uid"]
def process_element(elem):
    """Shape one OSM <node>/<way> XML element into a JSON-ready dict.

    Returns None for any other element tag, and also for <way> elements whose
    plain "name" tag matches an alternative street type (st_pattern) without
    matching any known problem word (pattern) -- those ways are dropped.

    Changes vs. the original (Python-2 era) version:
    - "name"/"addr" checks used dict.has_key; the old condition
      `type(entry.has_key("name"))==list` was never true (has_key returns a
      bool), so the English-name promotion below was dead code.
    - nested "a:b"/"a:b:c" tag keys rebuilt their sub-dicts on every tag, so
      e.g. "addr:street" erased a previously stored "addr:city"; sub-keys now
      accumulate.
    """
    if elem.tag == "node" or elem.tag == "way":
        entry = {}
        entry["id"] = elem.get("id")
        entry["tag_type"] = elem.tag
        entry["visible"] = elem.get("visible")
        # Group the edit-metadata attributes under one "created" sub-document.
        entry["created"] = {}
        for doc in CREATED:
            entry["created"][doc] = elem.get(doc)
        if elem.get("lat") and elem.get("lon"):
            position = [float(elem.get("lat")), float(elem.get("lon"))]
            if position:
                entry["pos"] = position
        for k in elem.iter("tag"):
            # Ignore false streets (same filter as the original).
            if elem.tag == "way":
                if k.get("k") == "name":
                    street = k.get("v")
                    if (pattern.search(street.lower()) is None) and (st_pattern.search(street.lower()) is not None):
                        return None
            key = k.get("k")
            if ":" not in key:
                entry[key] = k.get("v")
            else:
                key_parts = key.split(":")
                # BUGFIX: reuse an existing sub-dict instead of re-creating it,
                # so sibling sub-keys (addr:city, addr:street, ...) survive.
                branch = entry.get(key_parts[0])
                if not isinstance(branch, dict):
                    branch = entry[key_parts[0]] = {}
                if len(key_parts) == 2:
                    branch[key_parts[1]] = k.get("v")
                else:
                    leaf = branch.get(key_parts[1])
                    if not isinstance(leaf, dict):
                        leaf = branch[key_parts[1]] = {}
                    leaf[key_parts[2]] = k.get("v")
        if elem.tag == "way":
            references = [nd.get("ref") for nd in elem.iter("nd")]
            if references:
                entry["node_ref"] = references
        # When the name came from "name:xx" tags it is a dict: promote the
        # English variant to "name" and keep the full set under "othernames".
        if isinstance(entry.get("name"), dict) and "en" in entry["name"]:
            engname = entry["name"]["en"]
            entry["othernames"] = entry["name"]
            entry["name"] = engname
        # Collapse every city spelling to "Doha" (the dataset covers Doha --
        # see FILENAME at the top of the module).
        if isinstance(entry.get("addr"), dict) and "city" in entry["addr"]:
            entry["addr"]["city"] = "Doha"
        return entry
def process_map(filename):
    """Stream `filename`, convert each <node>/<way> with process_element and
    write one JSON document per line to "<filename>.json".

    Also keeps every converted document in memory (`data`) solely to report
    the count at the end.  NOTE: Python 2 module (print statements below).
    """
    data=[]
    outfile = "{0}.json".format(filename)
    with codecs.open(outfile, "w") as fo:
        for _, element in ET.iterparse(filename):
            el = process_element(element)
            if el:
                data.append(el)
                fo.write(json.dumps(el) + "\n")
    print "done"
    print len(data)
def test():
    """Scratch driver kept as a history of the audit steps.

    Uncomment whichever call you want to run; currently a no-op.
    """
    #streets=get_street_names(FILENAME)
    #names=get_street_names(FILENAME)
    #print names
    #problems=check_problem_streets(names)
    #print len(problems)
    #alt_streets=check_alt_streets(names)
    #print alt_streets[1:50]
    #print len(alt_streets)
    #int alt_streets
    #print audit_nodes(FILENAME,"addr:city")
    #print len(problems)
    #print problems
    #process_map(FILENAME)
    pass
# Entry point: runs the (currently no-op) scratch driver above.
if __name__=="__main__":
test() | true |
87e4a9a56a0b5dc68c2517d459ba93da6f16e576 | Python | zaccaromatias/AlgoritmosGeneticos | /Ejercicio3/ProgramView.py | UTF-8 | 1,045 | 2.984375 | 3 | [] | no_license | from tkinter import *
from Ejercicio3.AlgoritmoGeneticoView import AlgoritmoGeneticoView
from Ejercicio3.HeuristicaView import HeuristicaView
class ProgramView:
    """Main window of exercise 3 (travelling salesman): lets the user open
    either the heuristic solver or the genetic-algorithm configuration."""

    def __init__(self):
        self.top = Tk()
        self.top.wm_title("Ejercicio 3 - Viajante")
        self.top.wm_geometry("370x250")
        self.top.resizable(False, False)
        # self.top.eval('tk::PlaceWindow . center')

    def ShowHeuristicaView(self):
        """Open the heuristic solver window on top of the main window."""
        HeuristicaView(self.top)

    def ShowAlgoritmoGeneticoConfigurationView(self):
        """Open the genetic-algorithm configuration window."""
        AlgoritmoGeneticoView(self.top)

    def Show(self):
        """Lay out the two launcher buttons and enter the Tk main loop."""
        heuristic_button = Button(self.top, text="Heuristica",
                                  command=lambda: self.ShowHeuristicaView())
        genetic_button = Button(self.top, text="Algoritmos Geneticos",
                                command=lambda: self.ShowAlgoritmoGeneticoConfigurationView())
        heuristic_button.pack()
        heuristic_button.place(x=80, y=83)
        genetic_button.pack()
        genetic_button.place(x=200, y=83)
        self.top.mainloop()
| true |
d887c96c4df906dc658f65b6ded2d598ca34ca04 | Python | chasecolford/Leetcode | /problems/1482.py | UTF-8 | 3,123 | 3.53125 | 4 | [] | no_license | # def minDays(bloomDay, mBouquets, kAdjacent):
# # we can never make them if we need more than the total flowers
# if mBouquets * kAdjacent > len(bloomDay): return -1
# checker = [0] * kAdjacent # this will be what we need for a range to be value (i.e. 0 represents its bloomed)
# day = 0
# baseCase = max(bloomDay)
# # the most we could ever wait is the max day in the array
# while day <= baseCase:
# # first, check if we can make the bouquets now
# valid = 0 # track how many bouquets we can make
# i = 0
# while i < len(bloomDay):
# # if we have room to check (minus one since we include i in window)
# if i + (kAdjacent - 1) < len(bloomDay):
# # check a sliding window of size kAdjacent (not -1 here for adjacent since exclusive bound)
# # print(bloomDay[i:i+kAdjacent])
# if bloomDay[i:i+kAdjacent] == checker:
# valid += 1 # increment the count we can make
# i += kAdjacent # increment the slider (no overlap)
# # else, move the window 1 slot
# else:
# i += 1
# # if we dont have room to check
# else:
# break
# if valid >= mBouquets:
# return day
# # if none of that worked, decrement all of the values by 1
# # print(bloomDay)
# for v in range(len(bloomDay)):
# if bloomDay[v] != 0:
# bloomDay[v] -= 1
# # else, incrment the day and try again
# day += 1
# # if we exit the above and never returned a valid day, we couldnt make it
# return -1
def minDays(bloomDay, mBouquets, kAdjacent):
    """Minimum days to wait so that mBouquets bouquets, each made of
    kAdjacent adjacent bloomed flowers, can be cut (LeetCode 1482).
    Returns -1 when the garden can never supply enough flowers.

    BUGFIX: the previous sliding-window version only ever stored one window
    maximum, so it returned the smallest window max (6 for the first sample
    printed below) instead of the correct answer (9).  Replaced with the
    standard binary search on the waiting time.
    """
    # Impossible when more flowers are needed than exist at all.
    if mBouquets * kAdjacent > len(bloomDay):
        return -1

    def _enough(day):
        # Greedily count bouquets of kAdjacent consecutive bloomed flowers
        # if we wait `day` days.
        bouquets = 0
        run = 0
        for bloom in bloomDay:
            if bloom <= day:
                run += 1
                if run == kAdjacent:
                    bouquets += 1
                    run = 0
                    if bouquets == mBouquets:
                        return True
            else:
                run = 0
        return False

    # The answer lies between the earliest and the latest bloom day.
    lo, hi = min(bloomDay), max(bloomDay)
    while lo < hi:
        mid = (lo + hi) // 2
        if _enough(mid):
            hi = mid
        else:
            lo = mid + 1
    return lo
# Ad-hoc smoke checks (the two samples from LeetCode 1482).
# expected: 9
print(minDays(bloomDay=[1,10,2,9,3,8,4,7,5,6], mBouquets=4, kAdjacent=2))
# expected: 3
print(minDays(bloomDay=[1,10,3,10,2], mBouquets=3, kAdjacent=1)) | true |
0ba6a3493d78b10ad13bc64eb4cd6d16058d9fd7 | Python | loalberto/Springboard-Capstone3 | /Preprocessing_Modeling.py | UTF-8 | 6,015 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Preprocessing
# In[1]:
import os
from tensorflow.keras.preprocessing import image
import numpy as np
import multiprocessing
import random
import pandas as pd
import multiprocessing
import gc
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten, MaxPool2D
from keras.utils import np_utils
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
# In[2]:
def get_file_names(s):
    """Return the sorted names of the non-empty files under
    ./transformed_images/<s>, skipping '.DS_Store'.

    NOTE(review): os.walk recurses, but the size check joins the file name
    to the top-level path only -- files in nested sub-directories would make
    os.path.getsize fail; presumably the directory is flat, confirm.
    """
    path = './transformed_images/{}'.format(s)
    collected = []
    for _, _, filenames in os.walk(path):
        collected.extend(
            name for name in filenames
            if not (os.path.getsize(path + '/' + name) == 0 or name == '.DS_Store')
        )
    return sorted(collected)
# In[3]:
def tonp(func, list_of_images, size=(300, 300)):
    """Load one image via the path builder `func`, resize it to `size` in
    grayscale, and return it as a 1-row matrix of raveled pixel values.

    NOTE(review): despite its name, `list_of_images` is used as a SINGLE
    file name; the commented-out loop below suggests this once iterated a
    list, and the try/except at the bottom is a leftover of that loop (the
    UnboundLocalError branch always fires in this single-image version).
    """
    # for img in list_of_images:
    path = func(list_of_images)
    # Resize to `size` (default 300x300) and load in grayscale.
    current_img = image.load_img(path, target_size=size, color_mode='grayscale')
    # makes a matrix
    img_ts = image.img_to_array(current_img)
    # converts to a vector
    img_ts = [img_ts.ravel()]
    current_img.close()
    try:
        # Brings all the new vectors into one giant array
        full_mat = np.concatenate((full_mat, img_ts))
    except UnboundLocalError:
        full_mat = img_ts
    return full_mat
# In[4]:
def tonp_wrapper(args):
    """Unpack a single argument tuple and forward it to tonp, so tonp can be
    driven by APIs that pass one positional argument (e.g. Pool.map)."""
    return tonp(*args)
# In[5]:
def get_cat_filepath(img_name):
    """Return the relative path of a cat image given its bare file name."""
    return f'./transformed_images/Cat/{img_name}'
# In[6]:
def get_dog_train_filepath(img_name):
    """Return the relative path of a training-set dog image given its bare file name."""
    return f'./transformed_images/DogTrain/{img_name}'
# In[7]:
def get_dog_test_filepath(img_name):
    """Return the relative path of a test-set dog image given its bare file name."""
    return f'./transformed_images/DogTest/{img_name}'
# In[8]:
def display_image_np(np_array):
    """Display a 2-D array as a grayscale image (values interpreted as 0-255).

    BUGFIX: matplotlib is never imported at module level anywhere in this
    file, so this function raised NameError on first use; import it locally
    here.  The duplicated trailing plt.show() was also dropped.
    """
    import matplotlib.pyplot as plt  # local import: fixes the NameError
    # Grayscale display with a fixed 0..255 value range.
    plt.imshow(np_array, vmin=0, vmax=255, cmap='Greys_r')
    plt.axis('off')
    plt.grid(True)
    plt.show()
# In[9]:
def set_up_data(cat_filenames, dogtrain_filenames, dogtest_filenames, sample_amount=5000):
    """Load `sample_amount` images from each of the three name lists, label
    them (cat=1, dog=0) and return (X_train, y_train, X_test, y_test).

    NOTE(review): `test_idx` is drawn from range(sample_amount) although
    `all_data` holds 3*sample_amount rows, so the test split only samples
    the first third of the data and train/test are not complementary --
    this looks like a bug (probably meant len(all_data)); confirm.
    """
    cat_data = []
    dogtrain_data = []
    dogtest_data = []
    # for i in range(len(cat_filenames)):
    for i in range(sample_amount):
        cat_data.append(tonp(get_cat_filepath, cat_filenames[i]))
    # for i in range(len(dogtrain_filenames)):
    for i in range(sample_amount):
        dogtrain_data.append(tonp(get_dog_train_filepath, dogtrain_filenames[i]))
    # for i in range(len(dogtest_filenames)):
    for i in range(sample_amount):
        dogtest_data.append(tonp(get_dog_test_filepath, dogtest_filenames[i]))
    # Dogs from both source folders form one class; free the parts eagerly.
    dog_data = np.concatenate((dogtest_data, dogtrain_data))
    del dogtest_data
    del dogtrain_data
    gc.collect()
    sample_cat = random.sample(cat_data, sample_amount)
    cat_label = np.array([1 for _ in range(len(cat_data))])
    dog_label = np.array([0 for _ in range(len(dog_data))])
    all_data_label = np.concatenate((cat_label[:sample_amount], dog_label))
    all_data = np.concatenate((sample_cat, dog_data))
    del sample_cat
    del dog_data
    gc.collect()
    # Roughly 70% of the rows go to training (random, without replacement).
    split_limit = int(np.floor(0.7 * len(all_data)))
    random_index = random.sample(range((len(all_data))), split_limit)
    test_idx = set(np.arange(0, sample_amount)) - set(random_index)
    X_train = [all_data[i] for i in random_index]
    y_train = np.asarray([all_data_label[i] for i in random_index])
    X_test = [all_data[i] for i in test_idx]
    y_test = np.asarray([all_data_label[i] for i in test_idx])
    del cat_data
    gc.collect()
    return X_train, y_train, X_test, y_test
# In[10]:
cat_filenames = get_file_names('Cat')
dogtrain_filenames = get_file_names('DogTrain')
dogtest_filenames = get_file_names('DogTest')
# In[11]:
X_train, y_train, X_test, y_test = set_up_data(cat_filenames, dogtrain_filenames, dogtest_filenames,
sample_amount=100)
num_classes = 2
# In[12]:
X_train = np.asarray(X_train).reshape(np.array(X_train).shape[0], 300, 300, 1)
X_test = np.asarray(X_test).reshape(np.array(X_test).shape[0], 300, 300, 1)
# In[13]:
X_train.shape
# In[14]:
X_test.shape
# In[15]:
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
# In[16]:
y_train.shape
# # Modeling
# In[17]:
print(X_train.shape, y_train.shape)
X_test.shape, y_test.shape
# In[18]:
# building a linear stack of layers with the sequential model
model = Sequential()
# hidden layer
model.add(Conv2D(25, kernel_size=(3,3), padding='valid',
activation='relu', input_shape=(300,300,1)))
# output layer
model.add(MaxPool2D(pool_size=(1,1)))
# flatten output of conv
model.add(Flatten())
# hidden layer
model.add(Dense(100, activation='relu'))
# output layer
model.add(Dense(2, activation='softmax'))
# compiling the sequential model
model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer='adam')
# training the model for 10 epochs
model.fit(X_train, y_train, epochs=3, validation_data=(X_test, y_test))
# In[22]:
model.predict(X_test)
# In[ ]:
model = Sequential()
# input_shape = (height, width, 1 if it's grayscale)
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(300,300,1), padding='same'))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(64, activation='sigmoid'))
model.add(Dense(2))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3)
# In[ ]:
y_pred = model.predict(X_test)
y_pred
# In[ ]:
y_test
# In[ ]:
f1_score(y_pred, y_test)
# 0.51% accuracy for the first model.
| true |
a372c4c11a934561ab228e2eefc744be1c48e8b8 | Python | IfYouThenTrue/Simple-Programs-in-Python | /RockPaperScissors.py | UTF-8 | 928 | 3.453125 | 3 | [] | no_license | #!/bin/python3
from random import randint
def rps():
    """Prompt the player for a move, roll one for the computer, echo both
    and return (computer_choice, player_choice)."""
    player_choice = input('Choose rock, paper or scissors')
    print('You chose ' + player_choice)
    # One die roll, mapped to the three moves (same RNG consumption as before).
    roll = randint(1, 3)
    computer_choice = {1: 'rock', 2: 'paper', 3: 'scissors'}[roll]
    print('Computer chose ' + computer_choice)
    return (computer_choice, player_choice)
c, p = rps()
# Every decided (computer, player) pairing; a tie is handled first, and --
# like the original if/elif chain -- an unrecognised player input prints
# nothing at all.
outcomes = {
    ('rock', 'paper'): 'Player Wins',
    ('rock', 'scissors'): 'Computer Wins',
    ('paper', 'rock'): 'Computer Wins',
    ('paper', 'scissors'): 'Player Wins',
    ('scissors', 'rock'): 'Player Wins',
    ('scissors', 'paper'): 'Computer Wins',
}
if c == p:
    print('Tie')
elif (c, p) in outcomes:
    print(outcomes[(c, p)])
| true |
e5bfb6b63fc0ab0438256d87b719b72b57ad309b | Python | betadayz/Task-3 | /Task==3.py | UTF-8 | 577 | 3.671875 | 4 | [] | no_license | from math import sqrt
def primeCount(arr, n):
    """Count how many of the first n values in arr are prime.

    Uses a sieve of Eratosthenes sized by the array maximum.  Robustness
    fixes over the original: an empty array or n <= 0 returns 0 instead of
    raising IndexError, a maximum of 0 or 1 no longer overruns the sieve,
    and negative values are never used as (wrapping) list indices.
    """
    if n <= 0 or not arr:
        return 0
    # Sieve size: at least 2 slots so prime[0]/prime[1] always exist.
    limit = max(max(arr), 1)
    prime = [True] * (limit + 1)
    prime[0] = False
    prime[1] = False
    for p in range(2, int(sqrt(limit)) + 1):
        if prime[p]:
            for multiple in range(p * p, limit + 1, p):
                prime[multiple] = False
    # Only the first n entries are counted, matching the original contract.
    return sum(1 for i in range(n) if 0 <= arr[i] and prime[arr[i]])
if __name__ == '__main__':
arr = [1, 2, 3, 4, 5, 6, 7]
n = len(arr)
print(primeCount(arr, n)) | true |
816354722f886068bbaec107b78d092fc6680998 | Python | ravisjoshi/python_snippets | /Array/Pascal'sTriangleII.py | UTF-8 | 942 | 3.765625 | 4 | [] | no_license | """
Given a non-negative index k where k ≤ 33, return the kth index row of the Pascal's triangle.
Note that the row index starts from 0.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Input: 3 / Output: [1,3,3,1]
"""
class Solution:
    def getRow(self, rowIndex):
        """Return row `rowIndex` (0-based) of Pascal's triangle.

        Improvement: builds the row in place in O(rowIndex) space; the
        original kept every previous row alive (O(rowIndex**2) memory) and
        special-cased rows 0 and 1.
        """
        row = [1] * (rowIndex + 1)
        for r in range(2, rowIndex + 1):
            # Sweep right-to-left so each update still reads row r-1 values.
            for j in range(r - 1, 0, -1):
                row[j] += row[j - 1]
        return row
if __name__ == '__main__':
    # Ad-hoc smoke test: row 3 of Pascal's triangle.
    s = Solution()
    rowIndex = 3
    # expected output: [1, 3, 3, 1]
    print(s.getRow(rowIndex))
| true |
1b92404bee8b471dd5add7dcbdc8f7b6c7459c94 | Python | BayoAdepegba/Python | /ex13.py | UTF-8 | 592 | 3.5625 | 4 | [] | no_license | #Import = add features to script from python feature set
#argv is the argument variable = holds the arguments you pass
#to your python script when you run it
from sys import argv
# Unpack argv: the script name plus exactly three positional arguments.
# NOTE: Python 2 script (print statements, raw_input); the unpacking below
# raises ValueError unless exactly three arguments are passed.
script, first, second, third = argv
print "The script is called:", script
print "Your first variable is:", first
print "Your second variable is:", second
print "Your third variable is:", third
# Modules add features to a script from the Python feature set.
# NOTE(review): `age` is read below but never used afterwards -- presumably
# leftover from the exercise; confirm before removing.
| true |
d19b23eea7cdc05c8c18a68d67562dc3ea5253c7 | Python | diegoshakan/curso-em-video-python | /Desafio56M02.py | UTF-8 | 1,014 | 4.5 | 4 | [] | no_license | '''Crie um programa que leia o nome de quatro pessoas, idade e o sexo e mostre:
1- A média de idade do grupo:
2 - Qual é o nome do homem mais velho
3 - Quantas mulheres tem menos de 20 anos.
'''
soma = 0        # running sum of ages (for the group average)
cont = 0        # number of people read
contm = 0       # women younger than 20
velhonome = ''  # name of the oldest man seen so far
velhoidade = 0  # age of the oldest man seen so far
for c in range(1, 5):
    nome = input('Digite um nome: ')
    idade = int(input('Idade: '))
    cont += 1 # this counter is used to compute the average
    soma = soma + idade # accumulate the ages for the group average
    sexo = input('Sexo [M/F]: ')
    # NOTE(review): `sexo in 'Mm'` is also True for an empty input string
    # (substring test), so pressing Enter counts as male -- confirm intent.
    if sexo in 'Mm' and idade > velhoidade: # man, and older than the oldest so far?
        velhoidade = idade
        velhonome = nome
    if sexo in 'Ff' and idade < 20:
        contm += 1
print(f'A média das idades {soma / cont}.') # f-strings (Python 3.6+) replace .format()
print(f'O homem mais velho se chama {velhonome} e tem {velhoidade} anos.')
print(f'Há {contm} mulher(es) menor(es) de 20 anos.')
| true |
db92a2676f4311b6ab733a95609783ea2a89b346 | Python | Aasthaengg/IBMdataset | /Python_codes/p02903/s073062547.py | UTF-8 | 260 | 3.3125 | 3 | [] | no_license | h,w,a,b = map(int,input().split())
# Build the h x w grid row by row: rows in the bottom b-row band get their
# last `a` cells set to 1, every other row gets its first `w-a` cells set
# to 1 (h, w, a, b come from the stdin line above).
ans = [([0] * (w - a) + [1] * a) if r >= h - b else ([1] * (w - a) + [0] * a)
       for r in range(h)]
for i in ans:
print(*i,sep="") | true |
561c826629f3cbb742bb3e738a6407a04919b15b | Python | cyclopsprotel/Jamming_Detection | /Capacity_Estimation/plot_MI.py | UTF-8 | 437 | 2.984375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the mutual-information table and sort by input probability so each
# curve is drawn left-to-right (pd comes from the module-level pandas import).
df = pd.read_csv('MIs.csv')
df = df.sort_values(['Prob'])
print(df)
# One curve per distinct SNR value.
snrs = np.unique(df['SNR'].values)
for i in snrs:
    temp = df.loc[df['SNR'] == i]
    lab = "True MI - SNR=" + str(i)
    # Plots the average of the Iy1 and Iy1y2 columns against Prob.
    plt.plot(temp['Prob'], 0.5*temp['Iy1'] + 0.5*temp['Iy1y2'], label=lab)
plt.xlabel('Input Probability')
plt.ylabel('True MI')
plt.legend()
plt.savefig("MIideal_curve.png")
plt.show() | true |
72d7fcb584754750d97e568ef2130573d1b381ec | Python | fdkz/libaniplot | /example/qaniplot.py | UTF-8 | 4,090 | 2.6875 | 3 | [
"MIT"
] | permissive | import sys
import math
import time
from PySide import QtCore, QtGui
sys.path.append('..')
from aniplot import AniplotWidget
class SignalGenerator(object):
    """Test-signal source: each instance samples a superposition of sines of
    the current wall-clock time, clamped into [0., 255.].

    A class-wide counter gives every instance a distinct sequence number
    `i`, so several generators emit different-looking signals.
    """
    seed = 0  # class-wide instance counter

    def __init__(self):
        SignalGenerator.seed += 1
        self.i = SignalGenerator.seed

    def get(self):
        """Sample the signal at the current time, clamped to [0., 255.]."""
        # Sum of sines whose frequencies scale with the instance number.
        # (The original's `if 1:` debug switch and its dead single-sine
        # alternative branch are dropped; only the active branch is kept.)
        signal = math.sin(time.time()*(self.i+1)*30+self.i*2) * 1.
        signal += math.sin(time.time()*(self.i+1)*32.3+self.i*2) * 2.
        signal += math.sin(time.time()*(self.i+1)*33.3+self.i*2) * 1.
        signal += math.sin(time.time()*(self.i+1)*55.3+self.i*2) * 1.
        signal += math.sin(time.time()*(self.i+1)*1.1+self.i*2) * 20.
        signal += math.sin(time.time()*(self.i+1)*1.3+self.i*2) * 20.
        signal += math.sin(time.time()*(self.i+1)*.2124+self.i*2) * 40.
        signal += math.sin(time.time()*(self.i+1)*.0824+self.i*2) * 40.
        signal += math.sin(time.time()*(self.i+1)*.0324+self.i*2) * 40.
        signal += 127.
        # Clamp into the 8-bit sample range.
        return max(0., min(signal, 255.))
if __name__ == '__main__':
    class MainWindow(QtGui.QMainWindow):
        """Demo window: an AniplotWidget inside a scroll area, fed by two
        SignalGenerator sources on independent QTimer cadences."""
        def __init__(self):
            super(MainWindow, self).__init__()
            # setup GUI
            centralWidget = QtGui.QWidget()
            self.setCentralWidget(centralWidget)
            self.aniplot = AniplotWidget()
            self.glWidgetArea = QtGui.QScrollArea()
            self.glWidgetArea.setWidget(self.aniplot)
            self.glWidgetArea.setWidgetResizable(True)
            self.glWidgetArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            self.glWidgetArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            self.glWidgetArea.setSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
            self.glWidgetArea.setMinimumSize(50, 50)
            centralLayout = QtGui.QGridLayout()
            centralLayout.addWidget(self.glWidgetArea, 0, 0)
            centralWidget.setLayout(centralLayout)
            self.setWindowTitle("QAniplotTest")
            self.resize(400, 300)
            # setup data source
            self.source1 = SignalGenerator()
            self.source2 = SignalGenerator()
            # interestingly, every frequency larger than screen refresh rate results in incorrect speed and jerkyness.
            self.ch1 = self.aniplot.create_channel(frequency=60, value_min=0., value_min_raw=0., value_max=5., value_max_raw=255., legend="fast data")
            self.ch2 = self.aniplot.create_channel(frequency=5, value_min=0., value_min_raw=0., value_max=3.3, value_max_raw=255., legend="slow data", color=QtGui.QColor(0, 238, 0))
            self.timer1 = QtCore.QTimer(self)
            self.timer1.timeout.connect(self.timer1_fired)
            self.timer2 = QtCore.QTimer(self)
            self.timer2.timeout.connect(self.timer2_fired)
            self.aniplot.start()
            # NB! still NEVER use timers for this in real life. timers can skip updates, and
            # the graphs will lose their sync and it could be invisible. always update the slowest
            # graph at every tenth fastest graph update or something like that.. only the fastest
            # graph can use timers.
            self.timer1.start(1. / self.ch1.freq * 1000.)
            self.timer2.start(1. / self.ch2.freq * 1000.)
        def timer1_fired(self):
            """Push one fresh sample from source1 into the fast channel."""
            self.ch1.append(self.source1.get())
        def timer2_fired(self):
            """Push one fresh sample from source2 into the slow channel."""
            self.ch2.append(self.source2.get())
    app = QtGui.QApplication(sys.argv)
    mainWin = MainWindow()
    mainWin.show()
    if sys.platform == "darwin":
        # this line is here because on macosx the main window always starts below the terminal that opened the app.
        # the reason for getattr is that myapp.raise() caused a syntax error.
        getattr(mainWin, "raise")()
    sys.exit(app.exec_())
| true |
c455662616f193278a3f6551f0e41c64cbda83ef | Python | mskailash/myexercise | /myexercise/s_scan_duplicates.py | UTF-8 | 881 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python
#Description: This Program displays all the duplicate files in the given Directory
#Author: Kailash.M.S
#Date: April 2018
#Version: 1.0
# NOTE: Python 2 script (print statement near the bottom).
__author__ = "M.S.Kailash"
import os, argparse
from m_duplicates_in_dir import duplicates_in_dir
# Command-line interface: an optional debug flag and the positional
# directory to scan for redundant file names.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", help="Debugging Mode Turned on",action="store_true")
parser.add_argument("scan_directory", help="Scans the Directory and displays Redundant filenames")
arguments = parser.parse_args()
if arguments.debug: #Set Debug Options: drop into the interactive debugger
    import pdb
    pdb.set_trace()
# Resolve the path (follows symlinks) before handing it to the scanner.
obj_scan_dir = duplicates_in_dir(os.path.realpath(arguments.scan_directory))
print "Number of files in the Directory: ", obj_scan_dir.file_count
obj_scan_dir.scan_duplicate_filenames()
| true |
bceb970f8a08c9c7f93df8850707aa0bacc270e1 | Python | shannon112/DLCVizsla | /hw3_dcgan_acgan_dann/gta/pre_dataset.py | UTF-8 | 779 | 2.796875 | 3 | [] | no_license | import torch.utils.data as data
from PIL import Image
import os
import glob
class GetLoader(data.Dataset):
    """Dataset over every .png file directly inside `img_root`, in sorted
    path order.  Each item is (RGB image, optionally transformed, bare
    file name)."""

    def __init__(self, img_root, transform=None):
        self.img_root = img_root
        self.transform = transform
        pattern = os.path.join(img_root, '*.png')
        self.img_paths = sorted(glob.glob(pattern))
        self.len = len(self.img_paths)

    def __getitem__(self, item):
        """Load, convert to RGB and (optionally) transform the item-th image."""
        img_path = self.img_paths[item]
        img_fn = img_path.split('/')[-1]
        img = Image.open(img_path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, img_fn

    def __len__(self):
        return self.len
| true |
4790cdf79cee470128e7507da2c3253a4de041a9 | Python | Brian-Tomasik/python-utilities | /replace_Google_Docs_urls_with_redirects.py | UTF-8 | 1,796 | 3 | 3 | [] | no_license | import requests
from lxml.html import fromstring
import argparse
import re
# NOTE: Python 2 script (print statements; the input file is iterated in
# binary mode and matched with str patterns, which only works on Python 2).
parser = argparse.ArgumentParser(description='Replace Google-Docs urls with the urls they redirect to.')
parser.add_argument('infile', help='input HTML file')
parser.add_argument('outfile', help='output HTML file')
args = parser.parse_args()
n_urls = 0
with open(args.infile,'rb') as infile:
    with open(args.outfile,'wb') as outfile:
        for line in infile:
            # Every Google-Docs redirect href on this line.
            matches = re.findall("href=\"(https?://www\.google\.com/url\?q=[^\"]+)\"", line)
            if matches:
                for url in matches:
                    n_urls += 1
                    print "Original url:\n{}".format(url)
                    # The interstitial page shows the target inside <a>...</a>.
                    resp = requests.get(url)
                    redirected_url = re.search(">([^<]+)</a>", resp.text).group(1)
                    print "Redirected url:\n{}".format(redirected_url)
                    # Get the title of the page too
                    try: # Below lines are from http://stackoverflow.com/a/26812545/1290509
                        redirected_url_resp = requests.get(redirected_url)
                        tree = fromstring(redirected_url_resp.content)
                        title = tree.findtext('.//title').strip()
                        if "403" in title or "404" in title:
                            title = ""
                        else:
                            print "Title:\n{}".format(title)
                    except:
                        title = ""
                    # Rewrite the url and append the fetched title attribute.
                    replace_old_url_with_this = redirected_url + """" title="'{}'""".format(title)
                    line = line.replace(url, replace_old_url_with_this.encode('utf-8'))
                    print ""
            outfile.write(line)
print "Found and replaced {} urls.".format(n_urls) | true |
9973427aee958e1ab5335097650b71c6e584ad4b | Python | sureshbvn/nlpProject | /nGramModel/evaluate.py | UTF-8 | 3,595 | 2.75 | 3 | [
"MIT"
] | permissive | from __future__ import division
import re
import numpy as np
import sklearn.metrics as grading_metrics
import utility as util
import string
# Side-channel copy of every transformed line, for inspection.
fw = open("tempout.txt", "w+")

def transform(line):
    """Normalise one line into the token stream the punctuation model expects.

    Guarantees a terminal '.', appends a '$' sentinel, drops the space after
    ',' and '.', rewrites each inter-word space as ' epsilon ' and rewrites
    '.'/',' as the tokens .PERIOD / ,COMMA.  Returns the transformed line
    ending in a newline (None for an empty input) and also appends it to the
    module-level `fw` file.
    """
    if len(line) == 0:
        return
    # BUGFIX: the original compared characters with `is not`, which tests
    # object identity rather than equality (it only worked because CPython
    # interns one-character strings); use != instead.  The patterns below
    # are now raw strings to avoid invalid-escape warnings.
    if line[-1] != '.' and line[-1] != ',':
        line = line + '.'
    line += '$'
    newline = re.sub(r"\, ", ",", line)
    newline = re.sub(r"\. ", ".", newline)
    newline = re.sub(r" ", " epsilon ", newline)
    newline = re.sub(r"\. ", ' .PERIOD ', newline)
    newline = re.sub(r"\.\$", " .PERIOD ", newline)
    newline = re.sub(r"\,", ' ,COMMA ', newline)
    fw.write(newline + "\n")
    return newline + '\n'
def determine_class(word, punc):
    """Map a (word, following-punctuation) pair onto one of six label classes:

    0: capitalised + epsilon   1: capitalised + .PERIOD  2: capitalised + ,COMMA
    3: lower-case + .PERIOD    4: lower-case + ,COMMA    5: everything else
    """
    if util.isCaptilized(word):
        table = {"epsilon": 0, ".PERIOD": 1, ",COMMA": 2}
    else:
        table = {".PERIOD": 3, ",COMMA": 4}
    return table.get(punc, 5)
if __name__=="__main__":
string = open('uncleaned_test_data.txt').readlines()
new_string = []
for line in string:
str1=line[:-1]
if len(str1)>0:
newline = transform(str1)
new_string.append(newline)
string = open('output.txt','r')
final_actual_list= []
final_predicted_list = []
for line_actual in new_string:
line_predicted = string.readline()
if len(line_predicted)==1:
line_predicted = string.readline()
line_predicted = line_predicted[:-1]
actual_list = line_actual.split(" ")
actual_list = actual_list[:-1]
if len(line_predicted)<=0:
continue
predicted_list = line_predicted.split()
if len(actual_list)!=len(predicted_list):
continue
for i in xrange(0,len(actual_list),2):
word = actual_list[i]
actual_punc = actual_list[i+1]
predicted_punc = predicted_list[i+1]
actual_class = determine_class(word,actual_punc)
predicted_class = determine_class(word,predicted_punc)
final_actual_list.append(actual_class)
final_predicted_list.append(predicted_class)
new_accuracy = grading_metrics.accuracy_score(final_actual_list,final_predicted_list)
print "accuracy = "
print new_accuracy
#f1_score = grading_metrics.f1_score(actual,predicted,average=None)
f_score = grading_metrics.precision_recall_fscore_support(final_actual_list,final_predicted_list,average=None)
print("\nprecision :" )
class_index = 0
for class_precision in f_score[0]:
print("for class ")
print class_index+1
print class_precision
class_index+=1
print("\nrecall :" )
class_index = 0
for class_recall in f_score[1]:
print("for class "+str(class_index+1) + " "+ str(class_recall))
class_index+=1
print("\nf_measure :" )
class_index = 0
for class_f in f_score[2]:
print("for class "+str(class_index+1) + " "+ str(class_f))
class_index+=1
f1_score = grading_metrics.f1_score(final_actual_list,final_predicted_list,average=None)
print("\nf1_measure :" )
class_index = 0
for class_f1 in f1_score:
print("for class "+str(class_index+1) + " "+ str(class_f1))
class_index+=1 | true |
a78c924de8d8bc04d5bdb9143d911c9315de75d5 | Python | Thirumurugan-12/Python-programs-11th | /0 22 4444 666.py | UTF-8 | 99 | 3.375 | 3 | [] | no_license | #pgm 2
for r in range(0,4):
for c in range(0,r+1):
print(2*r,end=" ")
print()
| true |
40739eb8d05c77759155a46f9af55c354c9d8ea0 | Python | yangyangmei/fisher | /app/web/book.py | UTF-8 | 3,694 | 2.65625 | 3 | [] | no_license | """
created by yangyang on 2018/9/29.
"""
from flask import jsonify, request, render_template, flash
from app.libs.helper import is_isbn_or_key
from app.models.gift import Gift
from app.models.wish import Wish
from app.spider.yushu_book import YuShuBook
from app.view_models.trade import TradeViewModel
from . import web
from app.forms.book import SearchForm
from app.view_models.book import BookViewModel, BookCollection
from flask_login import current_user
import json
__author__ = "yangyang"
@web.route("/book/<isbn>/detail")
def book_detail(isbn):
has_in_wishes = False
has_in_gifts = False
if current_user.is_authenticated:
if Gift.query.filter_by(uid = current_user.id,isbn=isbn,
launched=False).first():
has_in_gifts = True
if Wish.query.filter_by(uid = current_user.id,isbn=isbn,
launched=False).first():
has_in_wishes = True
yushu_book = YuShuBook()
yushu_book.search_by_isbn(isbn)
book = BookViewModel(yushu_book.first)
trade_gifts = Gift.query.filter_by(isbn=isbn,launched=False).all()
trad_wishes = Wish.query.filter_by(isbn=isbn,launched=False).all()
trade_gifts_model = TradeViewModel(trade_gifts)
trade_wishes_model = TradeViewModel(trad_wishes)
return render_template('book_detail.html',book=book,
wishes=trade_wishes_model,gifts=trade_gifts_model,
has_in_gifts = has_in_gifts, has_in_wishes=has_in_wishes)
@web.route("/book/search") # 第三版
def search():
form = SearchForm(request.args)
books = BookCollection()
if form.validate():
q = form.q.data.strip()
page = form.page.data
isbn_or_key = is_isbn_or_key(q)
yushu_book = YuShuBook()
if isbn_or_key == "isbn":
yushu_book.search_by_isbn(q)
else:
yushu_book.search_by_keyword(q, page)
books.fill(q, yushu_book)
# return json.dumps(books, default= lambda book:book.__dict__) # json序列化,自定义default函数
else:
# return jsonify(form.errors)
flash("搜索的关键字不符合要求,请重新输入")
return render_template("search_result.html", books=books)
# @web.route("/book/search") # 改成?q=jin&page=1的形式 第二版
# def search():
# form = SearchForm(request.args)
# if form.validate():
# q = form.q.data.strip()
# page = form.page.data
# isbn_or_key = is_isbn_or_key(q)
# if isbn_or_key == "isbn":
# result = YuShuBook.search_by_isbn(q)
# result = BookViewModel.package_single(result, q)
# else:
# result = YuShuBook.search_by_keyword(q, page)
# result = BookViewModel.package_collection(result, q)
#
# return jsonify(result)
#
# else:
# return jsonify(form.errors)
# @web.route("/book/search/<q>/<page>") 第一版
# def search(q,page):
# ISBN 13 0-9数字组成
# ISBN 10个0-9的数字组成,包含一些"-"
# q = request.args["q"]
# page = request.args["page"]
# isbn_or_key = is_isbn_or_key(q)
# if isbn_or_key == "isbn":
# result = YuShuBook.search_by_isbn(q)
# else:
# result = YuShuBook.search_by_keyword(q)
#
# # return json. dumps(result) , 200, {"content-type":"application/json"}
# return jsonify(result)
# @web.route("/test")
# def test():
# data = {
# "name":"yy",
# "age":18
# }
# flash("hello flash ", category="error")
# flash("hello qiyue", category="warning")
#
#
# return render_template("test.html", data=data)
| true |
73ac6b557967e5a203b25518ffee52e2c8199989 | Python | pfuntner/toys | /bin/cols.py | UTF-8 | 2,690 | 3.046875 | 3 | [] | no_license | #! /usr/bin/env python3
"""
Print lines to identify the columns, keyed off the width of the screen. Useful to know how long lines are, what column a character/field is in, etc.
"""
import re
import sys
import math
import getopt
import subprocess
def syntax(msg=None):
  """Write *msg* (when given) and the usage string to stderr, then exit 1."""
  usage = 'Syntax: {pgm} [-v|--verbose|-a|--all] [INT]\n'.format(pgm=sys.argv[0])
  if msg:
    sys.stderr.write('{msg}\n'.format(**locals()))
  sys.stderr.write(usage)
  exit(1)
def debug(msg, loud=False):
  """Write *msg* to stderr when global verbose mode is on, or always when *loud*."""
  if loud or verbose:
    sys.stderr.write('{msg}\n'.format(**locals()))
# Fall-back terminal width if `stty size` cannot be used.
cols = 80
# Ask the terminal for its size; stdout looks like "<rows> <columns>".
p = subprocess.Popen(["stty", "size"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
rc = p.wait()
verbose = False
(opts,args) = ([],[])
try:
  (opts,args) = getopt.getopt(sys.argv[1:], 'va', ['verbose', 'all'])
except Exception as e:
  syntax('Caught: {e!s}'.format(**locals()))
for (opt,arg) in opts:
  if opt in ['-v', '--verbose', '-a', '--all']:
    # Both -v/--verbose and -a/--all toggle the same flag.
    verbose = not verbose
  else:
    syntax('Unexpected option: {opt!r}'.format(**locals()))
if len(args) == 1:
  # Explicit width supplied on the command line overrides `stty size`.
  cols = int(args[0])
  assert cols > 0
  debug('Overriding columns with {cols}'.format(**locals()))
elif len(args) > 1:
  syntax('Unexpected arguments: {remain}'.format(remain=args[1:]))
else:
  if stdout and (not stderr) and (rc == 0):
    match = re.search(r'\d+\s+(\d+)', str(stdout))
    # Bug fix: this used to test `if stdout:`, which crashed with an
    # AttributeError on match.group() whenever the regex did not match.
    if match:
      cols = int(match.group(1))
      debug('cols={cols}, `stty size` returned {rc}, {stdout!r}, {stderr!r}'.format(**locals()))
    else:
      debug('defaulting to cols={cols} because `stty size` could not be parsed: {stdout!r}\n'.format(**locals()), loud=True)
  else:
    # Bug fix: this branch was `elif not all:`, but `all` is the builtin
    # function (always truthy), so the fallback warning could never run.
    debug('defaulting to cols={cols} because `stty size` returned {rc}, {stdout!r}, {stderr!r}\n'.format(**locals()), loud=True)
# Number of digit rows needed to label the highest column number.
digits = math.log10(cols)
debug('cols={cols}, digits={digits}, int(digits)={int_digits}'.format(int_digits=int(digits), **locals()))
if digits == int(digits):
  debug('power of 10!')
  # An exact power of ten needs one extra digit row (e.g. 100 -> 3 rows).
  digits = int(digits)+1
else:
  digits = int(math.ceil(digits))
debug('digits: {digits}'.format(**locals()))
"""
if digits >= 4:
  print(''.join([str(num)*1000 for num in range(10)] * int(math.ceil(cols/10000+1)))[1:cols+1])
if digits >= 3:
  print(''.join([str(num)*100 for num in range(10)] * int(math.ceil(cols/1000+1)))[1:cols+1])
if digits >= 2:
  print(''.join([str(num)*10 for num in range(10)] * int(math.ceil(cols/100+1)))[1:cols+1])
print ('1234567890' * int(math.ceil(cols/10+1)))[:cols]
"""
# One ruler row per digit position, most significant first: each digit is
# repeated 10**(digit-1) times so the rows line up into column numbers.
for digit in range(digits, 0, -1):
  print(''.join([str(num)*int(math.pow(10, digit-1)) for num in range(10)] * int(math.ceil(cols/math.pow(10, digit)+1)))[1:cols+1])
| true |
86d8d21ec522f7ea0df2e6d4e360d90af1a243a7 | Python | TarasRudnyk/Students_health_records | /data_processing.py | UTF-8 | 11,273 | 2.6875 | 3 | [] | no_license | import cx_Oracle
def get_configuration():
    """Read connection settings from the local ``config`` file and build a
    cx_Oracle DSN string of the form ``user/password@host:port/service``.

    Expected file format: one ``key: value`` pair per line with the keys
    ``name``, ``password``, ``server and port`` and ``database service``.

    Returns the connect string (also printed for operator visibility).
    """
    parameters = {}
    with open("config", encoding='utf-8') as config_file:
        for line in config_file:
            # Split on the first ": " only; the original unbounded split
            # raised ValueError whenever a value itself contained ": ".
            parameter, value = line.split(": ", 1)
            parameters[parameter.rstrip()] = value.strip()
    info = "{}/{}@{}/{}".format(parameters["name"],
                                parameters["password"],
                                parameters["server and port"],
                                parameters["database service"])
    print("Server started with parameters:", info)
    return info
# Module-level database connection and cursor, shared by every helper below.
# NOTE(review): a single global cursor is not safe under concurrent requests.
info = get_configuration()
con = cx_Oracle.connect(info)
cur = con.cursor()
def authorize_user(login, password):
    """Check *login*/*password* against the users table.

    Returns {"success": bool, "role": str}; role defaults to 'user'.
    NOTE(review): passwords appear to be stored and compared in plain text,
    and the whole users table is scanned client-side -- consider hashed
    passwords and a parameterized WHERE clause.
    """
    global con
    global cur
    authorize_result = {"success": False,
                        "role": 'user'}
    cur.execute('SELECT user_login, user_password, user_role FROM users')
    for result in cur:
        if login == result[0] and password == result[1]:
            user_role = result[2]
            authorize_result["success"] = True
            authorize_result["role"] = user_role
    return authorize_result
def get_user_diagnoses(login):
    """Collect all diagnoses linked (via MEDICALCARD) to the user *login*.

    Returns a dict with parallel lists: diagnose_name, diagnose_date
    (formatted '%d-%b-%Y'), diagnose_doctor and diagnose_time.
    NOTE(review): SQL is built with str.format (injection risk);
    user_card_number is unbound (NameError) if *login* matches no user;
    the bare except silently hides query failures; and a scalar/zero
    value after IN relies on Oracle accepting that syntax -- confirm.
    """
    global con
    global cur
    diagnoses_result = {"success": True,
                        "diagnose_name": '',
                        "diagnose_date": '',
                        "diagnose_doctor": '',
                        "diagnose_time": ''}
    diagnose_name = []
    diagnose_date = []
    diagnose_time = []
    diagnose_doctor = []
    diagnose_number = []
    cur.execute('SELECT user_card_number FROM users WHERE user_login = \'{0}\''.format(login))
    for result_card_number in cur:
        user_card_number = result_card_number[0]
    try:
        cur.execute('SELECT diagnose_number FROM MEDICALCARD WHERE user_card_number = \'{0}\''.format(user_card_number))
        for result_diagnose_number in cur:
            diagnose_number.append(result_diagnose_number[0])
    except:
        pass
    # Build a value usable after "IN": a tuple renders as "(1, 2)", a single
    # number stands alone, and 0 is the sentinel for "no diagnoses".
    if len(diagnose_number) > 1:
        diagnose_number_tuple = tuple(diagnose_number)
    elif len(diagnose_number) == 1:
        diagnose_number_tuple = diagnose_number[0]
    else:
        diagnose_number_tuple = 0
    cur.execute('SELECT disease_name, diagnose_date, diagnose_doctor, diagnose_time FROM DIAGNOSES WHERE diagnose_number IN {0}'.format(diagnose_number_tuple))
    for result_diagnose in cur:
        diagnose_name.append(result_diagnose[0])
        diagnose_date.append(str(result_diagnose[1].strftime('%d-%b-%Y')))
        diagnose_doctor.append(result_diagnose[2])
        diagnose_time.append(result_diagnose[3])
    diagnoses_result["diagnose_name"] = diagnose_name
    diagnoses_result["diagnose_date"] = diagnose_date
    diagnoses_result["diagnose_doctor"] = diagnose_doctor
    diagnoses_result["diagnose_time"] = diagnose_time
    return diagnoses_result
def get_all_users():
    """Return card numbers, full names and groups of every non-admin user."""
    global con
    global cur
    cards, names, groups = [], [], []
    cur.execute('SELECT user_card_number, user_full_name, user_group FROM users WHERE user_role != \'admin\'')
    for card, name, group in cur:
        cards.append(card)
        names.append(name)
        groups.append(group)
    return {"success": True,
            "users_card_numbers": cards,
            "users_full_names": names,
            "users_groups": groups}
def add_new_user(add_users_result):
    """Insert a new user row from the *add_users_result* dict.

    Joins user_full_name (given as a 2-item sequence) into one string,
    then inserts inside a serializable transaction; rolls back on failure.
    Returns {"success": bool}.
    NOTE(review): SQL is built with str.format -- use bind variables to
    avoid injection; the bare except also hides the failure reason.
    """
    result = {
        "success": True
    }
    global con
    global cur
    # First and last name arrive separately; store them as one column.
    add_users_result["user_full_name"] = add_users_result["user_full_name"][0] + " " + add_users_result["user_full_name"][1]
    try:
        cur.execute('set transaction isolation level serializable')
        cur.execute('INSERT INTO users (USER_CARD_NUMBER, USER_LOGIN, USER_PASSWORD, USER_FULL_NAME,'
                    'USER_PHONE_NUMBER, USER_GROUP, USER_EMAIL, USER_ROLE)'
                    'VALUES (\'{0}\',\'{1}\', \'{2}\', \'{3}\', \'{4}\', \'{5}\', \'{6}\', \'{7}\')'.format(
                        add_users_result['user_card_number'],
                        add_users_result['username'],
                        add_users_result['password'],
                        add_users_result['user_full_name'],
                        add_users_result['user_phone_number'],
                        add_users_result['user_group'],
                        add_users_result['user_email'],
                        add_users_result['user_role']))
        con.commit()
    except:
        result["success"] = False
        con.rollback()
    return result
def edit_user_info_select_data(user_card_number):
    """Fetch the editable profile fields for the given card number.

    Returns a dict with user_full_name, user_group, user_email and
    user_phone_number (empty strings when no row matches).
    NOTE(review): SQL built with str.format -- prefer bind variables.
    """
    global con
    global cur
    user_data = {'success': True,
                 'user_full_name': '',
                 'user_group': '',
                 'user_email': '',
                 'user_phone_number': ''}
    try:
        cur.execute('SELECT user_full_name, user_group, user_email, user_phone_number '
                    'FROM users WHERE user_card_number =\'{0}\''.format(user_card_number))
        for result_user_data in cur:
            user_data['user_full_name'] = result_user_data[0]
            user_data['user_group'] = result_user_data[1]
            user_data['user_email'] = result_user_data[2]
            user_data['user_phone_number'] = result_user_data[3]
    except:
        user_data["success"] = False
    return user_data
def edit_user_info_select_diagnoses(user_card_number):
    """Fetch disease names, dates and times of all diagnoses for a user.

    Returns {"success": bool, "diagnoses": [...], "dates": [...], "times": [...]}.
    NOTE(review): when the DIAGNOSES query raises, success is set False but
    the subsequent `for result_diagnose in cur` still iterates whatever the
    cursor last held -- confirm this fallthrough is intentional.
    """
    global con
    global cur
    result = {
        "success": True
    }
    user_diagnose_numbers = []
    user_diagnose_names = []
    user_diagnose_dates = []
    user_diagnose_time = []
    # Selecting diagnose_numbers to get all users diagnoses
    cur.execute('SELECT diagnose_number FROM MEDICALCARD'
                ' WHERE user_card_number = \'{0}\''.format(user_card_number))
    for result_user_diagnose_number in cur:
        user_diagnose_numbers.append(result_user_diagnose_number[0])
    # Render an IN-compatible value: tuple, bare number, or 0 for "none".
    if len(user_diagnose_numbers) > 1:
        user_diagnose_numbers_tuple = tuple(user_diagnose_numbers)
    elif len(user_diagnose_numbers) == 1:
        user_diagnose_numbers_tuple = user_diagnose_numbers[0]
    else:
        user_diagnose_numbers_tuple = 0
    # Selecting users diagnoses
    try:
        cur.execute('SELECT disease_name, diagnose_date, diagnose_time FROM DIAGNOSES '
                    'WHERE diagnose_number IN {0}'.format(user_diagnose_numbers_tuple))
    except:
        result["success"] = False
    for result_diagnose in cur:
        user_diagnose_names.append(result_diagnose[0])
        user_diagnose_dates.append(str(result_diagnose[1].strftime('%d-%b-%Y')))
        user_diagnose_time.append(str(result_diagnose[2]))
    result["diagnoses"] = user_diagnose_names
    result["dates"] = user_diagnose_dates
    result["times"] = user_diagnose_time
    return result
def edit_user_info_update_data(user_edited_data):
    """Update a user's profile fields inside a serializable transaction.

    Rolls back and returns {"success": False} on any failure.
    NOTE(review): SQL built with str.format -- prefer bind variables.
    """
    global con
    global cur
    result = {
        "success": True
    }
    # The form serializes a missing phone number as the string 'None'.
    if user_edited_data["user_phone_number"] == 'None':
        user_edited_data["user_phone_number"] = ""
    try:
        cur.execute('set transaction isolation level serializable')
        cur.execute('UPDATE users '
                    'SET user_full_name = \'{0}\','
                    'user_group = \'{1}\','
                    'user_email = \'{2}\','
                    'user_phone_number = \'{3}\' '
                    'WHERE user_card_number = \'{4}\''.format(user_edited_data['user_full_name'],
                                                              user_edited_data['user_group'],
                                                              user_edited_data['user_email'],
                                                              user_edited_data['user_phone_number'],
                                                              user_edited_data['user_card_number']))
        con.commit()
    except:
        result["success"] = False
        con.rollback()
    return result
def edit_user_select_all_diseases():
    """Return every known disease name from the diseases table."""
    global con
    global cur
    cur.execute('SELECT disease_name FROM diseases ')
    names = [row[0] for row in cur]
    return {"success": True, "diseases": names}
def edit_user_info_add_diagnose(diagnose_data, card_number):
    """Insert a diagnosis plus its MEDICALCARD link in one transaction.

    Both inserts share a serializable transaction; rollback on failure.
    Returns {"success": bool}.
    NOTE(review): SQL built with str.format -- prefer bind variables.
    """
    global con
    global cur
    result = {
        "success": True
    }
    try:
        # print(diagnose_data['diagnose_number'])
        cur.execute('set transaction isolation level serializable')
        cur.execute('INSERT INTO DIAGNOSES (DIAGNOSE_NUMBER, DISEASE_NAME, DIAGNOSE_DATE, DIAGNOSE_DOCTOR, DIAGNOSE_TIME) '
                    'VALUES (\'{0}\',\'{1}\', \'{2}\', \'{3}\', \'{4}\')'.format(
                        diagnose_data['diagnose_number'],
                        diagnose_data['disease_name'],
                        diagnose_data['diagnose_date'],
                        diagnose_data['diagnose_doctor'],
                        diagnose_data['diagnose_time']))
        cur.execute('INSERT INTO MEDICALCARD (DIAGNOSE_NUMBER, USER_CARD_NUMBER) '
                    'VALUES (\'{0}\', \'{1}\')'.format(diagnose_data['diagnose_number'], card_number))
        con.commit()
    except:
        result["success"] = False
        con.rollback()
    return result
def delete_selected_users(user_card_number):
    """Delete a user and their MEDICALCARD links in one transaction.

    Child rows in MEDICALCARD are removed first to satisfy any
    foreign-key constraint; rollback on failure.
    Returns {"success": bool}.
    """
    global con
    global cur
    result = {
        "success": True
    }
    try:
        cur.execute('set transaction isolation level serializable')
        cur.execute('DELETE FROM MEDICALCARD '
                    'WHERE user_card_number = {0}'.format(user_card_number))
        cur.execute('DELETE FROM users '
                    'WHERE user_card_number = {0}'.format(user_card_number))
        con.commit()
    except:
        result["success"] = False
        con.rollback()
    return result
def get_diagnose_number():
    """Return the highest diagnose_number under the key "count".

    NOTE(review): MAX() yields NULL (Python None) on an empty table,
    overwriting the initial value of 1 -- callers that arithmetic on
    "count" should guard against None.
    """
    global con
    global cur
    # con = cx_Oracle.connect('taras/orcl@localhost/orcl')
    # cur = con.cursor()
    result = {
        "success": True
    }
    diagnose_number = 1
    cur.execute('SELECT MAX(diagnose_number) FROM DIAGNOSES')
    for result_diagnose_number in cur:
        diagnose_number = result_diagnose_number[0]
    result["count"] = diagnose_number
    return result
| true |
8fc3f99def3895cf6849f21996a81100f5082ca5 | Python | jakejg/WTforms-adoption | /forms.py | UTF-8 | 1,135 | 2.84375 | 3 | [] | no_license | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SelectField, BooleanField
from wtforms.validators import InputRequired, URL, AnyOf, NumberRange, Optional
class AddPet(FlaskForm):
    """WTForms form for adding a pet; only Cat and Dog are accepted species."""
    name = StringField("Name of Pet", validators=[InputRequired()])
    # Error message fixed: it used to offer "Porcupine", which AnyOf rejects.
    species = StringField("Type of Animal", validators=[InputRequired(), AnyOf(["Cat", "Dog"], message="You must pick either Cat or Dog")])
    photo_url = StringField("Picture of Pet (URL)", validators=[Optional(), URL()])
    age = IntegerField("Age of the Pet", validators=[InputRequired(), NumberRange(min=0, max=30, message="Is your pet really over 30?")])
    notes = StringField("Any Notes About the Pet")
class EditPet(FlaskForm):
    """WTForms form for editing an existing pet; note it omits the species field."""
    name = StringField("Name of Pet", validators=[InputRequired()])
    photo_url = StringField("Picture of Pet (URL)", validators=[Optional(), URL()])
    age = IntegerField("Age of the Pet", validators=[InputRequired(), NumberRange(min=0, max=30, message="Is your pet really over 30?")])
    notes = StringField("Any Notes About the Pet")
    available = BooleanField("Check Box if Still Available")
| true |
22370cf8fa86a6bea03aff090c9113d300be18a9 | Python | lpk-py/pymodes | /pymodes/tests/test_eigenfrequencies.py | UTF-8 | 6,999 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Tests for the eigenfrequency rootfinding functions.
:copyright:
Martin van Driel (Martin@vanDriel.de), 2016
:license:
None
'''
import inspect
import numpy as np
import os
import pymesher
from .. import eigenfrequencies
# Directory holding the test fixture files, next to this module.
# os.path.abspath(__file__) is the idiomatic equivalent of the original
# inspect.currentframe() gymnastics and yields the same path on import.
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
def test_analytical_eigen_frequencies():
    """Regression test: toroidal ('T') and spheroidal ('S') eigenfrequencies
    of a homogeneous sphere against stored reference values, plus the
    Dahlen & Tromp l=1 toroidal values."""
    freq = eigenfrequencies.analytical_eigen_frequencies(
        omega_max=0.01, omega_delta=0.00001, l=10, rho=1e3, vs=1e3, vp=1.7e3,
        R=6371e3, mode='T')
    freq_ref = np.array([0.00185089, 0.00264984, 0.00325404, 0.00381579,
                         0.00435724, 0.00488677, 0.00540855, 0.00592491,
                         0.00643731, 0.00694673, 0.00745382, 0.00795906,
                         0.00846281, 0.00896532, 0.00946681, 0.00996743])
    np.testing.assert_allclose(freq, freq_ref, atol=1e-8)
    freq = eigenfrequencies.analytical_eigen_frequencies(
        omega_max=0.01, omega_delta=0.00001, l=10, rho=1e3, vs=1e3, vp=1.7e3,
        R=6371e3, mode='S')
    freq_ref = np.array([0.00261509, 0.00304317, 0.00341088, 0.00389115,
                         0.00417137, 0.00456348, 0.00497069, 0.00519278,
                         0.00562593, 0.00597034, 0.00619746, 0.00665583,
                         0.00692063, 0.0072023, 0.00766575, 0.00784553,
                         0.0082065, 0.00864818, 0.00877123, 0.00920807,
                         0.00957729, 0.00973326])
    np.testing.assert_allclose(freq, freq_ref, atol=1e-8)
    # test values from Dahlen & Tromp, section 8.7.4, for l = 1
    freq = eigenfrequencies.analytical_eigen_frequencies(
        omega_max=0.002, omega_delta=0.00001, l=1, rho=1e3, vs=1e3, vp=1.7e3,
        R=6371e3, mode='T')
    np.testing.assert_allclose(freq / 1e3 * 6371e3, [5.76, 9.10, 12.32],
                               atol=1e-2)
def test_analytical_eigen_frequencies_catalogue():
    """Regression test: toroidal catalogue up to lmax=10 against stored
    reference values (a strided sample of the flattened catalogue)."""
    cat = eigenfrequencies.analytical_eigen_frequencies_catalogue(
        omega_max=0.01, omega_delta=0.00001, lmax=10, rho=1e3, vs=1e3,
        vp=1.7e3, R=6371e3, mode='T')
    cat_ref = np.array([0., 0.00590478, 0.00165038, 0.00713279, 0.00288998,
                        0.00835924, 0.00411931, 0.00958474, 0.00534555, np.nan,
                        0.00657045, 0.00209772, 0.00779465, 0.00341957,
                        0.00901844, 0.00468094, np.nan, 0.00592491, np.nan])
    np.testing.assert_allclose(cat.flatten()[::11], cat_ref, atol=1e-8)
def test_integrate_eigen_frequencies():
    """Check the numerically integrated eigenfrequencies against the
    analytical homogeneous-sphere solution and against stored PREM values."""
    # compare to analytical solution
    l = 5
    rho = 1e3
    vs = 1e3
    vp = 1.7e3
    R = 6371e3
    omega_max = 0.01
    # TOROIDAL
    freq_ref = eigenfrequencies.analytical_eigen_frequencies(
        omega_max=omega_max, omega_delta=0.00001, l=l, rho=rho, vs=vs, vp=vp,
        R=6371e3, mode='T')
    freq = eigenfrequencies.integrate_eigen_frequencies(
        omega_max, l, rho=rho, vs=vs, vp=vp, R=R, mode='T', nsamp_per_layer=10,
        integrator_rtol=1e-8, rootfinder_tol=1e-8)
    np.testing.assert_allclose(freq, freq_ref, atol=1e-8)
    # CI
    model = pymesher.model.built_in('prem_iso')
    freq = eigenfrequencies.integrate_eigen_frequencies(
        omega_max=0.04, l=20, model=model, mode='T', nsamp_per_layer=10,
        integrator_rtol=1e-7, rootfinder_tol=1e-6)
    freq_ref = np.array([0.0175524, 0.02627687, 0.03191921, 0.03658574])
    np.testing.assert_allclose(freq, freq_ref, atol=1e-6)
    # SPHEROIDAL
    freq_ref = eigenfrequencies.analytical_eigen_frequencies(
        omega_max=omega_max, omega_delta=0.00001, l=l, rho=rho, vs=vs, vp=vp,
        R=6371e3, mode='S', gravity=False)
    freq = eigenfrequencies.integrate_eigen_frequencies(
        omega_max, l, rho=rho, vs=vs, vp=vp, R=R, mode='S', nsamp_per_layer=10,
        integrator_rtol=1e-8, rootfinder_tol=1e-8, gravity=False)
    np.testing.assert_allclose(freq, freq_ref, atol=1e-8)
    # CI
    model = pymesher.model.read(os.path.join(DATA_DIR, 'prem_iso_solid.bm'))
    freq = eigenfrequencies.integrate_eigen_frequencies(
        omega_max=0.04, l=20, model=model, mode='S', nsamp_per_layer=10,
        integrator_rtol=1e-7, rootfinder_tol=1e-6, gravity=False)
    freq_ref = np.array([0.0182855, 0.02485917, 0.02911758, 0.03383443,
                         0.03860865])
    np.testing.assert_allclose(freq, freq_ref, atol=1e-6)
def test_integrate_eigen_frequencies_catalogue():
    """Check the numerically integrated eigenfrequency catalogues (toroidal
    and spheroidal) against the analytical solution and stored PREM values."""
    # compare to analytical solution
    lmax = 3
    rho = 1e3
    vs = 1e3
    vp = 1.7e3
    R = 6371e3
    omega_delta = 0.00001
    # TOROIDAL
    omega_max = 0.005
    model = pymesher.model.read(os.path.join(DATA_DIR, 'homo_model.bm'))
    ref_cat_t = eigenfrequencies.analytical_eigen_frequencies_catalogue(
        omega_max, omega_delta, lmax, rho, vs, vp, R, mode='T')
    cat_t = eigenfrequencies.integrate_eigen_frequencies_catalogue(
        omega_max, lmax, model=model, nsamp_per_layer=10, integrator_rtol=1e-6,
        rootfinder_tol=1e-6)
    np.testing.assert_allclose(ref_cat_t, cat_t, atol=1e-6)
    # CI
    omega_max = 0.005 * 2 * np.pi
    model = pymesher.model.built_in('prem_iso')
    cat_t = eigenfrequencies.integrate_eigen_frequencies_catalogue(
        omega_max, lmax, model=model, nsamp_per_layer=10,
        integrator_rtol=1e-6, rootfinder_tol=1e-6)
    ref_cat_t = np.array([0., 0.00782317, 0.01386107, 0.020281, 0.02723831,
                          0.00240334, 0.00835497, 0.01413261, 0.02047369,
                          0.02737895, 0.00371385, 0.00910818, 0.01453733,
                          0.02076128, 0.02758907])
    np.testing.assert_allclose(ref_cat_t, cat_t.flatten(), atol=1e-7)
    # SPHEROIDAL
    omega_max = 0.002
    model = pymesher.model.read(os.path.join(DATA_DIR, 'homo_model.bm'))
    ref_cat_s = eigenfrequencies.analytical_eigen_frequencies_catalogue(
        omega_max, omega_delta, lmax, rho, vs, vp, R, mode='S', gravity=False)
    cat_s = eigenfrequencies.integrate_eigen_frequencies_catalogue(
        omega_max, lmax, model=model, nsamp_per_layer=10, integrator_rtol=1e-6,
        rootfinder_tol=1e-6, mode='S', gravity=False)
    # remove zero from analytical catalogue
    ref_cat_s[0, :-1] = ref_cat_s[0, 1:]
    ref_cat_s = ref_cat_s[:, :-1]
    np.testing.assert_allclose(ref_cat_s, cat_s, atol=1e-6)
    # CI
    omega_max = 0.01
    model = pymesher.model.read(os.path.join(DATA_DIR, 'prem_iso_solid.bm'))
    cat_s = eigenfrequencies.integrate_eigen_frequencies_catalogue(
        omega_max, lmax, model=model, nsamp_per_layer=10,
        integrator_rtol=1e-6, rootfinder_tol=1e-6, mode='S', gravity=False)
    ref_cat_s = np.array([0.00242908, 0.00516044, 0.00807586, np.nan,
                          0.00378237, 0.00462673, 0.00738657, 0.00874269,
                          0.00524572, 0.00627948, 0.00929735, np.nan])
    np.testing.assert_allclose(ref_cat_s, cat_s.flatten(), atol=1e-7)
| true |
7397d7ad5d5b00498bc67a3a10338a99e13a4359 | Python | Joserra13/TFG | /Web App IoT/encender.py | UTF-8 | 202 | 3.078125 | 3 | [] | no_license | import serial
# Turn on an LED by sending the command 'H' to an Arduino over USB serial.
arduino = serial.Serial('/dev/ttyACM0', 9600)
comando = 'H' #Input
arduino.write(comando) #Send the command to Arduino
# NOTE(review): on Python 3 pyserial's write() expects bytes; passing a str
# presumably only works under Python 2 -- confirm the target interpreter.
print("LED ON")
arduino.close() #End the communication
c0bdebdddc273da113c0ae4d5901dcc71bbd95d6 | Python | redvasily/lighttpdrecipe | /lighttpdrecipe/recipe.py | UTF-8 | 2,402 | 2.546875 | 3 | [
"BSD-3-Clause"
] | permissive | import re
import os
from os.path import join, dirname, abspath
import logging
import zc.buildout
import buildoutjinja
# Plain hostnames: letters, digits, dots and dashes only (case-insensitive).
hostname_regexp = re.compile(r'^[-a-z\.0-9]*$', re.I)

def is_simple_host(s):
    """Return True when *s* is a single plain hostname: at most one line
    and no characters outside the hostname alphabet (no regex syntax)."""
    multiline = len(s.splitlines()) > 1
    return not multiline and bool(hostname_regexp.match(s))
def is_true(s):
    """Interpret common affirmative strings (yes/y/true/enable/enabled,
    case-insensitive) as True; everything else is False."""
    affirmative = {'yes', 'y', 'true', 'enable', 'enabled'}
    return s.lower() in affirmative
class Lighttpd:
    """zc.buildout recipe that renders a lighttpd virtual-host config file
    from a Jinja template.

    Part options: 'host' (required; first line is the vhost / default
    redirect target), optional 'redirect_from'/'redirect_to', 'priority'
    (config file number prefix, default '11') and 'template'.
    NOTE(review): `.iteritems()` below implies this targets Python 2.
    """
    def __init__(self, buildout, name, options):
        self.name, self.options = name, options
        self.logger = logging.getLogger(name)
        self.options = options
        if 'host' not in options:
            msg = "Required option 'host' is not specified."
            self.logger.error(msg)
            raise zc.buildout.UserError(msg)
        # The first host line doubles as the default redirect destination.
        redirect_to = options['host'].splitlines()[0].strip()
        # A regexp-looking host cannot serve as a redirect destination, so
        # an explicit 'redirect_to' option must be given in that case.
        if ('redirect_to' not in options and 'redirect_from' in options and
                not is_simple_host(redirect_to)):
            msg = ("Redirect location looks like a regexp. Please specify"
                   " redirect destination with 'redirect_to' option")
            self.logger.error(msg)
            raise zc.buildout.UserError(msg)
        default_options = {
            'priority': '11',
            'config_name': options.get('redirect_to', redirect_to),
            'redirect_to': redirect_to,
        }
        # Fill in defaults without overwriting user-provided options.
        for key, value in default_options.iteritems():
            if key not in options:
                options[key] = value
        options['config_file'] = (options['priority'] + '-' +
                                  options['config_name'] + '.conf')
        # NOTE(review): host_regexp appears unused here; presumably the Jinja
        # template references it -- confirm before removing.
        def host_regexp(h):
            return ('|'.join('(%s)' % h for h in h.split()))
        template_name = options.get('template', 'djangorecipe_fcgi.jinja')
        # Search the recipe's own directory first, then the buildout root.
        template_search_paths = [
            dirname(abspath(__file__)),
            buildout['buildout']['directory'],
        ]
        self.result = buildoutjinja.render_template(
            template_search_paths,
            template_name,
            buildout,
            options,
            tests={
                'simple_host': is_simple_host,
                'true': is_true,
            },
        )
    def install(self):
        # NOTE(review): the file handle is never closed explicitly; CPython's
        # refcounting flushes it, but a `with` block would be safer.
        open(self.options['config_file'], 'w').write(self.result)
        self.options.created(self.options['config_file'])
        return self.options.created()
    def update(self):
        return self.install()
| true |
2e74f7a6ca18020b944bc583373c142c747c8c24 | Python | leequant761/Fluent-python | /02-array-seq/bisect_demo.py | UTF-8 | 1,373 | 3.609375 | 4 | [
"MIT"
] | permissive | # BEGIN BISECT_DEMO
import bisect
import sys
HAYSTACK = [1, 4, 5, 6, 8, 12, 15, 20, 21, 23, 23, 26, 29, 30] # sorted sequence to search in
NEEDLES = [0, 1, 2, 5, 8, 10, 22, 23, 29, 30, 31] # values to insert while keeping it sorted
ROW_FMT = '{0:2d} @ {1:2d}    {2}{0:<2d}'
def demo(bisect_fn):
    """Print the insertion point of every needle in HAYSTACK using *bisect_fn*."""
    for needle in reversed(NEEDLES):
        position = bisect_fn(HAYSTACK, needle)  # <1> find the insertion point
        offset = position * '  |'  # <2> visual offset proportional to the position
        print(ROW_FMT.format(needle, position, offset))  # <3> formatted row
if __name__ == '__main__':
    if sys.argv[-1] == 'left':  # <4> if the command line ends with 'left',
        bisect_fn = bisect.bisect_left  # insert on the left side on ties
    else:
        bisect_fn = bisect.bisect
    print('DEMO:', bisect_fn.__name__)  # <5> name of the selected function
    print('haystack ->', ' '.join('%2d' % n for n in HAYSTACK))
    demo(bisect_fn)
# Example: a grade() function mapping a numeric exam score to a letter grade.
def grade(score, breakpoints=(60, 70, 80, 90), grades='FDCBA'):
    """Return the letter grade for *score*.

    bisect.bisect finds the insertion point of *score* in the sorted
    *breakpoints*, which is exactly the index of the matching letter.
    Defaults are immutable (tuple/str) to avoid the mutable-default pitfall.
    """
    i = bisect.bisect(breakpoints, score)
    return grades[i]
# Demo: letter grades for a batch of scores (expected: F A C B A A).
print([grade(score) for score in [33, 99, 77, 89, 90, 100]])
# bisect is faster than list.index when searching long sorted numeric sequences.
# The lo and hi arguments can restrict the search to a sub-range.
6d873405e6bb2d7602b29ae94d80f34dc982cf17 | Python | AlfredZuo/PythonTest | /myTest.01/leet_code_94_二叉树的中序遍历_DFS.py | UTF-8 | 1,053 | 4.03125 | 4 | [] | no_license | '''
94. 二叉树的中序遍历 DFS
给定一个二叉树的根节点 root ,返回 它的 中序 遍历 。
示例 1:
输入:root = [1,null,2,3]
输出:[1,3,2]
示例 2:
输入:root = []
输出:[]
示例 3:
输入:root = [1]
输出:[1]
提示:
树中节点数目在范围 [0, 100] 内
-100 <= Node.val <= 100
'''
# Node class
class TreeNode(object):
    def __init__(self, x):
        """Create a node holding value *x*; both children start as None."""
        self.val = x
        self.left = None
        self.right = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
def dfs(node: TreeNode, rlist: list):
    """Append the values of *node*'s subtree to *rlist* in in-order
    (left subtree, node, right subtree). No-op when *node* is None."""
    if node is not None:
        dfs(node.left, rlist)
        rlist.append(node.val)
        dfs(node.right, rlist)
class Solution:
    def inorderTraversal(self, root: "TreeNode | None") -> "list[int]":
        """Return the in-order traversal of the binary tree rooted at *root*.

        Annotations are string literals because the original used
        Optional[...] / List[...] without importing them from typing,
        which raised NameError when the class was defined.
        """
        result = []
        dfs(root, result)
        # Stray debug print(result) removed; just return the collected values.
        return result
| true |
04ecc97fe9cdb5e928c7cc069a8d11e183776f52 | Python | alltej/kb-python | /tests/coding_challenge/test_working_hours.py | UTF-8 | 1,458 | 3.046875 | 3 | [] | no_license | from nose.tools import assert_equal
import working_hours
class TestWorkingHours(object):
    """Checks for working_hours.WorkingHours using nose-style assertions."""
    def is_working_hours_func(self, func):
        """Verify *func* flags 9-18 as working hours and other hours as not."""
        for hour in (9, 11, 13, 15, 18):
            assert_equal(func(hour), True)
        for hour in (8, 20, 23, 0, 1, 3, 5):
            assert_equal(func(hour), False)
        print('Success: test_working_hours')
    def is_non_working_hours_func(self, func):
        """Verify *func* is the exact complement of the working-hours check."""
        for hour in (9, 11, 13, 15, 18):
            assert_equal(func(hour), False)
        for hour in (0, 1, 3, 5, 8, 20, 23):
            assert_equal(func(hour), True)
        print('Success: test_non_working_hours')
def main():
    """Run both checks against a WorkingHours instance."""
    test = TestWorkingHours()
    try:
        wh = working_hours.WorkingHours()
        test.is_working_hours_func(wh.is_working_hours)
        test.is_non_working_hours_func(wh.is_non_working_hours)
    except NameError:
        # Deliberate exercise scaffold: silently skip while the
        # WorkingHours class has not been implemented yet.
        pass
if __name__ == '__main__':
    main()
| true |
6ee9d78a9573f99a984f0ff9d524fead5f5278d7 | Python | ConfickerVik/home_work | /laba13/mission13_2/CreateXML.py | UTF-8 | 1,226 | 2.75 | 3 | [] | no_license | from xml.dom import minidom
class CreateXml:
    """Builds a small SOAP envelope document and writes it to disk."""

    def create_xml(self, mas, values=('12345', '2019', 'July'),
                   filename="created_xml.xml"):
        """Write a pretty-printed SOAP XML file.

        mas      -- sequence of three tag names placed inside soap:Body
                    (e.g. ["productID", "Year", "Month"]).
        values   -- text content for each tag, parallel to *mas*; the
                    defaults keep the original hard-coded demo values.
        filename -- output path; the default preserves original behavior.
        """
        doc = minidom.Document()
        # Root element 'soap:Envelope' with its namespace declaration.
        root = doc.createElement('soap:Envelope')
        root.setAttribute('xmlns:soap', 'http://example.schemas.xmlsoap.org/soap/envelope/')
        doc.appendChild(root)
        # 'soap:Body' wrapper for the payload tags.
        body = doc.createElement('soap:Body')
        root.appendChild(body)
        # One child element per (tag, text) pair -- replaces the three
        # copy-pasted blocks of the original implementation.
        for tag, text in zip(mas, values):
            element = doc.createElement(tag)
            element.appendChild(doc.createTextNode(text))
            body.appendChild(element)
        xml_str = doc.toprettyxml(indent=" ")
        with open(filename, "w") as f:
            f.write(xml_str)
| true |
c80b26a41d86ec4f2f702aab0922b86eec368e84 | Python | Brucehanyf/python_tutorial | /file_and_exception/file_reader.py | UTF-8 | 917 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | # 读取圆周率
# Read the whole file at once (kept for reference):
# with open('pi_digits.txt') as file_object:
#     contents = file_object.read()
#     print(contents)
# file_path = 'pi_digits.txt';
# \f in the Windows path below must be escaped
# Read line by line
# NOTE(review): hard-coded absolute Windows path -- only works on the
# author's machine; consider a relative path.
file_path = "D:\PycharmProjects\practise\\file_and_exception\pi_digits.txt";
# with open(file_path) as file_object:
#     for line in file_object:
#         print(line)
# file_object.readlines()
# with open(file_path) as file_object:
#     lines = file_object.readlines()
#     for line in lines:
#         print(line)
# Use the contents of the file: join all lines into one digit string.
with open(file_path) as file_object:
    lines = file_object.readlines()
    result = '';
    for line in lines:
        result += line.strip()
    print(result)
    print(result[:10]+'......')
    print(len(result))
# Prompt text means "please enter your birthday".
birthday = input('请输入您的生日')
# NOTE(review): the messages below contain typos ("pai", "does not appears");
# left unchanged here because they are runtime output.
if birthday in result:
    print("your birthday appears in pai digits")
else:
    print("your birthday does not appears in pai digits")
| true |
a52e403dc724e2ace4d4a45c4158425487f7bfe3 | Python | blengerich/Personalized_Regression_ISMB18 | /distance_matching.py | UTF-8 | 18,791 | 2.859375 | 3 | [
"MIT"
] | permissive | # Personalized Regression with Distance Matching Regularization
import numpy as np
np.set_printoptions(precision=4)
import time
from utils import *
from sklearn.preprocessing import normalize
from multiprocessing.pool import ThreadPool
class DistanceMatching():
    def __init__(self, init_beta,
        f, f_prime,
        gamma, n_neighbors, calc_closest_every,
        rho_beta, rho_beta_prime,
        init_phi_beta, psi_beta, psi_beta_prime,
        init_phi_u, psi_u, psi_u_prime,
        init_beta_scale, psi_beta_scale, psi_beta_scale_prime,
        intercept, log_dir="./logs", n_threads=1):
        """
        Create a new DistanceMatching object.
        Arguments
        ==========
        init_beta: numpy array of the initial model parameters. Should be of size N x P.
        f        : Python function for error of prediction error.
                   Should take X^{(i)}, Y^{(i)}, beta^{(i)} and return a non-negative real value.
        f_prime  : Python function for sub-gradient of prediction error.
                   Should take X^{(i)}, Y^{(i)}, beta^{(i)} and return a sub-gradient vector of size P.
        gamma    : Hyperparameter for DMR strength.
        n_neighbors : Integer number of neighbors for each point.
        calc_closest_every: Integer number of iterations for which to re-calculate neighbors.
            Currently, neighbors are random so they should be computed relatively frequently.
        rho_beta : Python function for regularization of beta.
                   Should take beta^{(i)} and return a non-negative real value.
        rho_beta_prime : Python function for sub-gradient of beta regularization.
                   Should take beta^{(i)} and return a sub-gradient vector of size P.
        init_phi_beta : numpy array of the initial phi_beta vector. Should be of size P.
        psi_beta : Python function for regularization on phi_beta.
                   Should take phi_beta and return a non-negative real value.
        psi_beta_prime : Python function for sub-gradient of phi_beta regularization.
                   Should take phi_beta and return a sub-gradient vector of size P.
        init_phi_u : numpy array of the initial phi_u vector. Should be of size K.
        psi_u    : Python function for regularization on phi_u.
                   Should take phi_u and return a non-negative real value.
        psi_u_prime : Python function for sub-gradient of regularization of phi_u.
                   Should take phi_u and return a sub-gradient vector of size K.
        init_beta_scale : Positive hyperparameter for the amount of personalization.
            Lower implies more personalization, as described in the paper.
        psi_beta_scale : Python function for regularization on beta_scale.
            Should take a postiive real value and return a non-negative real value.
        psi_beta_scale_prime: Python function for sub-gradient of beta scale regularization.
            Should take a positivie real value and return a sub-gradient.
        intercept : Boolean, whether to fit an intercept term.
        log_dir  : string, directory to save output.
        n_threads : integer, max number of threads to use for multiprocessing.
        Returns
        ==========
        None
        """
        # Store every hyperparameter/callback as-is on the instance.
        self.init_beta = init_beta
        self.f = f
        self.f_prime = f_prime
        self.gamma = gamma
        self.n_neighbors = n_neighbors
        self.calc_closest_every = calc_closest_every
        self.rho_beta = rho_beta
        self.rho_beta_prime = rho_beta_prime
        self.init_phi_beta = init_phi_beta
        self.psi_beta = psi_beta
        self.psi_beta_prime = psi_beta_prime
        self.psi_beta_scale = psi_beta_scale
        self.psi_beta_scale_prime = psi_beta_scale_prime
        self.init_phi_u = init_phi_u
        self.psi_u = psi_u
        self.psi_u_prime = psi_u_prime
        self.init_beta_scale = init_beta_scale
        self.intercept = intercept
        self.log_dir = log_dir
        self.n_threads = n_threads
        if self.n_threads > 0:
            self.pool = ThreadPool(processes=self.n_threads)
            self.map = self.pool.map
        else:
            self.pool = None
            # Serial fallback mimicking the thread pool's map interface.
            self.map = lambda x, y: list(map(x, y))
def _check_shapes(self, X, Y, U=None, dU=None, delta_U=None):
""" Does some basic checks on the shapes on the parameters. """
N = X.shape[0]
P = X.shape[1]
if U:
assert(U.shape[0] == N)
K = U.shape[1]
if dU:
K = len(dU)
if delta_U:
assert(delta_U.shape[0] == N)
assert(delta_U.shape[1] == N)
K = delta_U.shape[2]
return N, P, K
    def make_covariate_distances(self, U, dU, K, N, should_normalize=True, verbose=True):
        """ Make fixed pairwise distance matrix for co-variates.

        U  : N x K covariate matrix; dU : list of K per-covariate distance
        functions; returns an N x N x K float32 array D where
        D[i, j, k] = dU[k](U[i, k], U[j, k]) (symmetric in i, j).
        When should_normalize, each N x N slice is normalized with
        sklearn's normalize(). Cost is O(N^2 * K) in time and memory.
        """
        t = time.time()
        if verbose:
            print("Making Co-Variate Distance Matrix of Size {}x{}x{}".format(N, N, K))
        D = np.zeros((N, N, K))
        get_dist = lambda i, j: np.array([dU[k](U[i, k], U[j, k]) for k in range(K)], dtype="float32")
        # Fill the strict lower triangle, parallelized across j via self.map.
        for i in range(1, N):
            if verbose:
                print("{}\t/{}".format(i, N), end='\r')
            D[i, 0:i, :] = self.map(lambda j: get_dist(i, j), range(i))
        # Mirror into the upper triangle (distances are symmetric).
        for i in range(1, N):
            for j in range(i):
                D[j, i, :] = D[i, j, :] # could cut memory in half by only storing lists.
        if verbose:
            print("Finished making unnormalized version.")
        if should_normalize:
            normalized = np.array([normalize(D[:, :, k]) for k in range(K)])
            # Now the first axis references k. Move it to the back.
            normalized = np.swapaxes(normalized, 0, 1)
            D = np.swapaxes(normalized, 1, 2)
        if verbose:
            print("Finished normalizing.")
            print("Took {:.3f} seconds.".format(time.time() - t))
        return D
def make_covariate_distance_function(self, U, dU, K):
""" If N is large, it is more effecient to compute the covariate distances lazily. """
func = lambda i,j: np.array([dU[k](U[i,k], U[j,k]) for k in range(K)])
return func
    def _calc_personalized_reg_grad(self, phi_beta, phi_u, beta_hat, beta_scale,
        dist_errors, N, delta_U, delta_beta, closest):
        """ Calculates the gradients for the distance matching regularization.
        Arguments
        ==========
        phi_beta : numpy vector, current estimate of phi_beta
        phi_u    : numpy vector, current estimate of phi_u
        beta_hat : numpy matrix, current estimate of beta_hat
        beta_scale : float, current estimate of beta_scale
        dist_errors : list of lists of errrors.
        N : integer number of samples.
        delta_U : numpy matrix, static pairwise distance matrix.
        delta_beta : Python function which calculates pairwise model distances.
        closest : list of lists of closest indices.
        Returns
        =======
        grad_beta : numpy matrix, sub-gradient wrt beta.
        grad_phi_beta : numpy vector, sub-gradient wrt phi_beta.
        grad_phi_u : numpy vector, sub-gradient wrt phi_u.
        grad_beta_scale : float, sub-gradient wrt beta_scale.
        """
        # Start each gradient from its regularizer's sub-gradient.
        grad_phi_beta = self.psi_beta_prime(phi_beta)
        grad_phi_u = self.psi_u_prime(phi_u)
        grad_beta = np.zeros_like(beta_hat)
        grad_beta_scale = self.psi_beta_scale_prime(beta_scale)
        # Per-sample closures averaged over each sample's neighbor set.
        def _calc_one_beta(i):
            return np.multiply(
                np.mean(np.array(
                    [dist_errors[i, idx]*np.sign(beta_hat[i] - beta_hat[j]) for idx, j in enumerate(closest[i])]), axis=0), phi_beta.T)
        def _calc_one_phi_beta(i):
            return np.mean(np.array([dist_errors[i, idx]*delta_beta(i, j) for idx, j in enumerate(closest[i])]), axis=0)
        def _calc_one_phi_u(i):
            return -np.mean(np.array([dist_errors[i, idx]*delta_U[i, j] for idx, j in enumerate(closest[i])]), axis=0)
        def _calc_one_beta_scale(i):
            return -np.mean(np.array([dist_errors[i, idx]*delta_beta(i, j) for idx, j in enumerate(closest[i])]), axis=0).dot(phi_beta)
        # Accumulate the distance-matching terms, scaled by gamma, using the
        # instance's (possibly thread-pooled) map.
        grad_beta += self.gamma*np.array(self.map(_calc_one_beta, range(N)))
        grad_phi_beta += self.gamma*np.mean(np.array(self.map(_calc_one_phi_beta, range(N))), axis=0)
        grad_phi_u += self.gamma*np.mean(np.array(self.map(_calc_one_phi_u, range(N))), axis=0)
        grad_beta_scale += self.gamma*np.mean(np.array(self.map(_calc_one_beta_scale, range(N))), axis=0)
        return grad_beta, grad_phi_beta, grad_phi_u, grad_beta_scale
    def _single_restart(self, X, Y, delta_U, neighborhoods, init_lr, lr_decay,
                        init_patience, max_iters, tol, verbosity, log,
                        record_distances=False, calc_com=False):
        """ Execute a single restart of the optimization.
        Arguments
        =========
        X : numpy matrix of size NxP, design matrix
        Y : numpy vector of size Nx1, responses
        delta_U : numpy tensor of size NxNxK, constant covariate distances
        neighborhoods : list of list of neighbors
        init_lr : float, initial learning rate
        lr_decay : float, multiplicative factor by which to decay the learning rate.
        init_patience : integer, non-negative number of permitted iterations which
            don't decrease the loss functions.
        max_iters : integer, maximum number of iterations.
        tol : float, minimum amount by which the loss must decrease each iteration.
        verbosity: integer, every n iterations the current state will be logged.
        log : file pointer, log file
        record_distances : Boolean, whether to record pairwise distances during optimization.
        calc_com : Boolean, whether to calculate the center of mass (COM)
            deviation during optimization.
        Returns
        ========
        beta_hat : numpy matrix of size NxP, estimate of model parameters.
        phi_beta : numpy vector of size P, estimate of phi_beta.
        beta_scale : float, estimate of personalization scaling factor.
        phi_u : numpy vector of size K, estimate of phi_u
        loss : float, final loss
        distances_over_time : list of distances during optimization
        losses_over_time : list of loss amounts during optimization
        """
        N, P, K = self._check_shapes(X, Y, delta_U=delta_U)
        # Working copies of the initial state plus a one-step-behind snapshot
        # (beta_prev / phi_*_prev) used to roll back when patience runs out.
        beta_hat = self.init_beta.copy()
        beta_scale = self.init_beta_scale
        phi_beta = self.init_phi_beta.copy()
        phi_u = self.init_phi_u.copy()
        beta_prev = self.init_beta.copy()
        phi_beta_prev = self.init_phi_beta.copy()
        phi_u_prev = self.init_phi_u.copy()
        patience = init_patience
        lr = init_lr
        prev_loss = np.inf
        distances_over_time = []
        losses_over_time = []
        # NOTE(review): both branches ignore the lambda's phi_u argument (it
        # shadows the outer variable); the hook exists so neighbor selection
        # could depend on phi_u in the future -- confirm this is intentional.
        if neighborhoods is None:
            print("No neighborhoods supplied. Will calculate neighbors randomly.")
            find_closest_neighbors = lambda phi_u: np.random.choice(N, size=(N, self.n_neighbors))
        else:
            print("Neighborhoods supplied. Will use those.")
            find_closest_neighbors = lambda phi_u: neighborhoods
        closest = find_closest_neighbors(phi_u)
        # Lazy pairwise quantities; they close over the *current* beta_hat,
        # beta_scale, phi_beta and phi_u, so they track the latest estimates.
        delta_beta = lambda i, j: np.abs(beta_hat[i] - beta_hat[j])
        dist_helper = lambda i, j: beta_scale*delta_beta(i, j).dot(phi_beta) - delta_U[i, j].dot(phi_u)
        calc_dist_errors = lambda i: np.array([dist_helper(i, j) for j in closest[i]])
        t = time.time()
        for iteration in range(1, max_iters+1):
            print("Iteration:{} of Max {}. Last Iteration Took {:.3f} seconds.".format(
                iteration, max_iters, time.time() - t), end='\r')
            t = time.time()
            # Periodically refresh the neighbor sets.
            if iteration % self.calc_closest_every == 0:
                closest = find_closest_neighbors(phi_u)
            # Total loss = prediction + distance matching + l1 on beta
            # + the three regularizers on phi_beta / phi_u / beta_scale.
            loss1 = np.mean([self.f(X[i], Y[i], beta_hat[i].T) for i in range(N)])
            dist_errors = np.array(self.map(lambda i: calc_dist_errors(i), range(N)))
            loss2 = 0.5*self.gamma*np.mean(np.mean(np.square(dist_errors), axis=1))
            loss3 = np.mean([self.rho_beta(beta_hat[i]) for i in range(N)])
            loss4 = self.psi_beta(phi_beta)
            loss5 = self.psi_u(phi_u)
            loss6 = self.psi_beta_scale(beta_scale)
            loss = loss1 + loss2 + loss3 + loss4 + loss5 + loss6
            losses_over_time.append([loss1, loss2, loss3, loss4, loss5, loss6])
            if record_distances:
                distances = np.square(dist_errors)
                distances_over_time.append(np.mean(distances))
            if iteration % verbosity == 0:
                log_string = "Iteration: {:d} Total Loss:{:.3f} Pred:{:.3f} Dist:{:.3f} l1:{:.3f} Phi_beta:{:.3f} Phi_u:{:.3f}, Beta_Scale:{:.3f}".format(
                    iteration, loss, loss1, loss2, loss3, loss4, loss5, loss6)
                if calc_com:
                    # Drift diagnostics relative to the (shared) initial beta.
                    com = np.linalg.norm(np.mean(beta_hat, axis=0) - self.init_beta[0, :], ord=2)
                    mad = np.mean(np.array([
                        np.abs(beta_hat[i] - self.init_beta[0, :]) for i in range(N)]), axis=0)
                    mad = np.linalg.norm(mad, ord=2) # Easier to read the logs if this is a single number, instead of per-feature.
                    log_string += "\nCOM Divergence:{}\nMAD:{}".format(com, mad)
                print(log_string, file=log)
            if loss > 1e8:
                # Divergence guard: abandon this restart.
                print("Diverged at iteration: {}".format(iteration))
                break
            if loss > prev_loss - tol:
                # Insufficient improvement: burn one unit of patience and,
                # once exhausted, roll back to the previous iterate and stop.
                patience -= 1
                if patience <= 0:
                    print("Reached local minimum at iteration {:d}.".format(iteration))
                    beta_hat = beta_prev
                    phi_beta = phi_beta_prev
                    phi_u = phi_u_prev
                    break
            # The learning rate decays every iteration, not only on plateaus.
            lr *= lr_decay
            beta_prev = beta_hat.copy()
            phi_u_prev = phi_u.copy()
            phi_beta_prev = phi_beta.copy()
            prev_loss = loss
            # Calculate Gradients for Personalization Regularization
            # (dist_errors from the loss computation above is reused here).
            grad_beta, grad_phi_beta, grad_phi_u, grad_beta_scale = self._calc_personalized_reg_grad(
                phi_beta, phi_u, beta_hat, beta_scale, dist_errors, N, delta_U, delta_beta, closest)
            # Calculate Gradients for Prediction (plus l1 sub-gradient).
            for i in range(N):
                grad_beta[i] += self.f_prime(X[i], Y[i], beta_hat[i].T)
                grad_beta[i] += self.rho_beta_prime(beta_hat[i])
            beta_hat -= lr*grad_beta
            # phi vectors are kept on the simplex via soft_normalize.
            phi_beta = soft_normalize(phi_beta - lr*grad_phi_beta)
            if self.intercept:
                phi_beta[-1] = 0. # intercept term does not count for personalization.
            phi_u = soft_normalize(phi_u - lr*grad_phi_u)
            # beta_scale uses a 100x smaller step and is floored at 1e-5.
            beta_scale = np.max([1e-5, beta_scale - 1e-2*lr*grad_beta_scale])
        log.flush()
        return beta_hat, phi_beta, beta_scale, phi_u, loss, distances_over_time, losses_over_time
def fit(self, X, Y, U, dU, delta_U=None, neighborhoods=None,
init_lr=1e-3, lr_decay=1-1e-6, n_restarts=1,
init_patience=10, max_iters=20000, tol=1e-3,
verbosity=100, log_file=None):
""" Fit the personalized model.
Arguments
=========
X : numpy matrix of size NxP, design matrix
Y : numpy vector of size Nx1, responses
U : numpy matrix of size NxK, covariates
dU: list of length K, each entry is a Python function for covariate-specific distance metric.
delta_U: numpy tensor of size NxNxK, static covariate distances.
If None, will be calculated before optimization starts.
neighborhoods: list of list of neighbors.
If None, neighborhoods will be generated during optimization.
init_lr: float, learning rate.
lr_decay: float, decay rate for learning rate.
n_restarts : integer, number of restarts.
init_patience: integer, number of iterations with non-decreasing loss before convergence is assumed.
max_iters : integer, maximum number of iterations.
tol : float, minimum decrease in loss.
verbosity : integer, print output to log file every n iterations.
log_file : str, filename of log file. If None, a new file will be created with the current datetime.
Returns
=======
beta_hat : numpy matrix of size NxP, personalized model parameters
phi_beta : numpy vector of size P, estimate of phi_beta
phi_u : numpy vector of size K, estimate of phi_u
distances_over_time : list of pairwise distances during optimization
losses_over_time : list of losses during optimization
"""
N, P, K = self._check_shapes(X, Y, U, dU)
if delta_U is None:
print("Making Distances...")
t = time.time()
delta_U = self.make_covariate_distances(U, dU, K, N, should_normalize=True)
print("Finished Making Distances. Took {:.3f} seconds.".format(time.time() - t))
best_loss = np.inf
if log_file is None:
log_file = "{}/distance_matching_{}.log".format(
self.log_dir, time.strftime("%Y_%m_%d-%H_%M_%S"))
with open(log_file, 'a') as log:
for restart in range(n_restarts):
t = time.time()
print("Restart {} of {}".format(restart+1, n_restarts))
(beta_hat, phi_beta, beta_scale,
phi_u, loss, distances_over_time, losses_over_time) = self._single_restart(
X, Y, U, delta_U, neighborhoods, init_lr, lr_decay,
init_patience, max_iters, tol, verbosity, log)
print("Took {:.3f} seconds.".format(time.time() - t))
if loss < best_loss:
best_loss = loss
print("** New best solution **")
self.loss = loss
self.beta_hat = beta_hat.copy()
self.phi_beta = phi_beta.copy()
self.phi_u = phi_u.copy()
self.distances_over_time = distances_over_time.copy()
self.losses_over_time = losses_over_time.copy()
return self.beta_hat, self.phi_beta, self.phi_u, self.distances_over_time, self.losses_over_time | true |
ae5e2cefa70f885a61f0ed905f4e0683ae0fe134 | Python | bkgoksel/squid | /test/test_predictor.py | UTF-8 | 4,770 | 2.90625 | 3 | [] | no_license | """
Module for testing predictor model utilities
"""
import unittest
from unittest.mock import Mock
import numpy as np
import torch as t
import torch.nn as nn
from torch.nn.utils.rnn import (
PackedSequence,
pack_padded_sequence,
pad_packed_sequence,
pad_sequence,
)
from model.predictor import DocQAConfig
from model.util import get_last_hidden_states
class PredictorTestCase(unittest.TestCase):
    """Checks that get_last_hidden_states matches the per-sequence final
    states visible in torch's padded RNN outputs, for every combination of
    layer count and directionality."""

    def setUp(self):
        self.batch_size = 3
        self.seq_len = 4
        self.input_dim = 2
        self.hidden_size = 10
        self.config = Mock(DocQAConfig)

    def get_input(self) -> PackedSequence:
        """Build a packed batch of random sequences with decreasing lengths."""
        lens = list(range(self.seq_len, self.seq_len - self.batch_size, -1))
        seqs = [t.randn((seq_len, self.input_dim)) for seq_len in lens]
        padded = pad_sequence(seqs, batch_first=True)
        return pack_padded_sequence(padded, lens, batch_first=True)

    def get_rnn(self, num_layers: int = 1, bidirectional: bool = False):
        """
        Return a randomly initialized RNN, recording the direction count and
        total hidden size on the mock config.
        (Docstring fix: the old text claimed "weights all set to 1", but no
        such initialization ever happened.)
        """
        self.config.n_directions = 1 + int(bidirectional)
        self.config.total_hidden_size = self.config.n_directions * self.hidden_size
        rnn = nn.RNN(
            self.input_dim,
            self.hidden_size,
            num_layers,
            batch_first=True,
            bidirectional=bidirectional,
        )
        return rnn

    def check_match(self, all_states, last_hidden_state, seq_lens):
        """
        Expected layout per sample:
        all_states[sample][len][:hidden_size] (forward last hidden state)
        all_states[sample][0][hidden_size:] (backward last hidden state)
        """
        for sample in range(self.batch_size):
            hidden = t.cat(
                [
                    all_states[sample][seq_lens[sample] - 1][
                        : self.hidden_size
                    ].detach(),
                    all_states[sample][0][self.hidden_size :].detach(),
                ]
            )
            self.assertTrue(
                np.allclose(hidden, last_hidden_state[sample].detach()),
                "Calculated last hidden state doesn't match expected. \n Calculated: %s \n Expected: %s"
                % (last_hidden_state[sample], all_states[sample][seq_lens[sample] - 1]),
            )

    def _assert_last_hidden_matches(self, num_layers=1, bidirectional=False):
        """Shared body of the four configuration tests (was quadruplicated)."""
        rnn = self.get_rnn(num_layers=num_layers, bidirectional=bidirectional)
        all_states, states = rnn(self.get_input())
        all_states, lens = pad_packed_sequence(all_states)
        all_states.transpose_(0, 1)
        last_hidden_state = get_last_hidden_states(
            states, self.config.n_directions, self.config.total_hidden_size
        )
        self.check_match(all_states, last_hidden_state, lens)

    def test_get_last_hidden_states_simple(self):
        """Unidirectional, single layer."""
        self._assert_last_hidden_matches()

    def test_get_last_hidden_states_two_layers(self):
        """Unidirectional, two layers."""
        self._assert_last_hidden_matches(num_layers=2)

    def test_get_last_hidden_states_bidirectional(self):
        """Bidirectional, single layer."""
        self._assert_last_hidden_matches(bidirectional=True)

    def test_get_last_hidden_states_bidirectional_two_layer(self):
        """Bidirectional, two layers."""
        self._assert_last_hidden_matches(num_layers=2, bidirectional=True)
| true |
6dfbc741f7a56ffb3604202c52d7312d8f4ef611 | Python | prodProject/WorkkerAndConsumerServer | /Enums/passwordEnum.py | UTF-8 | 252 | 2.59375 | 3 | [
"MIT"
] | permissive | from enum import Enum
class PasswordMode(Enum):
    """Operation modes for password-handling requests."""

    UNKNOWN_PASSWORD = 0
    GENERATE_PASSWORD = 1
    VERIFY_PASSWORD = 2
    # NOTE: member name keeps the historical "GENEREATE" typo for
    # backward compatibility with existing callers/serialized data.
    GENEREATE_NEW_PASSWORD = 3

    @staticmethod
    def getEnum(name):
        """Return the member whose name is *name*.

        BUG FIX: the old implementation called PasswordMode.__getattr__,
        which relied on EnumMeta.__getattr__ -- removed in Python 3.12.
        Member lookup by name is the supported spelling. Unknown names now
        raise KeyError (previously AttributeError).
        """
        return PasswordMode[name]
| true |
cffc4e258e730169d3bfcf52b8012fb7c4c9bed5 | Python | monalan/myGitProject | /tryforPython/drawPic/world_population.py | UTF-8 | 1,533 | 3.296875 | 3 | [] | no_license | import json
import pygal
from pygal_maps_world.i18n import COUNTRIES
# Load the population data set into a list of records
# (original comment: 将数据加载到一个列表中)
filename = 'population_data.json'
with open(filename) as f:
    pop_data = json.load(f)
"""# 打印每个国家 2010 年的人口数量
for pop_dict in pop_data:
    if pop_dict['Year'] == '2010':
        country_name = pop_dict['Country Name']
        population = pop_dict['Value']
        print(country_name + ": " + population)
"""
def get_country_code(country_name):
    """Return the two-letter Pygal country code for *country_name*.

    Returns None when the country is not found in COUNTRIES.
    """
    return next(
        (code for code, name in COUNTRIES.items() if name == country_name),
        None,
    )
# Build a mapping from Pygal country code to 2010 population.
cc_populations = {}
for record in pop_data:
    if record['Year'] != '2010':
        continue
    code = get_country_code(record['Country Name'])
    population = int(float(record['Value']))
    if code:
        cc_populations[code] = population

# Split the countries into three population tiers.
cc_pops_1, cc_pops_2, cc_pops_3 = {}, {}, {}
for code, population in cc_populations.items():
    if population < 10000000:
        cc_pops_1[code] = population
    elif population < 1000000000:
        cc_pops_2[code] = population
    else:
        cc_pops_3[code] = population

# Report how many countries landed in each tier.
print(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))

wm = pygal.maps.world.World()
wm.title = 'World Population in 2010, by Country'
wm.add('0-10m', cc_pops_1)
wm.add('10m-1bn', cc_pops_2)
wm.add('>1bn', cc_pops_3)
wm.render_to_file('Degree_world_population.svg')
| true |
43c16f27d22d5d28b09211143d3a4a4ef55e953c | Python | qibolun/DryVR | /Thermostats/Thermostats_ODE.py | UTF-8 | 851 | 2.734375 | 3 | [] | no_license |
from scipy.integrate import odeint
import numpy as np
def thermo_dynamic(y, t, rate):
    """Right-hand side of the thermostat ODE: dy/dt = rate * y."""
    dydt = rate*y
    return dydt


def TC_Simulate(Mode, initialCondition, time_bound):
    """Simulate the thermostat temperature over [0, time_bound].

    Arguments
    =========
    Mode : 'On' (rate +0.1) or 'Off' (rate -0.1).
    initialCondition : sequence whose first entry is the initial temperature
        (entries may be strings; they are converted to float).
    time_bound : simulation horizon in seconds.

    Returns a list of [time, temperature] rows sampled every 0.05 s, with a
    final row exactly at time_bound.

    Raises ValueError for an unknown Mode (previously the function printed a
    message and then crashed with NameError on the undefined rate).
    """
    time_step = 0.05
    time_bound = float(time_bound)
    initial = [float(tmp) for tmp in initialCondition]
    number_points = int(np.ceil(time_bound/time_step))
    t = [i*time_step for i in range(0, number_points)]
    # BUG FIX: the grid must end exactly at time_bound. The old check was
    # `t[-1] != time_step`, so short horizons (e.g. time_bound=0.08 with
    # t=[0, 0.05]) never received their endpoint sample.
    if t[-1] != time_bound:
        t.append(time_bound)
    y_initial = initial[0]
    if Mode == 'On':
        rate = 0.1
    elif Mode == 'Off':
        rate = -0.1
    else:
        raise ValueError('Wrong Mode name!')
    sol = odeint(thermo_dynamic, y_initial, t, args=(rate,), hmax=time_step)
    # Assemble the output trace as [time, temperature] rows.
    trace = [[t[j], sol[j, 0]] for j in range(len(t))]
    return trace
# sol = TC_Simulate('Off',[60],10)
# print(sol) | true |
803c5743bde21d2baf97f3b4e7b1589b3a1037a5 | Python | crt379/sift | /ttss/vvvvvfff.py | UTF-8 | 2,600 | 2.859375 | 3 | [] | no_license | import sys
import os
from PyQt5.Qt import * # noqa
class DirectoryTreeWidget(QTreeView):
    """Tree view over the local filesystem, rooted at *path*."""

    def __init__(self, path=QDir.currentPath(), *args, **kwargs):
        # NOTE(review): the default for *path* is evaluated once at class
        # definition time, i.e. it is the CWD at import, not at construction.
        super().__init__(*args, **kwargs)
        self.init_model(path)
        # BUG FIX: `self.expandsOnDoubleClick = False` merely shadowed the
        # Qt accessor with a plain bool and never changed the view's
        # behavior; the property must be set through its setter.
        self.setExpandsOnDoubleClick(False)
        self.header().setSectionResizeMode(0, QHeaderView.ResizeToContents)
        self.setAutoScroll(True)

    def init_model(self, path):
        """Attach a writable QFileSystemModel and point the view at *path*."""
        self.extensions = ["*.*"]
        self.path = path
        model = QFileSystemModel(self)
        model.setRootPath(QDir.rootPath())
        model.setReadOnly(False)
        model.setFilter(QDir.AllDirs | QDir.NoDot | QDir.AllEntries)
        self.setModel(model)
        self.set_path(path)

    def set_path(self, path):
        """Root the view at *path* if it is a directory; for a file, root at
        its parent directory and scroll to / select the file."""
        self.path = path
        model = self.model()
        index = model.index(str(self.path))
        if os.path.isfile(path):
            self.setRootIndex(model.index(
                os.path.dirname(str(self.path))))
            self.scrollTo(index)
            self.setCurrentIndex(index)
        else:
            self.setRootIndex(index)
class Foo(QWidget):
    """Demo widget: a directory tree plus a button that rewrites a file."""

    def __init__(self, path):
        super().__init__()
        self.path = path
        self.tree_view = DirectoryTreeWidget(path=".")
        self.tree_view.show()
        update_button = QPushButton(f"Update {path}")
        update_button.pressed.connect(self.update_file)
        vbox = QVBoxLayout()
        vbox.addWidget(self.tree_view)
        vbox.addWidget(update_button)
        self.setLayout(vbox)
        # New file will automatically refresh QFileSystemModel
        self.create_file()

    def create_file(self):
        """Write the initial file; its creation is picked up by the model."""
        data = "This new file contains xx bytes"
        with open(self.path, "w") as handle:
            handle.write(data.replace("xx", str(len(data))))

    def update_file(self):
        """Rewrite the file and nudge the model so the view refreshes."""
        model = self.tree_view.model()
        # Updating a file in place does not refresh QFileSystemModel by
        # itself; re-setting the item's own data pokes the model into
        # emitting a change for that particular index.
        data = "The file updated is much larger, it contains xx bytes"
        with open(self.path, "w") as handle:
            handle.write(data.replace("xx", str(len(data))))
        index = model.index(self.path)
        model.setData(index, model.data(index))
        QMessageBox.about(None, "Info", f"{self.path} updated, new size is {len(data)}")
def main():
    """Launch the demo application and block until it exits."""
    app = QApplication(sys.argv)
    window = Foo("foo.txt")
    window.setMinimumSize(640, 480)
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
4efa3f0724a2eebcf25e4f9e1b0ae78566d0aaf8 | Python | thkoeln/dlaproject | /src/datasets/music_dataset.py | UTF-8 | 6,683 | 2.59375 | 3 | [] | no_license | import tensorflow as tf
import matplotlib as mpl
import numpy as np
import os
import pandas as pd
# Root folder holding one sub-directory of CSV files per composer.
basepath = "src/datasets/arrays/"
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# Tempo normalization: feature 0 is computed as (bpm - BPM_MODIFIER) / BASE_BPM.
BASE_BPM = 100.0
BPM_MODIFIER = 100.0
# input/output size (for us=(88)*3 + 1 = 265)
# NOTE(review): the comment above says 265, but the value is 177 = 88*2 + 1,
# which matches the two-slots-per-key encoding in get_dataset -- confirm intent.
FEATURE_SIZE = 177
# Performs data windowing over the time series
# (@see https://www.tensorflow.org/tutorials/structured_data/time_series#data_windowing),
# i.e. slices the series into (history, target) sample pairs.
def data_windowing(dataset, target, start_index: int, end_index: int, history_size: int,
                   target_size: int, step: int, single_step=False):
    """Slice *dataset* into sliding history windows with matching targets.

    Each sample i yields dataset rows [i - history_size, i) taken every
    *step*, paired with target[i + target_size] (single_step=True) or the
    slice target[i:i + target_size]. end_index=None means "up to the end,
    leaving room for the target horizon".
    """
    windows = []
    labels = []
    first = start_index + history_size
    last = end_index if end_index is not None else len(dataset) - target_size
    for pos in range(first, last):
        windows.append(dataset[range(pos - history_size, pos, step)])
        if single_step:
            labels.append(target[pos + target_size])
        else:
            labels.append(target[pos:pos + target_size])
    return np.array(windows), np.array(labels)
def get_dataset(batch_size=32, buffer_size=10000, train_split_pct=0.5, seed=13, debug=True, plot=False, past_history=1024, future_target=64, step_size=16, single_step=True, composer=None):
    """Load the per-composer CSVs, encode them as note features, and return
    windowed tf.data train/validation datasets plus the input window shape.
    NOTE(review): the *seed* parameter is ignored -- the call below hardcodes
    tf.random.set_seed(13); confirm whether seed should be passed through.
    """
    # Load Dataset from csv to arrays (filtered by composer)
    dataset_csv_files = []
    if composer != None:
        # Single composer: take every CSV in that composer's directory.
        _, _, csv_files = next(os.walk(basepath + composer))
        csv_filenames_after = []
        for csv_file in csv_files:
            csv_file = basepath + composer + "/" + csv_file
            csv_filenames_after.append(csv_file)
        dataset_csv_files.extend(csv_filenames_after)
    else:
        # All composers: walk each composer directory (top level only,
        # hence the break after the first os.walk iteration).
        _, composers, _ = next(os.walk(basepath))
        for composer in composers:
            for (dirpath, dirnames, filenames) in os.walk(basepath + composer):
                csv_filenames_after = []
                for csv_file in filenames:
                    csv_file = basepath + composer + "/" + csv_file
                    csv_filenames_after.append(csv_file)
                dataset_csv_files.extend(csv_filenames_after)
                break
    if debug:
        print(dataset_csv_files[:10])
    # Concatenate every CSV into one big frame.
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this code
    # requires an older pandas (pd.concat would be the modern equivalent).
    complete_dataframe_set = pd.DataFrame()
    for dataset_csv_file in dataset_csv_files:
        dataframe = pd.read_csv(dataset_csv_file, delimiter=";")
        if debug:
            print("Creating Pandas DataFrame for: " + dataset_csv_file)
            print("First Line: " + str(dataframe.to_numpy()[0]))
        complete_dataframe_set = complete_dataframe_set.append(dataframe)
    # Maybe insert a null buffer between pieces so there is no abrupt
    # transition? (translated from the original German comment)
    #complete_dataframe_set = pd.concat(dataframes, ignore_index=True)
    if debug:
        print(complete_dataframe_set.head(10))
        # Also get first line completely:
        print(complete_dataframe_set.to_numpy()[0])
        print(complete_dataframe_set.to_numpy()[16])
        print(complete_dataframe_set.to_numpy()[32])
        print(complete_dataframe_set.to_numpy()[64])
        print(complete_dataframe_set.to_numpy()[128])
    # set random seed
    tf.random.set_seed(13)
    # get the data from the dataset and define the features (metronome and notes) + normalization to float values
    features = complete_dataframe_set.to_numpy()
    # NOTE(review): np.float was removed in NumPy 1.24; requires older NumPy.
    features_extended = np.zeros((features.shape[0], FEATURE_SIZE), dtype=np.float)
    if debug:
        print("Amount of 16th-Note-Rows in Dataset: " + str(features.shape[0]))
        print("Iterate over these...")
    # Encoding: column 0 is the normalized tempo; each of the 88 keys gets
    # two one-hot slots (value 1 -> slot y*2-1, value 2 -> slot y*2).
    for x in range(features.shape[0]):
        features_extended[x][0] = (features[x][0]-BPM_MODIFIER)/BASE_BPM
        for y in range(1,89):
            if features[x][y] == 0:
                # features_extended[x][y*3 - 2] = 1.0 # Reducing this value does not help training
                continue
            if features[x][y] == 1:
                features_extended[x][y*2 - 1] = 1.0
                continue
            if features[x][y] == 2:
                features_extended[x][y*2+1 - 1] = 1.0
                continue
            print("*** ERROR on feature normalization: There are values not fitting here ***")
    # Drop the raw array reference to free memory.
    # NOTE(review): features.plot below (plot=True) will fail after this.
    features = None
    if debug:
        print(features_extended[0])
        print(features_extended[16])
        print(features_extended[32])
        print(features_extended[64])
        print(features_extended[128])
    # normalize data (splitting per amount of notes etc)
    # TODO: might not be needed due to scramble_data -> was multivariate_data() @ https://github.com/thdla/DLA2020/blob/master/Homework/dla_project/datasets/multivariate_timeseries.py
    # split for train and validation set
    #dataset = features.values
    dataset = features_extended
    dataset_size = dataset.shape[0]
    print("Dataset contains {} rows, splitting by {}%".format(dataset_size, train_split_pct*100.0))
    train_split = int(train_split_pct*dataset_size)
    # ??? vvv what does this do? (translated from the original German comment)
    #data_mean = dataset[:train_split].mean(axis=0)
    #data_std = dataset[:train_split].std(axis=0)
    #dataset = (dataset - data_mean) / data_std
    # ??? ^^^
    # TODO: Check this, is this feasible here?
    # Window the series into (history, target) pairs for train and val.
    x_train_single, y_train_single = data_windowing(dataset, dataset, 0,
                                                    train_split, past_history,
                                                    future_target, step_size,
                                                    single_step=single_step)
    x_val_single, y_val_single = data_windowing(dataset, dataset,
                                                train_split, None, past_history,
                                                future_target, step_size,
                                                single_step=single_step)
    # debug output
    if debug:
        print('Single window of past history : {}'.format(x_train_single[0].shape))
    if plot:
        features.plot(subplots=True)
    # transform to tensorflow dataset
    train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single))
    train_data_single = train_data_single.cache().shuffle(buffer_size).batch(batch_size) # .repeat()
    val_data_single = tf.data.Dataset.from_tensor_slices((x_val_single, y_val_single))
    # NOTE(review): validation is batched by buffer_size, not batch_size --
    # possibly intentional (one big batch), but worth confirming.
    val_data_single = val_data_single.batch(buffer_size) # .repeat()
    return train_data_single, val_data_single, x_train_single.shape[-2:]
# Run the full dataset pipeline when executed directly (no-op on import).
if __name__ == '__main__':
    # execute only if run as the entry point into the program
    get_dataset()
1ef43fb524b4aae783f34b0c24032e3010795a1d | Python | RazvanRotari/iaP | /services/utils/new.py | UTF-8 | 6,589 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from __future__ import print_function
import yaml
import sys
import re
import pprint
import sys
def eprint(*args, **kwargs):
    """print() that writes to stderr; all other arguments are forwarded."""
    print(*args, file=sys.stderr, **kwargs)
#GOD = General Object Description
DEFAULT_URI = "http://razvanrotari.me/terms/"
DEFAULT_URI_PREFIX = "rr"
FUNCTION_TEMPLATE = """
import json
class ModelEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Model):
return {"data": obj.data, "URI": obj.URI, "class": obj.__class__.__name__}
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def create_insert(object_list):
prefix_text = ""
insert = "INSERT DATA {\\n"
for obj in object_list:
d = obj.create_partial_insert()
insert += d[0]
prefix_text += d[1]
insert += "\\n}"
insert = prefix_text + insert
return insert
class Model:
def to_json(self):
return json.dumps(self, cls=ModelEncoder)
@classmethod
def query(cls, **args):
pass
@staticmethod
def from_json(input):
model = json.loads(input)
return Model.from_dict(model)
@staticmethod
def from_dict(model):
cls = globals()[model["class"]]
obj = cls(model["URI"])
obj.data = model["data"]
#recreate inner objects
for prop in obj.data.items():
val = prop[1]
if "ref" in val and val["ref"] and val["value"]:
#We have a valid inner object. Let's go recursive
inner_obj = Model.from_dict(val["value"])
setattr(obj, prop[0], inner_obj)
return obj
def __getattribute__(self,name):
data = object.__getattribute__(self, "data")
if name == "data":
return data
if name not in data:
return object.__getattribute__(self, name)
return data[name]["value"]
def __setattr__(self, name, value):
if name in ["data", "URI"]:
object.__setattr__(self, name, value)
return
data = object.__getattribute__(self, "data")
if name not in data:
raise NameError(name)
data[name]["value"] = value
def __dir__(self):
return super().__dir__() + [str(x) for x in self.data.keys()]
def __eq__(self, model):
if len(self.data) != len(model.data):
return False
def create_partial_insert(self):
child_prefix = []
child_objects = []
insert = "<{URI}> ".format(URI=self.URI)
prefix_set = {}
for prop in self.data.items():
if "ref" in prop[1] and prop[1]["ref"]:
if prop[1]["value"]:
value = prop[1]["value"].URI
(tmp_insert, tmp_prefix) = prop[1]["value"].create_partial_insert()
child_prefix.extend(tmp_prefix)
child_objects.append(tmp_insert)
else:
value = None
else:
value = prop[1]["value"]
if value is None:
continue
line = "{link} \\"{value}\\" ;".format(link=prop[1]["link"][2] + ":" + prop[1]["link"][1], value=value)
prefix_list = prop[1]["link"]
prefix = prop[1]["link"][2]
prefix_set[prefix] = prefix_list[0]
insert += line
prefix = ""
for p in prefix_set.items():
prefix += "PREFIX {prefix}: <{base_url}>\\n".format(prefix=p[0], base_url=p[1])
for p in child_prefix:
prefix += p
insert = insert[::-1].replace(";", ".", 1)[::-1]
for insert_obj in child_objects:
insert += insert_obj
return (insert, prefix)
"""
INIT_TEMPLATE = """
class {class_name}(Model):
def __init__(self, URI):
self.URI = URI
self.data = {data_struct}
"""
#example
"""
class Object
def __init__(self, URI):
self.data = {"prop": {
"link":["http://razvanrotari.me/terms/","prop", "rr"},
"value":None}
self.URI = URI
"""
def create_class(definition):
    """Build the generated Python class for one swagger definition.

    Arguments
    =========
    definition : (name, spec) tuple, one item of the swagger "definitions"
        mapping; spec must contain a "properties" dict.

    Returns a dict with keys "name" (class name), "body" (generated source
    text from INIT_TEMPLATE) and "dependencies" (names of definitions
    referenced via "$ref").
    """
    class_name = definition[0]
    props = definition[1]["properties"]
    data_dict = {"class_name": {"link": [DEFAULT_URI, "className", DEFAULT_URI_PREFIX], "value": class_name}}
    depend = []
    # A property description may override the default predicate link using
    # the form "[base_url,term,prefix] free text" at its start.
    # (raw string: "\[" was an invalid escape sequence in the old code)
    link_override = re.compile(r"^\[(.*?)\]")
    for name, attr in props.items():
        if name == "URI":
            # URI identifies the subject itself and is not a predicate.
            continue
        link = [DEFAULT_URI, name, DEFAULT_URI_PREFIX]
        if "description" in attr:
            matches = link_override.findall(attr["description"])
            if matches:
                # BUG FIX: removed a stray debug print of the parsed parts.
                parts = matches[0].split(",")
                link = [parts[0], parts[1], parts[2]]
        ref = None
        if "$ref" in attr:
            ref = attr["$ref"].split("/")[-1]
            depend.append(ref)
        data_dict[name] = {"link": link, "value": None, "ref": ref}
    pp = pprint.PrettyPrinter(indent=4)
    text = INIT_TEMPLATE.format(class_name=class_name, data_struct=pp.pformat(data_dict))
    return {
        "name": class_name,
        "body": text,
        "dependencies": depend}
def main():
    """CLI entry point: read a swagger file and print the generated models."""
    if len(sys.argv) < 2:
        print("Usage: create_god.py <swagger_file>")
        # BUG FIX: previously fell through and crashed with IndexError on
        # sys.argv[1]; exit with a non-zero status instead.
        sys.exit(1)
    swagger = sys.argv[1]
    with open(swagger) as data_file:
        data = data_file.read()
    # NOTE(review): yaml.load on untrusted input can execute arbitrary
    # constructors; prefer yaml.safe_load unless custom tags are required.
    structure = yaml.load(data)
    defs = structure["definitions"]
    text = FUNCTION_TEMPLATE
    classes = {}
    for d in defs.items():
        cls = create_class(d)
        classes[cls["name"]] = cls
    # Emit classes in dependency order so referenced classes come first.
    # https://www.electricmonk.nl/docs/dependency_resolving_algorithm/dependency_resolving_algorithm.html
    def resolve_dep(node, resolved, seen):
        # BUG FIX: track visited nodes so shared dependencies are emitted
        # only once (the old version appended duplicates and could recurse
        # forever on cyclic references).
        if node["name"] in seen:
            return
        seen.add(node["name"])
        for edge in (classes[dep] for dep in node["dependencies"]):
            resolve_dep(edge, resolved, seen)
        resolved.append(node)
    resolved = []
    seen = set()
    # Resolving every class (in insertion order) covers both the reachable
    # graph and any classes with no inbound references.
    for cls in classes.values():
        resolve_dep(cls, resolved, seen)
    for cls in resolved:
        text += '\n' + cls["body"]
    print(text)


if __name__ == "__main__":
    main()
57bd7ade32e05e3730d561b3e4b5a7b8a9f37f37 | Python | jimtin/CryptoTradingPlatform_python | /Trading/TradingSettings.py | UTF-8 | 4,223 | 3.515625 | 4 | [
"MIT"
] | permissive |
# Class to set up settings for trading
class TradeSettings:
def __init__(self, BaselineToken, BaselinePercentageHold, PercentageTrade):
self.BaselineToken = BaselineToken
self.BaselinePercentageHold = BaselinePercentageHold
self.PercentageTrade = PercentageTrade
print(f'Trade settings as follows: BaselineToken = {self.BaselineToken}, BaselinePercentageHold = {self.BaselinePercentageHold}, PercentageTrade = {self.PercentageTrade}')
# Method to change the baseline token
def changebaselinetoken(self, NewToken):
self.BaselineToken = NewToken
print(f'Baseline Token changed to {self.BaselineToken}')
# Notify user of new Trade Settings
print(f'New Trade settings are: BaselineToken = {self.BaselineToken}, BaselinePercentageHold = {self.BaselinePercentageHold}, PercentageTrade = {self.PercentageTrade}')
# Method to change BaselinePercentageHold
def changebaselinepercentagehold(self, NewPercentageHold):
self.BaselinePercentageHold = NewPercentageHold
print(f'Baseline Percentage Hold changed to {self.BaselinePercentageHold}')
# Notify user of new trade settings
print(f'New Trade settings are: BaselineToken = {self.BaselineToken}, BaselinePercentageHold = {self.BaselinePercentageHold}, PercentageTrade = {self.PercentageTrade}')
# Method to change Percentage Trade amount
def changepercentagetrade(self, NewPercentageTrade):
self.PercentageTrade = NewPercentageTrade
print(f'Baseline Percentage Trade changed to {self.PercentageTrade}')
# Notify user of new Trade Settings
print(
f'New Trade settings are: BaselineToken = {self.BaselineToken}, BaselinePercentageHold = {self.BaselinePercentageHold}, PercentageTrade = {self.PercentageTrade}')
# Method to change all settings
def changeallsettings(self, NewToken, NewPercentageHold, NewPercentTrade):
self.BaselineToken = NewToken
self.BaselinePercentageHold = NewPercentageHold
self.PercentageTrade = NewPercentTrade
# Notify user of the changes
print(
f'New Trade settings are: BaselineToken = {self.BaselineToken}, BaselinePercentageHold = {self.BaselinePercentageHold}, PercentageTrade = {self.PercentageTrade}')
# Method to confirm and save trade settings
def confirm(self):
# Get input from user if they are happy with settings
outcome = input("Please confirm with 'Y' or 'y' if happy with new settings")
if outcome == "Y":
print("Settings accepted, updating database")
elif outcome == "y":
print("Settings accepted, updating database")
else:
newsetting = input("Select which option to change: "
"1. All "
"2. Baseline Token "
"3. Baseline Percentage Hold "
"4. Percentage Trade ")
print(newsetting)
print(type(newsetting))
if newsetting == "1":
settings = []
result = input("Input new baselinetoken symbol")
settings.append(result)
result = input("Input new baseline percentage hold")
result = int(result)
settings.append(result)
result = input("Input new percentage trade amount")
result = int(result)
settings.append(result)
self.changeallsettings(settings[0], settings[1], settings[2])
elif newsetting == "2":
settings = input("Input new baselinetoken symbol")
self.changebaselinetoken(settings)
elif newsetting == "3":
settings = input("Input new baseline percentage hold")
settings = int(settings)
self.changebaselinepercentagehold(settings)
elif newsetting == "4":
settings = input("Input new percentage trade amount")
settings = int(settings)
self.changepercentagetrade(settings)
else:
"Wrong selection made, try again"
| true |
ad01db5644e216dfcfc3ca6aec67e1dca3b3bfcc | Python | ashkankzme/QAforMisinformation | /data_preparation/data_cleaning.py | UTF-8 | 4,280 | 2.671875 | 3 | [] | no_license | import json
import math
import random
import sys
import numpy as np
sys.path.insert(1, '../paragraph_ranking')
from utils import get_paragraphs, get_bert_marked_text, tokenizer
# ---------------------------------------------------------------------------
# Data-cleaning script: loads news + story review JSON dumps, removes
# unusable articles (missing fields, 404 pages, duplicates, BERT-token
# overflow, too short), then writes per-question 80/20 train/test splits.
# Runs top-to-bottom with side effects (file reads/writes, prints).
# ---------------------------------------------------------------------------
with open('../data/news.json') as news_file:
    news = json.load(news_file)
with open('../data/stories.json') as story_file:
    stories = json.load(story_file)
articles = news + stories
print(str(len(articles)) + ' articles loaded.')
# Criteria question lists taken from one sample of each review type.
# NOTE(review): index 2000 is assumed to land in the story half of
# `articles` -- confirm against the actual dataset sizes.
news_criteria = [c['question'] for c in articles[0]['criteria']]
story_criteria = [c['question'] for c in articles[2000]['criteria']]
to_be_deleted = []
# this is for removing duplicate articles
# there were a few articles that had
# page not found as their original text
# and we had to remove them.
# we also remove unnecessarily long articles
original_articles_map = {}
for i, _article in enumerate(articles):
    _criteria = _article['criteria'] if 'criteria' in _article else []
    paragraphs = get_paragraphs(_article['original_article']) if 'original_article' in _article else []
    # Drop the article when any sanity check fails: missing/invalid rating,
    # incomplete criteria, missing or blank text, too many (>50) or too few
    # (<=5) paragraphs, any paragraph/explanation over BERT's 512-token
    # limit, or a scraped "Error code 404" page.
    if 'rating' not in _article or _article['rating'] == -1 or 'criteria' not in _article or len(
            _article['criteria']) < len(news_criteria) or 'original_article' not in _article or _article[
            'original_article'].isspace() or len(paragraphs) > 50 or len(paragraphs) <= 5 or len(
            [1 for p in paragraphs if len(tokenizer.tokenize(get_bert_marked_text(p))) > 512]) > 0 or \
            len([1 for q in _criteria if len(tokenizer.tokenize(get_bert_marked_text(q['explanation']))) > 512]) > 0 or \
            'Error code 404'.lower() in _article['original_article'].lower():
        to_be_deleted.append(i)
    elif _article['original_article'] not in original_articles_map:
        original_articles_map[_article['original_article']] = [i]
    else:
        original_articles_map[_article['original_article']].append(i)
# Any article text seen more than once: delete ALL copies, not just extras.
duplicate_indices = [original_articles_map[duplicate_article] for duplicate_article in original_articles_map if
                     len(original_articles_map[duplicate_article]) > 1]
for index_list in duplicate_indices:
    for i in index_list:
        to_be_deleted.append(i)
# Token-length statistics over the surviving articles; index_map maps a
# position in doc_lens back to the article's index in `articles`.
doc_lens = []
temp_counter = 0
index_map = {}
for i, article in enumerate(articles):
    if 'original_article' not in article or i in to_be_deleted:
        continue
    tokenized_article = tokenizer.tokenize(get_bert_marked_text(article['original_article']))
    doc_lens.append(len(tokenized_article))
    index_map[temp_counter] = i
    temp_counter += 1
avg_doc_len = np.mean(doc_lens)
print("Average doc len: {}".format(avg_doc_len))
std_doc_len = np.std(doc_lens)
print("Std doc len: {}".format(std_doc_len))
# Remove articles more than one standard deviation shorter than the mean;
# roughly 10% of those removed are printed as a spot-check sample.
for i in index_map:
    if doc_lens[i] < avg_doc_len - std_doc_len:
        if random.uniform(0, 1) < 0.1:
            print(articles[index_map[i]]['original_article'])
            print('########################################')
        to_be_deleted.append(index_map[i])
to_be_deleted = list(set(to_be_deleted))
# Delete from the back so earlier indices stay valid.
# NOTE(review): len(news) shrinks as news entries are deleted, so the
# news/stories boundary test below may misclassify indices near the
# boundary -- verify against the intended mapping.
for index in sorted(to_be_deleted, reverse=True):
    del articles[index]
    if index < len(news):
        del news[index]
    else:
        del stories[index - len(news)]
print('data cleaned. ' + str(len(articles)) + ' articles left. Count of story reviews: ' + str(
    len(stories)) + ', count of news reviews: ' + str(len(news)) + '.')
# extracting questions and their explanations
# For each of the 10 criteria questions, build (article, question,
# explanation, answer) records; answer is encoded 0/1/2 for
# Not Satisfactory / Satisfactory / other.
for i in range(10):
    qi = [{'article': article['original_article'], 'question': article['criteria'][i]['question'],
           'explanation': article['criteria'][i]['explanation'],
           'answer': 0 if article['criteria'][i]['answer'] == 'Not Satisfactory' else 1 if article['criteria'][i][
               'answer'] == 'Satisfactory' else 2}
          for article in articles]
    # train/dev/test split: 70/15/15
    # NOTE(review): the comment above says 70/15/15 but the code performs an
    # 80/20 train/test split -- confirm which is intended.
    seed = i  # for getting the same random results every time
    random.Random(seed).shuffle(qi)
    split_index = math.floor(0.8 * len(qi))
    qi_train = qi[:split_index]
    qi_test = qi[split_index:]
    with open('../data/ttt/q{}_train.json'.format(i + 1), 'w') as f:
        f.write(json.dumps(qi_train))
    with open('../data/ttt/q{}_test.json'.format(i + 1), 'w') as f:
        f.write(json.dumps(qi_test))
| true |
d48b0361c3d8df8174b198a721231f04f199cb0a | Python | suryatmodulus/stock-pickz | /stockpicker.py | UTF-8 | 6,804 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
import pathlib
import csv
import typing as T
import difflib
from datetime import datetime,timedelta,date
import statistics
from sys import maxsize
stock_codes = []
min_date = None
max_date = None
stock_data = {}
stock_dates = []
stock_prices = []
start_date = None
end_date = None
def resetStockData():
    """Clear every module-level state variable back to its pristine value."""
    global min_date, max_date, stock_data, stock_dates, stock_prices, start_date, end_date
    min_date = max_date = None
    start_date = end_date = None
    stock_data = {}
    stock_dates = []
    stock_prices = []
def initStockPicker(filepath):
    """Populate the global stock_codes list from the CSV's StockName column.

    FIX: the file handle is now closed deterministically via a context
    manager; the original left it open until garbage collection.
    """
    global stock_codes
    with open(filepath, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            stock_codes.append(row["StockName"])
def validateStockCode(stock_code):
    """Return (ok, code); on an unknown code, suggest a close match or re-prompt."""
    if stock_code in stock_codes:
        return True, stock_code
    suggestion = difflib.get_close_matches(stock_code, stock_codes, n=1)
    if len(suggestion) == 0:
        # Nothing even close -- ask the user to type the code again.
        retry = input("[x] Stock Not Found!\n=> Re-Enter Stock Code : ")
        return False, retry
    answer = input(f"[!] Did you mean {suggestion[0]} ? (yes|no) : ").lower()
    if answer == "yes":
        return True, suggestion[0]
    retry = input("=> Re-Enter Stock Code : ")
    return False, retry
def getStockCode(filepath):
    """Prompt for a stock code, validate it, and load its price history.

    Fills the global stock_data dict (date string -> price) and tracks the
    global min_date / max_date range of available data for that stock.
    """
    global stock_data, min_date, max_date
    with open(filepath, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        stock_code = input("=> Enter Stock Code : ")
        code_valid = False
        # Keep prompting until validateStockCode accepts (or corrects) the code.
        while(not code_valid):
            code_valid,stock_code = validateStockCode(stock_code.upper())
        for row in csv_reader:
            if(row["StockName"]==stock_code):
                stock_data[row["StockDate"]] = float(row["StockPrice"])
                # Dates in the CSV are formatted like "01-Jan-2020".
                date = datetime.strptime(row["StockDate"],'%d-%b-%Y').date()
                if(min_date is None and max_date is None):
                    min_date = max_date = date
                if(date<min_date):
                    min_date = date
                if(date>max_date):
                    max_date = date
def validateDate(date, dtype):
    """Parse a user-supplied date string and range-check it against the data.

    dtype is 'Start' or 'End'.  Returns (True, date_object) on success, or
    (False, new_string) when the user must be re-prompted.  Out-of-range
    dates may be snapped to min_date / max_date if the user agrees.

    FIX: the last accepted format was '%Y/%m/%b' (year/month/abbreviated
    month name), which can never match a date like 2020/06/10; it is now
    '%Y/%m/%d' as the surrounding patterns clearly intend.
    """
    for fmt in ('%d-%b-%Y', '%d-%m-%Y', '%d/%b/%Y', '%d/%m/%Y',
                '%Y-%b-%d', '%Y-%m-%d', '%Y/%b/%d', '%Y/%m/%d'):
        try:
            date = datetime.strptime(date, fmt).date()
            if dtype == "End" and date <= start_date:
                print("[x] End Date should be greater that Start date! ")
                new_date = input("=> Re-Enter End Date : ")
                return False, new_date
            elif date < min_date or date >= max_date:
                # Out of the available data range: offer to snap to the edge.
                if dtype == "Start":
                    answer = input(f"[x] Date out of range! Do you want to set (Start date) to {min_date.strftime(fmt)} ? (yes|no) : ").lower()
                    if answer == "yes":
                        return True, min_date
                    else:
                        break
                elif dtype == "End":
                    if date == max_date:
                        return True, date
                    answer = input(f"[x] Date out of range! Do you want to set (End date) to {max_date.strftime(fmt)} ? (yes|no) : ").lower()
                    if answer == "yes":
                        return True, max_date
                    else:
                        break
            else:
                return True, date
        except ValueError:
            # This format did not match; try the next one.
            pass
    new_date = input(f"[x] Date not Valid!\n=> Re-Enter {dtype} Date : ")
    return False, new_date
def getStockDates():
    """Prompt for the analysis window, looping until validateDate accepts each end."""
    global start_date, end_date
    accepted = False
    start_date = input("=> Enter Start Date : ").upper()
    while not accepted:
        accepted, start_date = validateDate(start_date, 'Start')
    accepted = False
    end_date = input("=> Enter End Date : ").upper()
    while not accepted:
        accepted, end_date = validateDate(end_date, 'End')
def parseStockData():
    """Build day-by-day price/date lists for the chosen window.

    Walks every calendar day from start_date to end_date; days missing in
    stock_data are forward-filled with the previous day's price (days before
    the first known price are skipped entirely).
    """
    delta = end_date - start_date
    for i in range(delta.days+1):
        date = (start_date + timedelta(days=i)).strftime('%d-%b-%Y')
        if(date in stock_data):
            stock_prices.append(stock_data[date])
            stock_dates.append(date)
        else:
            # Forward-fill: reuse the last known price once we have one.
            if(len(stock_dates)>0):
                stock_prices.append(stock_prices[-1])
                stock_dates.append(date)
def maxProfit(prices, size):
    """Return (buy_index, sell_index, profit) for the best single buy/sell.

    Scans prices[0:size] once, tracking the index of the cheapest price seen
    so far and the best profit achievable by selling at the current day.
    Returns (0, 0, 0) when no profitable trade exists.

    FIX: the original state machine compared windows against max_profit
    *before* folding the current window's profit in, so a completed
    profitable window could be silently discarded (e.g. [2, 5, 1, 3]
    returned profit 2 instead of 3).
    """
    best_buy = 0
    best_sell = 0
    best_profit = 0
    min_idx = 0  # index of the lowest price seen so far
    for i in range(1, size):
        if prices[i] < prices[min_idx]:
            # New cheapest day: future sells should buy here.
            min_idx = i
        elif prices[i] - prices[min_idx] > best_profit:
            best_profit = prices[i] - prices[min_idx]
            best_buy = min_idx
            best_sell = i
    return best_buy, best_sell, best_profit
def getOutput():
    """Print summary statistics and the best buy/sell dates for the window.

    Requires at least two price points (stdev is undefined otherwise).
    FIX: the first label said "Median" while the value printed was
    statistics.mean(); the label now matches the computed statistic.
    """
    if len(stock_prices) >= 2:
        print("\n******* Output *******\n")
        print(f"Mean : {statistics.mean(stock_prices):.2f}")
        print(f"Std : {statistics.stdev(stock_prices):.2f}")
        i, j, profit = maxProfit(stock_prices, len(stock_prices))
        if profit > 0.0:
            print(f"Buy Date : {stock_dates[i]}")
            print(f"Sell Date : {stock_dates[j]}")
            # Profit is quoted for a notional 100-share position.
            print(f"Profit : Rs. {(profit*100):.2f} (For 100 shares)")
        else:
            print("[!] No Profitable purchases can be made!")
        print("\n******* xxxxxx *******\n")
    else:
        print("[x] Insufficient Data Points, Try Different Dates or Stocks!")
def stockPicker(filepath):
    """Run one interactive analysis pass: load codes, pick a stock and a
    date window, fill the price series, and print the report."""
    initStockPicker(filepath)
    getStockCode(filepath)
    getStockDates()
    parseStockData()
    getOutput()
class StockDataSchema(T.NamedTuple):
    """Typed row schema for the input CSV; used to validate each record."""
    StockName: str
    StockDate: str
    StockPrice: float

    @classmethod
    def from_row(cls, row: dict):
        """Build a schema instance from a csv.DictReader row, coercing types.

        FIX: the original used cls._field_types, which was deprecated in
        Python 3.8 and removed in 3.9; typing.get_type_hints() yields the
        same field -> type mapping on all supported versions.
        """
        return cls(**{
            key: type_(row[key]) for key, type_ in T.get_type_hints(cls).items()
        })
def validateCSV(filepath):
    """Return True iff every row of the CSV parses against StockDataSchema."""
    with open(filepath, mode='r') as handle:
        for record in csv.DictReader(handle):
            try:
                StockDataSchema.from_row(record)
            except:
                # Any parse/coercion failure marks the whole file invalid.
                return False
    return True
if __name__ == "__main__":
    # CLI entry point: expects the CSV path as the single positional argument.
    parser = argparse.ArgumentParser(description="Stock Picker v1 @author=Surya T")
    parser.add_argument("filepath",type=str,help="Path to CSV file")
    args = parser.parse_args()
    filepath = args.filepath
    # NOTE(review): argparse already errors out when the positional is
    # missing, so this branch looks unreachable -- confirm before relying
    # on it.
    if(filepath==None):
        parser.error("Specify path to CSV file")
    elif(not pathlib.Path(filepath).exists()):
        print("[x] CSV File Does Not Exist!")
    elif(not validateCSV(filepath)):
        print("[x] Not Valid CSV File or Corrupted Data!")
    else:
        # Keep running analyses until the user declines to continue;
        # resetStockData() clears the globals between runs.
        do_exit = False
        while(not do_exit):
            stockPicker(filepath)
            answer = input("Do you wish to continue ? (yes|no) : ").lower()
            if(answer=="yes"):
                print("\n")
                resetStockData()
            else:
                do_exit = True
| true |
7ee6a48e8387a0d45ea82cf44b3e7b679dfcc503 | Python | kongxilong/python | /mine/chaptr3/readfile.py | UTF-8 | 341 | 3.59375 | 4 | [] | no_license | #!/usr/bin/python3
'readTextFile.py--read and display text file'
# get file name
fname = input('please input the file to read:')
try:
    fobj = open(fname, 'r')
except OSError as e:
    # BUG FIX: the original bare `except:` printed an undefined name `e`,
    # which raised a NameError instead of showing the intended message.
    print("*** file open error", e)
else:
    # display the contents of the file to the screen.
    for eachline in fobj:
        print(eachline,)
    fobj.close()
| true |
159381f399c7295e1892bdf3012d40c547b47d59 | Python | TianhengZhao/LeetCode | /[2]两数相加.py | UTF-8 | 1,476 | 3.875 | 4 | [] | no_license | # 给出两个 非空 的链表用来表示两个非负的整数。其中,它们各自的位数是按照 逆序 的方式存储的,并且它们的每个节点只能存储 一位 数字。
#
# 如果,我们将这两个数相加起来,则会返回一个新的链表来表示它们的和。
#
# 您可以假设除了数字 0 之外,这两个数都不会以 0 开头。
#
# 示例:
#
# 输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)
# 输出:7 -> 0 -> 8
# 原因:342 + 465 = 807
#
# Related Topics 链表 数学
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
class ListNode:
    """Singly linked list node: `val` holds one digit, `next` the following node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers stored as reversed-digit linked lists.

        Walks both lists in lockstep, carrying overflow, and builds the
        result behind a sentinel node so the head needs no special case.
        """
        sentinel = tail = ListNode(-1)
        carry = 0
        a, b = l1, l2
        # Keep going while either list has digits left or a carry remains.
        while a or b or carry:
            total = carry
            if a:
                total += a.val
                a = a.next
            if b:
                total += b.val
                b = b.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return sentinel.next
# leetcode submit region end(Prohibit modification and deletion)
| true |
3249f954b1dd55d10a478aad5fafbf44576bc207 | Python | ContinuumIO/PyTables | /tables/undoredo.py | UTF-8 | 4,165 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | ########################################################################
#
# License: BSD
# Created: February 15, 2005
# Author: Ivan Vilata - reverse:net.selidor@ivan
#
# $Source$
# $Id$
#
########################################################################
"""
Support for undoing and redoing actions.
Functions:
* undo(file, operation, *args)
* redo(file, operation, *args)
* moveToShadow(file, path)
* moveFromShadow(file, path)
* attrToShadow(file, path, name)
* attrFromShadow(file, path, name)
Misc variables:
`__docformat__`
The format of documentation strings in this module.
`__version__`
Repository version of this file.
"""
from tables.path import splitPath
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
__version__ = '$Revision$'
"""Repository version of this file."""
def undo(file_, operation, *args):
    """Dispatch the inverse of one logged *operation* on *file_*."""
    one_arg = {'CREATE': undoCreate, 'REMOVE': undoRemove}
    two_arg = {'ADDATTR': undoAddAttr, 'DELATTR': undoDelAttr}
    if operation == 'MOVE':
        undoMove(file_, args[0], args[1])
    elif operation in one_arg:
        one_arg[operation](file_, args[0])
    elif operation in two_arg:
        two_arg[operation](file_, args[0], args[1])
    else:
        raise NotImplementedError(
            "the requested unknown operation %r can not be undone; "
            "please report this to the authors" % operation)
def redo(file_, operation, *args):
    """Re-apply one logged *operation* on *file_*."""
    one_arg = {'CREATE': redoCreate, 'REMOVE': redoRemove}
    two_arg = {'ADDATTR': redoAddAttr, 'DELATTR': redoDelAttr}
    if operation == 'MOVE':
        redoMove(file_, args[0], args[1])
    elif operation in one_arg:
        one_arg[operation](file_, args[0])
    elif operation in two_arg:
        two_arg[operation](file_, args[0], args[1])
    else:
        raise NotImplementedError(
            "the requested unknown operation %r can not be redone; "
            "please report this to the authors" % operation)
def moveToShadow(file_, path):
    """Move the node at *path* into the file's shadow location."""
    target = file_._getNode(path)
    shadow_parent, shadow_name = file_._shadowName()
    target._g_move(shadow_parent, shadow_name)
def moveFromShadow(file_, path):
    """Restore the shadowed node back to *path*."""
    shadow_parent, shadow_name = file_._shadowName()
    shadowed = shadow_parent._f_getChild(shadow_name)
    parent_path, node_name = splitPath(path)
    shadowed._g_move(file_._getNode(parent_path), node_name)
def undoCreate(file_, path):
    """Undo a CREATE by hiding the created node in the shadow area."""
    moveToShadow(file_, path)
def redoCreate(file_, path):
    """Redo a CREATE by restoring the node from the shadow area."""
    moveFromShadow(file_, path)
def undoRemove(file_, path):
    """Undo a REMOVE by restoring the node from the shadow area."""
    moveFromShadow(file_, path)
def redoRemove(file_, path):
    """Redo a REMOVE by hiding the node in the shadow area again."""
    moveToShadow(file_, path)
def undoMove(file_, origpath, destpath):
    """Undo a MOVE by moving the node at *destpath* back to *origpath*."""
    orig_parent_path, orig_name = splitPath(origpath)
    moved_node = file_._getNode(destpath)
    orig_parent = file_._getNode(orig_parent_path)
    moved_node._g_move(orig_parent, orig_name)
def redoMove(file_, origpath, destpath):
    """Redo a MOVE by moving the node at *origpath* to *destpath* again."""
    dest_parent_path, dest_name = splitPath(destpath)
    moving_node = file_._getNode(origpath)
    dest_parent = file_._getNode(dest_parent_path)
    moving_node._g_move(dest_parent, dest_name)
def attrToShadow(file_, path, name):
    """Stash attribute *name* of the node at *path* in the shadow attrs,
    then remove it from the node."""
    node_attrs = file_._getNode(path)._v_attrs
    value = getattr(node_attrs, name)
    shadow_parent, shadow_name = file_._shadowName()
    shadow_attrs = shadow_parent._v_attrs
    # Only copy when the shadow does not already hold the value; this
    # avoids re-pickling complex attributes on REDO.
    if shadow_name not in shadow_attrs:
        shadow_attrs._g__setattr(shadow_name, value)
    node_attrs._g__delattr(name)
def attrFromShadow(file_, path, name):
    """Copy the shadowed value back onto attribute *name* of the node at *path*."""
    shadow_parent, shadow_name = file_._shadowName()
    value = getattr(shadow_parent._v_attrs, shadow_name)
    file_._getNode(path)._v_attrs._g__setattr(name, value)
    # The shadow copy is deliberately kept so Undo/Redo can reuse it.
def undoAddAttr(file_, path, name):
    """Undo an ADDATTR by stashing the attribute in the shadow area."""
    attrToShadow(file_, path, name)
def redoAddAttr(file_, path, name):
    """Redo an ADDATTR by restoring the attribute from the shadow area."""
    attrFromShadow(file_, path, name)
def undoDelAttr(file_, path, name):
    """Undo a DELATTR by restoring the attribute from the shadow area."""
    attrFromShadow(file_, path, name)
def redoDelAttr(file_, path, name):
    """Redo a DELATTR by stashing the attribute in the shadow area again."""
    attrToShadow(file_, path, name)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## End:
| true |
e88a6e32174c2425599a12c33565e4ff379c90f0 | Python | skriser/pythonlearn | /Day28/04trya.py | UTF-8 | 405 | 2.5625 | 3 | [] | no_license | #!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@time: 2018/06/04 15:21
@author: 柴顺进
@file: 04trya.py
@software:rongda
@note:
"""
import urllib2
# Fetch a blog page and dump error details when the request fails.
# NOTE(review): Python 2 code (print statements, urllib2) -- this file will
# not run under Python 3; the Python 3 equivalent lives in urllib.request /
# urllib.error.
req = urllib2.Request('https://blog.csdn.net/cecrel')
try:
    res = urllib2.urlopen(req)
except urllib2.HTTPError,e:
    # HTTP-level failure (4xx/5xx): inspect the error object.
    print dir(e)
    print e.code
    print e.msg
except urllib2.URLError, e:
    # Lower-level failure (DNS, refused connection, ...).
    print dir(e)
    print e.reason
print 'over'
3b5544ed46a4aa5000f0f9d324ad77dbc8f3f9b1 | Python | MrDaGree/linuxtks | /gui/modules/filewatch.py | UTF-8 | 8,684 | 2.90625 | 3 | [] | no_license | import os
import platform
from datetime import *
import imgui
import threading
import json
from modules import logger
from modules import LTKSModule
log = logger.Logger()
class FileWatch(LTKSModule.LTKSModule):
    """Periodically polls registered files/directories and records change alerts.

    Watch state is persisted in saves/watch-list.json: keys are paths,
    values hold the last-modified mtime plus the last seen content (files)
    or listing (directories).

    Fixes relative to the original:
      * handleNewDirAdding referenced a local `newPath` that was never
        defined (it lived in handleNewPathAdding) -- NameError on every
        directory add.  The dict is now passed in explicitly.
      * os.path.isfile/isdir were called on bare os.listdir() names, which
        are resolved against the CWD instead of the listed directory.
      * Thread.setDaemon() (deprecated) replaced by the daemon attribute.
    """

    alerts = []             # human-readable alert strings, newest last
    alertsData = {}         # alert number -> detail dict (path, contents, timestamp)
    watchLoopTime = 30.0    # seconds between polls
    started = False
    interfaceActive = False
    addingPathText = ""     # current contents of the "add new path" input box

    def __init__(self):
        # Load the persisted watch list before registering with the core.
        with open('saves/watch-list.json') as watchInformation_json:
            self.watchInformation = json.load(watchInformation_json)
        super().__init__("File/Directory Watcher", "This module is responsible for timely checks on certain directories and files to see if anything has changed")

    def alert(self, message, data):
        """Record one alert: a timestamped text line plus a structured detail dict."""
        dateTimeObj = datetime.now()
        timestampStr = dateTimeObj.strftime("[%m-%d-%Y] [%H:%M:%S]")
        data["timestamp"] = timestampStr
        self.alertsData[len(self.alertsData) + 1] = data
        self.alerts.append(timestampStr + " " + message)
        log.logAlert(message)

    def saveWatchInformation(self):
        """Persist the watch list back to disk."""
        with open('saves/watch-list.json', 'w') as watchInformation_json:
            json.dump(self.watchInformation, watchInformation_json, sort_keys=True, indent=4)

    def handleFileAlert(self, path):
        """Raise an alert for a modified file and cache its new content."""
        with open(path) as watch_file:
            content = watch_file.read()
        data = {}
        data["path"] = path
        data["last-content"] = self.watchInformation[path]["last-content"]
        data["new-content"] = content
        self.alert("A file (" + path + ") has been modified!", data)
        self.watchInformation[path]["last-content"] = content

    def checkFile(self, path):
        """Compare the file's mtime to the stored one; alert when newer."""
        fileStat = os.stat(path)
        if float(self.watchInformation[path]["last-modified"]) < fileStat.st_mtime:
            self.handleFileAlert(path)
            self.watchInformation[path]["last-modified"] = fileStat.st_mtime
            self.saveWatchInformation()

    def handleUpdatingDirInformation(self, path):
        """Refresh the cached file/directory listing for a watched directory."""
        self.watchInformation[path]["dir-content"] = {}
        self.watchInformation[path]["dir-content"]["files"] = []
        self.watchInformation[path]["dir-content"]["directories"] = []
        for file in os.listdir(path):
            # FIX: resolve entries against `path`, not the CWD.
            full = os.path.join(path, file)
            if os.path.isfile(full):
                self.watchInformation[path]["dir-content"]["files"].append(file)
            elif os.path.isdir(full):
                self.watchInformation[path]["dir-content"]["directories"].append(file)

    def handleDirectoryAlert(self, path):
        """Raise an alert for a modified directory with old vs. new listings."""
        data = {}
        data["path"] = path
        data["last-content"] = self.watchInformation[path]["dir-content"]
        data["new-content"] = {}
        data["new-content"]["directories"] = []
        data["new-content"]["files"] = []
        for file in os.listdir(path):
            # FIX: resolve entries against `path`, not the CWD.
            full = os.path.join(path, file)
            if os.path.isfile(full):
                data["new-content"]["files"].append(file)
            elif os.path.isdir(full):
                data["new-content"]["directories"].append(file)
        self.alert("A directory (" + path + ") has been modified!", data)
        self.handleUpdatingDirInformation(path)

    def checkDir(self, path):
        """Compare the directory's mtime to the stored one; alert when newer."""
        dirStat = os.stat(path)
        if float(self.watchInformation[path]["last-modified"]) < dirStat.st_mtime:
            self.handleDirectoryAlert(path)
            self.watchInformation[path]["last-modified"] = dirStat.st_mtime
            self.saveWatchInformation()

    def watchLoop(self):
        """Poll every watched path once and re-arm the timer for the next poll."""
        self.watchThread = threading.Timer(self.watchLoopTime, self.watchLoop)
        # FIX: Thread.setDaemon() is deprecated; set the attribute directly.
        self.watchThread.daemon = True
        self.watchThread.start()
        for watch in self.watchInformation:
            if os.path.isfile(watch):
                self.checkFile(watch)
            elif os.path.isdir(watch):
                self.checkDir(watch)
        self.saveWatchInformation()

    def handleNewDirAdding(self, newPath=None):
        """Fill *newPath* with the current listing of the path being added.

        FIX: the original referenced a local `newPath` that was never
        defined in this scope, raising NameError; the dict is now passed
        in (or created) and returned.
        """
        if newPath is None:
            newPath = {}
        newPath["dir-content"] = {}
        newPath["dir-content"]["files"] = []
        newPath["dir-content"]["directories"] = []
        for file in os.listdir(self.addingPathText):
            full = os.path.join(self.addingPathText, file)
            if os.path.isfile(full):
                newPath["dir-content"]["files"].append(file)
            if os.path.isdir(full):
                newPath["dir-content"]["directories"].append(file)
        return newPath

    def handleNewPathAdding(self):
        """Register addingPathText as a new watch entry and persist it."""
        if (os.path.isfile(self.addingPathText) or os.path.isdir(self.addingPathText)):
            dirStat = os.stat(self.addingPathText)
            newPath = {
                'last-modified': dirStat.st_mtime,
            }
            if (os.path.isfile(self.addingPathText)):
                with open(self.addingPathText) as watch_file:
                    newPath['last-content'] = watch_file.read()
            if (os.path.isdir(self.addingPathText)):
                # FIX: pass the dict being built (see handleNewDirAdding).
                self.handleNewDirAdding(newPath)
            self.watchInformation[self.addingPathText] = newPath
            self.saveWatchInformation()

    def displayInterface(self):
        """Draw the imgui panel: watch-path management on the left, alerts on the right."""
        imgui.begin_child("left_bottom", width=606, height=370)
        imgui.text("Watch Paths")
        imgui.begin_child("left_bottom", width=606, height=310, border=True)
        # Iterate over a copy of the keys so entries can be removed mid-loop.
        for path in list(self.watchInformation.keys()):
            imgui.push_text_wrap_position()
            imgui.text(path)
            imgui.same_line()
            if (imgui.button("- Remove Path")):
                del self.watchInformation[path]
                self.saveWatchInformation()
        imgui.end_child()
        imgui.text("Add new path:")
        addNewPathInputChanged, self.addingPathText = imgui.input_text(
            "##Path input",
            self.addingPathText,
            2048
        )
        imgui.same_line()
        if (imgui.button("+ Add Path")):
            self.handleNewPathAdding()
        imgui.end_child()
        imgui.same_line()
        imgui.begin_child("file_dir_alerts")
        imgui.text("File/Directory Change Alerts")
        imgui.begin_child("file_dir_alerts_logger", border=True)
        for alert in self.alertsData:
            data = self.alertsData[alert]
            if (imgui.tree_node(data["timestamp"] + " " + data["path"])):
                imgui.indent()
                imgui.push_text_wrap_position()
                imgui.text("Change Path: " + data["path"])
                # File alerts carry plain-text old/new content...
                if (os.path.isfile(data["path"])):
                    if (imgui.tree_node("Last Content")):
                        imgui.push_text_wrap_position()
                        imgui.text(data["last-content"])
                        imgui.tree_pop()
                    if (imgui.tree_node("New Content")):
                        imgui.push_text_wrap_position()
                        imgui.text(data["new-content"])
                        imgui.tree_pop()
                # ...directory alerts carry file/directory listings.
                if (os.path.isdir(data["path"])):
                    if (imgui.tree_node("Last Content")):
                        if (imgui.tree_node("Files (" + str(len(data["last-content"]["files"])) + ")")):
                            for file in data["last-content"]["files"]:
                                imgui.push_text_wrap_position()
                                imgui.text(file)
                            imgui.tree_pop()
                        if (imgui.tree_node("Directories (" + str(len(data["last-content"]["directories"])) + ")")):
                            for file in data["last-content"]["directories"]:
                                imgui.push_text_wrap_position()
                                imgui.text(file)
                            imgui.tree_pop()
                        imgui.tree_pop()
                    if (imgui.tree_node("New Content")):
                        if (imgui.tree_node("Files (" + str(len(data["new-content"]["files"])) + ")")):
                            for file in data["new-content"]["files"]:
                                imgui.push_text_wrap_position()
                                imgui.text(file)
                            imgui.tree_pop()
                        if (imgui.tree_node("Directories (" + str(len(data["new-content"]["directories"])) + ")")):
                            for file in data["new-content"]["directories"]:
                                imgui.push_text_wrap_position()
                                imgui.text(file)
                            imgui.tree_pop()
                        imgui.tree_pop()
                imgui.tree_pop()
                imgui.unindent()
        imgui.end_child()
        imgui.end_child()

    def start(self):
        """Mark the module started and kick off the background polling loop."""
        log.logNorm(self.name + " watch loop started...")
        self.started = True
        self.watchLoop()
| true |
1a2217844053e650af54530ba1a549f238c771bd | Python | steinbergs-python-packages/spycery | /spycery/basics/const.py | UTF-8 | 2,907 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module provides a Const type to be used to define readonly attributes."""
class Const(type):
    """Metaclass that makes `Const.*`-wrapped class attributes read-only.

    Use it as the metaclass of a class whose constants are wrapped in
    Const.Attribute / Const.Int / Const.Str:

        class MyClass(metaclass=Const):
            my_param = Const.Attribute("xyz")

    Any later assignment to such an attribute on the class raises
    AttributeError; ordinary attributes remain writable.
    """

    def __setattr__(cls, name, value):
        """Reject rebinding when the current value is a Const.* wrapper."""
        # Inspect the type of the value currently bound to `name`; only
        # the wrapper classes nested inside this module's Const are guarded.
        holder = type(getattr(cls, name, None))
        if holder.__module__ == __name__ and holder.__qualname__.startswith("Const."):
            raise AttributeError("can't set attribute")
        super(Const, cls).__setattr__(name, value)

    class Attribute:  # pylint: disable=too-few-public-methods
        """Generic read-only value wrapper; call it or str() it to unwrap."""

        def __init__(self, value):
            self.value = value

        def __call__(self):
            return self.value

        def __len__(self):
            return len(self.value)

        def __repr__(self):
            return str(self.value)

        def __str__(self):
            return str(self.value)

    class Int(int):  # pylint: disable=too-few-public-methods
        """Read-only int wrapper."""

    class Str(str):  # pylint: disable=too-few-public-methods
        """Read-only str wrapper."""
class ConstBase(metaclass=Const):
    """Base-class variant of Const: also guards instance-level assignment.

    Subclass it when implementing a class with read-only attributes:

        class MyClass(ConstBase):
            my_param = Const.Attribute("xyz")

    Assigning to my_param -- on the class or on an instance -- raises
    AttributeError; ordinary attributes remain writable.
    """

    def __setattr__(self, name, value):
        """Reject rebinding when the current value is a Const.* wrapper."""
        holder = type(getattr(self, name, None))
        if holder.__module__ == __name__ and holder.__qualname__.startswith("Const."):
            raise AttributeError("can't set attribute")
        super(ConstBase, self).__setattr__(name, value)
| true |
a8d1c0cb7aa1eeebd18ea392d40d39ec16adc9dd | Python | arfu2016/nlp | /nlp_models/spacy/property2.py | UTF-8 | 626 | 3.296875 | 3 | [] | no_license | """
@Project : text-classification-cnn-rnn
@Module : property2.py
@Author : Deco [deco@cubee.com]
@Created : 6/5/18 4:05 PM
@Desc : https://www.python-course.eu/python3_properties.php
"""
class P:
    """Holds a single value `x`, clamped to the closed range [0, 1000]."""

    def __init__(self, x):
        # Routed through the property setter below, so the initial value
        # is clamped as well.
        self.x = x

    @property
    def x(self):
        return self.__x

    @x.setter
    def x(self, x):
        # Clamp into [0, 1000] with one conditional expression.
        self.__x = 0 if x < 0 else (1000 if x > 1000 else x)
if __name__ == '__main__':
    # Demo: values outside [0, 1000] are clamped by the property setter.
    p1 = P(1001)
    print(p1.x)
    p1.x = -12
    print(p1.x)
| true |
f3789a93d3b90e7c6b7d7319573270d1144543c3 | Python | ErichBSchulz/PIRDS-respiration-data-standard | /pirds_library/examples/PythonToArduino/Measurement_PythonToArduino.py | UTF-8 | 1,740 | 2.921875 | 3 | [
"MIT",
"CC0-1.0"
] | permissive | #! /usr/bin/env python
#################################################################################
# File Name : Measurement_PythonToArduino.py
# Created By : lauriaclarke
# Creation Date : [2020-04-08 09:05]
# Last Modified : [2020-04-09 09:14]
# Description :
#################################################################################
import serial
import time
import struct
import sys
# check for input arguments: serial port and baud rate are both required
if len(sys.argv) < 3:
    print("please re-run with the required arguments: python3 [program name] [serial port] [baud rate]\n ")
    sys.exit()
# print input arguments
print("establishing connection on port: ", sys.argv[1])
print("baud rate: ", sys.argv[2], "\n")
# Open the serial link to the Arduino; 0.1 s read timeout keeps the
# read loop below responsive.
arduino = serial.Serial(sys.argv[1], int(sys.argv[2]), timeout=.1)
class Measurement:
    """One measurement record matching the PIRDS byte layout sent below."""

    def __init__(self, measurementType, deviceType, deviceLocation,
                 measurementTime, measurementValue):
        self.m = "M"  # record-type tag: 'M' marks a Measurement on the wire
        self.measurementType = measurementType
        self.deviceType = deviceType
        self.deviceLocation = deviceLocation
        self.measurementTime = measurementTime
        self.measurementValue = measurementValue
# Build a sample measurement and transmit it field by field.
p1 = Measurement("T", "B", 25, 123456789, 1234567)
print(p1.m, p1.measurementType, p1.deviceType, p1.deviceLocation, p1.measurementTime, p1.measurementValue)
time.sleep(1)  # give the Arduino time to reset after the port opens
# Wire format: three ASCII chars, then '>B' = unsigned byte,
# '>I' = big-endian unsigned 32-bit int (time and value).
arduino.write(str.encode(p1.m))
arduino.write(str.encode(p1.measurementType))
arduino.write(str.encode(p1.deviceType))
arduino.write(struct.pack('>B', p1.deviceLocation))
arduino.write(struct.pack('>I', p1.measurementTime))
arduino.write(struct.pack('>I', p1.measurementValue))
# Echo whatever the Arduino sends back; [:-2] strips the trailing "\r\n".
while True:
    data = arduino.readline()[:-2]
    if data:
        print(data)
| true |
fd43f6af2fec8d52bb60102a2366733395c948bd | Python | goldphoenix90/Project3_WineQuality | /model.py | UTF-8 | 1,944 | 2.734375 | 3 | [] | no_license | # Importing the libraries
from numpy.random import seed
seed(1)  # fix the RNG before keras/TF are touched, for reproducibility

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import keras
from keras.models import Sequential  # added: used below but never imported
from keras.layers import Dense       # added: used below but never imported
import pickle
# Load the red-wine dataset: features = physico-chemical columns,
# label = integer quality score.
survey = pd.read_csv('Resources/winequality-red.csv')
X = survey.drop("quality", axis=1)
y = survey["quality"]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from keras.utils import to_categorical
# Stratified 75/25 split preserves the class balance in both sets.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=1, stratify=y, train_size=0.75, test_size=0.25)
# Scale features to [0, 1]; the scaler is fit on the training set only
# (no information leaks from the test set).
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# Encode the quality labels to 0..5, then one-hot them for softmax output.
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
encoded_y_train = label_encoder.transform(y_train)
encoded_y_test = label_encoder.transform(y_test)
y_train_categorical = to_categorical(encoded_y_train)
y_test_categorical = to_categorical(encoded_y_test)
# NOTE(review): Sequential and Dense are used below but are not imported
# anywhere in this file as written -- ensure `from keras.models import
# Sequential` and `from keras.layers import Dense` are present at the top.
model = Sequential()
model.add(Dense(units=100, activation='relu', input_dim=11))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=100, activation='relu'))
# model.add(Dense(units=100, activation='relu'))
# model.add(Dense(units=100, activation='relu'))
# model.add(Dense(units=100, activation='relu'))
# model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=6, activation='softmax'))  # 6 quality classes
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(
    X_train_scaled,
    y_train_categorical,
    epochs=100,
    shuffle=True,
    verbose=2
)
model_loss, model_accuracy = model.evaluate(
    X_test_scaled, y_test_categorical, verbose=2)
# Saving model to disk
model.save('redwinequality_model_trained.h5')
# Loading model to compare the results
from keras.models import load_model
survey_model = load_model('redwinequality_model_trained.h5')
93241f3342733195517e3a0aa34e95a56d57b17d | Python | hewhocannotbetamed/HandyBeam | /build/lib/handybeam/cl_py_ref_code/hbk_lamb_grid_sampler.py | UTF-8 | 2,352 | 2.875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ## This is python reference code for the opencl kernel _hbk_lamb_grid_sampler.
## Imports
import numpy as np
root_2 = 1.4142135623730951   # sqrt(2): half-diagonal of the Lambert square domain
tau = 6.283185307179586       # 2 * pi
medium_wavelength = 0.008575  # NOTE(review): not used below; units presumed metres -- confirm
def hbk_lamb_grid_sampler_ref(
        required_resolution,
        radius,
        N,
        x0,
        y0,
        z0
        ):
    '''Reference implementation of the `_hbk_lamb_grid_sampler` OpenCL kernel.

    Generates a hemispherical grid of sampling points using the Lambert
    azimuthal equal-area projection of a square onto a hemisphere.

    Parameters
    ----------
    required_resolution : float
        Desired spacing between neighbouring sampling points.
    radius : float
        Radius of the hemisphere defining the Lambert projection.
    N : int
        NOTE(review): this argument is overwritten below before use and
        therefore has no effect -- confirm whether it should be honoured.
    x0, y0, z0 : float
        Coordinates of the origin (centre) of the grid.
    '''
    # Number of points required around the hemisphere's equator, then the
    # corresponding step in the [-1, 1] square parameter domain.
    no_points_required = np.ceil((tau*radius)/required_resolution)
    density = (2*root_2) / no_points_required
    N = len(np.arange(-1,1,density))
    # Initialise lists to store the sampling grid coordinates.
    # NOTE(review): these lists are never appended to or returned -- the
    # function mirrors the kernel line-by-line and discards the per-pixel
    # results rather than producing a value.
    x_points = []
    y_points = []
    z_points = []
    # Perform the lambert equi-area projection to generate hemispherical
    # sampling points.
    for idx_x in range(N):
        for idx_y in range(N):
            x_base = (-1 + density * idx_x ) * root_2
            y_base = (-1 + density * idx_y ) * root_2
            rho = np.sqrt(x_base * x_base + y_base * y_base)
            # NOTE(review): rho == 0 at the grid centre makes the two
            # divisions below produce NaN -- confirm this matches the kernel.
            c = 2 * np.arcsin(0.5*rho)
            phi = np.arcsin(np.cos(c)) / rho
            l = np.arctan2( (x_base * np.sin(c)), -y_base*np.sin(c))
            cos_phi = np.cos(phi)
            lambert_x = radius * np.sin(phi)
            lambert_y = radius * np.cos(l) * cos_phi
            lambert_z = radius * np.sin(l) * cos_phi
            # Points projecting below the equator are marked invalid (NaN);
            # the rest are translated to the grid origin.
            if lambert_z < 0:
                pixel_x_coordinate = float('nan')
                pixel_y_coordinate = float('nan')
                pixel_z_coordinate = float('nan')
            else:
                pixel_x_coordinate = lambert_x + x0
                pixel_y_coordinate = lambert_y + y0
                pixel_z_coordinate = lambert_z + z0
| true |
a891e386a22ec869cfba6bd6c0944fc9336d9665 | Python | dtrckd/simplon_tssr_2021 | /exo5.py | UTF-8 | 559 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/python
import sys
#
# Add a SEP at the end of each line
#
# CLI arguments: target file path and the separator string to append.
path = sys.argv[1]
sep = sys.argv[2] # RFTM
def alter_file(path, sep):
    """Append ' <sep>' to every line of *path* and write the result to *path*.tmp.

    FIX: the output handle was never closed or flushed, so the .tmp file
    could be left empty or truncated; both files now use `with` blocks.
    """
    with open(path) as f:
        content = f.read()
    lines = content.split("\n")  # "salut ca va" stays one line; "\n" splits lines
    lines = [line + " " + sep for line in lines]
    dest = path + ".tmp"
    with open(dest, "w") as f:
        f.write("\n".join(lines))
alter_file(path, sep)
| true |
14ec2008be0eaa7c9be0156475245f17cd1ca140 | Python | tirhelen/ohtu-2021-viikko1 | /src/tests/varasto_test.py | UTF-8 | 2,508 | 3.171875 | 3 | [] | no_license | import unittest
from varasto import Varasto
class TestVarasto(unittest.TestCase):
def setUp(self):
self.varasto = Varasto(10)
def test_konstruktori_luo_tyhjan_varaston(self):
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertAlmostEqual(self.varasto.saldo, 0)
def test_uudella_varastolla_oikea_tilavuus(self):
self.assertAlmostEqual(self.varasto.tilavuus, 10)
def test_tilavuus_nolla_virheellisella_arvolla(self):
self.varasto = Varasto(-5.0)
self.assertAlmostEqual(self.varasto.tilavuus, 0)
def test_virheellinen_alkusaldo_korjataan(self):
self.varasto = Varasto(10, -2)
self.assertAlmostEqual(self.varasto.saldo, 0)
def test_alkusaldo_suurempi_kuin_tilavuus(self):
self.varasto = Varasto(5, 10)
self.assertAlmostEqual(self.varasto.saldo, 5)
def test_lisays_lisaa_saldoa(self):
self.varasto.lisaa_varastoon(8)
self.assertAlmostEqual(self.varasto.saldo, 8)
def test_lisays_lisaa_pienentaa_vapaata_tilaa(self):
self.varasto.lisaa_varastoon(8)
# vapaata tilaa pitäisi vielä olla tilavuus-lisättävä määrä eli 2
self.assertAlmostEqual(self.varasto.paljonko_mahtuu(), 2)
def test_negatiivinen_lisays_varastoon(self):
self.varasto.lisaa_varastoon(-4)
self.assertAlmostEqual(self.varasto.saldo, 0)
def test_lisays_ylittaa_jaljella_olevan_tilavuuden(self):
self.varasto.lisaa_varastoon(20)
self.assertAlmostEqual(self.varasto.saldo, 10)
def test_ottaminen_palauttaa_oikean_maaran(self):
self.varasto.lisaa_varastoon(8)
saatu_maara = self.varasto.ota_varastosta(2)
self.assertAlmostEqual(saatu_maara, 2)
def test_ottaminen_lisaa_tilaa(self):
self.varasto.lisaa_varastoon(8)
self.varasto.ota_varastosta(2)
# varastossa pitäisi olla tilaa 10 - 8 + 2 eli 4
self.assertAlmostEqual(self.varasto.paljonko_mahtuu(), 4)
def test_ottamisessa_maara_negatiivinen(self):
self.varasto.ota_varastosta(-3)
self.assertAlmostEqual(self.varasto.saldo, 0)
def test_ottamisessa_maara_ylittaa_saldon(self):
self.varasto.lisaa_varastoon(5)
otettavissa = self.varasto.ota_varastosta(10)
self.assertAlmostEqual(otettavissa, 5)
def test_oikea_str_muoto(self):
self.assertEqual(str(self.varasto), "saldo = 0, vielä tilaa 10") | true |
dc638e1f808e5a178c5d96662735b9631ca9eb9a | Python | hehehexdd/Super-Ganio | /game_data/engine/entities/enemies.py | UTF-8 | 1,412 | 2.734375 | 3 | [
"CC0-1.0"
] | permissive | from game_data.engine.entities.base.entity import *
from game_data.engine.base.collisioninfo import *
class Enemy(Entity):
def __init__(self, hp, x, y, level_instance, images: dict, speed_x, initial_move_dir: int):
super().__init__(hp, x, y, level_instance, images, images['move'], speed_x)
self.scale_all_images_by(3)
self.flip_all_images(False)
from game_data.source.collisions.customcollisions import EnemyDamageBox
self.collision = EnemyDamageBox(self, self.current_image.get_rect(), {CollisionChannel.Entity: CollisionAction.Pass},
{CollisionChannel.Death: CollisionAction.Pass, CollisionChannel.World: CollisionAction.Block, CollisionChannel.EnemyObstacle: CollisionAction.Block})
self.level_instance.collisions.append(self.collision)
self.move_x = initial_move_dir
def move_x_axis(self, value):
self.switch_current_image_set('move')
if not self.is_dead():
new_pos = self.x + value
if not self.move_x == 0:
if not self.level_instance.check_collides_any(self, (new_pos, self.y)):
self.x = new_pos
self.collision.move(self.current_image.get_rect(topleft=(self.x, self.y)))
else:
if self.move_x > 0:
self.flip_all_images(False)
else:
self.flip_all_images(True)
self.move_x *= -1
def kill(self):
super(Enemy, self).kill()
self.level_instance.collisions.remove(self.collision)
self.level_instance.entities.remove(self) | true |
d1ffdaf89f0b8861dec174cd8d50f6bb94ed66eb | Python | zhubinQAQ/CPM-R-CNN | /pet/utils/data/transforms/transforms_instance.py | UTF-8 | 3,350 | 2.515625 | 3 | [] | no_license | import cv2
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Box2CS(object):
def __init__(self, aspect_ratio, pixel_std):
self.aspect_ratio = aspect_ratio
self.pixel_std = pixel_std
def __call__(self, image, target):
target.box2cs(self.aspect_ratio, self.pixel_std)
return image, target
class Scale(object):
def __init__(self, scale_factor):
self.scale_factor = scale_factor
def __call__(self, image, target):
target.scale(self.scale_factor)
return image, target
class Rotate(object):
def __init__(self, rotation_factor):
self.rotation_factor = rotation_factor
def __call__(self, image, target):
target.rotate(self.rotation_factor)
return image, target
class Flip(object):
def __init__(self, flip):
self.flip = flip
def __call__(self, image, target):
if self.flip and random.random() <= 0.5:
image = image[:, ::-1, :]
target.flip()
return image, target
class Half_Body(object):
def __init__(self, half_body, num_keypoints_half_body, prob_half_body, points_num, upper_body_ids,
x_ext_half_body, y_ext_half_body, aspect_ratio, pose_pixel_std):
self.half_body = half_body
self.num_keypoints_half_body = num_keypoints_half_body
self.prob_half_body = prob_half_body
self.points_num = points_num
self.upper_body_ids = upper_body_ids
self.x_ext_half_body = x_ext_half_body
self.y_ext_half_body = y_ext_half_body
self.aspect_ratio = aspect_ratio
self.pose_pixel_std = pose_pixel_std
def __call__(self, image, target):
if self.half_body and random.random() <= self.prob_half_body:
target.halfbody(self.num_keypoints_half_body, self.points_num, self.upper_body_ids, self.x_ext_half_body,
self.y_ext_half_body, self.aspect_ratio, self.pose_pixel_std)
return image, target
class Affine(object):
def __init__(self, train_size):
self.train_size = train_size
def __call__(self, image, target):
target.affine(self.train_size)
image = cv2.warpAffine(
image,
target.trans,
(int(self.train_size[0]), int(self.train_size[1])),
flags=cv2.INTER_LINEAR)
return image, target
class Generate_Target(object):
def __init__(self, target_type, sigma, heatmap_size, train_size):
self.target_type = target_type
self.sigma = sigma
self.heatmap_size = heatmap_size
self.train_size = train_size
def __call__(self, image, target):
final_target = target.generate_target(self.target_type,
self.sigma,
self.heatmap_size,
self.train_size)
return image, final_target
class BGR_Normalize(object):
def __init__(self, mean, std, to_rgb=False):
self.mean = mean
self.std = std
self.to_rgb = to_rgb
def __call__(self, image, target):
if self.to_rgb:
image = image[[2, 1, 0]]
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
| true |
45a7eb48f29e16609a8b8c7bc5ab2ef5de80f278 | Python | Aravindh15/FaceRecognition_In_RaspberryPi | /Code/face_IO.py | UTF-8 | 1,826 | 3.03125 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
# define the gpio
all_pin = [11,12,13,15]
buzzer = 12 # GPIO.1 (pin 12)
led_red = 11 # GPIO.0 (pin 11)
led_yellow = 13 # GPIO.2 (pin 13)
led_green = 15 # GPIO.3 (pin 15)
def setup():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(all_pin, GPIO.OUT)
GPIO.output(all_pin, GPIO.HIGH)
# match with person in DB
def match_person_twinkle():
print('Match with person in DB')
GPIO.output(led_red, GPIO.LOW)
GPIO.output(led_yellow, GPIO.LOW)
GPIO.output(buzzer, GPIO.HIGH)
while True:
GPIO.output(led_green, GPIO.LOW) # green led on
time.sleep(0.5)
GPIO.output(led_green, GPIO.HIGH) # led off
time.sleep(0.5)
# match with person in DB not twinkle
def match_person():
print('Match with person in DB')
GPIO.output(led_red, GPIO.LOW)
GPIO.output(led_yellow, GPIO.LOW)
GPIO.output(led_green, GPIO.HIGH) # led off
GPIO.output(buzzer, GPIO.LOW)
# missmatch with person in DB
def missmatch_person_twinkle():
print('Missmatch with person in DB')
GPIO.output(led_green, GPIO.LOW)
GPIO.output(led_yellow, GPIO.HIGH)
while True:
GPIO.output(led_red, GPIO.LOW) # led on
GPIO.output(buzzer, GPIO.LOW)
time.sleep(0.5)
GPIO.output(led_red, GPIO.HIGH) # led off
GPIO.output(buzzer, GPIO.HIGH)
time.sleep(0.5)
# missmatch with person in DB not twinkle
def missmatch_person():
print('Missmatch with person in DB')
GPIO.output(led_green, GPIO.LOW)
GPIO.output(led_yellow, GPIO.LOW)
GPIO.output(buzzer, GPIO.HIGH)
GPIO.output(led_red, GPIO.HIGH) # led on
def destroy():
GPIO.output(LedPin, GPIO.HIGH) # led off
GPIO.cleanup()
'''
if __name__ == '__main__':
setup()
try:
match_person()
except KeyboardInterrupt:
destroy()
'''
| true |
fec1da7b7251652a3e0e23b7643ecf2527a31f37 | Python | sunilsm7/django_resto | /restaurants/validators.py | UTF-8 | 689 | 2.59375 | 3 | [
"MIT"
] | permissive | from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_even(value):
if value % 2 != 0:
raise ValidationError(
_('%(value)s is not an even number'),
params={'value': value},
)
def clean_email(self):
email = self.cleaned_data.get('email')
if ".edu" in email:
raise forms.ValidationError("We do not accept edu emails")
CATEGORIES = ["Mexican", "Asian", "American", "Indian", "Chinese"]
def validate_category(value):
cat = value.capitalize()
if not value in CATEGORIES and not cat in CATEGORIES:
raise ValidationError("{}".format(value +" is not a valid category")) | true |
0369510f87d5785bbc12795e4cead99b0e167c5f | Python | LucDoh/CrowdRank | /scripts/crowdrank_simple.py | UTF-8 | 417 | 2.765625 | 3 | [
"MIT"
] | permissive | import sys
sys.path.append("..")
import os.path
import time
import pandas as pd
from crowdrank import ranker
def main():
'''Script to call crowdrank as simply as possible
python crowdrank_simple.py "keyword"'''
keyword = sys.argv[1]
skip = not (len(sys.argv) > 2 and sys.arg[2] == 0)
ranking_df = ranker.rank(keyword, skip = skip)
print(ranking_df)
if __name__ == "__main__":
main()
| true |
f00dd6772ef2cf5a2ad1f86e355549962f15d87d | Python | npkhanhh/codeforces | /python/round712/1504A.py | UTF-8 | 308 | 3.203125 | 3 | [] | no_license | from sys import stdin
for _ in range(int(stdin.readline())):
s = list(input().strip())
n = len(s)
res = 'NO'
for i in range(n):
if s[n-i-1] != 'a':
res = 'YES'
s.insert(i, 'a')
break
print(res)
if res == 'YES':
print(''.join(s))
| true |
215579d6d4a11d7f522b3e99826ab422660e706f | Python | ghleokim/codeTestProblems | /swExpertAcademy/q2056_calendar.py | UTF-8 | 577 | 3.28125 | 3 | [] | no_license | #q2056
num = int(input())
dayChart = {
31: [1,3,5,7,8,10,12],
30: [4,6,9,11],
28: [2]
}
def checkDate(year, month, day):
if int(month) > 12 or int(month) < 1 or int(day) < 1:
return -1
else:
for d, m in dayChart.items():
if (int(month) in m) and (int(day) <= d):
return '{0}/{1}/{2}'.format(year, month, day)
return -1
for case in range(num):
inDate = input()
year = inDate[:4]
month = inDate[4:6]
day = inDate[6:]
print('#{0} {1}'.format(case+1, checkDate(year, month, day))) | true |
fbd085e8d66fdfb4ee0822cf8ace50238e22b40c | Python | whyj107/Algorithm | /Programmers/20200807_가장 큰 수.py | UTF-8 | 1,176 | 3.9375 | 4 | [] | no_license | # 문제
# 가장 큰 수
# https://programmers.co.kr/learn/courses/30/lessons/42746?language=python3
# 나의 풀이
# 시간 초과
from itertools import permutations
def solution0(numbers):
tmp = [''.join(list(map(str, i))) for i in list(permutations(numbers, len(numbers)))]
tmp.sort()
return tmp[-1]
# string 비교 문제 풀이
def solution(numbers):
numbers = list(map(str, numbers))
numbers.sort(key=lambda x: x*3, reverse=True)
return str(int(''.join(numbers)))
# 정렬로 문제 풀이
def solution1(numbers):
numbers = list(map(str, numbers))
answer = "".join(sorted(numbers, key=lambda x: (x[0], x[1%len(x)], x[2%len(x)], x[3%len(x)]),reverse=True))
return answer if int(answer) != 0 else "0"
import functools
def comparator(a,b):
t1 = a+b
t2 = b+a
# t1이 크다면 1 // t2가 크다면 -1 // 같으면 0
return (int(t1) > int(t2)) - (int(t1) < int(t2))
def solution2(numbers):
n = [str(x) for x in numbers]
n = sorted(n, key=functools.cmp_to_key(comparator),reverse=True)
answer = str(int(''.join(n)))
return answer
if __name__ == '__main__':
print(solution([6, 10, 2]), "6210") | true |
c02208e183ffad708e59d1a9dd37bc391d5bba6b | Python | ExperimentalHypothesis/flask-restful-web-api | /section13/tests/integration/test_user.py | UTF-8 | 1,156 | 2.53125 | 3 | [] | no_license | from models.user import UserModel
import pytest
from app import app
from db import db
@pytest.fixture(autouse=True)
def test_client_db():
# set up
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///"
with app.app_context():
db.init_app(app)
db.create_all()
testing_client = app.test_client()
ctx = app.app_context()
ctx.push()
# do testing
yield testing_client
# tear down
with app.app_context():
db.session.remove()
db.drop_all()
ctx.pop()
def test_save_delete(test_client_db):
u = UserModel(username="test", password="passtest")
found_by_id = u.get_user_by_id(1)
assert found_by_id is None
u.save_to_db()
found_by_id = u.get_user_by_id(1)
assert found_by_id is not None
u.delete_from_db()
found_by_id = u.get_user_by_id(1)
assert found_by_id is None
def test_get_user_by_id(test_client_db):
u = UserModel(username="test", password="passtest")
u.save_to_db()
found_by_id = u.get_user_by_id(1)
assert found_by_id.username == "test"
assert found_by_id.password == "passtest"
assert found_by_id.id == 1
| true |
cf6b016d5360b6408c9b0955a22dd4e6604df4dd | Python | kayscott/Ch_9_Exercises | /using in to search.py | UTF-8 | 195 | 2.53125 | 3 | [] | no_license | # USING IN TO SEARCH
import os
f = open(os.path.expanduser('~/Desktop/*Filename*.txt'))
for line in f:
line = line.rstrip()
if not ' *keyword*' in line:
continue
print line | true |
2b3f38e20378b89a310b19f96e7a002ef1921611 | Python | Gengj/MNIST_LEARNING_CLASS | /MNIST_1.py | UTF-8 | 14,350 | 2.75 | 3 | [] | no_license | # -*- coding:utf-8 -*-
"""
-------------------------------------------------
@Author: GengJia
@Contact: 35285770@qq.com
@Site: https://github.com/Gengj
-------------------------------------------------
@Version: 1.0
@License: (C) Copyright 2013-2020
@File: MNIST_1.py
@Time: 2018/1/16 下午7:25
@Desc:
-------------------------------------------------
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# MINTS基本参数
INPUT_NODE = 784
OUTPUT_NODE = 10
# 配置神经网络参数
LAYER1_NODE = 500 # 神经网路含有一个隐藏层,该层含有500个节点
BATCH_SIZE = 100 # 一个batch中含有的数据个数
LEARNING_RATE_BASE = 0.8 # 基础的学习率
LEARNING_RATE_DECAT = 0.99 # 学习率的衰减率
REGULARIZATION_RATE = 0.0001 # 描述模型复杂度的正则化项在损失函数中的系数
TRAING_STEP = 30000 # 训练轮数
MOVING_AVERAGE_DECAY = 0.99 # 滑动平均衰减率
# 辅助函数
# 给定神经网路的输入和所有参数,计算神经网络的前向传播结果
# 定义一个使用ReLU激活函数的三层全连接神经网络
# 通过加入隐藏层实现了多层网络结构
# 通过ReLU激活函数实现去线性化
# 函数支持传入用于计算参数平均值的类,方便在测试时使用滑动平均模型
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
# 没有提供滑动平均类时,直接使用参数当前的取值
if avg_class == None:
# 计算隐藏层的前向传播结果,这里使用ReLU激活函数
layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
# 计算输出层的前向传播结果。因为在计算损失函数时会一并计算softmax函数
# 所有在这里不需要加入激活函数,而且不加入softmax函数不会影响预测结果
# 因为预测时,使用的时不同类别对应节点输出值的相对大小,有没有softmax层对分类结果的计算没有影响
# 因此,在计算整个神经网络的前向传播时,可以不加入最后的softmax层
return tf.matmul(layer1, weights2) + biases2
else:
layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1))
+ avg_class.average(biases1))
return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)
# 训练模型的过程
def train(mnist):
x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
# 生成隐藏层的参数
weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
# 生成输出层的参数
weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
# 计算在当前参数下神经网络前向传播的结果。这里给出的用于计算滑动平均的类为None
# 因此函数不会使用参数的滑动平均值
y = inference(x, None, weights1, biases1, weights2, biases2)
# 定义存储训练轮数的变量
# 这个变量不需要计算滑动平均值,所以这个变量一般为不可训练的变量
# 使用tensorflow训练神经网络时,一般都会将代表训练论述的变量指定为不可训练的参数
global_step = tf.Variable(0, trainable=False)
# 给定滑动平均衰减率和训练轮数的变量,初始化滑动平均类
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
# 在所有代表神经网络参数的变量上使用滑动平均,其他辅助变量例如global_step就不需要训练了
# variable_averages.apply返回:计算图上集合tf.GraphKeys.TRAINABLE_VARIABLES中的元素
# 这个集合中的元素即所有没有指定trainable=False的参数
variable_averages_op = variable_averages.apply(tf.trainable_variables())
# 计算使用滑动平均之后的前向传播结果
# 滑动平均本身不会改变变量的取值,而是会维护一个影子变量来记录其滑动平均值
# 所以当需要使用这个滑动平均值时,需要明确调用average函数
average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)
# 计算交叉熵作为刻画预测值和真实值之间差距的损失函数
# 当分类问题只有一个正确答案时,可以使用tensorflow提供的nn.sparse_softmax_cross_entropy_with_logits
# 函数来计算交叉熵
# mnist问题的图片中只包含0~9中的一个数字,因此可以使用这个函数计算
# nn.sparse_softmax_cross_entropy_with_logits函数
# '''
# 第一个参数:神经网络不包含softmax层的前向传播结果
# 第二个参数:训练数据的正确答案
# 因为标准答案y_是一个长度为10的一维数组,而该函数需要提供的是正确答案的数字
# 因此使用tf.argmax函数得到y_数组中最大值的编号,即正确答案的数字
# '''
# 这个函数看名字都知道,是将稀疏表示的label与输出层计算出来结果做对比,函数的形式和参数如下:
#
# nn.sparse_softmax_cross_entropy_with_logits(logits, label, name=None)
#
# 第一个坑: logits表示从最后一个隐藏层线性变换输出的结果!假设类别数目为10,
# 那么对于每个样本这个logits应该是个10维的向量,且没有经过归一化,所有这个向量的元素和不为1。
# 然后这个函数会先将logits进行softmax归一化,然后与label表示的onehot向量比较,计算交叉熵。
# 也就是说,这个函数执行了三步(这里意思一下):
# sm = nn.softmax(logits)
# onehot = tf.sparse_to_dense(label,…)
# nn.sparse_cross_entropy(sm, onehot)
# 第二个坑: 输入的label是稀疏表示的,就是是一个[0,10)的一个整数,这我们都知道。
# 但是这个数必须是一维的!就是说,每个样本的期望类别只有一个,属于A类就不能属于其他类了。
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
# 计算在当前batch中所有样例的交叉熵平均值
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# 计算L2正则化损失函数,定义正则率
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
# 计算模型的正则化损失,一般只需要计算神经网络边上权重的正则化损失,而不需要计算偏置项
regularization = regularizer(weights1) + regularizer(weights2)
# 总损失是交叉熵损失和正则化损失的和
loss = cross_entropy_mean + regularization
# 设置指数衰减的学习率
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE, # 基础学习率,随着迭代的进行,更新变量时使用的学习率在这个基础上递减
global_step, # 迭代轮数
mnist.train.num_examples / BATCH_SIZE, # 过完所有训练数据需要的迭代次数
LEARNING_RATE_DECAT # 学习率衰减速度
)
# 使用tf.train.GradientDescentOptimizer优化算法来优化损失函数
# 注意:这里损失函数包含了交叉上损失和l2正则化损失
# 注意:这里的train_step不是训练步数,而是每一步训练
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
# 在训练神经网络模型的时候,每过一遍数据既需要通过反响传播来更新神经网络的参数
# 又要更新每个参数的滑动平均值。
# 为了一次完成多个操作,TensorFlow提供tf.control_dependencies(control_inputs=)和tf.group(*input)
# 两种机制。
with tf.control_dependencies([train_step, variable_averages_op]):
train_op = tf.no_op(name='train')
# 以上语句和
# train_op = tf.group(train_step,variable_averages_op)
# 等价
# 检验滑动平均算法得到的神经网路前向结果是否正确
# 使用tf.argmax得到average_y和y_数组中最大值的编号,即正确答案的数字,第二个参数1表示在第一维上进行
# tf.argmax返回一个长度为batch的一维数组,数组中的值就是每一个样例对应的数字识别结果,交给tf.equal判断
# tf.equal判断两个张量的每一维是否相等,相等返回true,返回BOOL类型
correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
# tf.cast():Casts a tensor to a new type.
# 将bool类型转化为float32类型,再计算平均值。平均值=模型在这组训练上的准确率
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# 开启会话,正式启动训练过程
with tf.Session() as sess:
# 初始化全部变量
tf.initialize_all_variables().run()
# 准备验证数据
# 一般在神经网络的训练过程中,会通过验证数据来大致判断停止的条件和评判训练的效果
validate_feed = {
x: mnist.validation.images,
y_: mnist.validation.labels
}
# 准备测试数据
# 在真实环境中,这部分数据是不可见的。
# 这个数据只是作为判断模型优劣的最后评价结果
test_feed = {
x: mnist.test.images,
y_: mnist.test.labels
}
# 迭代神经网络
# TRAING_STEP = 30000,训练30000轮
for i in range(TRAING_STEP):
# 每1000轮输出一次在验证数据集上的测试结果
if i % 1000 == 0:
# 计算滑动平均模型在验证数据上的准确率
# 因为MNISTS数据集比较小,所以一次可以处理所有的验证数据
# 为了计算方便,本程序没有将验证数据划分为更小的batch
# 而当神经网络模型比较复杂或者验证数据比较大时,太大的batch会导致计算时间过长或内存溢出的错误
validate_acc = sess.run(accuracy, feed_dict=validate_feed)
print("After %d training step(s),validation using average model is %g" % (i, validate_acc))
# 产生这一轮使用的batch个训练数据,并进行训练过程
xs, ys = mnist.train.next_batch(BATCH_SIZE)
sess.run(train_op, feed_dict={x: xs, y_: ys})
print("weights1:\n")
print(sess.run(weights1))
print("*************************************************")
print("weights2:\n")
print(sess.run(weights2))
print("*************************************************")
# 30000轮训练结束后,在测试数据上检测神经网络模型的最终正确率
test_acc = sess.run(accuracy, feed_dict=test_feed)
print("After %d training step(s),test accuracy using average model is %g" % (TRAING_STEP, test_acc))
# 主程序入口
def main(argv=None):
# 声明MNIST数据集的处理类mnist,mnist初始化时会自动下载数据
# 但本程序是直接将下载数据放在源代码根目录下,因为mnist下载时会报SSL的错
mnist = input_data.read_data_sets("", one_hot=True)
train(mnist)
if __name__ == '__main__':
print("-------------------START-------------------")
# TensorFlow提供的程序入口,tf.app.run()会调用上面定义的main函数
tf.app.run()
#
# -------------------START-------------------
# Extracting train-images-idx3-ubyte.gz
# Extracting train-labels-idx1-ubyte.gz
# Extracting t10k-images-idx3-ubyte.gz
# Extracting t10k-labels-idx1-ubyte.gz
# After 0 training step(s),validation using average model is 0.0934
# After 1000 training step(s),validation using average model is 0.976
# After 2000 training step(s),validation using average model is 0.9808
# After 3000 training step(s),validation using average model is 0.9816
# After 4000 training step(s),validation using average model is 0.9822
# After 5000 training step(s),validation using average model is 0.9824
# After 6000 training step(s),validation using average model is 0.9838
# After 7000 training step(s),validation using average model is 0.9834
# After 8000 training step(s),validation using average model is 0.9834
# After 9000 training step(s),validation using average model is 0.9838
# After 10000 training step(s),validation using average model is 0.9836
# After 11000 training step(s),validation using average model is 0.9844
# After 12000 training step(s),validation using average model is 0.9836
# After 13000 training step(s),validation using average model is 0.9844
# After 14000 training step(s),validation using average model is 0.9838
# After 15000 training step(s),validation using average model is 0.9854
# After 16000 training step(s),validation using average model is 0.9852
# After 17000 training step(s),validation using average model is 0.9852
# After 18000 training step(s),validation using average model is 0.9856
# After 19000 training step(s),validation using average model is 0.9848
# After 20000 training step(s),validation using average model is 0.985
# After 21000 training step(s),validation using average model is 0.985
# After 22000 training step(s),validation using average model is 0.9852
# After 23000 training step(s),validation using average model is 0.9856
# After 24000 training step(s),validation using average model is 0.985
# After 25000 training step(s),validation using average model is 0.9848
# After 26000 training step(s),validation using average model is 0.9858
# After 27000 training step(s),validation using average model is 0.9854
# After 28000 training step(s),validation using average model is 0.9854
# After 29000 training step(s),validation using average model is 0.985
# After 30000 training step(s),test accuracy using average model is 0.9837 | true |
deade78b2736a2d2090f1c71ea75c9d242f96746 | Python | lamine2000/ZCasino | /ZCasino.py | UTF-8 | 1,522 | 3.453125 | 3 | [] | no_license | from random import randrange
from math import ceil
from os import system, name
def clear():
# for windows
_ = system('cls') if name == 'nt' else system('clear')
if __name__ == '__main__':
continuer = 'o'
argent = 1000
while continuer == 'o':
print(f'Vous avez {argent}$')
miseArgent = miseNum = -1
while not 0 < miseArgent <= argent:
miseArgent = int(input(f'Combien misez-vous ? [1$;{argent}$]... '))
while not 0 <= miseNum < 50:
miseNum = int(input('Sur quel numéro misez-vous ? [0;49]... '))
numBille = randrange(50)
print(f'La bille s\'est arrêtée sur le ........ {numBille}')
if numBille == miseNum:
gain = miseArgent * 3
print(f'Quelle chance, vous gagnez {gain}$')
elif numBille % 2 == miseNum % 2:
gain = ceil(miseArgent / 2)
print(f'Bravo, vous gagnez {gain}$')
else:
gain = -miseArgent
print('Dommage, vous perdez votre mise')
argent += gain
print(f'Vous avez {argent}$')
if argent <= 0:
print('Dommage, vous n\'avez pas assez d\'argent pour miser. À la prochaine !')
break
reponse = 'a'
while reponse[0] not in 'onON':
reponse = input('Voulez-vous continuer ? O/N... ')
continuer = reponse[0].lower()
if continuer == 'o':
clear()
else:
print('Dommage, vous partez déjà. Bye !')
| true |
c46ccf4aacacbe46fd11578f1ed1177bda6d845d | Python | tobeeeelite/success | /tool.py | UTF-8 | 1,017 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/10 9:54
# @Author : zyg
# @Site :
# @File : tool.py
# @Software: PyCharm
import os
import shutil
def get_files(in_dir):
#print('get train_data files from '+exts)
files = []
if not os.path.exists(in_dir):
ValueError("visit path is not exits")
abs_file = []
for root, _, files in os.walk(in_dir):
for file in files:
_ ,ext = os.path.splitext(file)
# print(ext)
if ext not in ['.pdf','.docx','doc']:
abs_file.append(os.path.join(root, file))
return abs_file
def main():
label = '数学'
files = get_files(r'D:\小学试题照片')
for k,f in enumerate(files) :
if label in f:
print(f)
_, name = os.path.split(f)
new_path = os.path.join(r'D:\maths','{0}.jpg'.format(k))
shutil.copyfile(f,new_path)
if __name__ == '__main__':
main() | true |
2a2f2b87dc0411047a47a5e7b19186e6b29020f4 | Python | kiic-hub/MLDL | /Get_data.py | UTF-8 | 1,425 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/minkh93/MLDL/blob/master/Get_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
import cv2
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
class get_data:
def __init__(self):
get_ipython().system('git clone https://github.com/minkh93/MLDL.git')
def get_train_data():
root_dir='/content/MLDL/train/'
train_input=[]
train_label=[]
for index in range(6):
path = root_dir+str(index)
print(path)
img_list = os.listdir(path)
get_ipython().system('cd $path')
for img in img_list:
image = cv2.imread(str(index)+'/'+img, cv2.IMREAD_COLOR)
train_input.append([np.array(image)])
train_label.append([np.array(index)])
return train_input, train_label
def get_test_data():
root_dir='/content/MLDL/train/'
train_input=[]
train_label=[]
img_list = os.listdir(path)
for img in img_list:
image = cv2.imread(str(index)+'/'+img, cv2.IMREAD_COLOR)
train_input.append([np.array(image)])
train_label.append([np.array(index)])
return train_input, train_label
| true |
8706cc1055eeadc3e6dbb82fb235465808146938 | Python | raphaelaffinito/rawPy | /RawPy/bayes.py | UTF-8 | 10,133 | 2.734375 | 3 | [] | no_license |
import gzip
import os
import sys
from time import time
import emcee
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
from matplotlib import gridspec
from scipy.stats import pearsonr
from six.moves import cPickle as pickle
class bayes_framework:
    """
    API for the Bayesian inference (inversion using a probabilistic random
    sampling approach): samples the posterior distribution of friction
    model parameters with an MCMC ensemble sampler (emcee), and offers
    helpers to persist, summarise, and plot the resulting chain.
    """
    # Character width of the console progress bar drawn by inv_bayes
    msg_width = 30
    # MCMC sampler settings (nsteps/nthreads/nburn), filled in by __init__.
    # NOTE(review): this is a mutable *class* attribute, so all instances
    # share one dict and __init__ mutates it in place — confirm this
    # sharing is intentional before creating multiple instances.
    bayes_params = {}
    def __init__(self):
        # Populate the (shared) settings dict with defaults:
        # nsteps: MCMC steps per walker; nthreads: sampler threads;
        # nburn: initial steps discarded as burn-in by statistics/plots
        self.bayes_params.update({"nsteps": 2000, "nthreads": 4, "nburn": 500})
        pass
def log_prior(self):
"""
Calculate a flat prior distribution. TODO: explore other priors
Returns: log(1) = 0.0
"""
return 0.0
def log_likelihood(self, t, y):
"""
Calculate the log likelihood of the data, given the model parameters
Input: time, friction data
Returns: log likelihood
"""
params = self.params
# TODO: calculate the data reciprocal weights around peak friction
weights = 1.0
sigma = weights * params["sigma"]
if self.solver_mode == "step":
model_result = self.forward(t, mode=self.solver_mode)
mu_model = self.interpolate(t, model_result["t"], model_result["mu"])
else:
model_result = self.forward(t)
mu_model = model_result["mu"]
if np.isnan(mu_model[-1]):
return -np.inf
# Compute the likelihood of the data given the model parameters,
# assuming that model-data mismatch is normally distributed with
# standard deviation sigma
logl = -0.5*np.sum(np.log(2*np.pi*sigma**2) + (y - mu_model)**2 / sigma**2)
return logl
def log_posterior(self, p, t, y):
"""
Calculate the log posterior (\propto log prior + log likelihood),
given the model parameters (a, b, Dc, and optionally k)
Input: model parameters, time, friction data
Returns: log posterior
"""
self.unpack_params(p)
params = self.params
if params["a"] < 0 or params["Dc"] <= 0 or params["sigma"] <= 0 or params["k"] <= 0:
return -np.inf
return self.log_prior() + self.log_likelihood(t, y)
    def inv_bayes(self, p0, pickle_file="bayes_pickle.tar.gz"):
        """
        Perform the Bayesian inference (inversion), given the data. The initial
        guess p0 is typically the result of a preceding deterministic
        inversion; walkers are initialised as random perturbations of it.
        Input: initial guess, pickle output file
        Returns: summary statistics of the sampled posterior distribution
        (transposed output of get_mcmc_stats)
        """
        self.unpack_params(p0)
        params = self.params
        bayes_params = self.bayes_params
        data = self.data
        # emcee needs several walkers per dimension; use 4x ndim
        ndim = len(p0)
        nwalkers = 4*ndim
        nsteps = bayes_params["nsteps"]
        nthreads = bayes_params["nthreads"]
        # Initiate random values between 0.5 and 1.5
        # TODO: initialise walkers with tight gaussian around p0
        starting_guess = 0.5 + np.random.random((nwalkers, ndim))
        # Multiply with inversion results, so each walker starts at a
        # random 50-150% perturbation of the previous best estimate
        for i, key in enumerate(self.inversion_params):
            starting_guess[:, i] *= params[key]
        sampler = emcee.EnsembleSampler(
            nwalkers, ndim, self.log_posterior,
            args=[data["t"], data["mu"]], threads=nthreads
        )
        print("Sampling posterior distribution with %d walkers..." % nwalkers)
        t0 = time()
        # Refresh the ETA estimate every 10% of the requested steps
        dN = int(nsteps//10)
        ETA_str = "--"
        try:
            for i, result in enumerate(sampler.sample(starting_guess, iterations=nsteps)):
                if i > 0 and i % dN == 0:
                    # Estimate remaining time from the mean step rate so far
                    t_i = time()
                    inv_rate = (t_i-t0)/float(i)
                    todo = nsteps-i
                    ETA = todo*inv_rate
                    ETA_str = "%.2f s" % ETA
                # Redraw the in-place console progress bar each step
                n = int((self.msg_width + 1) * float(i) / nsteps)
                sys.stdout.write("\r[{0}{1}]\tETA: {2}".format('#' * n, ' ' * (self.msg_width - n), ETA_str))
        except pickle.PickleError:
            # Python 2.7 cannot pickle bound methods for the multithreaded
            # sampler; fall back to one thread and repeat the sampling loop
            print("Python2.7 compatibility issue detected, switching from multithreaded to singlethreaded")
            sampler = emcee.EnsembleSampler(
                nwalkers, ndim, self.log_posterior,
                args=[data["t"], data["mu"]], threads=1
            )
            for i, result in enumerate(sampler.sample(starting_guess, iterations=nsteps)):
                if i > 0 and i % dN == 0:
                    t_i = time()
                    inv_rate = (t_i-t0)/float(i)
                    todo = nsteps-i
                    ETA = todo*inv_rate
                    ETA_str = "%.2f s" % ETA
                n = int((self.msg_width + 1) * float(i) / nsteps)
                sys.stdout.write("\r[{0}{1}]\tETA: {2}".format('#' * n, ' ' * (self.msg_width - n), ETA_str))
        sys.stdout.write("\n")
        t1 = time()
        print("MCMC execution time: %.2f" % (t1 - t0))
        # Keep the raw chain (nwalkers, nsteps, ndim per emcee) on the
        # object and persist the full inversion state to disk
        self.chain = sampler.chain
        self.pickle_chain(pickle_file)
        stats = self.get_mcmc_stats().T
        return stats
def pickle_chain(self, pickle_file):
"""
Export the results of the Bayesian inference to disk using Python's
pickle protocol
:param pickle_file: the name of the output file
:return:
"""
output = {
"params": self.params,
"bayes_params": self.bayes_params,
"data": self.data,
"chain": self.chain,
}
print("Dumping pickles...")
with gzip.GzipFile(pickle_file, "w") as f:
pickle.dump(output, f, pickle.HIGHEST_PROTOCOL)
return True
def unpickle_chain(self, pickle_file):
print("Loading pickles...")
if not os.path.isfile(pickle_file):
print("Pickles not found!")
return False
try:
with gzip.GzipFile(pickle_file, "r") as f:
data = pickle.load(f)
except ValueError as e:
print("Exception '%s' caught, this is likely related to Python version incompatibility" % e)
print("Re-run the Bayesian inference using the desired Python version. Will now exit...")
exit()
self.__dict__.update(data)
return True
    def prune_chain(self):
        """
        Sometimes a few walkers get stuck in a local minimum. Prune those
        walkers astray from the sampling chain

        Samples more than 2 sigma away from the mean are replaced with NaN so
        that nanmean/nanstd exclude them from the returned statistics.

        :return: (n_params, 2) array holding one (pruned mean, pruned std)
            row per inversion parameter
        """
        chain = self.chain
        nburn = self.bayes_params["nburn"]
        stats = np.zeros((len(self.inversion_params), 2))
        for i, key in enumerate(self.inversion_params):
            # Flatten all walkers after the burn-in into one sample vector.
            param = chain[:, nburn:, i].reshape(-1)
            std = param.std()
            mean = param.mean()
            dx = np.abs(mean - param)
            # Mask outliers beyond 2 sigma; the nan-aware reductions below
            # then ignore them.
            param[dx > 2*std] = np.nan
            stats[i, 0] = np.nanmean(param)
            stats[i, 1] = np.nanstd(param)
        # NOTE(review): these diagnostic plots use dx/std from the *last*
        # loop iteration only — presumably a quick-look aid; confirm.
        plt.plot(np.sort(dx)[::-1], ".")
        plt.axhline(2*std, ls="--", c="k")
        plt.show()
        return stats
def get_mcmc_stats(self):
"""
Calculate the mean and standard deviation of MCMC chain for each model
parameter in the posterior distribution (after a certain burn-in)
Returns: posterior distribution statistics
"""
chain = self.chain
nburn = self.bayes_params["nburn"]
stats = np.zeros((len(self.inversion_params), 2))
for i, key in enumerate(self.inversion_params):
param = chain[:, nburn:, i].reshape(-1)
stats[i, 0] = param.mean()
stats[i, 1] = param.std()
return stats
    def plot_mcmc_chain(self):
        """
        Plot the trace and distribution of each model parameter in the MCMC chain

        For every parameter one row is drawn: the left two columns show the
        per-walker traces (burn-in marked with a dashed line), the right
        column shows the histogram of the post-burn-in samples with the mean
        marked.
        """
        chain = self.chain
        nburn = self.bayes_params["nburn"]
        # chain layout: (nwalkers, nsteps, ndim)
        ndim = chain.shape[2]
        nwalkers = chain.shape[0]
        gs = gridspec.GridSpec(ndim, 3)
        plt.figure(figsize=(10, 1.5*ndim))
        for i, key in enumerate(self.inversion_params):
            # Post-burn-in samples of parameter i, all walkers flattened.
            param = chain[:, nburn:, i].reshape(-1)
            plt.subplot(gs[i, :-1])
            for j in range(nwalkers):
                plt.plot(chain[j, :, i], lw=1.0, c="k", alpha=0.3)
            # Mark where the burn-in period ends.
            plt.axvline(nburn, ls="--", c="darkgray")
            plt.ylabel("%s" % key)
            if i == ndim-1:
                plt.xlabel("step")
            hist, bins = np.histogram(param, bins="auto")
            midbins = 0.5 * (bins[1:] + bins[:-1])
            plt.subplot(gs[i, -1])
            plt.plot(midbins, hist)
            plt.axvline(param.mean(), ls="--", c="k")
        plt.tight_layout()
        plt.show()
    def corner_plot(self):
        """
        Scatter-matrix ("corner") plot of the posterior samples: marginal
        histograms on the diagonal, pairwise scatter plots with the Pearson
        correlation coefficient below the diagonal.
        """
        chain = self.chain
        # NOTE(review): the last chain dimension is excluded from the plot —
        # presumably a nuisance parameter; confirm.
        ndim = chain.shape[2]-1
        nburn = self.bayes_params["nburn"]
        plt.figure(figsize=(10, 8))
        for i in range(ndim):
            param_i = chain[:, nburn:, i].reshape(-1)
            for j in range(i+1):
                ax = plt.subplot(ndim, ndim, 1+ndim*i+j)
                if i == j:
                    # Diagonal: marginal distribution of parameter i.
                    hist, bins = np.histogram(param_i, bins="auto")
                    midbins = 0.5*(bins[1:] + bins[:-1])
                    plt.plot(midbins, hist)
                    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.2e"))
                else:
                    # Off-diagonal: joint scatter of parameters (j, i) with
                    # the sample medians highlighted.
                    param_j = chain[:, nburn:, j].reshape(-1)
                    r, p = pearsonr(param_i, param_j)
                    plt.plot(param_j, param_i, ".", ms=1, alpha=0.5)
                    plt.plot(np.median(param_j), np.median(param_i), "o", mew=1, mfc="r", mec="k")
                    plt.text(0.5, 0.9, "pearson r: %.2f" % r, transform=ax.transAxes, fontsize=9, ha="center")
                    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter("%.2e"))
                    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.2e"))
                if j == 0:
                    plt.ylabel(self.inversion_params[i])
                if i == ndim-1:
                    plt.xlabel(self.inversion_params[j])
        plt.tight_layout()
        plt.show()
| true |
28f981d366ecf9e4393fa71145ee27e1ce7f471a | Python | Kpavicic00/FDR | /apps/login_pages/league_apps/BFPD.py | UTF-8 | 14,046 | 2.53125 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import numpy as np
from functions import *
from League_functions.BFPD_func import BFPD_base
from database import *
import altair as alt
from html_temp import *
import os
import time
def app():
    """Streamlit page driving the BFPD league workflow.

    Presents four sections: process the raw league data through BFPD_base
    and save the result per-user into the BFPD_table, export the saved rows
    as CSV, delete the user's saved rows, and visualise the saved rows with
    several Altair charts. All database helpers (create_BFPD, return_user_id,
    return_id_BFPD_table, delite_BFPD, ...) come from the project modules
    imported at file level.
    """
    create_BFPD()
    st.title('1. function IFPA process function')
    st.write('Welcome to metrics')
    # Resolve the logged-in user's name and drop the temporary user record.
    username = return_username()
    i = (username[0])
    res = str(''.join(map(str, i)))
    delite_temp_user(res)
    col1,col2 = st.columns(2)
    with col1:
        st.info(" For restart data you must delete data and start over !!!")
    # Processd data
    if st.checkbox("Process data"):
        df = pd.read_sql('SELECT * FROM League_datas', conn)
        df_new = df[["0","Nationality","Competition","Expenditures","Arrivals","Income","Departures","Balance","Year"]]
        st.dataframe(df_new)
        a_leuge_DF = BFPD_base(df_new)
        my_form = st.form(key = "form123")
        submit = my_form.form_submit_button(label = "Submit")
        if submit:
            st.success("Datas processes :")
        my_form_save = st.form(key = "form1")
        st.info("For process data you must save data to database")
        submit = my_form_save.form_submit_button(label = "Save data")
        if submit:
            # Tag every processed row with the numeric user id before saving.
            return_user_idd = return_user_id(res)
            i = (return_user_idd[0])
            res = int(''.join(map(str, i)))
            te = int(res)
            flag = return_id_BFPD_table(te)
            if flag == []:
                df = a_leuge_DF
                size = NumberOfRows(df)
                size = len(df)
                list1 = [0] * size
                for i in range(0,size):
                    list1[i] = te
                df['user_id'] = list1
                create_BFPD()
                df.to_sql('BFPD_table',con=conn,if_exists='append')
                st.success("Data successfuly saved !")
            else:
                st.warning("Please first delite your records from database !!")
        # Export datas
        form_export_csv = st.form(key = "export_form")
        submit = form_export_csv.form_submit_button(label = "Export datas")
        if submit:
            if submit:
                return_user_idd = return_user_id(res)
                i = (return_user_idd[0])
                res = int(''.join(map(str, i)))
                te = int(res)
                flag = return_id_BFPD_table(te)
                if flag != []:
                    if int(te) > 0:
                        df = pd.read_sql_query('SELECT * FROM BFPD_table WHERE user_id = "{}"'.format(te),conn)
                        df_new = df[["Name_of_Legue","Year","Nationality","Balance_by_player","Balance_INFLACION"]]
                        st.markdown(get_table_download_link_csv(df_new), unsafe_allow_html=True)
                        st.success("Export Datas")
                else:
                    st.warning("file not found")
                    st.info("Please procces data again !")
    # Delite datas
    my_form_delite = st.form(key = "form12")
    submit = my_form_delite.form_submit_button(label = "Delite datas")
    if submit:
        return_user_idd = return_user_id(res)
        i = (return_user_idd[0])
        res = int(''.join(map(str, i)))
        te = int(res)
        flag = (return_id_BFPD_table(te))
        if flag != []:
            if int(te) > 0 :
                delite_BFPD(te)
                st.success("Delite Datas")
                st.info("Please procces data")
        else:
            st.warning("file not found")
            st.info("Please procces data again !")
    try:
        if st.checkbox("Viusalise data !!!"):
            # Viusalise datas
            #st.write("Viusalise datas",res)
            return_user_idd = return_user_id(res)
            st.write("")
            i = (return_user_idd[0])
            res = int(''.join(map(str, i)))
            te = int(res)
            flag = return_id_BFPD_table(te)
            if flag != []:
                if int(te) > 0:
                    ## 1. Graph: raw vs inflation-corrected balance over time.
                    df = pd.read_sql_query('SELECT * FROM BFPD_table WHERE user_id = "{}"'.format(te),conn)
                    df.columns.name = None
                    #st.dataframe(df)
                    df_new = df[["Name_of_Legue","Year","Nationality","Balance_by_player","Balance_INFLACION"]]
                    df_new['Year']= pd.to_datetime(df_new['Year'],format='%Y')
                    st.markdown(html_BFPD_vizaulazacija1,unsafe_allow_html=True)
                    chartline1 = alt.Chart(df_new).mark_line(size=5,color='#297F87').encode(
                        x=alt.X('Year', axis=alt.Axis(title='date')),
                        y=alt.Y('sum(Balance_by_player)',axis=alt.Axis( title='Inflation rate'), stack=None),
                    ).properties(
                        width=700,
                        height=500
                    ).interactive()
                    chartline2 = alt.Chart(df_new).mark_line(size=5,color='#DF2E2E').encode(
                        x=alt.X('Year', axis=alt.Axis(title='date')),
                        y=alt.Y('sum(Balance_INFLACION)', axis=alt.Axis( title='Inflation rate'),stack=None)
                    ).properties(
                        width=700,
                        height=500
                    ).interactive()
                    st.altair_chart(chartline1 + chartline2)
                    ##########################################################################################################
                    ## 2. Graph: bar charts with an interactive year-cutoff slider.
                    st.markdown(html_BFPD_vizaulazacija2,unsafe_allow_html=True)
                    st.subheader("Income by year ")
                    df2 = pd.read_sql_query('SELECT * FROM BFPD_table WHERE user_id = "{}"'.format(te),conn)
                    df_new2 = df2[["Name_of_Legue","Year","Nationality","Balance_by_player","Balance_INFLACION"]]
                    df_new2["date2"] = pd.to_datetime(df["Year"]).dt.strftime("%Y-%m-%d")
                    data_start = df_new2["Year"].min()
                    data_end = df_new2["Year"].max()
                    def timestamp(t):
                        return pd.to_datetime(t).timestamp() * 1000
                    slider2 = alt.binding_range(name='cutoff:', min=timestamp(data_start), max=timestamp(data_end))
                    selector2 = alt.selection_single(name="SelectorName",fields=['cutoff'],bind=slider2,init={"cutoff": timestamp("2011-01-01")})
                    abssa = alt.Chart(df_new2).mark_bar(size=17).encode(
                        x='Year',
                        y=alt.Y('Balance_by_player',title =None),
                        color=alt.condition(
                            'toDate(datum.Year) < SelectorName.cutoff[0]',
                            alt.value('red'), alt.value('blue')
                        )
                    ).properties(
                        width=700,
                    ).add_selection(
                        selector2
                    )
                    st.altair_chart(abssa)
                    st.subheader("Income by year + INFLACION")
                    df2 = pd.read_sql_query('SELECT * FROM BFPD_table WHERE user_id = "{}"'.format(te),conn)
                    df_new2 = df2[["Name_of_Legue","Year","Nationality","Balance_by_player","Balance_INFLACION"]]
                    df_new2["date2"] = pd.to_datetime(df2["Year"]).dt.strftime("%Y-%m-%d")
                    data_start = df_new2["Year"].min()
                    data_end = df_new2["Year"].max()
                    #st.write("data_start",data_start,"data_end",data_end)
                    def timestamp(t):
                        return pd.to_datetime(t).timestamp() * 1000
                    slider2 = alt.binding_range(name='cutoff:', min=timestamp(data_start), max=timestamp(data_end))
                    selector2 = alt.selection_single(name="SelectorName",fields=['cutoff'],bind=slider2,init={"cutoff": timestamp("2011-01-01")})
                    abssa = alt.Chart(df_new2).mark_bar(size=17).encode(
                        x='Year',
                        y=alt.Y('Balance_INFLACION',title =None),
                        color=alt.condition(
                            'toDate(datum.Year) < SelectorName.cutoff[0]',
                            alt.value('red'), alt.value('blue')
                        )
                    ).properties(
                        width=700,
                    ).add_selection(
                        selector2
                    )
                    st.write(abssa)
                    ##########################################################################################################
                    ## 3. Graph: animated bar chart (the while loop runs once; 'break' follows the button block).
                    st.markdown(html_BFPD_vizaulazacija3,unsafe_allow_html=True)
                    while True:
                        lines = alt.Chart(df_new).mark_bar(size=25).encode(
                            x=alt.X('Year',axis=alt.Axis(title='date')),
                            y=alt.Y('Balance_by_player',axis=alt.Axis(title='value'))
                        ).properties(
                            width=600,
                            height=300
                        )
                        def plot_animation(df_new):
                            lines = alt.Chart(df_new).mark_bar(size=25).encode(
                                x=alt.X('Year', axis=alt.Axis(title='date')),
                                y=alt.Y('Balance_by_player',axis=alt.Axis(title='value')),
                            ).properties(
                                width=600,
                                height=300
                            )
                            return lines
                        N = df_new.shape[0] # number of elements in the dataframe
                        burst = 6 # number of elements (months) to add to the plot
                        size = burst # size of the current dataset
                        line_plot = st.altair_chart(lines)
                        line_plot
                        start_btn = st.button('Start')
                        if start_btn:
                            for i in range(1,N):
                                step_df = df_new.iloc[0:size]
                                lines = plot_animation(step_df)
                                line_plot = line_plot.altair_chart(lines)
                                size = i + burst
                                if size >= N:
                                    size = N - 1
                                time.sleep(0.1)
                        break
                    ##########################################################################################################
                    ## 4. Graph: same animation for the inflation-corrected balance.
                    st.markdown(html_BFPD_vizaulazacija4,unsafe_allow_html=True)
                    while True:
                        lines = alt.Chart(df_new).mark_bar(size=25).encode(
                            x=alt.X('Year',axis=alt.Axis(title='date')),
                            y=alt.Y('Balance_INFLACION',axis=alt.Axis(title='value'))
                        ).properties(
                            width=600,
                            height=300
                        )
                        def plot_animation(df_new):
                            lines = alt.Chart(df_new).mark_bar(size=25).encode(
                                x=alt.X('Year', axis=alt.Axis(title='date')),
                                y=alt.Y('Balance_INFLACION',axis=alt.Axis(title='value')),
                            ).properties(
                                width=600,
                                height=300
                            )
                            return lines
                        N = df_new.shape[0] # number of elements in the dataframe
                        burst = 6 # number of elements (months) to add to the plot
                        size = burst # size of the current dataset
                        line_plot = st.altair_chart(lines)
                        line_plot
                        start_btn = st.button('Start',key='3wsadsa')
                        if start_btn:
                            for i in range(1,N):
                                step_df = df_new.iloc[0:size]
                                lines = plot_animation(step_df)
                                line_plot = line_plot.altair_chart(lines)
                                size = i + burst
                                if size >= N:
                                    size = N - 1
                                time.sleep(0.1)
                        break
                    st.success("Viusalise Datas")
            else:
                st.warning("file not found")
                st.info("Please procces data again !!")
    except Exception as e:
        st.write(e)
        st.write("Error, please resart Visaulsation checkboc !! ")
3eae24814131168ae2489dbf20f26a7c282e313c | Python | Python3pkg/Cerebrum | /cerebrum/neuralnet/elements/neuron.py | UTF-8 | 3,203 | 2.546875 | 3 | [
"MIT"
] | permissive | import random
import itertools
import time
import signal
from threading import Thread
from multiprocessing import Pool
import multiprocessing
POTENTIAL_RANGE = 110000 # Resting potential: -70 mV Membrane potential range: +40 mV to -70 mV --- Difference: 110 mV = 110000 microVolt --- https://en.wikipedia.org/wiki/Membrane_potential
ACTION_POTENTIAL = 15000 # Resting potential: -70 mV Action potential: -55 mV --- Difference: 15mV = 15000 microVolt --- https://faculty.washington.edu/chudler/ap.html
AVERAGE_SYNAPSES_PER_NEURON = 8200 # The average number of synapses per neuron: 8,200 --- http://www.ncbi.nlm.nih.gov/pubmed/2778101
# https://en.wikipedia.org/wiki/Neuron
class Neuron:
    """A toy neuron for the simulated cluster.

    Every instance registers itself in the class-level list ``Neuron.neurons``
    and holds a ``connections`` dict mapping ``id(other_neuron)`` to a random
    synaptic weight in [0.1, 1.0]. A worker thread targeting :meth:`activate`
    is created but deliberately not started.
    """

    # Class-level registry of every neuron ever constructed.
    neurons = []

    def __init__(self):
        self.connections = {}   # id(other neuron) -> synaptic weight
        self.potential = 0.0
        self.error = 0.0
        Neuron.neurons.append(self)
        # Thread is prepared but not started; callers may start it explicitly.
        self.thread = Thread(target=self.activate)

    def fully_connect(self):
        """Connect this neuron to every registered neuron it is not yet
        connected to (skipping itself).

        The slice offset assumes earlier neurons were already connected in
        registry order, so only the tail of the registry is visited.
        """
        for neuron in Neuron.neurons[len(self.connections):]:
            if id(neuron) != id(self):
                self.connections[id(neuron)] = round(random.uniform(0.1, 1.0), 2)

    def partially_connect(self):
        """Connect this neuron to up to 100 randomly chosen neurons, then
        print a short status report.

        Connections are only built when the neuron has none yet, so repeated
        calls are cheap no-ops apart from the report.
        """
        if len(self.connections) == 0:
            neuron_count = len(Neuron.neurons)
            # BUG FIX: random.sample raises ValueError when the sample size
            # exceeds the population, so clamp it to the registry size.
            elected = random.sample(Neuron.neurons, min(100, neuron_count))
            for neuron in elected:
                if id(neuron) != id(self):
                    self.connections[id(neuron)] = round(random.uniform(0.1, 1.0), 2)
        print("Neuron ID: " + str(id(self)))
        print("   Potential: " + str(self.potential))
        print("   Error: " + str(self.error))
        print("   Connections: " + str(len(self.connections)))

    def activate(self):
        """Worker-thread loop: keep (re)building partial connections.

        WARNING: this loop never terminates; it is only meant to run inside
        the daemon-style worker thread created in __init__.
        """
        while True:
            self.partially_connect()
class Supercluster:
    """Builds a population of ``size`` Neurons and wires each one up."""

    def __init__(self, size):
        # Constructing a Neuron registers it in Neuron.neurons as a side
        # effect, so a bare constructor call per iteration is enough.
        for _ in range(size):
            Neuron()
        print(str(size) + " neurons created.")
        self.n = 0
        self.build_connections()

    def build_connections(self):
        """Ask every registered neuron to build its random connections,
        printing a running counter."""
        for cell in Neuron.neurons:
            self.n += 1
            cell.partially_connect()
            print("Counter: " + str(self.n))
# Module-level demo run: builds a 100,000-neuron cluster on import.
# This is expensive (each neuron samples up to 100 partners and prints).
Supercluster(100000)
| true |
e44652d0e4e85676aea95ec8c2f6e3ab07d1d5bc | Python | JonnyCBB/RADDOSE-3D_GUI | /RaddoseInputWriter.py | UTF-8 | 3,682 | 2.59375 | 3 | [] | no_license | # these functions are designed to write RADDOSE-3D input files
# from the GUI output parameters
def writeCRYSTALBLOCK(currentCrystal):
raddose3dinputCRYSTALBLOCK = """
##############################################################################
# Crystal Block #
##############################################################################
Crystal
Type %s # Cuboid or Spherical
Dimensions %s %s %s # Dimensions of the crystal in X,Y,Z in um.
# Z is the beam axis, Y the rotation axis and
# X completes the right handed set
# (vertical if starting face-on).
PixelsPerMicron %s # This needs to be at least 10x the beam
# FWHM for a Gaussian beam.
# e.g. 20um FWHM beam -> 2um voxels -> 0.5 voxels/um
AbsCoefCalc %s # Absorption Coefficients Calculated using
# RADDOSE v2 (Paithankar et al. 2009)
# Example case for insulin:
UnitCell 78.02 78.02 78.02 # unit cell size: a, b, c
# alpha, beta and gamma angles default to 90
NumMonomers 24 # number of monomers in unit cell
NumResidues 51 # number of residues per monomer
ProteinHeavyAtoms Zn 2 S 6 # heavy atoms added to protein part of the
# monomer, i.e. S, coordinated metals,
# Se in Se-Met
SolventHeavyConc P 425 # concentration of elements in the solvent
# in mmol/l. Oxygen and lighter elements
# should not be specified
SolventFraction 0.64 # fraction of the unit cell occupied by solvent
""" %(currentCrystal.crystType,currentCrystal.crystDimX,currentCrystal.crystDimY,
currentCrystal.crystDimZ,currentCrystal.crystPixPerMic,currentCrystal.crystAbsorpCoeff)
return raddose3dinputCRYSTALBLOCK
def writeBEAMBLOCK(currentBeam):
    """Render the RADDOSE-3D 'Beam' input block for *currentBeam*.

    Reads beamType, beamFlux, the two-element beamFWHM, beamEnergy and the
    two-element beamRectColl from the beam object.
    """
    return f"""
##############################################################################
#                                  Beam Block                                #
##############################################################################

Beam
Type {currentBeam.beamType}               # can be Gaussian or TopHat
Flux {currentBeam.beamFlux}               # in photons per second (2e12 = 2 * 10^12)
FWHM {currentBeam.beamFWHM[0]} {currentBeam.beamFWHM[1]}            # in um, vertical by horizontal for a Gaussian beam
Energy {currentBeam.beamEnergy}           # in keV

Collimation Rectangular {currentBeam.beamRectColl[0]} {currentBeam.beamRectColl[1]} # Vertical/Horizontal collimation of the beam
                             # For 'uncollimated' Gaussians, 3xFWHM
                             # recommended
"""
def writeWEDGEBLOCK(currentWedge):
    """Render the RADDOSE-3D 'Wedge' input block for *currentWedge*.

    Reads angStart, angStop and exposTime from the wedge object.
    """
    return f"""
##############################################################################
#                                  Wedge Block                               #
##############################################################################

Wedge {currentWedge.angStart} {currentWedge.angStop}              # Start and End rotational angle of the crystal
                         # Start < End
ExposureTime {currentWedge.exposTime}          # Total time for entire angular range

# AngularResolution 2    # Only change from the defaults when using very
                         # small wedges, e.g 5.
"""
| true |
62bf756643693a58d0bb44b89296d373d70e20d9 | Python | ns-rokuyon/pytorch-webdataset-utils | /webdatasetutils/distributed.py | UTF-8 | 3,244 | 2.890625 | 3 | [
"MIT"
] | permissive | import torch
import random
import webdataset as wds
import warnings
from dataclasses import dataclass
from typing import Callable, List, Optional, Set
@dataclass
class DistributedShardInfo:
    """Summary of how a shard-url list was split; passed to the selector's
    callback after each `__call__`."""
    # Urls dropped so shards divide evenly across ranks/workers.
    unavailable_urls: Set[str]
    # Number of urls kept after trimming to a multiple of world_size.
    use_size_in_cluster: int
    # Number of urls (within this rank) kept after trimming to a multiple
    # of the DataLoader's num_workers.
    use_size_in_dataloader: int
    # Shard counts assigned to this rank and to this worker, respectively.
    n_urls_per_rank: int
    n_urls_per_worker: int
class DistributedShardSelector:
    """Shard selector of WebDataset in DDP

    Splits a list of shard urls across distributed ranks and, within a rank,
    across DataLoader workers. The list is trimmed so that every rank (and
    every worker) receives the same number of shards; trimmed-off urls are
    reported through the optional callback.

    Parameters
    ----------
    rank : int
        Rank ID in distributed training
    world_size : int
        Cluster size of distributed training
    shuffle : bool
        If true, first, given url list will be shuffled
    callback : Optional[Callable[[DistributedShardInfo], None]]
        Callback function to get splitted shard results
    """
    def __init__(
        self,
        rank: int,
        world_size: int,
        shuffle: bool = True,
        callback: Optional[Callable[["DistributedShardInfo"], None]] = None
    ) -> None:
        self.rank = rank
        self.world_size = world_size
        self.shuffle = shuffle
        self.callback = callback

    def __call__(self, urls: List[str]) -> List[str]:
        assert not isinstance(urls, str)
        rank, world_size = self.rank, self.world_size
        worker_info = torch.utils.data.get_worker_info()

        urls = urls.copy()
        n_all_urls = len(urls)

        if self.shuffle:
            random.shuffle(urls)

        unavailable_urls: Set[str] = set()

        # Normalize number of urls to distribute uniformly for each rank.
        use_size_in_cluster = n_all_urls - (n_all_urls % world_size)
        # BUG FIX: use update() — add() would try to insert the whole set as
        # a single (unhashable) element and raise TypeError.
        unavailable_urls.update(urls[use_size_in_cluster:])
        urls = urls[:use_size_in_cluster]

        # Split given urls based on distributed process rank
        urls = urls[rank::world_size]
        n_urls_per_rank = len(urls)

        if worker_info is None:
            num_workers = 1
            use_size_in_dataloader = n_urls_per_rank
        else:
            wid = worker_info.id
            num_workers = worker_info.num_workers
            if wid == 0 and n_urls_per_rank < num_workers:
                warnings.warn(f'num_workers {num_workers} > '
                              f'num_shards per rank {n_urls_per_rank}')

            # Normalize number of urls to distribute uniformly
            # for each dataloader's worker.
            use_size_in_dataloader = n_urls_per_rank - (
                n_urls_per_rank % num_workers
            )
            # BUG FIX: record the leftover urls *before* truncating the list;
            # the original truncated first, so the recorded slice was always
            # empty.
            unavailable_urls.update(urls[use_size_in_dataloader:])
            urls = urls[:use_size_in_dataloader]

            # Worker based splitting
            urls = urls[wid::num_workers]

        n_urls_per_worker = len(urls)

        if self.callback:
            self.callback(
                DistributedShardInfo(
                    unavailable_urls=unavailable_urls,
                    use_size_in_cluster=use_size_in_cluster,
                    use_size_in_dataloader=use_size_in_dataloader,
                    n_urls_per_rank=n_urls_per_rank,
                    n_urls_per_worker=n_urls_per_worker
                )
            )
        return urls
# Backward-compatible alias: the "splitter" name refers to the same class.
DistributedShardSplitter = DistributedShardSelector
| true |
ce2a84f84ba02677721f02bbce1628cc9df256b1 | Python | nralex/Python | /5-ExerciciosFuncoes/exercício05.py | UTF-8 | 868 | 4.03125 | 4 | [] | no_license | #####################################################################################################################
# Faça um programa com uma função chamada somaImposto. A função possui dois parâmetros formais: taxaImposto, que é #
# a quantia de imposto sobre vendas expressa em porcentagem e custo, que é o custo de um item antes do imposto. #
# A função “altera” o valor de custo para incluir o imposto sobre vendas. #
#####################################################################################################################
def somaImposto(taxaImposto, custo):
comImpostos = ((taxaImposto / 100) + 1) * custo
return comImpostos
taxa = float(input('Imposto: % '))
preço = float(input('Custo [sem impostos]: R$ '))
print(f'Custo com impostos: R$ {somaImposto(taxa, preço):.2f}') | true |
9fc7a44789523ad1d1edac8768f2445ee44789d5 | Python | tiangexiao/neural_network_code | /low_level_api/simple_linear_model.py | UTF-8 | 932 | 3.171875 | 3 | [] | no_license | """
定义两个可以更新的Variable:W和b
定义损失函数loss
使用迭代器更新loss
copy: https://github.com/MorvanZhou/tutorials/blob/master/tensorflowTUT/tf5_example2/full_code.py
"""
import numpy as np
import tensorflow as tf
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='weights')
biases = tf.Variable(tf.zeros([1]), name='biases')
y = Weights * x_data + biases
loss = tf.reduce_mean(tf.square(y-y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
#查看可以进行训练的参数
for variable in tf.trainable_variables():
print(variable)
sess = tf.Session()
sess.run(init)
for step in range(201):
sess.run(train)
if step % 20 == 0:
print(step, sess.run(Weights), sess.run(biases), sess.run(loss))
| true |
048db1a1882d50906556d458e366ec4ebb2a0a12 | Python | johnisawkward/VRoidBones | /util.py | UTF-8 | 1,695 | 2.578125 | 3 | [
"Unlicense"
] | permissive | import bpy
def unique_constraint(bone, t):
    """Return the bone's existing constraint of type *t*, creating one
    through ``bone.constraints.new`` only when none exists yet."""
    existing = next((c for c in bone.constraints if c.type == t), None)
    if existing is not None:
        return existing
    return bone.constraints.new(type=t)
def get_children(parent):
    """Collect every object in the current scene whose parent is *parent*
    (matched by name), excluding *parent* itself and unparented objects."""
    children = []
    for candidate in bpy.context.scene.objects:
        if candidate.name == parent.name or candidate.parent is None:
            continue
        if candidate.parent.name == parent.name:
            children.append(candidate)
    return children
def bone_has_effect(bone):
    """Return True when some child mesh of the active armature has at least
    one vertex assigned to the vertex group named after *bone*."""
    armature = bpy.context.object
    for child in get_children(armature):
        # Find the vertex-group index matching the bone's name, if any.
        group_index = None
        for group in child.vertex_groups:
            if group.name == bone.name:
                group_index = group.index
                break
        if group_index is None:
            continue
        # The group exists; check whether any vertex is actually assigned.
        for vertex in child.data.vertices:
            if group_index in [vg.group for vg in vertex.groups]:
                return True
    return False
def get_pose_bone(bone_name):
    """Resolve *bone_name* to a pose bone on the active object.

    Tries an exact name match first, then falls back to suffix matching so
    that prefixed bone names (e.g. '<prefix>_<name>' or
    '<prefix>_<side>_<name>') can still be found. Returns None when nothing
    matches.
    """
    pose_bones = bpy.context.object.pose.bones
    bone = None
    if bone_name in pose_bones:
        # Exact match wins.
        bone = pose_bones[bone_name]
    elif '_' not in bone_name:
        # Plain name: accept any bone whose name ends with '_<bone_name>'.
        for b in pose_bones:
            if b.name.endswith(f'_{bone_name}'):
                bone = b
                break
    else:
        # NOTE(review): split('_') raises ValueError for names with more
        # than one underscore — presumably inputs are '<name>_<side>';
        # confirm against callers.
        name, side = bone_name.split('_')
        if side not in {'L', 'R'}:
            for b in pose_bones:
                if b.name.endswith(f'_{name}'):
                    bone = b
                    break
        # A '_<side>_<name>' suffix match overrides any match found above.
        for b in pose_bones:
            if b.name.endswith(f'_{side}_{name}'):
                bone = b
                break
    return bone
3d8a8582a7790488009cbe153942eb2e669c51cd | Python | apoorvaish/mujoco-rl | /evolutionary-strategies/dl.py | UTF-8 | 1,567 | 2.90625 | 3 | [
"MIT"
] | permissive | import numpy as np
class Network:
    """Small two-layer (D -> M -> K) policy network.

    The output is squashed with tanh and scaled by ``action_max``, so every
    action component lies in [-action_max, action_max].
    """

    def __init__(self, D, M, K, action_max):
        # Layer sizes: D inputs, M hidden units, K outputs.
        self.D = D
        self.M = M
        self.K = K
        self.action_max = action_max

    def init(self):
        """Draw fan-in-scaled random weights and zero biases."""
        D, M, K = self.D, self.M, self.K
        self.W1 = np.random.randn(D, M) / np.sqrt(D)
        self.b1 = np.zeros(M)
        self.W2 = np.random.randn(M, K) / np.sqrt(M)
        self.b2 = np.zeros(K)

    def relu(self, x):
        """Elementwise rectified linear unit."""
        return x * (x > 0)

    def forward(self, X):
        """Run an (N, D) batch through the network; returns (N, K) actions."""
        hidden = self.relu(X.dot(self.W1) + self.b1)
        return np.tanh(hidden.dot(self.W2) + self.b2) * self.action_max

    def sample_action(self, x):
        """Map a single state of shape (D,) to a single action of shape (K,)."""
        # Promote to (1, D) so forward() sees the usual ML batch layout.
        batch = np.atleast_2d(x)
        return self.forward(batch)[0]

    def get_params(self):
        """Flatten all weights and biases into a single 1-D vector."""
        return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2])

    def get_params_dict(self):
        """Return the raw weight arrays keyed by layer name."""
        return {
            'W1': self.W1,
            'b1': self.b1,
            'W2': self.W2,
            'b2': self.b2,
        }

    def set_params(self, params):
        """Inverse of get_params: unflatten a 1-D vector into the weights."""
        D, M, K = self.D, self.M, self.K
        offset = D * M
        self.W1 = params[:offset].reshape(D, M)
        self.b1 = params[offset:offset + M]
        self.W2 = params[offset + M:offset + M + M * K].reshape(M, K)
        self.b2 = params[-K:]
15831f5f23fa51836b6a577e864f34ab6c90a1c2 | Python | jaimetorresl/ProyectoProgramacion | /ECG.py | UTF-8 | 10,784 | 2.796875 | 3 | [] | no_license | # Importamos las librerías necesarias
import numpy as np
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import scipy.optimize as opt
import scipy.integrate as inte
# Definimos la función F1
def F1(y1, y2, Trr):
    """Right-hand side dy1/dt of the limit-cycle oscillator: radial pull
    toward the unit circle plus rotation at angular rate 2*pi/Trr."""
    radius = np.sqrt(y1 ** 2 + y2 ** 2)
    omega = (2.0 * np.pi) / Trr
    return (1 - radius) * y1 - omega * y2
# Definimos la función F2
def F2(y1, y2, Trr):
    """Right-hand side dy2/dt of the limit-cycle oscillator: radial pull
    toward the unit circle plus rotation at angular rate 2*pi/Trr."""
    radius = np.sqrt(y1 ** 2 + y2 ** 2)
    omega = (2.0 * np.pi) / Trr
    return (1 - radius) * y2 + omega * y1
def F3(y1, y2, y3, a, b, ti, tMuestreo):
    """Right-hand side dz/dt of the ECG trace: five Gaussian bump terms
    located at the phase angles ti (P, Q, R, S, T waves), plus relaxation of
    y3 toward the baseline z0."""
    theta = np.arctan2(y1, y2)
    bump_sum = 0
    for idx in range(5):
        # Signed phase distance from the current angle to wave centre idx.
        dthetai = np.fmod(theta - ti[idx], 2 * np.pi) * -1
        bump_sum += a[idx] * dthetai * np.exp(-(dthetai ** 2 / (2 * (b[idx] ** 2))))
    # Baseline wander term.
    z0 = 0.15 * np.sin(2 * np.pi * 0.25 * tMuestreo)
    return -bump_sum - (y3 - z0)
def EulerForward(y1, y2, y3, FrecuenciaCardiaca=80, NumLatidos=12, FrecuenciaMuestreo=360, a=[1.2,-5.0,30.0,-7.5,0.75], b=[0.25,0.1,0.1,0.1,0.4], ti=[(-1/3)*np.pi,(-1/12)*np.pi,0,(1/12)*np.pi, (1/2)*np.pi]):
    """Integrate the 3-variable synthetic-ECG ODE system with the explicit
    (forward) Euler scheme.

    :param y1, y2, y3: initial conditions for the oscillator pair and the
        ECG amplitude
    :param FrecuenciaCardiaca: heart rate in beats per minute
    :param NumLatidos: number of beats (defines the final time Tf)
    :param FrecuenciaMuestreo: sampling frequency; step size h = 1/Fs
    :param a, b, ti: amplitudes, widths and phase centres of the 5 wave bumps
    :return: (time array, simulated ECG array)
    """
    h = 1 / FrecuenciaMuestreo
    # Time grid from 0 to NumLatidos (inclusive) in steps of h.
    T = np.arange(0.0, NumLatidos + h, h)
    meanFc = 60 / FrecuenciaCardiaca
    # Random RR intervals (5% jitter) feed the angular-rate term W = 2*pi/Trr.
    tRR = np.random.normal(meanFc, meanFc * 0.05, np.size(T))
    steps = len(T)
    Y1 = np.zeros(steps)
    Y2 = np.zeros(steps)
    Z = np.zeros(steps)
    Y1[0], Y2[0], Z[0] = y1, y2, y3
    for k in range(1, steps):
        Y1[k] = Y1[k - 1] + h * F1(Y1[k - 1], Y2[k - 1], tRR[k])
        Y2[k] = Y2[k - 1] + h * F2(Y1[k - 1], Y2[k - 1], tRR[k])
        Z[k] = Z[k - 1] + h * F3(Y1[k - 1], Y2[k - 1], Z[k - 1], a, b, ti, FrecuenciaMuestreo)
    return T, Z
def EulerBack(y1, y2, y3, FrecuenciaCardiaca=60, NumLatidos=10, FrecuenciaMuestreo=360, a=[1.2,-5.0,30.0,-7.5,0.75], b=[0.25,0.1,0.1,0.1,0.4], ti=[(-1/3)*np.pi,(-1/12)*np.pi,0,(1/12)*np.pi, (1/2)*np.pi]):
    """Integrate the 3-variable synthetic-ECG ODE system with a semi-implicit
    Euler scheme: y1/y2 advance explicitly, then the z-update evaluates F3 at
    the freshly updated y1/y2.

    :param y1, y2, y3: initial conditions
    :param FrecuenciaCardiaca: heart rate in beats per minute
    :param NumLatidos: number of beats (defines the final time Tf)
    :param FrecuenciaMuestreo: sampling frequency; step size h = 1/Fs
    :param a, b, ti: amplitudes, widths and phase centres of the 5 wave bumps
    :return: (time array, simulated ECG array)
    """
    h = 1 / FrecuenciaMuestreo
    # Time grid from 0 to NumLatidos (inclusive) in steps of h.
    T = np.arange(0.0, NumLatidos + h, h)
    meanFc = 60 / FrecuenciaCardiaca
    # Random RR intervals (5% jitter) feed the angular-rate term W = 2*pi/Trr.
    tRR = np.random.normal(meanFc, meanFc * 0.05, np.size(T))
    steps = len(T)
    Y1 = np.zeros(steps)
    Y2 = np.zeros(steps)
    Y3 = np.zeros(steps)
    Y1[0], Y2[0], Y3[0] = y1, y2, y3
    for k in range(1, steps):
        Y1[k] = Y1[k - 1] + h * F1(Y1[k - 1], Y2[k - 1], tRR[k - 1])
        Y2[k] = Y2[k - 1] + h * F2(Y1[k - 1], Y2[k - 1], tRR[k - 1])
        # BUG FIX: evaluate F3 at the *previous* z value Y3[k-1]. The
        # original passed Y3[k], which is still zero-initialised at this
        # point, so the baseline-relaxation term -(y3 - z0) was computed
        # against 0 instead of the current trajectory.
        Y3[k] = Y3[k - 1] + h * F3(Y1[k], Y2[k], Y3[k - 1], a, b, ti, FrecuenciaMuestreo)
    return T, Y3
def EulerMod(y1, y2, y3, FrecuenciaCardiaca=60, NumLatidos=10, FrecuenciaMuestreo=360, a=[1.2,-5.0,30.0,-7.5,0.75], b=[0.25,0.1,0.1,0.1,0.4], ti=[(-1/3)*np.pi,(-1/12)*np.pi,0,(1/12)*np.pi, (1/2)*np.pi]):
    """Integrate the 3-variable synthetic-ECG ODE system with the modified
    Euler (Heun predictor-corrector) scheme.

    BUG FIX: the corrector in the original averaged the slope at the
    previous point with the slope evaluated at the *zero-initialised*
    current array entries (Y1[iter], Y2[iter], Y3[iter] were still 0 on the
    right-hand side). A forward-Euler predictor now supplies the endpoint
    estimate, as Heun's method requires.

    :param y1, y2, y3: initial conditions
    :param FrecuenciaCardiaca: heart rate in beats per minute
    :param NumLatidos: number of beats (defines the final time Tf)
    :param FrecuenciaMuestreo: sampling frequency; step size h = 1/Fs
    :param a, b, ti: amplitudes, widths and phase centres of the 5 wave bumps
    :return: (time array, simulated ECG array)
    """
    h = 1 / FrecuenciaMuestreo
    # Time grid from 0 to NumLatidos (inclusive) in steps of h.
    T = np.arange(0.0, NumLatidos + h, h)
    meanFc = 60 / FrecuenciaCardiaca
    # Random RR intervals (5% jitter) feed the angular-rate term W = 2*pi/Trr.
    tRR = np.random.normal(meanFc, meanFc * 0.05, np.size(T))
    steps = len(T)
    Y1 = np.zeros(steps)
    Y2 = np.zeros(steps)
    Y3 = np.zeros(steps)
    Y1[0], Y2[0], Y3[0] = y1, y2, y3
    for k in range(1, steps):
        # Slopes at the left endpoint.
        s1 = F1(Y1[k - 1], Y2[k - 1], tRR[k])
        s2 = F2(Y1[k - 1], Y2[k - 1], tRR[k])
        s3 = F3(Y1[k - 1], Y2[k - 1], Y3[k - 1], a, b, ti, FrecuenciaMuestreo)
        # Predictor: explicit Euler estimate of the right endpoint.
        p1 = Y1[k - 1] + h * s1
        p2 = Y2[k - 1] + h * s2
        p3 = Y3[k - 1] + h * s3
        # Corrector: trapezoidal average of the slopes at both endpoints.
        Y1[k] = Y1[k - 1] + (h / 2.0) * (s1 + F1(p1, p2, tRR[k]))
        Y2[k] = Y2[k - 1] + (h / 2.0) * (s2 + F2(p1, p2, tRR[k]))
        Y3[k] = Y3[k - 1] + (h / 2.0) * (s3 + F3(p1, p2, p3, a, b, ti, FrecuenciaMuestreo))
    return T, Y3
def RK2(y1,y2,y3, FrecuenciaCardiaca = 60, NumLatidos = 10, FrecuenciaMuestreo = 360, a=[1.2,-5.0,30.0,-7.5,0.75], b=[0.25,0.1,0.1,0.1,0.4],ti=[(-1/3)*np.pi,(-1/12)*np.pi,0,(1/12)*np.pi, (1/2)*np.pi]):
    """Integrate the 3-variable synthetic-ECG ODE system with a 2nd-order
    Runge-Kutta (Heun) scheme.

    :param y1, y2, y3: initial conditions
    :param FrecuenciaCardiaca: heart rate in beats per minute
    :param NumLatidos: number of beats (defines the final time Tf)
    :param FrecuenciaMuestreo: sampling frequency; step size h = 1/Fs
    :param a, b, ti: amplitudes, widths and phase centres of the 5 wave bumps
    :return: (time array, simulated ECG array)
    """
    # Define the integration step size
    h = 1 / FrecuenciaMuestreo
    # Initial conditions for Y1, Y2 and Y3
    Y10 = y1
    Y20 = y2
    Y30 = y3
    # Initial time
    To = 0.0
    # Final time
    Tf = NumLatidos
    meanFc = 60 / FrecuenciaCardiaca
    # Time grid from To to Tf (inclusive) in steps of h
    T = np.arange(To, Tf + h, h)
    # Random RR intervals (5% jitter) used for the angular-rate term
    tRR = np.random.normal(meanFc, meanFc * 0.05, np.size(T))
    # Arrays holding the estimated trajectories at each step
    Y1EulerRK2 = np.zeros(len(T))
    Y2EulerRK2 = np.zeros(len(T))
    Y3EulerRK2 = np.zeros(len(T))
    Y1EulerRK2[0] = Y10
    Y2EulerRK2[0] = Y20
    Y3EulerRK2[0] = Y30
    for iter in range(1, len(T)):
        # First-stage slopes at the left endpoint.
        k11 = F1(Y1EulerRK2[iter-1], Y2EulerRK2[iter-1],tRR[iter-1])
        k21 = F2(Y1EulerRK2[iter-1] , Y2EulerRK2[iter-1], tRR[iter-1])
        k31 = F3(Y1EulerRK2[iter-1],Y2EulerRK2[iter-1],Y3EulerRK2[iter-1],a,b,ti,FrecuenciaMuestreo)
        # Second-stage slopes at the Euler-predicted right endpoint.
        # NOTE(review): the third argument of F1/F2 is the RR interval, yet
        # tRR[iter-1] + h treats it as a time value — presumably a slip
        # carried over from the textbook t + h pattern; confirm.
        k12 = F1(Y1EulerRK2[iter-1]+k11*h, Y2EulerRK2[iter-1] + k21*h,tRR[iter-1] +h)
        k22 = F2(Y1EulerRK2[iter-1]+k11*h, Y2EulerRK2[iter-1] + k21*h,tRR[iter-1] +h)
        k32 = F3(Y1EulerRK2[iter-1]+k11*h, Y2EulerRK2[iter-1] + k21*h,Y3EulerRK2[iter-1] + k31*h,a,b,ti,FrecuenciaMuestreo)
        # Trapezoidal combination of the two stages.
        Y1EulerRK2[iter] = Y1EulerRK2[iter-1] + (h/2.0)*(k11 + k12)
        Y2EulerRK2[iter] = Y2EulerRK2[iter-1] + (h/2.0) * (k21+k22)
        Y3EulerRK2[iter] = Y3EulerRK2[iter-1] + (h/2.0) *(k31 + k32)
    return T,Y3EulerRK2
def RK4(y1,y2,y3, FrecuenciaCardiaca = 60, NumLatidos = 10, FrecuenciaMuestreo = 360, a=[1.2,-5.0,30.0,-7.5,0.75], b=[0.25,0.1,0.1,0.1,0.4],ti=[(-1/3)*np.pi,(-1/12)*np.pi,0,(1/12)*np.pi, (1/2)*np.pi]):
    """Integrate the ECG model with the classical 4th-order Runge-Kutta method.

    :param y1: initial value of the first state variable.
    :param y2: initial value of the second state variable.
    :param y3: initial value of the third state variable (the ECG trace).
    :param FrecuenciaCardiaca: heart rate in beats per minute.
    :param NumLatidos: number of beats; also the final simulation time.
    :param FrecuenciaMuestreo: sampling frequency in Hz; the step is 1/Fs.
    :param a: PQRST amplitude parameters forwarded to F3.
    :param b: PQRST width parameters forwarded to F3.
    :param ti: PQRST angular positions forwarded to F3.
    :return: tuple (time array T, Y3 trajectory).

    Fixes over the previous version: stage 4 now takes a full step
    (y + h*k3, t + h) instead of a half step, stages 2/3 advance Y3 by a
    half step like the other states, and the final combination uses the
    classical weights (k1 + 2*k2 + 2*k3 + k4) / 6.

    NOTE(review): the mutable default arguments (a, b, ti) are shared across
    calls; callers must not mutate them.
    """
    # Step size derived from the sampling frequency.
    h = 1 / FrecuenciaMuestreo
    # Initial conditions for Y1, Y2 and Y3.
    Y10 = y1
    Y20 = y2
    Y30 = y3
    # Initial time.
    To = 0.0
    # Final time.
    Tf = NumLatidos
    meanFc = 60 / FrecuenciaCardiaca
    # Time grid from To to Tf in steps of h.
    T = np.arange(To, Tf + h, h)
    # Randomized R-R intervals: 5% relative jitter around the mean.
    tRR = np.random.normal(meanFc, meanFc * 0.05, np.size(T))
    # Arrays accumulating the per-iteration estimates of each state.
    Y1EulerRK4 = np.zeros(len(T))
    Y2EulerRK4 = np.zeros(len(T))
    Y3EulerRK4 = np.zeros(len(T))
    Y1EulerRK4[0] = Y10
    Y2EulerRK4[0] = Y20
    Y3EulerRK4[0] = Y30
    for iter in range(1, len(T)):
        t0 = tRR[iter - 1]
        y1p = Y1EulerRK4[iter - 1]
        y2p = Y2EulerRK4[iter - 1]
        y3p = Y3EulerRK4[iter - 1]
        # Stage 1: slopes at the start of the interval.
        k11 = F1(y1p, y2p, t0)
        k21 = F2(y1p, y2p, t0)
        k31 = F3(y1p, y2p, y3p, a, b, ti, FrecuenciaMuestreo)
        # Stage 2: slopes at the midpoint using stage-1 slopes.
        k12 = F1(y1p + 0.5 * h * k11, y2p + 0.5 * h * k21, t0 + 0.5 * h)
        k22 = F2(y1p + 0.5 * h * k11, y2p + 0.5 * h * k21, t0 + 0.5 * h)
        k32 = F3(y1p + 0.5 * h * k11, y2p + 0.5 * h * k21, y3p + 0.5 * h * k31, a, b, ti, FrecuenciaMuestreo)
        # Stage 3: slopes at the midpoint using stage-2 slopes.
        k13 = F1(y1p + 0.5 * h * k12, y2p + 0.5 * h * k22, t0 + 0.5 * h)
        k23 = F2(y1p + 0.5 * h * k12, y2p + 0.5 * h * k22, t0 + 0.5 * h)
        k33 = F3(y1p + 0.5 * h * k12, y2p + 0.5 * h * k22, y3p + 0.5 * h * k32, a, b, ti, FrecuenciaMuestreo)
        # Stage 4: slopes at the end of the interval (full step).
        k14 = F1(y1p + h * k13, y2p + h * k23, t0 + h)
        k24 = F2(y1p + h * k13, y2p + h * k23, t0 + h)
        k34 = F3(y1p + h * k13, y2p + h * k23, y3p + h * k33, a, b, ti, FrecuenciaMuestreo)
        # Classical RK4 weighted combination.
        Y1EulerRK4[iter] = y1p + (h / 6.0) * (k11 + 2 * k12 + 2 * k13 + k14)
        Y2EulerRK4[iter] = y2p + (h / 6.0) * (k21 + 2 * k22 + 2 * k23 + k24)
        Y3EulerRK4[iter] = y3p + (h / 6.0) * (k31 + 2 * k32 + 2 * k33 + k34)
    return T,Y3EulerRK4
def findpeaks(z,tMuestreo=360):
    """Estimate the mean heart rate (bpm) of signal *z*.

    Detects peaks higher than 0.02, converts their indices to seconds via
    the sampling rate, and averages 60/RR over the successive R-R
    intervals.

    :param z: 1-d signal array (e.g. a synthesized ECG).
    :param tMuestreo: sampling frequency in Hz.
    :return: mean beats-per-minute (also printed, as before).
    """
    peaks, _ = find_peaks(z, height=0.02)
    time = np.arange(len(z)) / tMuestreo
    # R-R intervals: time differences between consecutive detected peaks.
    taco = np.diff(time[peaks])
    tacobpm = 60 / taco
    print(np.mean(tacobpm))
    return np.mean(tacobpm)
def noise(z):
    """Add zero-mean Gaussian measurement noise (sigma = 0.0012) to *z*."""
    return z + np.random.normal(0,0.0012,z.shape)
def exportarDatos(root,metodo,z,t,a,b):
    """Write a simulation result to "<root>.txt".

    File layout, one value per line:
        line 1: method name
        line 2: number of samples len(z)
        then len(z) signal values, len(z) time values,
        the a coefficients and the b coefficients.

    :param root: output path without the ".txt" extension.
    :param metodo: name of the integration method used.
    :param z: signal samples.
    :param t: time samples (same length as z).
    :param a: amplitude coefficients.
    :param b: width coefficients.
    """
    # "with" guarantees the file is closed even if a write fails
    # (the previous version leaked the handle on error).
    with open(root + ".txt", "w") as archivo:
        archivo.write(metodo + "\n")
        archivo.write(str(len(z)) + "\n")
        for serie in (z, t, a, b):
            for valor in serie:
                archivo.write(str(valor) + "\n")
def importarDatos(root):
    """Read a file previously written by exportarDatos.

    :param root: full path of the file to read (including extension).
    :return: tuple (metodo, z, t, a, b). *metodo* keeps its trailing
        newline, matching the historical behaviour of this reader.
    """
    # "with" guarantees the handle is closed (the previous version never
    # closed the file).
    with open(root, "r") as archivo:
        metodo = archivo.readline()
        lenz = int(archivo.readline())
        z = [float(archivo.readline()) for _ in range(lenz)]
        t = [float(archivo.readline()) for _ in range(lenz)]
        # The coefficient vectors always have exactly five entries.
        a = [float(archivo.readline()) for _ in range(5)]
        b = [float(archivo.readline()) for _ in range(5)]
    return metodo, z, t, a, b
| true |
4916e6c3d63b567e2492c46a4f563c02d94e3c21 | Python | DPBayes/DP-HMC-experiments | /metrics.py | UTF-8 | 3,480 | 2.890625 | 3 | [
"MIT"
] | permissive | import numpy as np
import numba
import timeit
def total_mean_error(samples, true_samples):
    """
    Return the Euclidean distance between the means of two given samples.
    """
    # Euclidean norm of the per-component mean error, taken along axis 0.
    return np.linalg.norm(component_mean_error(samples, true_samples), axis=0)
def component_mean_error(samples, true_samples):
    """
    Return the difference between the means of the two given samples.
    """
    true_mean = np.mean(true_samples, axis=0).reshape(-1, 1)
    return np.mean(samples, axis=0) - true_mean
def component_var_error(samples, true_samples):
    """
    Return the difference between the variances of the two given samples.
    """
    true_var = np.var(true_samples, axis=0).reshape(-1, 1)
    return np.var(samples, axis=0) - true_var
def split_r_hat(chains):
    """
    Compute split-R-hat for the given chains.

    Each chain is split into halves that are treated as separate chains,
    then the ordinary R-hat is computed on the doubled set.

    Parameters
    ----------
    chains : ndarray
        The chains as an array of shape (num_samples, num_dimensions, num_chains).
    """
    n_samples = chains.shape[0]
    # An odd sample count cannot split evenly; drop the trailing sample.
    if n_samples % 2:
        chains = chains[:n_samples - 1]
    first_half, second_half = np.array_split(chains, 2, axis=0)
    return r_hat(np.concatenate((first_half, second_half), axis=2))
def r_hat(chains):
    """
    Compute the Gelman-Rubin R-hat diagnostic for the given chains.

    Parameters
    ----------
    chains : ndarray
        The chains as an array of shape (num_samples, num_dimensions, num_chains).
    """
    # Reorder to (chain, sample, dimension) so per-chain statistics are
    # simple reductions over axis 1.
    per_chain = np.transpose(chains, axes=(2, 0, 1))
    num_chains, num_samples, _ = per_chain.shape
    chain_means = per_chain.mean(axis=1)
    grand_mean = chain_means.mean(axis=0)
    # Between-chain variance estimate B.
    between = num_samples / (num_chains - 1) * ((chain_means - grand_mean) ** 2).sum(axis=0)
    # Within-chain variance estimate W (unbiased per chain, then averaged).
    within = per_chain.var(axis=1, ddof=1).mean(axis=0)
    # Pooled posterior-variance estimate.
    pooled = (num_samples - 1) / num_samples * within + between / num_samples
    return np.sqrt(pooled / within)
def mmd(samples, true_samples):
    """
    Return MMD between two samples, one value per chain.

    Both arguments must be arrays either of shape
    (num_samples, num_dimensions, num_chains),
    or of shape (num_samples, num_dimensions), which is treated as if
    num_chains = 1.

    Returns
    -------
    ndarray
        MMD for each chain.
    """
    if len(samples.shape) == 2:
        # Single chain: promote to the 3-d layout used below. (The old
        # code set chains = 1 but still indexed axis 2, which raised an
        # IndexError for 2-d input.)
        samples = samples[:, :, np.newaxis]
    elif len(samples.shape) != 3:
        raise ValueError("samples must be 2 or 3-dimensional")
    n, dim, chains = samples.shape
    mmd = np.zeros(chains)
    for i in range(chains):
        mmd[i] = numba_mmd(np.asarray(samples[:, :, i]), np.asarray(true_samples))
    return mmd
@numba.njit
def kernel(x1, x2, sigma):
    # Gaussian (RBF) kernel: exp(-||x1 - x2||^2 / (2 * sigma^2)).
    return np.exp(-np.sum((x1 - x2)**2) / (2 * sigma**2))
@numba.njit
def numba_mmd(sample1, sample2):
    # Bandwidth by the median heuristic: median distance between 500
    # randomly paired points (drawn with replacement) from the samples.
    subset1 = sample1[np.random.choice(sample1.shape[0], 500, replace=True), :]
    subset2 = sample2[np.random.choice(sample2.shape[0], 500, replace=True), :]
    distances = np.sqrt(np.sum((subset1 - subset2)**2, axis=1))
    sigma = np.median(distances)
    # NOTE(review): sigma can be 0 when the paired points coincide, which
    # would divide by zero inside kernel(); confirm inputs always vary.
    n = sample1.shape[0]
    m = sample2.shape[0]
    # Within-sample terms: sum the kernel over unordered pairs i < j.
    term1 = 0.0
    for i in range(0, n):
        for j in range(i + 1, n):
            term1 += kernel(sample1[i, :], sample1[j, :], sigma)
    term2 = 0.0
    for i in range(0, m):
        for j in range(i + 1, m):
            term2 += kernel(sample2[i, :], sample2[j, :], sigma)
    # Cross term over all (i, j) pairs between the two samples.
    term3 = 0.0
    for i in range(n):
        for j in range(m):
            term3 += kernel(sample1[i, :], sample2[j, :], sigma)
    # Unbiased MMD estimate; abs() guards tiny negative values before sqrt.
    return np.sqrt(np.abs(2 * term1 / (n * (n - 1)) + 2 * term2 / (m * (m - 1)) - 2 * term3 / (n * m)))
| true |
d9cfba658a2fbffe1ce616ac170ef4cba97f0178 | Python | frotaur/SmartFish | /Vect2D/unit2DVect.py | UTF-8 | 1,161 | 2.859375 | 3 | [] | no_license | import unittest
import Vect2D as v
class testVect(unittest.TestCase):
    """Unit tests for the 2-d vector type Vect2D.

    Covers string formatting, arithmetic operators (+, * as dot product,
    ^ as 2-d cross product, scalar multiplication), tolerant float
    equality, and the polar-coordinate accessors r / phi.
    """
    def testStr(self):
        a = v.Vect2D((1,2))
        self.assertEqual("(1,2)",str(a))
    def testAddandEqual(self):
        a = v.Vect2D((1,2))
        b = v.Vect2D((2,6))
        self.assertEqual(v.Vect2D((3,8)),a+b)
    def testDot(self):
        # "*" between two vectors is the dot product: 1*2 + 2*6 = 14.
        a = v.Vect2D((1,2))
        b = v.Vect2D((2,6))
        self.assertEqual(14,a*b)
    def testCross(self):
        # "^" is the scalar 2-d cross product; it is antisymmetric.
        a = v.Vect2D((1,2))
        b = v.Vect2D((2,6))
        self.assertEqual(2,a^b)
        self.assertEqual(-2,b^a)
    def testScalMult(self):
        # Scalar multiplication works from both sides.
        a = v.Vect2D([1,2])
        self.assertEqual(v.Vect2D([4,8]),4*a)
        self.assertEqual(v.Vect2D([0.5,1]),a*0.5)
    def testequalityFloat(self):
        # Equality tolerates tiny float error (1e-14) but not 1e-5.
        a = v.Vect2D([0.2,.34])
        b = (1+1e-5)*a
        self.assertNotEqual(a,b)
        b = (1+1e-14)*a
        self.assertEqual(a,b)
    def testchangeNormwithR(self):
        # Setting r rescales the vector while preserving its direction
        # (cross product with the original stays zero).
        a = v.Vect2D([1,2])
        b = v.Vect2D([1,2])
        a.r = 3
        self.assertEqual(3,a.norm())
        self.assertEqual(0,b^a)
    def testpolarCoord(self):
        # phi is in degrees; mutating x/y/phi keeps polar and cartesian
        # representations consistent.
        a = v.Vect2D([1,0])
        self.assertEqual(a.r,1)
        self.assertEqual(a.phi,0)
        a.y=-1
        self.assertEqual(a.phi%360,315)
        a.phi = 90
        self.assertEqual(v.Vect2D([0,1]),a)
        a = v.Vect2D(25,0.0)
        self.assertEqual(a.phi,0)
unittest.main() | true |
65cd65a8e0fe21143247b3fa7764a5ce0ce7029d | Python | priteshmehta/automation_framework | /helpers/element.py | UTF-8 | 1,698 | 2.625 | 3 | [] | no_license | from selenium import webdriver
from selenium.common.exceptions import (InvalidElementStateException,
NoSuchElementException,
StaleElementReferenceException,
TimeoutException)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
class EC():
    """Accumulates expected conditions together with their descriptions."""

    def __init__(self):
        self.conditions = []
        self._description = []

    def copy(self):
        """Return a shallow duplicate whose lists are independent of this one."""
        clone = EC()
        clone.conditions = self.conditions[:]
        clone._description = self._description[:]
        return clone
class Element(object):
    """Locator builder that wraps a selenium driver plus a (by, value) query.

    NOTE(review): ``by``, ``value``, ``parent`` and ``name`` are accepted by
    the constructor but never stored; only ``driver`` and a fresh ``EC`` are
    kept. Confirm whether that is intentional.
    """
    def __init__(self, driver, by=None, value=None, parent=None, name=None):
        """
        by(BY): the selenium BY
        value(str): query string
        """
        super(Element, self).__init__()
        self.driver = driver
        self.conditions = EC()

    def element_by_id(self, value, name):
        # Locate a child element by its HTML id attribute.
        return self._element(By.ID, value, name)

    def element_by_selector(self, value, name):
        # Locate a child element by CSS selector.
        return self._element(By.CSS_SELECTOR, value, name)

    def element_by_xpath(self, value, name):
        # Locate a child element by XPath expression.
        return self._element(By.XPATH, value, name)

    def element_by_class(self, value, name):
        # Locate a child element by CSS class name.
        return self._element(By.CLASS_NAME, value, name)

    def _element(self, by, value, name):
        # Build a child Element bound to the same driver, with self as parent.
        return Element(self.driver, by=by, value=value, parent=self, name=name)
class Page(Element):
    """An Element representing a whole page; navigates to *url* on creation."""
    def __init__(self, driver, url, name):
        super(Page, self).__init__(driver, name=name)
        self.url = url
        # Immediately load the page in the browser.
        self.driver.get(url)
| true |
6133573bf90afaf0980a9fb44e533731a202e7f9 | Python | jaraco/calendra | /calendra/america/el_salvador.py | UTF-8 | 576 | 2.734375 | 3 | [
"MIT"
] | permissive | from ..core import WesternCalendar
from ..registry_tools import iso_register
@iso_register('SV')
class ElSalvador(WesternCalendar):
    "El Salvador"
    # Civil holidays
    include_labour_day = True
    # Christian holidays
    include_holy_thursday = True
    include_good_friday = True
    include_easter_saturday = True
    # Country-specific fixed-date holidays appended to the base calendar's.
    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (5, 10, "Mothers' Day"),
        (6, 17, "Fathers' Day"),
        (8, 6, "Celebrations of San Salvador"),
        (9, 15, "Independence Day"),
        (11, 2, "All Saints Day"),
    )
| true |
3572bcf71c21890c329436f91463020127e6692b | Python | avhirupc/LeetCode | /problems/Pattern : String/ZigZag Conversion.py | UTF-8 | 1,268 | 2.90625 | 3 | [] | no_license | from collections import deque
from itertools import cycle
class Solution(object):
    def convert(self, s, numRows):
        """Return *s* rewritten in zigzag order over *numRows* rows.

        Characters are dealt into row buckets following the
        down-then-up zigzag cycle, then the rows are concatenated.

        :type s: str
        :type numRows: int
        :rtype: str
        """
        # Row visiting order for one zigzag period: down, then back up.
        zigzag_rows = list(range(numRows)) + list(range(numRows - 2, 0, -1))
        buckets = [[] for _ in range(numRows)]
        for ch, row in zip(s, cycle(zigzag_rows)):
            buckets[row].append(ch)
        return "".join("".join(bucket) for bucket in buckets)
from collections import deque
from itertools import cycle
class Solution(object):
    def convert(self, s, numRows):
        """Return *s* rewritten in zigzag order over *numRows* rows.

        Walks the string once, appending each character to the current
        row and bouncing the row index between the top and bottom rows.

        :type s: str
        :type numRows: int
        :rtype: str
        """
        # A single row, or more rows than characters, leaves s unchanged.
        if numRows == 1 or numRows >= len(s):
            return s
        rows = [""] * numRows
        row, direction = 0, 1
        for ch in s:
            rows[row] += ch
            if row == 0:
                direction = 1
            elif row == numRows - 1:
                direction = -1
            row += direction
        return "".join(rows)
| true |
ba94de1f5da424d4c4d6f3e49dc4b9262e1ff79c | Python | PrajaktaSelukar/Sorting-Visualizer | /Quick Sort/sortingAlgorithms_new.py | UTF-8 | 3,878 | 3.328125 | 3 | [] | no_license | #Tkinter is used for developing GUI
from tkinter import *
from tkinter import ttk
import random
from BubbleSort import bubble_sort
from QuickSort import quick_sort
# --- Main application window setup ---
# root is the name of the main window object
root = Tk()
root.title('Sorting Algorithm Visualizer')
#setting the minimum size of the root window
root.minsize(900, 600)
root.config(bg='black')
# Tk variable backing the algorithm-selection combobox below.
selected_algo = StringVar()
data = [] #make data global since we are using two buttons for generating and starting algo
def drawData(data, colorArray):
    """Render *data* as a bar chart on the global canvas.

    :param data: list of integers to visualize.
    :param colorArray: one fill color per bar (used by the sort
        callbacks to highlight compared/sorted elements).
    """
    #delete the previous input
    canvas.delete("all")
    #set the canvas dimension
    c_height = 380
    c_width = 900
    # Bar width adapts to the dataset size.
    x_width = c_width / (len(data) + 1)
    offset = 30
    spacing = 10
    #normalize the data to match the bars acc to canvas height
    #normalize would help to make bar heights of (1, 2, 4, 6) same as (10, 20, 40, 60)
    normalizedData = [i / max(data) for i in data]
    #for i, height in enumerate(data):
    for i, height in enumerate(normalizedData):
        #set top left corner
        x0 = i * x_width + offset + spacing
        #y0 = c_height - height
        y0 = c_height - height * 340
        #set bottom right corner
        x1 = (i+1) * x_width + offset #this time without spacing
        y1 = c_height
        canvas.create_rectangle(x0, y0, x1, y1, fill=colorArray[i])
        canvas.create_text(x0+2, y0, anchor=SW, text=str(data[i]))
    root.update_idletasks()
def Generate():
    """Build a new random dataset from the slider settings and draw it red."""
    global data #to access global data
    minValue = int(minEntry.get())
    maxValue = int(maxEntry.get())
    size = int(sizeEntry.get())
    data = [] #first create empty dataset
    for _ in range(size):
        data.append(random.randrange(minValue, maxValue+1))
    drawData(data, ['red' for x in range(len(data))])
def StartAlgorithm():
    """Run the algorithm selected in the combobox on the current dataset.

    NOTE(review): 'Merge Sort' is offered in the combobox but has no
    branch here, so selecting it does nothing.
    """
    global data
    # Nothing to sort until Generate() has been pressed.
    if not data:
        return
    if (algoMenu.get() == 'Quick Sort'):
        quick_sort(data, 0, len(data)-1, drawData, speedScale.get())
        drawData(data, ['green' for x in range(len(data))])
    elif (algoMenu.get() == 'Bubble Sort'):
        bubble_sort(data, drawData, speedScale.get())
#frame / base layout
UI_frame = Frame(root, width=900, height=200, bg='grey')
UI_frame.grid(row=0, column=0, padx=10, pady=5)
canvas = Canvas(root, width=900, height=380, bg='white')
canvas.grid(row=1, column=0, padx=10, pady=5)
#User Interface Area
#Row=0: algorithm picker, speed slider and Start button.
Label(UI_frame, text="Algorithm: ", bg='grey').grid(row=0, column=0, padx=5, pady=5, sticky=W)
algoMenu = ttk.Combobox(UI_frame, textvariable=selected_algo, values=['Bubble Sort', 'Quick Sort', 'Merge Sort'])
algoMenu.grid(row=0, column=1, padx=5, pady=5)
algoMenu.current(0) #Keep the first algo as the default
#Instead of button we'll put a slider
speedScale = Scale(UI_frame, from_=0.1, to=2.0, length=200, digits=2, resolution=0.2, orient=HORIZONTAL, label="Select Speed [s]")
speedScale.grid(row=0, column=2, padx=5, pady=5)
Button(UI_frame, text="Start", command=StartAlgorithm, bg='red').grid(row=0, column=3, padx=5, pady=5)
#Row=1: dataset size/min/max sliders and the Generate button.
sizeEntry = Scale(UI_frame, from_=3, to=25, resolution=1, orient=HORIZONTAL, label="Data Size")
sizeEntry.grid(row=1, column=0, padx=5, pady=5)
minEntry = Scale(UI_frame, from_=0, to=10, resolution=1, orient=HORIZONTAL, label="Min Value")
minEntry.grid(row=1, column=1, padx=5, pady=5)
maxEntry = Scale(UI_frame, from_=10, to=100, resolution=1, orient=HORIZONTAL, label="Max Value")
maxEntry.grid(row=1, column=2, padx=5, pady=5)
Button(UI_frame, text="Generate", command=Generate, bg='white').grid(row=1, column=3, padx=5, pady=5)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
ec308cb6daaa212787cf93f7654c4aba4074b8c9 | Python | calazans10/algorithms.py | /data structs/using_tuple.py | UTF-8 | 392 | 3.546875 | 4 | [] | no_license | # -*- coding: utf-8
# Tuple basics demo (output strings are in Portuguese).
zoo = ('lobo', 'elefante', 'pinguim',)
# len() counts the tuple's elements.
print('O número de animais no zoo é', len(zoo))
# A tuple can nest another tuple; the nested tuple counts as one element.
novo_zoo = ('macaco', 'golfinho', zoo,)
print('O número de animais no novo zoo é', len(novo_zoo))
print('Todos os animais no novo zoo são', novo_zoo)
# Index 2 is the nested tuple brought over from the old zoo.
print('Os animais trazidos do antigo zoo são', novo_zoo[2])
print('O último animal trazido do antigo zoo é', novo_zoo[2][2])
| true |
c1bdc033889db970aa4ef1adff36cfe3604fda61 | Python | chaneyzorn/LeetCode-Python | /src/0070-climbing-stairs.py | UTF-8 | 559 | 2.921875 | 3 | [] | no_license | class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
prev, prev_prev, ans = 1, 1, 1
for i in range(2, n + 1):
ans = prev_prev + prev
prev, prev_prev = ans, prev
return ans
# def climbStairs(self, n):
# """
# :type n: int
# :rtype: int
# """
# dp = [0] * (n + 1)
# dp[0], dp[1] = 1, 1
#
# for i in range(2, n + 1):
# dp[i] = dp[i-2] + dp[i - 1]
# return dp[n]
| true |
39e4c483a9ebca4b76758fe43b2e69fdfd23a4f7 | Python | gomerudo/auto-ml | /automl/createconfigspacepipeline/base.py | UTF-8 | 11,551 | 2.96875 | 3 | [] | no_license | from smac.configspace import ConfigurationSpace
from automl.utl import json_utils
class ConfigSpacePipeline:
    """Creation and manipulation of a configuration space for a pipeline.

    In addition to collecting the configuration spaces from the predefined
    json files, this class resets the default values of each component's
    configuration space from the hyperparameter values found in the input
    pipeline. The upper and lower limits of a hyperparameter are widened
    when the input value falls outside the predefined range. This class is
    not supposed to be instantiated by the user; instead it is used by the
    BayesianOptimizationPipeline class.

    Args:
        pipeline (Pipeline): The pipeline for which configuration space will be generated.

    """

    def __init__(self, pipeline):
        """Initializer of the class ConfigSpacePipeline"""
        self.pipeline = pipeline
        self.combined_configuration_space = ConfigurationSpace()

    def get_config_space(self):
        """Return the combined configuration space for the whole pipeline.

        Processes each step of the pipeline in order, adding the
        configuration space (with reset defaults) of each component to the
        combined space. 'FeatureUnion' components (including nested ones)
        and estimator-wrapping components are handled by
        _process_component_config_space.

        Returns:
            ConfigurationSpace: Configuration space of the pipeline.
        """
        for _, component in self.pipeline.steps:
            self._process_component_config_space(component)
        return self.combined_configuration_space

    def _process_component_config_space(self, component):
        """Add *component*'s configuration space to the combined space.

        'FeatureUnion' components are handled by recursing into each of
        their sub-transformers. For a component that wraps an estimator
        (exposes an ``estimator`` param) the estimator's space is added
        instead of the wrapper's own. Components without a predefined
        json space are skipped silently.

        Args:
            component (obj): Object of the component
        """
        if self._get_component_name(component) == "FeatureUnion":
            for _, sub_component in component.transformer_list:
                self._process_component_config_space(sub_component)
            return

        component_dict = component.get_params()
        if "estimator" in component_dict:
            component = component_dict["estimator"]
            component_dict = component.get_params()
        component_name = self._get_component_name(component)
        if not self._component_json_exist(component_name):
            return
        component_json = self._get_component_json(component_name)
        component_new_json = self._component_reset_default(component_json, component_dict)
        component_config_space = json_utils._convert_json_to_cs(component_new_json)
        component_number = self._component_already_exists(component_name, self.combined_configuration_space)
        # Prefix the (numbered) component name so repeated components get
        # distinct hyperparameter names in the combined space.
        prefixed_name = component_name + "-" + str(component_number)
        self.combined_configuration_space.add_configuration_space(prefixed_name, component_config_space)

    @staticmethod
    def _get_component_json(component_name):
        """Return the predefined configuration space of a component.

        Args:
            component_name (string): Name of the component as a string.

        Returns:
            dict: Individual configuration space in JSON format.
        """
        return json_utils._read_json_file_to_json_obj(component_name)

    @staticmethod
    def _component_json_exist(component_name):
        """Return True when a predefined configuration space exists.

        Args:
            component_name (string): Name of the component as a string.

        Returns:
            bool: True if the component's json file exists, False otherwise.
        """
        return bool(json_utils._check_existence(component_name))

    def _component_reset_default(self, component_json, component_dict):
        """Reset the config-space defaults from the input pipeline's values.

        In addition to resetting the defaults, the upper and lower limits
        of a numeric hyperparameter are widened when the input value is
        out of range.

        Note:
            Hyperparameters whose pipeline value type does not match the
            predefined default's type are left untouched, except for the
            special string defaults "True"/"False"/"None" which mirror
            bool/None pipeline values.

        Args:
            component_json (dict): Json of the component obtained from the pre-defined json file
            component_dict (dict): Dictionary of hyperparameters of the component in the input pipeline

        Returns:
            dict: Configuration space in JSON format with reset defaults.
        """
        for hyperparameter in component_json['hyperparameters']:
            name = hyperparameter['name']
            if name not in component_dict or 'default' not in hyperparameter:
                continue
            value = component_dict[name]
            if self._is_type_same(hyperparameter['default'], value):
                if hyperparameter['type'] == "categorical":
                    # Mutates the entry in place.
                    self._json_process_for_categorical(hyperparameter, value)
                elif hyperparameter['type'] in ("uniform_int", "uniform_float"):
                    self._json_process_for_int_and_float(hyperparameter, value)
            elif self._is_string_boolean(hyperparameter['default'], value):
                hyperparameter['default'] = str(value)
            elif self._is_string_none(hyperparameter['default'], value):
                hyperparameter['default'] = str(value)
        return component_json

    @staticmethod
    def _is_key_in_dic(component_dict, key):
        """Return True when *key* is present in *component_dict*."""
        return key in component_dict

    @staticmethod
    def _is_type_same(hyperparameter_1, hyperparameter_2):
        """Return True when the first value is an instance of the second's type."""
        return isinstance(hyperparameter_1, type(hyperparameter_2))

    @staticmethod
    def _json_process_for_categorical(hyperparameter_dict, value):
        """Reset the default of a categorical hyperparameter (in place).

        The default is only changed when *value* is one of the predefined
        choices.

        Args:
            hyperparameter_dict (dict): Hyperparameter entry with its choices.
            value (string): Value of the hyperparameter to set as default.

        Returns:
            dict: The same (possibly updated) dictionary.
        """
        if value in hyperparameter_dict['choices']:
            hyperparameter_dict['default'] = value
        return hyperparameter_dict

    @staticmethod
    def _json_process_for_int_and_float(hyperparameter_dict, value):
        """Reset default and, if needed, the bounds of a numeric hyperparameter.

        Args:
            hyperparameter_dict (dict): Hyperparameter entry with its bounds.
            value (int or float): Value of the hyperparameter to set as default.

        Returns:
            dict: The same (updated) dictionary.
        """
        # Due to a bug in the ConfigSpace package the default value cannot
        # be set below 1e-10 or above 1e298; such values are replaced by
        # the predefined default.
        # https://github.com/automl/ConfigSpace/issues/97
        # NOTE(review): this comparison also rejects every negative value;
        # confirm whether abs(value) was intended.
        if value < 1e-10 or value > 1e298:
            value = hyperparameter_dict['default']
        else:
            hyperparameter_dict['default'] = value
        if hyperparameter_dict['lower'] > value:
            hyperparameter_dict['lower'] = value
        elif hyperparameter_dict['upper'] < value:
            hyperparameter_dict['upper'] = value
        return hyperparameter_dict

    @staticmethod
    def _is_string_boolean(hyperparameter_1, hyperparameter_2):
        """True when the default is "True"/"False" and the value is a bool."""
        return (hyperparameter_1 in ("True", "False")
                and (hyperparameter_2 is True or hyperparameter_2 is False))

    @staticmethod
    def _is_string_none(hyperparameter_1, hyperparameter_2):
        """True when the default is the string "None" and the value is None."""
        return hyperparameter_1 == "None" and hyperparameter_2 is None

    @staticmethod
    def _component_already_exists(component_name, combined_configuration_space):
        """Return the next free numeric suffix for *component_name*.

        Scans the combined space for hyperparameters named
        "<component>-<number>:<hyperparameter>" and returns the highest
        number found plus one (1 when the component is not present yet).

        NOTE(review): the startswith check also matches components whose
        name merely begins with *component_name*; confirm component names
        are never prefixes of one another.
        """
        component_number = 0
        combined_json = json_utils._convert_cs_to_json(combined_configuration_space)
        for hyperparameter in combined_json['hyperparameters']:
            if hyperparameter['name'].startswith(component_name):
                # "<component>-<n>:<name>"; bounded splits so extra ':' in
                # the hyperparameter name or '-' in the component name
                # cannot raise.
                c_full_name = hyperparameter['name'].split(':', 1)[0]
                c_number = c_full_name.rsplit('-', 1)[-1]
                if component_number < int(c_number):
                    component_number = int(c_number)
        return component_number + 1

    @staticmethod
    def _get_component_name(component):
        """Return the class name of the given component instance."""
        return component.__class__.__name__
| true |
ca593200019d08597eec5b1afed324d0112176c1 | Python | frankShih/LeetCodePractice | /870-advantageShuffle/solution.py | UTF-8 | 1,395 | 3.171875 | 3 | [] | no_license | class Solution:
def advantageCount(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
A = sorted(A)
'''
# naive O(N^2), timeout
result=[None]*len(A)
visit = set()
for i in range(len(B)):
for j in range(len(A)):
if not j in visit and A[j]>B[i]:
# print(A[j], B[i])
result[i]=A[j]
visit.add(j)
break
for i in range(len(B)):
if result[i] != None:
continue
for j in range(len(A)):
if not j in visit:
# print(A[j], B[i])
result[i]=A[j]
visit.add(j)
break
return result
'''
# O(Nlog(N)), greedy
# create value:index mapping may lost the info. of duplicate values
# so, create sorted indices
sortIndB = sorted([x for x in range(len(B))], key=lambda x: B[x], reverse=True)
# print(sortIndB)
left, right = 0, len(A)-1
result=[None]*len(A)
for i in sortIndB:
if A[right]>B[i]:
result[i] = A[right]
right-=1
else:
result[i] = A[left]
left+=1
return result | true |
82d95379bd78b89514bd61b0b5d68f813bd4b8d7 | Python | sbthegreat/BrushSmart | /views/confirmScreen.py | UTF-8 | 1,518 | 3.515625 | 4 | [] | no_license | from tkinter import *
import constants
"""
This is a screen that draws the text "Are you sure?" which is used for the user to confirm if they wish to quit the program.
Down ⯆ - Yes, exit the program
Up ⯅ - No, return to the home screen
"""
def Draw(state):
    """Render the quit-confirmation screen onto the state's canvas.

    Shows "Are you sure?" with NO (up) / YES (down) navigation hints plus
    the recent cursor trail, colored dark-to-bright green from the oldest
    to the newest segment.
    """
    state.canvas.create_text(constants.UP_NAV, text="NO", fill="white", font=constants.FONT)
    state.canvas.create_text(constants.DOWN_NAV, text="YES", fill="white", font=constants.FONT)
    state.canvas.create_polygon(constants.UP_ARROW, fill="white")
    state.canvas.create_polygon(constants.DOWN_ARROW, fill="white")
    state.canvas.create_text((constants.SCREEN_WIDTH/2,constants.SCREEN_HEIGHT/2), text="Are you sure?", fill="white", font=constants.FONT)
    # Oldest segment gets the darkest green; anything past the fourth
    # segment keeps the brightest shade.
    trail_greens = ("#006600", "#009900", "#00CC00", "#00FF00")
    points = state.trailPoints
    for seg in range(len(points) - 1):
        shade = trail_greens[min(seg, len(trail_greens) - 1)]
        segment = points[seg][0], points[seg][1], points[seg + 1][0], points[seg + 1][1]
        state.canvas.create_line(segment, fill=shade, width=10.0)
    state.canvas.update()
    state.canvas.delete("all")
| true |
8e1280da4209164a6d438885b50dc13d033891b5 | Python | morenopep/inter-server | /interServer.py | UTF-8 | 450 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python
import socket

# Interactive FTP banner-grab / login test (Python 2 syntax).
print "Interagindo com FTP SERVER!"
ip = raw_input("Digite o IP: ")
# Standard FTP control port.
porta = 21
meusocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
meusocket.connect((ip,porta))
# Read the server's greeting banner.
banner = meusocket.recv(1024)
print banner
print "Enviando usuario"
meusocket.send("USER teste\r\n")
banner = meusocket.recv(1024)
print banner
print "Enviando senha"
meusocket.send("PASS teste\r\n")
banner = meusocket.recv(1024)
print banner
4eaee5e5a2c2bc0e560dfc9dce61221c9bb7c6d3 | Python | killswitchh/Leetcode-Problems | /Easy/valid-palindrome-II.py | UTF-8 | 347 | 3.15625 | 3 | [] | no_license | '''
https://leetcode.com/problems/valid-palindrome-ii/
'''
class Solution:
    def validPalindrome(self, s: str) -> bool:
        """Return True if s can become a palindrome by deleting at most one char."""
        def is_palindrome(t: str) -> bool:
            return t == t[::-1]

        lo, hi = 0, len(s) - 1
        while lo < hi:
            if s[lo] != s[hi]:
                # Try skipping either of the two mismatched characters.
                return (is_palindrome(s[:lo] + s[lo + 1:])
                        or is_palindrome(s[:hi] + s[hi + 1:]))
            lo += 1
            hi -= 1
        return True
be647f5d248813bd03a348099fb373ed891643cc | Python | agk29/HandyScripts | /merge_tiff.py | UTF-8 | 1,067 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 10:14:26 2021
@author: akenny
Merge a selection of tiff files all in one folder
"""
import os, glob, rasterio
from rasterio.merge import merge
def merge_tiff(directory='', output_folder='', output_f='merged.tif'):
    """Merge every .tif in *directory* into a single mosaic GeoTIFF.

    :param directory: folder containing the input .tif tiles.
    :param output_folder: folder where the merged file is written.
    :param output_f: file name of the merged output.

    NOTE(review): the output CRS is hard-coded to UTM zone 35 / GRS80;
    confirm this matches the input rasters before reusing elsewhere.
    """
    search_criteria = '*.tif'
    q = os.path.join(directory, search_criteria)
    dem_fps = glob.glob(q)
    src_files_to_mosaic = [rasterio.open(fp) for fp in dem_fps]
    try:
        mosaic, out_trans = merge(src_files_to_mosaic)
        # Base the output metadata on the last opened source, as before.
        out_meta = src_files_to_mosaic[-1].meta.copy()
        out_meta.update({"driver": "GTiff", "height": mosaic.shape[1], "width": mosaic.shape[2], "transform": out_trans, "crs": "+proj=utm +zone=35 +ellps=GRS80 +units=m +no_defs"})
        out_fp = os.path.join(output_folder, output_f)
        with rasterio.open(out_fp, "w", **out_meta) as dest:
            dest.write(mosaic)
    finally:
        # Close every opened source dataset (the previous version leaked
        # one handle per input tile).
        for src in src_files_to_mosaic:
            src.close()
## Example use:
# directory = 'C:/folder/subfolder'
# output_folder = 'C:/folder'
# output_f = 'test_merged.tif'
# merge_tiff(directory, output_folder, output_f) | true |
e4d1f8747559c62e993fbb7e9d748443a81370f7 | Python | crempp/mdweb | /mdweb/Page.py | UTF-8 | 3,042 | 2.984375 | 3 | [
"MIT"
] | permissive | """MDWeb Page Objects."""
import codecs
import os
import re
import markdown
from mdweb.BaseObjects import NavigationBaseItem, MetaInfParser
from mdweb.Exceptions import (
ContentException,
PageParseException,
)
#: A regex to extract the url path from the file path
URL_PATH_REGEX = r'^%s(?P<path>[^\0]*?)(index)?(\.md)'
#: A regex for extracting meta information (and comments).
META_INF_REGEX = r'(^```metainf(?P<metainf>.*?)```)?(?P<content>.*)'
class PageMetaInf(MetaInfParser):  # pylint: disable=R0903
    """MDWeb Page Meta Information."""

    def __init__(self, meta_string):
        """Parse the raw meta-inf string for a content page.

        Any non-standard meta value a page defines is blindly included by
        the base parser. When the page defines no nav_name, the title is
        used in its place.

        :param meta_string: Raw meta-inf content as a string
        """
        super(PageMetaInf, self).__init__(meta_string)
        if self.nav_name is None:
            self.nav_name = self.title
def load_page(content_path, page_path):
    """Load a page file and return its path, URL path and raw contents.

    :param content_path: root directory of the content tree.
    :param page_path: full path of the page's markdown file.
    :return: tuple (page_path, url_path, file contents).
    :raises PageParseException: when the page path cannot be parsed.
    :raises ContentException: when the page file does not exist.
    """
    # Extract the part of the page_path that will be used as the URL path.
    # re.escape keeps regex metacharacters in the content path (dots,
    # backslashes, ...) from being interpreted as regex syntax.
    pattern = URL_PATH_REGEX % re.escape(content_path)
    matches = re.match(pattern, page_path)
    if not matches:
        raise PageParseException("Unable to parse page path [%s]" %
                                 content_path)
    url_path = matches.group('path').strip('/')

    if not os.path.exists(page_path):
        raise ContentException('Could not find file for content page "%s"' %
                               page_path)

    # Read the page file.
    with codecs.open(page_path, 'r', encoding='utf8') as f:
        file_string = f.read()

    return page_path, url_path, file_string
class Page(NavigationBaseItem):
    """MDWeb Page View."""

    def __init__(self, page_path, url_path, file_string):
        """Split *file_string* into meta-inf and markdown, then render."""
        self.page_path = page_path
        self.url_path = url_path

        # Separate the optional ```metainf fence from the page body.
        matcher = re.compile(META_INF_REGEX, flags=re.DOTALL)
        parsed = matcher.search(file_string)
        meta_source = parsed.group('metainf') or ''
        body_source = parsed.group('content')

        self.meta_inf = PageMetaInf(meta_source)

        # Meta information and comments have been stripped from the body.
        self.markdown_str = body_source

        # Render immediately so the HTML is ready on first view.
        self.page_html = self.parse_markdown(self.markdown_str)
        self.abstract = self.page_html[:100]

    @property
    def is_published(self):
        """Whether the page's meta-inf marks it as published."""
        return self.meta_inf.published

    @staticmethod
    def parse_markdown(page_markdown):
        """Parse the given markdown string into rendered HTML.

        :param page_markdown: Markdown to be parsed
        :return: Rendered page HTML
        """
        return markdown.markdown(page_markdown)

    def __repr__(self):
        return '{0}'.format(self.page_path)
| true |
db9413b90435c69113f9267f01fe50929fffe0a1 | Python | lyz05/Sources | /北理珠/python/《深度学习入门:基于Python的理论与实现》/深度学习入门:基于Python的理论与实现-源代码/test/test_load_mnist.py | UTF-8 | 386 | 2.53125 | 3 | [
"MIT"
] | permissive | import sys,os
sys.path.append(os.pardir)  # make the sibling ``dataset`` package importable
from dataset.mnist import load_mnist
# The first call downloads MNIST and may take a few minutes...
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True,normalize=False,one_hot_label=False)
# Print the shape of each data split
print(x_train.shape) # (60000, 784)
print(t_train.shape) # (60000,)
print(x_test.shape) # (10000, 784)
print(t_test.shape) # (10000,)
206ac444a7f54282aea563d3a4449d43a28fabc4 | Python | jasonyjong/cs224u-wiki-generator | /pu_files/kmeans.py | UTF-8 | 767 | 2.609375 | 3 | [] | no_license | from sklearn import cluster
import evaluation as evaluation
def kmeansFunction(rawX, rawY, rawXTesting, rawYTesting):
    """Fit a 2-cluster KMeans on the first feature and evaluate on the test set.

    The (negated) distance to the second cluster is used as a pseudo
    probability: the closer a sample sits, the higher its score.
    """
    train_features = [row[0:1] for row in rawX]
    senses = [row[2] for row in rawX]
    words = [row[3] for row in rawX]
    model = cluster.KMeans(n_clusters=2)
    model.fit(train_features)
    # Score the held-out samples against the learned clusters.
    test_features = [row[0:1] for row in rawXTesting]
    test_labels = rawYTesting
    distances = model.transform(test_features)
    # Lower distance means a better match, hence the negation.
    predictedProb = [-row[1] for row in distances]
    predictedY = evaluation.getPredictedY(words, senses, predictedProb, rawXTesting, rawYTesting)
    return evaluation.evaluationMetrics(test_labels, predictedY)
| true |
913d702aad2d2836d2263d15231a3ad3fa7ade4c | Python | Ram-N/Drop7 | /grid_utils.py | UTF-8 | 6,767 | 3.171875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import cfg
def grid_of_ones(size=cfg._SIZE):
    """Return a ``size`` x ``size`` integer grid of ones.

    Used as the neutral (keep-everything) mask in explosion bookkeeping.
    """
    # BUG FIX: ``np.int`` was deprecated and removed in NumPy 1.24; the
    # builtin ``int`` yields the same platform-default integer dtype.
    return np.ones((size, size), dtype=int)
def apply_gravity_to_column(column):
    '''
    Let every non-zero value fall below the zeros in *column*.
    Returns (flip_occurred, original, updated): *original* is a shallow
    ``[:]`` copy of the input and *updated* has all zeros bubbled to the
    top (index 0) with the non-zeros, order preserved, at the bottom.
    '''
    original = column[:]
    updated = column[:]
    any_flip = False
    # Repeated bubble passes; 100 mirrors the original safety breaker.
    for _ in range(100):
        snapshot = updated[:]
        pass_flipped = False
        for idx in range(len(snapshot) - 1):
            # A non-zero sitting directly above a zero falls one slot.
            if snapshot[idx] and not snapshot[idx + 1]:
                updated[idx], updated[idx + 1] = 0, snapshot[idx]
                pass_flipped = True
                any_flip = True
        if not pass_flipped:
            break
    return (any_flip, original, updated)
from itertools import groupby
def mask(vec):
    """Map *vec* to booleans: True wherever the entry is positive."""
    return [entry > 0 for entry in vec]
def get_mask_lengths(_vec):
    '''
    Run-length encode the positive-mask of *_vec*.
    Returns a list of (is_positive, run_length) tuples, one per maximal
    run of consecutive positive (True) / non-positive (False) entries.
    '''
    runs = []
    for is_positive, run in groupby(value > 0 for value in _vec):
        runs.append((is_positive, sum(1 for _ in run)))
    return runs
def blank_out(_num, vec):
    """Return a copy of *vec* with every entry equal to *_num* zeroed."""
    result = []
    for value in vec:
        result.append(0 if value == _num else value)
    return result
def inplace_explosions(vec):
    '''
    Resolve one round of Drop7-style explosions on *vec*.
    Every maximal run of positive entries of length L has its cells whose
    face value equals L blanked out (they "explode").
    Returns (exp_occurred, original, unchanged) where *unchanged* is a 0/1
    mask marking the cells that survived (1 = unchanged).
    '''
    before = [v for v in vec]
    after = [v for v in vec]
    exploded = False
    cursor = 0
    # Runs are computed once, from the pre-explosion contents.
    for is_filled, run in groupby(v > 0 for v in before):
        run_len = sum(1 for _ in run)
        end = cursor + run_len
        if is_filled:
            segment = after[cursor:end]
            cleared = [0 if v == run_len else v for v in segment]
            if cleared != segment:
                exploded = True
            after[cursor:end] = cleared
        cursor = end
    unchanged = [1 if x == y else 0 for x, y in zip(before, after)]
    return (exploded, before, unchanged)
def _orig_inplace_explosions(vec):
    """
    Legacy variant, superseded by ``inplace_explosions``.
    In this def, we loop until ALL the explosions are taken care of.
    But the 'right' way to do it seems to be to do one pass.
    Then return the mask. Apply Gravity etc. and come back here.
    Returns (exp_occurred, original, unchanged-mask), like the one-pass
    version. NOTE(review): the ``return`` sits inside the ``while`` body,
    so only one iteration ever runs — confirm before reusing this.
    """
    potential = True
    exp_occurred = False
    original = [x for x in vec] #manually creating a deepcopy
    updated = [x for x in vec] #manually creating a deepcopy
    while potential:
        potential = False
        ml = get_mask_lengths(updated) # number of contiguous non-zeros
        #print(ml)
        start, end = 0, 0
        for piece in ml:
            _len = piece[1]
            start = end
            end = start + _len
            #print(vec[start:end])
            if piece[0]: #True, nonzero elements exist
                seg = updated[start:end]
                newseg = blank_out(_len, seg)
                if(seg != newseg):
                    potential = True # there could be more explosions
                    exp_occurred = True
                    updated[start:end] = newseg[:]
        # 1 marks a cell whose value survived this round untouched.
        unchanged = [1 if i==j else 0 for i,j in zip(original, updated)]
        # print("Exp occurred", exp_occurred)
        return (exp_occurred, original, unchanged)
def nz(grid):
    """Return the number of non-zero cells in *grid*."""
    return np.count_nonzero(grid)
def is_grid_full(grid):
    """True when every cell of the cfg._SIZE x cfg._SIZE grid is occupied."""
    # Local renamed: the original variable shadowed the sibling nz() helper.
    filled = np.count_nonzero(grid)
    return filled == cfg._SIZE * cfg._SIZE
def drop_ball_in_column(grid, ball, col):
    '''
    Drop ``ball`` into column ``col`` of ``grid``.
    If valid column, find the first zero in the column and replace the value there.
    If column is full, return illegal flag (need_another_col=True).
    If the whole grid is full, flag game_over instead.
    Returns (grid, game_over, need_another_col).
    '''
    game_over = is_grid_full(grid)
    gcol = grid[:, col]
    slot = np.where(gcol==0)[0]  # indices of every empty cell in this column
    if not slot.size: #returned [] -> column full, caller must pick another
        need_another_col = True
    else:
        need_another_col = False
    if not game_over and not need_another_col:
        grid[slot[-1], col] = ball # place in the lowest empty cell (last zero from the top)
    if game_over:
        need_another_col = False  # game over supersedes the column-full flag
    return(grid, game_over, need_another_col)
###############################
####### UPDATING GRID #########
###############################
def apply_explosions_to_grid(grid, s, chain_level):
    """Resolve one simultaneous explosion round over all rows and columns.

    Row and column explosion masks are computed first (without mutating the
    grid), then applied all at once, so row and column explosions from the
    same configuration do not interfere.

    :param grid: cfg._SIZE x cfg._SIZE numpy grid (mutated in place)
    :param s: scorer object; awarded points via ``s.award_points``
    :param chain_level: 1-based cascade round, used for chain bonuses
    :return: (grid, explosions_done) — True when nothing exploded
    """
    original = grid.copy() #need this for calculating points
    explosions = 0
    # For each row/column, calculate explosions (but don't execute them yet).
    row_mask, col_mask = grid_of_ones(cfg._SIZE), grid_of_ones(cfg._SIZE)
    for i in range(cfg._SIZE):
        _, _, row_mask[i, :] = inplace_explosions(grid[i, :])
        _, _, col_mask[:, i] = inplace_explosions(grid[:, i])
    # Executing all the explosions at once
    for i in range(cfg._SIZE):
        grid[i, :] = grid[i, :] * row_mask[i, :]
        grid[:, i] = grid[:, i] * col_mask[:, i]
    # Explosions is the NUMBER of BALLS that exploded this round.
    explosions = np.count_nonzero(original!=grid)
    explosions_done = (explosions == 0)
    if chain_level>1:
        # BUG FIX: the old code passed a bare open() to print(), leaking a
        # file handle per call; the with-block closes it deterministically.
        with open(cfg._outfile, "a") as log_file:
            print("Chain Level:", chain_level, file=log_file)
    s.award_points(chain_level, explosions)
    return(grid, explosions_done)
def apply_gravity_to_grid(grid):
    """Apply gravity to every column of *grid* (mutated in place).

    Returns ``(grid, was_stable)`` where *was_stable* is True when no cell
    moved, i.e. gravity is already settled.
    """
    before = grid.copy()
    for col in range(cfg._SIZE):
        _, _, grid[:, col] = apply_gravity_to_column(grid[:, col])
    return (grid, np.array_equal(grid, before))
def update_grid(grid, s):
    """Resolve the grid after a drop: alternate explosions and gravity until stable.

    ``chain_level`` counts cascade rounds so the scorer can award chain bonuses.
    """
    gravity_done, explosions_done = 0, 0
    chain_level = 0
    while not (gravity_done and explosions_done):
        chain_level += 1
        grid, explosions_done = apply_explosions_to_grid(grid, s, chain_level)
        grid, gravity_done = apply_gravity_to_grid(grid)
        # print("In update grid", explosions_done, gravity_done)
    return grid
b6762f57e56c1ba7a3dbfc4c50a6f55b8880f5a9 | Python | gagaspbahar/prak-pengkom-20 | /P04_16520289/P04_16520289_03.py | UTF-8 | 3,390 | 4.0625 | 4 | [] | no_license | # NIM/Nama : 16520289/Gagas Praharsa Bahar
# Tanggal : 2 Desember 2020
# Deskripsi: Problem 3 - Simetri Lipat dan Simetri Putar
# Kamus
# cekLipatVertikal = cek sb.vertikal
# cekLipatHorizontal = cek sb. horizontal
# cekDiagonalAtas = cek lipat diagonal yang arahnya keatas
# cekDiagonalBawah = cek lipat diagonal yang arahnya kebawah
# putarMatriks1, 2, 3 = cek sb. putar 90, 180, 270 derajat
# int n = baris matriks
# int m = kolom matriks
# int a = matriks
# int sLipat = jumlah simetri lipat
# int sPutar = jumlah simetri putar
#Kamus untuk para fungsi:
# bool flag = bool untuk mengecek sudah terlihat salah atau belum, lalu return 1 bila tidak ada kesalahan, 0 bila ada kesalahan.
# Algoritma
def cekLipatVertikal(n, m):
    """Return 1 if global matrix ``a`` is mirror-symmetric about its vertical axis, else 0."""
    return int(all(
        a[i][j] == a[i][m - 1 - j]
        for i in range(n)
        for j in range(m)
    ))
def cekLipatHorizontal(n, m):
    """Return 1 if global matrix ``a`` is mirror-symmetric about its horizontal axis, else 0."""
    for row in range(n):
        for col in range(m):
            if a[row][col] != a[n - 1 - row][col]:
                return 0
    return 1
def cekLipatDiagonalAtas(n):
    """Return 1 if square matrix ``a`` is symmetric about its anti-diagonal, else 0."""
    return int(all(
        a[i][j] == a[n - 1 - j][n - 1 - i]
        for i in range(n)
        for j in range(n)
    ))
def cekLipatDiagonalBawah(n):
    """Return 1 if square matrix ``a`` equals its transpose (main-diagonal symmetry), else 0."""
    for row in range(n):
        for col in range(n):
            if a[row][col] != a[col][row]:
                return 0
    return 1
def putarMatriks1(n):
    """Return 1 if square matrix ``a`` equals itself rotated by 90 degrees, else 0."""
    return int(all(
        a[i][j] == a[n - j - 1][i]
        for i in range(n)
        for j in range(n)
    ))
def putarMatriks2(n, m):
    """Return 1 if matrix ``a`` equals itself rotated by 180 degrees, else 0."""
    for row in range(n):
        for col in range(m):
            if a[row][col] != a[n - row - 1][m - col - 1]:
                return 0
    return 1
def putarMatriks3(n):
    """Return 1 if square matrix ``a`` equals itself rotated by 270 degrees, else 0."""
    return int(all(
        a[i][j] == a[j][n - i - 1]
        for i in range(n)
        for j in range(n)
    ))
#Initialization: read matrix dimensions and allocate an n x m zero matrix
n = int(input("Masukkan N: "))
m = int(input("Masukkan M: "))
a = [[0 for i in range(m)] for i in range(n)]
sLipat = 0  # number of fold (reflection) symmetries found
sPutar = 0  # number of rotational symmetries found
#Read the matrix elements row by row
for i in range(n):
    for j in range(m):
        a[i][j] = int(input("Masukkan elemen baris {} kolom {}: ".format(i+1,j+1)))
#Count sLipat/sPutar via the checker functions; diagonal folds and the
#90/270-degree rotations only make sense for a square matrix.
if(n == m):
    sLipat += cekLipatDiagonalBawah(n) + cekLipatDiagonalAtas(n)
    sPutar += putarMatriks1(n) + putarMatriks3(n)
sLipat += cekLipatHorizontal(n,m) + cekLipatVertikal(n,m)
sPutar += putarMatriks2(n,m) + 1 #sPutar is always at least 1 (the full 360-degree rotation)
# Output
print("Simetri lipat:", sLipat)
print("Simetri putar:", sPutar)
# Note from class:
# [NOMOR 3]
# jadi maksudnya simetri lipat itu -> ngecek tengah vertikal, tengah horizontal, 2 diagonalnya (4 berarti maxnya).
# simetri putar -> diputer 90 derajat, max 4 juga (360 derajat) | true |
b883867190ef9a24693e2b816c3d2d7e4985cafc | Python | futureimperfect/games | /hangman.py | UTF-8 | 2,343 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env python
import urllib2
from random import randint
MAX_WRONG_GUESSES = 6  # one wrong guess per hangman body part drawn
class Words(object):
    """Word list fetched once from a public dictionary URL (Python 2 code)."""
    def __init__(self):
        self.url = 'http://www.mieliestronk.com/corncob_lowercase.txt'
        self.words = self.httpGet(self.url).splitlines()
    def httpGet(self, url):
        """GET ``url`` and return the response body.

        :raises RuntimeError: on a non-200 response. (BUG FIX: the old code
            ended in a bare ``raise`` with no active exception, which itself
            errors out; it also ignored the ``url`` parameter and always
            fetched ``self.url``.)
        """
        r = urllib2.urlopen(url)
        if r.code == urllib2.httplib.OK:
            return r.read()
        raise RuntimeError('GET %s failed with HTTP status %s' % (url, r.code))
    def getRandomWord(self):
        """Return a uniformly random word from the list."""
        return self.words[randint(0, len(self.words) - 1)]
def print_hangman(fail_count=0):
    """Draw the gallows, revealing one body part per wrong guess (up to 6)."""
    parts = {
        '1': 'O',
        '2': '/',
        '3': '|',
        '4': '\\',
        '5': '/',
        '6': '\\',
    }
    # Blank out every part that has not been "earned" by a wrong guess yet.
    shown = {key: (art if int(key) <= fail_count else ' ')
             for key, art in parts.items()}
    hangman = '''
    ------
    |    |
    %(1)s    |
   %(2)s%(3)s%(4)s   |
   %(5)s %(6)s   |
    ''' % shown
    print(hangman)
def play(words):
    """Play one round of hangman against the given Words source.

    Returns True when the player asks to play again, False otherwise.
    (Python 2: uses raw_input.)
    """
    r_word = words.getRandomWord()
    random_word = list(r_word)  # letters still to be guessed
    print("I've picked a random word. Can you guess what it is?")
    wrong_guesses = 0
    right_guesses = []
    while len(random_word) > 0 and wrong_guesses < MAX_WRONG_GUESSES:
        guess = raw_input('Enter your guess: ')
        # Re-prompt until the guess is exactly one alphabetic character.
        while len(guess) > 1 or not guess.isalpha():
            guess = raw_input('Must be a single letter! Enter your guess: ')
        if guess in random_word:
            # Remove every occurrence of the guessed letter.
            while guess in random_word:
                right_guesses.append(random_word.pop(random_word.index(guess)))
        else:
            wrong_guesses += 1
            print_hangman(fail_count=wrong_guesses)
        # Show progress: guessed letters revealed, the rest masked.
        print(' '.join([w if w in right_guesses else '__' for w in r_word]))
    if len(random_word) == 0:
        print('You won! The winning word was {0}.'.format(r_word))
    else:
        print('Failwhale! The word you missed was {0}.'.format(r_word))
    response = raw_input('\nWould you like to play again? y/n ')
    while response not in ('y', 'Y', 'yes', 'n', 'N', 'no'):
        response = raw_input(
            'Please enter a valid response. Would you like to play again? y/n ')
    return True if response in ('y', 'Y', 'yes') else False
def main():
    """Entry point: build the word list once, then loop games until the player quits."""
    print('Welcome to Hangman!\n')
    print('Building word list...\n\n')
    words = Words()  # fetches the word list over HTTP
    while play(words):
        pass
    print('\nGoodbye!')
if __name__ == '__main__':
    main()
| true |
c0b5cf76ebc852b5345ea93b97b7bad2243ab588 | Python | kaka-lin/pyqt-image-recognition | /models/binarized_utils.py | UTF-8 | 3,354 | 2.859375 | 3 | [
"MIT"
] | permissive | import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function, gradcheck
import numpy as np
def Binarize(tensor, quantization_model='deterministic'):
    """Binarize *tensor* to {-1, +1}.

    deterministic: ``tensor.sign()`` (note: exact zeros stay 0, as
    ``torch.sign`` defines).
    stochastic: xb = +1 with probability p = hard_sigmoid(x) =
    clip((x+1)/2, 0, 1), else -1. Implemented with in-place ops, so the
    input tensor is mutated in this mode.

    :raises ValueError: for an unknown ``quantization_model`` — the old code
        silently returned ``None`` here.
    """
    if quantization_model == 'deterministic':
        return tensor.sign()
    if quantization_model == 'stochastic':
        # add_(1).div_(2)                      -> hard sigmoid, in place
        # add_(rand-0.5).clamp_(0,1).round()   -> Bernoulli draw with prob p
        # mul_(2).add_(-1)                     -> map {0,1} to {-1,+1}
        return tensor.add_(1).div_(2).add_(torch.rand(tensor.size()).add(-0.5)).clamp_(0, 1).round().mul_(2).add_(-1)
    raise ValueError("unknown quantization_model: %r" % (quantization_model,))
class BinarizeLinear(nn.Linear):
    """Linear layer whose weights (and inputs, except raw 784-dim images) are binarized to {-1,+1}."""
    def __init__(self, *args, **kwargs):
        super(BinarizeLinear, self).__init__(*args, **kwargs)
    def forward(self, input):
        # Skip binarizing the raw input layer (flattened 28x28 MNIST image).
        if input.size(1) != 784: # 28*28
            # Any changes on x.data wouldn’t be tracked by autograd
            input.data = Binarize(input.data)
        if not hasattr(self.weight, 'org'):
            # Stash the full-precision weights once; .org keeps the real
            # values while .data holds the binarized copy used in forward.
            self.weight.org = self.weight.data.clone()
        self.weight.data = Binarize(self.weight.org)
        out = F.linear(input, self.weight)
        if not self.bias is None:
            self.bias.org = self.bias.data.clone()  # bias stays full precision
            out += self.bias.view(1, -1).expand_as(out)
        return out
class BinarizeConv2d(nn.Conv2d):
    """Conv2d layer whose weights (and inputs, except 3-channel images) are binarized to {-1,+1}."""
    def __init__(self, *args, **kwargs):
        super(BinarizeConv2d, self).__init__(*args, **kwargs)
    def forward(self, input):
        # Skip binarizing the raw RGB input layer (3 channels).
        if input.size(1) != 3:
            input.data = Binarize(input.data)
        if not hasattr(self.weight, 'org'):
            # Stash the full-precision weights once; .org keeps the real
            # values while .data holds the binarized copy used in forward.
            self.weight.org = self.weight.data.clone()
        self.weight.data = Binarize(self.weight.org)
        out = F.conv2d(input, self.weight, None, self.stride,
                       self.padding, self.dilation, self.groups)
        if not self.bias is None:
            self.bias.org = self.bias.data.clone()  # bias stays full precision
            out += self.bias.view(1, -1, 1, 1).expand_as(out)
        return out
if __name__ == "__main__":
    '''
    x = torch.randn(2,2, requires_grad=True)
    w = torch.randn(2,2, requires_grad=True)
    grad_output = torch.randn(2,2)
    bin_x = x.sign()
    bin_w = w.sign()
    out = bin_x.matmul(bin_w.t())
    out.backward(grad_output)
    print("raw input x: \n{}".format(x))
    print("raw input w: \n{}".format(w))
    print("output: \n{}".format(out))
    print("grad output: \n{}".format(grad_output))
    print("grad_input_x: \n{}".format(x.grad)) # x.grad=0
    print("grad_input_w: \n{}".format(w.grad))
    print("="*50)
    '''
    # Numerically check the gradients of a tiny binarized linear layer.
    # NOTE(review): sign() is piecewise constant (zero gradient almost
    # everywhere), so the value of this check may be vacuous — confirm.
    binlinear = BinarizeLinear(2, 2, bias=False)
    x = torch.randn(2,2, requires_grad=True)
    test = gradcheck(binlinear, (x,), eps=1e-3, atol=1e-4)
    print("Gradient check: ", test)
| true |
2812b122cb5acdeeab460bfef319b37fb3f333ea | Python | zhanggong0564/TF2-YOLOV4 | /utils__/show_box.py | UTF-8 | 3,906 | 2.59375 | 3 | [] | no_license | import numpy as np
import cv2
from utils__.utils import get_aim
from utils__.viduslizer import *
'''
1.得到了 image, boxes, labels, probs, class_labels
2.根据probs的高低阈值筛选
返回box和scores和每个框的类别
3.opencv将box和分数和类别画到image上并且返回物体的box和分数
'''
'''
果子类别对应区间
[1-100]苹果
[101-200]橙子
[201-300]梨子
[301-400]青苹果
'''
# BGR colour palette (one entry per fruit class, used as fallbacks).
colors = [
    (0,255,255),
    (0,255,0),
    (255,0,0),
    (0,155,165)
]
font = cv2.FONT_HERSHEY_COMPLEX  # font for on-image text labels
# Class-name -> class-id lookup for the detected fruit categories.
class_ifo = {
    'apple':0,
    'pear':1,
    'green apple':2,
    'orange':3
}
no_grasp = "can't grasp"  # label used when no grasp target is available
def visualize_boxes(image, boxes, labels, probs, class_labels, color_intrin_part, aligned_depth_frame):
    """Filter detections by score and draw them (with depth info) onto a copy of *image*."""
    # Map class id -> {"name": ...} as expected by find_box.
    category_index = {idx: {"name": label} for idx, label in enumerate(class_labels)}
    box_info = find_box(boxes, labels, probs, category_index)
    return draw_box(image, box_info, color_intrin_part, aligned_depth_frame)
def find_box(boxes, classes, scores, category_index, min_score_thresh=0.6):
    """Sort detections by score and keep the confident, known-class ones.

    Returns a dict with parallel lists: 'box' (tuples), 'class' (names)
    and 'scores'. At most the top-20 detections are considered.
    """
    # Highest score first.
    order = np.argsort(-scores)
    boxes = boxes[order]
    scores = scores[order]
    classes = classes[order]
    kept_boxes, kept_names, kept_scores = [], [], []
    for i in range(min(20, boxes.shape[0])):
        if scores is not None and scores[i] <= min_score_thresh:
            continue
        if classes[i] not in category_index.keys():
            continue
        kept_boxes.append(tuple(boxes[i].tolist()))
        kept_names.append(category_index[classes[i]]['name'])
        kept_scores.append(scores[i])
    return {'box': kept_boxes, 'class': kept_names, 'scores': kept_scores}
def draw_box(image,box_info,color_intrin_part,aligned_depth_frame,line_thickness=None):
    """Draw the filtered detections (tag, box, corners, depth text) on a copy of *image*.

    Box coordinates in box_info are assumed normalized to [0, 1] — they are
    scaled by the image width/height below. Returns the annotated copy.
    """
    show_image = image.copy()
    H,W,c = show_image.shape
    box_list= box_info['box']
    class_list = box_info['class']
    scores_list=box_info['scores']
    for index,box in enumerate(box_list):
        x0,y0,x1,y1 = box
        # Scale normalized coordinates to pixel coordinates.
        x0 = int(x0*W)
        y0 = int(y0*H)
        x1 = int(x1*W)
        y1 = int(y1*H)
        # Mean 3-D point of the box centre region (via the depth frame).
        target_xyz_true,w,h = xy2xyz(x0,y0,x1,y1,color_intrin_part,aligned_depth_frame)
        _class = class_ifo[class_list[index]]  # NOTE(review): unused, as is `score` below
        score = scores_list[index]
        color = np.random.randint(0, 255, (1, 3))[0].tolist()  # random BGR per box
        border = h if w >= h else w  # corner marker size = shorter box side
        draw_tag(show_image,class_list[index],x0,y0)
        draw_bbx(show_image,x0,x1,y0,y1,color)
        draw_corner(show_image, x0,x1,y0,y1, border, color)
        if np.sum(target_xyz_true)==0.0:
            text1 = "no depth info"
        else:
            text1 = str(target_xyz_true)
        draw_tag(show_image,text1,x0,y1+30)
        # Dot at the box centre.
        cv2.circle(show_image,((x0+x1)//2,(y0+y1)//2),10,color,-1,lineType=cv2.LINE_AA)
    return show_image
def xy2xyz(x0, y0, x1, y1, color_intrin_part, aligned_depth_frame):
    """Deproject the centre region of a pixel box to a mean 3-D point.

    Samples ``get_aim`` along the diagonal of the central third of the box
    and averages the samples whose depth is non-zero.

    :return: ``(target_xyz, w, h)`` where ``target_xyz`` is a list of three
        coordinates rounded to 3 decimals, or ``[]`` when no valid depth was
        found. (BUG FIX: the old code returned a bare ``None`` in that case,
        which crashed the caller's 3-way tuple unpacking.)
    """
    w = x1 - x0
    h = y1 - y0
    # Shrink to the central third of the box to avoid background pixels.
    stride_w = w // 3
    stride_h = h // 3
    new_x0 = x0 + stride_w
    new_y0 = y0 + stride_h
    new_x1 = x1 - stride_w
    new_y1 = y1 - stride_h
    xyz_list = []
    for xc, yc in zip(range(new_x0, new_x1), range(new_y0, new_y1)):
        point = get_aim(xc, yc, color_intrin_part, aligned_depth_frame)
        if point[2] != 0.0:  # depth 0.0 means "no reading" at this pixel
            xyz_list.append(point)
    if xyz_list:
        mean_xyz = np.mean(xyz_list, 0)
        target_xyz_true = [round(coord, 3) for coord in mean_xyz]
    else:
        # No valid depth sample: empty list keeps the caller's
        # ``np.sum(...) == 0.0`` "no depth info" check working.
        target_xyz_true = []
    return target_xyz_true, w, h
| true |
7e3ccf52be37215adac3b07dd15f46a1a162106e | Python | cdpetty/one | /logger.py | UTF-8 | 280 | 3.1875 | 3 | [] | no_license | import sys
def log(*statements):
    """Write the space-joined statements to stdout, newline-terminated, and flush."""
    line = ' '.join(str(item) for item in statements) + '\n'
    sys.stdout.write(line)
    sys.stdout.flush()
def die(statement):
    """Report *statement* as an error on stderr and exit with status 1."""
    sys.stderr.write('One: error: ' + statement + '\n')
    sys.exit(1)
def end(statement):
    """Print *statement* (followed by a blank line) and exit successfully."""
    log(statement + '\n')
    sys.exit(0)
| true |
1d8c5708ed9a11e976f1ddb1ef80313ad2e75d7d | Python | saurabhkulkarni77/Echo-updated | /sentiment_analysis.py | UTF-8 | 1,159 | 2.71875 | 3 | [] | no_license | from flask import Flask, render_template, request, jsonify
from lib.classifier import Classifier
from lib.examples import Examples
import threading
print(" - Starting up application")
lock = threading.Lock()  # guards lazy construction of the shared classifier
app = Flask(__name__)
class App:
    """Borg-style holder for the (expensive to build) shared classifier.

    All instances share ``__shared_state`` as their ``__dict__``, so the
    classifier is constructed once and reused across threads and requests.
    """
    __shared_state = {}
    def __init__(self):
        self.__dict__ = self.__shared_state
    def classifier(self):
        # Lazily build the classifier exactly once, under the module lock.
        with lock:
            # NOTE(review): ``is None`` would be more idiomatic than ``== None``.
            if getattr(self, '_classifier', None) == None:
                print(" - Building new classifier - might take a while.")
                self._classifier = Classifier().build()
                print(" - Done!")
            return self._classifier
# Warm the classifier cache in the background so the first request is fast.
t = threading.Thread(target=App().classifier)
t.daemon = True
t.start()
@app.route('/')
def main():
    """Serve the landing page."""
    return render_template('main.html')
@app.route('/predict')
def predict():
    """Classify the ``q`` query parameter and return the result as JSON."""
    q = request.args.get('q')
    label, prediction = App().classifier().classify(q)
    return jsonify(q=q, predicted_class=label, prediction=prediction)
@app.route('/examples')
def examples():
    """Return a JSON list of labelled examples."""
    # Presumably 5 examples per class — confirm against Examples.load.
    examples = Examples(App().classifier()).load(5, 5)
    return jsonify(items=examples)
if __name__ == '__main__':
    # Debug server for local development only.
    app.run(debug=True)
| true |