blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b534914224d0f7ac16d99d74a4404bc0a92cf76f | Python | pombredanne/Rusthon | /regtests/lang/raise.py | UTF-8 | 332 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | from runtime import *
'''
raise and catch error
'''
def main():
    """Exercise raise/except handling for TypeError, a clean try, and
    an AttributeError carrying an argument."""
    caught_type_error = False
    try:
        raise TypeError
    except TypeError:
        caught_type_error = True
    assert caught_type_error is True

    ran_clean = False
    try:
        ran_clean = True
    except:
        ran_clean = False
    assert ran_clean is True

    caught_attr_error = False
    try:
        raise AttributeError('name')
    except AttributeError:
        caught_attr_error = True
    assert caught_attr_error is True


main()
| true |
b6958c8aa6866c8393cb77dc9826aa22ed091041 | Python | Shubha737/100-days-code-challenge | /day_11_reversing_list.py | UTF-8 | 316 | 4.125 | 4 | [] | no_license | # Day 11 code 1
# Reversing a List in Python
# Fixes: renamed `list` (shadowed the builtin) and removed the unused
# `rev_list` variable.

# How many elements the user will enter.
total_element = int(input("Enter the total number of elements :"))

items = []
for _ in range(total_element):
    elements = (input("Enter the element value:"))
    items.append(elements)

print(items)       # original order
items.reverse()    # in-place reversal
print(items)       # reversed order
| true |
def CreateFile(bras: list, nomFichier: str):
    """Write the solver's answer to ./output_files/<nomFichier>.out.

    Format: first line is the number of arms; then, per arm, three lines:
      1. mount-point x, mount-point y, task count, move count
      2. indices of the tasks performed
      3. the sequence of moves
    Fix: removed the `total` accumulator, which was computed but never used.

    :param bras: arms produced by the solver (objects with pm, taskDone,
                 movementsDone attributes)
    :param nomFichier: base name of the output file
    """
    with open("./output_files/" + nomFichier + ".out", "w") as fichier:
        # First line: how many arms are used.
        fichier.write(str(len(bras)) + "\n")
        for arm in bras:
            # Line 1: mount point coordinates, task count, move count.
            fichier.write(str(arm.pm[0]) + " " + str(arm.pm[1]) + " " +
                          str(len(arm.taskDone)) + " " +
                          str(len(arm.movementsDone)) + "\n")
            # Line 2: indices of the performed tasks.
            for task in arm.taskDone:
                fichier.write(str(task.indice) + " ")
            fichier.write("\n")
            # Line 3: the moves, space separated.
            for move in arm.movementsDone:
                fichier.write(move + " ")
            fichier.write("\n")
| true |
190bb3896b5fac5622156faec1cb8391b1dcd227 | Python | juanmunoz00/python_classes | /list_example1.py | UTF-8 | 1,545 | 4.375 | 4 | [] | no_license | import random
# Demo of common list operations (Spanish-labelled output).
SEPARADOR = "****************************************"

# Define and show the initial list.
milista = ["Ford", "Toyota", "Nissan", "Dodge", "Masserati"]
print(milista)
print(SEPARADOR)

# Append one element and show the result.
milista.append("Porshe")
print(milista)
print(SEPARADOR)

# How many elements does the list hold?
print("La lista tiene " + str(len(milista)) + " elementos")
print(SEPARADOR)

# 1-based position of an element.
marca = "Nissan"
print("El indice del elemento " + marca + " es: " + str(milista.index(marca) + 1))
print(SEPARADOR)

# Remove by value.
marca = "Toyota"
print("Removiendo la marca " + marca)
milista.remove(marca)
print(milista)
print(SEPARADOR)

# Remove by index.
_index = 2
print("Remover el elemento " + milista[_index])
milista.pop(_index)
print(milista)
print(SEPARADOR)

# Sort ascending (A-Z), then descending (Z-A).
print("Ordenar alfabeticamente la lista")
milista.sort()
print(milista)
print("Z-A la lista")
milista.sort(reverse=True)
print(milista)
print(SEPARADOR)

# Build a list of random numbers of random length.
tamano_arreglo = random.randint(1, 10)
print("Un numero enterio aleatorio: " + str(tamano_arreglo))
arrNum = []
for _ in range(tamano_arreglo):
    ale = random.randint(1, 99)
    arrNum.append(ale)
print(arrNum)
for elemento in arrNum:
    print(elemento)  # elemento is one entry of the list
| true |
c70a58520bcebf2a201d36144b7acf5fadbad822 | Python | caohaitao/PythonTest | /opengl/test1.py | UTF-8 | 1,818 | 2.953125 | 3 | [] | no_license | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from threading import Timer
import time
from stl_reader import read_one_file
triangles = []
def drawFunc():
    """GLUT display callback: clear the frame and draw every loaded STL
    triangle in red, then present the result."""
    global triangles
    # Clear the previous frame to black.
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClear(GL_COLOR_BUFFER_BIT)
    # glRotatef(0.1, 5, 5, 0)  # (angle, x, y, z)
    # glutWireTeapot(0.5)
    glColor3f(1.0,0.0,0.0)
    glPushMatrix()
    glBegin(GL_TRIANGLES)
    for t in triangles:
        # NOTE(review): printing every triangle on every redraw is very
        # verbose — likely leftover debugging.
        print(t.normal,t.pnts)
        glNormal3f(t.normal[0],t.normal[1],t.normal[2])
        for i in range(3):
            glVertex3f(t.pnts[i][0],t.pnts[i][1],t.pnts[i][2])
    glEnd()
    glPopMatrix()
    # Flush the pipeline and swap buffers to show the frame.
    glFlush()
    glutSwapBuffers()
def f1():
    """Timer callback: request a GLUT redraw, then re-arm itself in 10 ms."""
    glutPostRedisplay()
    Timer(0.01,f1).start()
def mouseButton(button, mode, x, y):
    """GLUT mouse callback: report which button changed state and where."""
    which = 'right' if button == GLUT_RIGHT_BUTTON else 'left'
    m = 'down' if mode == GLUT_DOWN else 'up'
    print(which, m, x, y)
if __name__=='__main__':
    # Load the STL model to display (hard-coded path).
    triangles = read_one_file(r"D:\code\MyCodes\CHT3D\modelmatch\stlModels\mytest\rect0.stl")
    # Initialize OpenGL via GLUT.
    b = glutInit()
    if b == False:
        print("glutInit failed")
        exit(0)
    # Display mode: GLUT_SINGLE = unbuffered direct display | GLUT_RGBA.
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)
    # Window position and size, then create the window.
    glutInitWindowPosition(0, 0)
    glutInitWindowSize(400, 400)
    glutCreateWindow(b"first")
    # Register the render callback.
    glutDisplayFunc(drawFunc)
    # glutIdleFunc(drawFunc)
    glutMouseFunc(mouseButton)
    # Timer(0.1,f1).start()
    # Enter the GLUT event loop (does not return).
    glutMainLoop()
a2bd10a14af79d62ae433378b297eac75b1dca95 | Python | NondairyDig/GenDocks | /netnet.py | UTF-8 | 487 | 2.6875 | 3 | [] | no_license | import socket
import time
def netcat(hostname, port, content):
    """Minimal netcat: connect, send `content`, and echo replies until EOF.

    After the first reply it waits 0.5 s and sends b'28' (the challenge
    answer). Fix: the socket is now closed even when connect/recv raises
    (previously it leaked on any exception).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((hostname, port))
        s.sendall(content)
        first_reply = True
        while True:
            data = s.recv(1024)
            if data == b"":
                # Peer closed the connection.
                break
            print("Received:", repr(data))
            if first_reply:
                time.sleep(0.5)
                s.send(b'28')
                first_reply = False
        print("Connection closed.")
    finally:
        s.close()


netcat("pwn-2021.duc.tf", 31905, b"")
86671edbf579d1659f8313b1b6c3342312d9df33 | Python | AAVoid/TowerDefenseEvolution | /main.py | UTF-8 | 1,029 | 2.65625 | 3 | [] | no_license | #-*- coding:utf-8 -*-
from system import *
from personnage import *
from carte import *
from animation import *
from menuPrincipal import *
pygame.init()
systeme = Systeme()
systeme.chargerStatistiquesTxt()
system.actualiserExperienceNiveauSuivant(systeme)
system.actualiserPrixInvocation(systeme)
systeme.chargerSauvegardeTxt()
demarrerJeu(systeme)
c = Carte()
c.chargerCarteTxt("0")
c.demarrerPartie(systeme)
pygame.quit()
"""while continuer:
# fixons le nombre max de frames / secondes
framerate.tick(FREQUENCE_BOUCLE)
secondesEcouleesDepuisLeDernierAppelDeTick = framerate.get_time() / 1000
# On vide la pile d'evenements et on verifie certains evenements
for event in pygame.event.get(): # parcours de la liste des evenements recus
if event.type == QUIT: #Si un de ces evenements est de type QUIT
continuer = False # On arrete la boucle
c.afficherImage(fenetre)
pygame.display.flip()
# fin du programme principal...
pygame.quit()"""
| true |
b3d00c9e01f5d750f94d9b84e18527ee66c6abbd | Python | shanemat/CIS-530-MP-01 | /mp2-3.py | UTF-8 | 1,601 | 3.40625 | 3 | [] | no_license | import sys
import aig
import queue as q
from aig import SearchNode
def construct_solution(final_search_node):
    """
    Render a search result as "n1 -[c1]-> n2 ... \nTotal path cost: C".
    :param final_search_node: Result of search (has path, costs, total_cost)
    :return: String containing the formatted solution
    """
    node_path = final_search_node.path
    last_index = len(node_path) - 1
    pieces = []
    for idx, node_id in enumerate(node_path):
        pieces.append("{0}".format(node_id))
        # An arrow with its edge cost separates consecutive nodes.
        if idx != last_index:
            pieces.append(" -[{0}]-> ".format(final_search_node.costs[idx]))
    pieces.append("\n")
    pieces.append("Total path cost: {0}".format(final_search_node.total_cost))
    return "".join(pieces)
def execute_b_and_b(graph):
    """
    Executes Branch-and-Bound search on the given graph.
    :param graph: Graph to run Branch-and-Bound search in
    :return: Formatted solution string (see construct_solution)
    """
    # NOTE(review): PriorityQueue ordering relies on SearchNode defining
    # comparison (e.g. __lt__) in the aig module — confirm.
    node = SearchNode(graph.start_nodes[0], 0, None)
    queue = q.PriorityQueue()
    queue.put(node)
    while not queue.empty():
        node = queue.get()
        # Stop at the first goal node dequeued (lowest-cost frontier entry).
        if node.node_index in graph.goal_nodes:
            break
        # Expand: enqueue every successor with its edge cost.
        for (next_node, cost) in graph.nodes[node.node_index].items():
            queue.put(SearchNode(next_node, cost, node))
    return construct_solution(node)
def run_program():
    """
    Main function of the program: parse the graph file named by the first
    command-line argument and print the Branch-and-Bound solution.
    """
    if not aig.are_parameters_valid():
        raise Exception("You must provide the name of input file as first argument!")
    graph = aig.parse_input(sys.argv[1])
    print(execute_b_and_b(graph))


# Runs on import as well as when executed as a script.
run_program()
| true |
5f3c150e603efb0fbd6fea199abc8df4cc276487 | Python | rojter-tech/Codility | /Python/Lesson05/Lesson[5-3]Five.py | UTF-8 | 516 | 3.296875 | 3 | [] | no_license | #Author: Daniel Reuter
#Github: https://github.com/rojter-tech
def solution(A):
    """Return the start index of the slice (length 2 or 3) with minimal
    average (Codility MinAvgTwoSlice).

    Any longer slice's average is bounded below by one of its 2- or
    3-element sub-slices, so checking only those is sufficient.
    Fix: initial minimum generalized from the Codility-specific bound
    10**5 + 1 to infinity, so arbitrary value ranges work.
    """
    n = len(A)
    minavg = float('inf')
    minpos = 0
    for i in range(n - 1):
        # Candidate slice of length 2 starting at i.
        thisavg = (A[i] + A[i + 1]) / 2
        if thisavg < minavg:
            minavg = thisavg
            minpos = i
        # Candidate slice of length 3 starting at i, when it fits.
        if i < n - 2:
            thisavg = (A[i] + A[i + 1] + A[i + 2]) / 3
            if thisavg < minavg:
                minavg = thisavg
                minpos = i
    return minpos


A = [4,2,2,5,1,1,8]
print(solution(A))
d2011ff54591c46dd70d476e77e43e1612f5f6bf | Python | YulanJS/Advanced_Programming_With_Python | /lecture9.py | UTF-8 | 4,943 | 4 | 4 | [] | no_license | # ----------------------------------------------------------------------
# Name: lecture9
# Purpose: Demonstrate the use of classes
#
# Author: Rula Khayrallah
# ----------------------------------------------------------------------
"""
Module containing some class definitions to be used in lecture 9.
The FruitShop class definition is used to illustrate a basic class
definition.
The Account class definition is used to illustrate methods, instance and
class variables.
The Student class definition is used to illustrate static and class
methods.
The SavingsAccount, PremiumAccount and PremiumSavingsAccount classes
are used to illustrate inheritance.
"""
class FruitShop:
    """
    Represent a shop that sells fruits.
    """
    # Intentionally empty: used in lecture as the minimal class example.
class Account(object):
    """
    A simple bank account identified by its holder's name.

    Argument:
        account_holder (string): account holder's name.
    Attributes:
        holder (string): account holder's name.
        balance (number): account balance in dollars (read-only property).
    """

    currency = '$'  # class variable shared by every account

    def __init__(self, account_holder):
        self.holder = account_holder
        self._balance = 0
        self._country = 'US'
        self.__note = 'OK'

    def __str__(self):
        return f'Name: {self.holder}\nBalance: ${self.balance: ,.2f}'

    def __lt__(self, other):
        # Accounts order by balance only.
        return self.balance < other.balance

    def __eq__(self, other):
        # Equal when both holder and balance match.
        return self.holder == other.holder and self.balance == other.balance

    def __add__(self, other):
        # Merging two accounts yields a joint account with pooled funds.
        joint = Account(f'{self.holder} & {other.holder}')
        joint.deposit(self.balance + other.balance)
        return joint

    def deposit(self, amount):
        """
        Add the given dollar amount to the balance.
        :param amount: (number) the amount to be deposited in dollars.
        """
        self._balance = self._balance + amount

    def withdraw(self, amount):
        """
        Remove the given amount from the balance when funds allow.
        :param amount: (number) the amount to be withdrawn in dollars.
        :return: (boolean) True on success, False if funds are insufficient.
        """
        if amount > self._balance:
            return False
        self._balance = self._balance - amount
        return True

    @property
    def credit_limit(self):
        """
        The credit limit available: half of the current balance.
        :return: (float) maximum credit
        """
        return self._balance / 2

    @property
    def balance(self):
        """Current balance in dollars."""
        return self._balance
class Student:
    """
    A college student with a name and an 8-digit id.

    Arguments:
        name (string): student name
        sid (integer): student id - 8 digits
    Attributes:
        name (string): student name
        sid (integer): student id (99999999 when the given id is invalid)
    """

    enrollment = 0  # class variable: running count of students created

    def __init__(self, name, sid):
        self.name = name
        # Fall back to the sentinel id when validation fails.
        self.sid = sid if self.valid(sid) else 99999999
        self.add_student()

    @staticmethod
    def valid(some_id):
        """
        A valid student id starts with 2019 (i.e. 2019xxxx).
        :param some_id: (integer)
        :return: (boolean) True if the id is valid and False otherwise.
        """
        return some_id // 10000 == 2019

    @classmethod
    def add_student(cls):
        """Bump the class-wide enrollment counter."""
        cls.enrollment += 1
class SavingsAccount(Account):
    """
    A bank account that charges a flat fee on each successful withdrawal.

    Inherits from: Account
    Argument:
        account_holder (str): account holder's name.
    Attributes:
        holder (str): account holder's name.
        balance (number): account balance in dollars.
    """

    # class variable: withdrawal fee in dollars
    fee = 1

    def withdraw(self, amount):
        """
        Withdraw the amount and charge the fee when the base withdrawal
        succeeds.

        Bug fix: the original called `super.withdraw(...)` (the builtin
        `super` type, not `super()`), which raised AttributeError at
        runtime, and it deducted the fee even for failed withdrawals.
        :return: (boolean) True if the withdrawal succeeded, False otherwise.
        """
        withdrawn = super().withdraw(amount)
        if withdrawn:
            # Charge the fee only when money actually left the account.
            self._balance -= self.fee
        return withdrawn
class PremiumAccount(Account):
    """
    Represent a premium interest bearing bank account.

    Inherits from Account
    Argument:
        account_holder (str): account holder's name.
        rate (float): interest rate
    Attributes:
        holder (str): account holder's name.
        balance (number): account balance in dollars.
        interest_rate (float): interest rate
    """

    def __init__(self, account_holder, rate):
        # Bug fix: was `super.__init__(...)` (missing call parentheses on
        # super), which raised AttributeError before the base class could
        # initialize holder/_balance.
        super().__init__(account_holder)
        self.interest_rate = rate
class PremiumSavingsAccount(PremiumAccount, SavingsAccount):
    """
    Represent a premium interest bearing bank account
    with a withdrawal fee.

    Inherits from PremiumAccount, SavingsAccount
    Argument:
        account_holder (str): account holder's name.
        rate (float): interest rate
    Attributes:
        holder (str): account holder's name.
        balance (number): account balance in dollars.
        interest_rate (float): interest rate
    """
    # Empty body: behavior comes entirely from the MRO
    # (PremiumAccount -> SavingsAccount -> Account).
| true |
bc1c2dfcd62df7d8a482c6ce471703697baa9525 | Python | LeiLikun/WebsiteBlocker | /blockWebsite.py | UTF-8 | 810 | 2.53125 | 3 | [] | no_license | import os
import re
import urllib2
def block(website):
    """Block a website on Windows (Python 2).

    Scrapes cached DNS entries matching `website` from `ipconfig
    /displaydns`, appends 127.0.0.1 redirects for them to the hosts file,
    then flushes the DNS cache.
    """
    f = os.popen('ipconfig /displaydns')
    # Keep only cache lines mentioning the target site, reduce them to
    # unique hostnames via parse(), and format hosts-file entries.
    lines = filter(lambda x:x.count(website)>0,f.readlines())
    lines = list(set(map(parse, lines)))
    lines = map(lambda x:' 127.0.0.1 ' + x + '\n',lines)
    with open('C:\Windows\System32\drivers\etc\hosts','a') as hosts:
        hosts.writelines(lines)
    os.system('ipconfig /flushdns')
    print lines
def unblock(website):
with open('C:\Windows\System32\drivers\etc\hosts','w+') as hosts:
lines=filter(lambda x:x.count(website)==0,hosts.readlines())
hosts.seek(0)
print lines
hosts.writelines(lines)
def parse(line):
    """Extract a hostname fragment from a dns-cache line.

    Returns the text from the first lowercase-letters-plus-dot match to the
    second-to-last character of the line, or None when nothing matches.
    """
    res = re.search(r'[a-z]+\.',line)
    if res:
        # NOTE(review): [:-1] presumably trims the line's trailing newline —
        # confirm it never clips the hostname itself.
        return res.string[res.start():-1]


if __name__=='__main__':
    block('zhihu')
3f29ab1d4cab8bb7201e36c22e0f90caf1cb74f8 | Python | nickthequik/thesis | /experiment.py | UTF-8 | 1,647 | 3.015625 | 3 | [] | no_license |
import sys
import time
from file_utils import get_exp_cfg, make_data_dir
from env_utils import init_env
from plot_utils import plot_episodes_data, plot_loss_data
from agents import init_agent
from training import train_agent
from ep_utils import store_episodes_data, store_episodes_stats, get_episodes_stats
def main():
    """Run the experiment described by the config dir in sys.argv[1].

    For each configured iteration: build the environment and a fresh agent,
    train, then persist episode data, stats, plots, and the agent under a
    new data directory.
    """
    # get directory with argv
    exp_dir = sys.argv[1]

    # read config file
    config = get_exp_cfg(exp_dir)
    config['train'] = True
    print('Experiment Configuration:')
    print(config)

    # initilize environment
    env = init_env(config)
    if not env: sys.exit()

    # repeat experiment for iter iterations
    iter = config['iterations']
    for i in range(iter):
        # create directory to store results
        data_dir = make_data_dir(exp_dir, i+1)

        # initilize agent (fresh per iteration so runs are independent)
        agent = init_agent(config, env)
        if not agent: sys.exit()

        # keep track of start time
        start_time = time.time()

        # start training agent
        episodes_data, loss = train_agent(agent, env, config)
        elapsed_time = time.time() - start_time
        print("Elapsed time: {:g}".format(elapsed_time))

        # save episodes data and stats
        store_episodes_data(data_dir, episodes_data, loss=loss)
        episodes_stats = get_episodes_stats(config, episodes_data, elapsed_time)
        store_episodes_stats(data_dir, episodes_data, episodes_stats)
        plot_episodes_data(data_dir, episodes_data, episodes_stats, config)
        plot_loss_data(data_dir, loss, config)
        # NOTE(review): agent.save presumably persists the trained model —
        # confirm against the agents module.
        agent.save(data_dir)

    env.close()


if __name__ == '__main__':
    main()
| true |
8f46efb740898fc7002f07b5bbfae01d2f6b65f5 | Python | duckheada/neural_package | /nn/noise.py | UTF-8 | 1,755 | 2.65625 | 3 | [] | no_license | import numpy as np
import theano, theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.ifelse import ifelse
from .module import Module
class Dropout(Module):
    """Dropout with inverted scaling: activations are rescaled during
    training so no adjustment is needed at inference time.

    :param drop_prob: probability of zeroing an input unit, in [0, 1).
    """

    def __init__(self, drop_prob, **kwargs):
        super(Dropout, self).__init__(**kwargs)
        assert drop_prob >= 0 and drop_prob < 1, 'dropout rate should be in [0, 1)'
        self.drop_prob = drop_prob
        # Symbolic RNG that draws the per-call binomial masks.
        self.trng = RandomStreams(np.random.randint(1, 2147462579))

    def forward(self, input):
        # Using theano constant to prevent upcasting
        retain_prob = T.constant(1.) - self.drop_prob
        # Random binomial mask
        mask = self.trng.binomial(input.shape, p=retain_prob, dtype=input.dtype)
        # Rescale output during training so that no rescale needed during evaluation
        dropped = input * mask / retain_prob
        # Set output using ifelse
        # NOTE(review): `self.train` is presumably a symbolic train/eval flag
        # from Module — confirm. Note the condition `self.train * drop_prob`
        # is also false when drop_prob == 0, skipping dropout entirely then.
        self.output = ifelse(self.train * self.drop_prob, dropped, input)
        return self.output

    def infer(self, input):
        # Inference is the identity thanks to the inverted scaling above.
        return input
class ReparamDiagGaussian(Module):
    """Reparameterization trick for a diagonal Gaussian: returns
    mu + eps * exp(logvar / 2) with eps ~ N(0, I)."""

    def __init__(self, **kwargs):
        super(ReparamDiagGaussian, self).__init__(**kwargs)
        # Symbolic RNG for the standard-normal noise.
        self.trng = RandomStreams(np.random.randint(1, 2147462579))

    def forward(self, input):
        # The input must be a tuple
        assert isinstance(input, tuple), 'ReparamDiagGaussian expects a tuple input'
        # Unpack input into mu and logvar
        mu, logvar = input
        # Sample noise from standard Gaussian
        epsilon = self.trng.normal(size=mu.shape, avg=0.0, std=1.0, dtype=mu.dtype)
        # Reparameterization: scale by std = exp(logvar / 2) and shift by mu.
        output = epsilon * T.exp(logvar * 0.5) + mu
        return output
# TODO: Gaussian noise
| true |
6a910c760c0af5ebf209b190482fa5ad84f13da7 | Python | magnusoy/Python-Grunnleggende | /Kapittel-11/oppgaver_1.py | UTF-8 | 1,641 | 3.890625 | 4 | [] | no_license | """
Oppgave 1
Skriv en lambda som aksepterer et tall og høyer det opp i 3
lambda kan hete cube
"""
"""
Oppgave 2
Lag en funksjon decrement_list som tar inn en liste med tall som parameter.
Den skal returnere en kopi av listen hvor alle verdiene er i dekrementert med 1.
Eks:
decrement_list([1, 2, 3]) -> [0, 1, 2]
decrement_list([20, 14, 11]) -> [19, 13, 10]
Tips: Bruk map()
"""
"""
Oppgave 3
Lag en funksjon remove_negatives som tar inn en liste med tall og returnerer en kopi av listen
hvor alle de negative tallene er fjernet.
Eks:
remove_negatives([-1, 3, 4, -99]) -> [3, 4]
remove_negatives([-2, 0, 1, 2, 3, 5, 11, -1]) -> [0, 1, 2, 3, 5, 11]
remove_negatives([50, 60, 80]) -> [50, 60, 80]
Tips: Bruk filter()
"""
"""
Oppgave 4
Lag en funksjon is_all_strings som tar inn en iterable og
returnerer True hvis den iterablen inneholder
bare strings. Om ikke skal den returere False
Eks:
is_all_strings(["a", "b", "c"]) -> True
is_all_strings([1, "a", "b", "c"]) -> False
is_all_strings(["heisann", "farvell"]) -> True
Tips: Bruk all()
"""
"""
Oppgave 5
lag en funksjon extremes som tar inn en iterable.
Den skal returere en tuple som inneholder
den laveste og høyeste verdien fra elementene.
Eks:
extremes([1, 2, 3, 4, 5, 6]) -> (1, 6)
extremes([75, 234, 54, 76, 1, -4]) -> (-4, 234)
extremes("norge") -> ("e", "r")
Tips: Bruk min() og max()
"""
"""
Oppgave 6
Lag en funksjon max_magnitude som tar inn en liste med tall.
Den skal returnere tallet som er lengt unna 0 (magnitude).
Eks:
max_magnitude([310, 33, -190]) -> 310
max_magnitude([464, -234, -1990]) -> 1990
max_magnitude([-310, -33, -190]) -> 310
"""
| true |
eedd99fca629b36521c615ed196d1fddd2830a53 | Python | andiegoode12/Artificial-Intelligence | /Knapsack Problem/KnapsackDFS.py | UTF-8 | 2,406 | 3.84375 | 4 | [] | no_license | """
Andie Goode
Knapsack DFS
"""
import math
import itertools
from collections import deque
def DFS(items, weight, values, capacity):
    """Exhaustive depth-first enumeration of item subsets (Python 2).

    Tracks the best subset (weight <= capacity, maximal value) and prints
    the visited combinations plus the best weight/value/subset found.
    NOTE(review): despite the name `stack`, the deque is pushed with
    appendleft and popped with popleft, so the LEFT end is the stack top;
    item numbers are 1-based (weight/values indexed with item - 1).
    """
    #starting stack is empty
    stack = deque(['_'])
    #list of visited
    visited = []
    #popped elements
    popped = [0]
    W = 0
    V = 0
    w = 0
    v = 0
    solution = []
    #remove blank as we are starting to take items
    stack.popleft()
    #put items in stack
    for i in range(0, len(items)):
        stack.append([items[i]])
    #while the stack is not empty
    while len(stack) != 0:
        #set popped to the item popped off the stack
        popped[0] =stack.popleft()
        # Total weight/value of the popped subset.
        for i in range(0, len(popped[0])):
            w += weight[popped[0][i]-1]
            v += values[popped[0][i]-1]
        # Record the subset if it fits and beats the best value so far.
        if (w <= capacity) and (v > V):
            W = w
            V = v
            solution = popped[0]
        w = 0
        v = 0
        #add item popped off to stack to visited
        visited.append(popped[-1][:])
        #loop through items not yet picked to make combinations
        # (only items with a larger index, so each subset appears once)
        for i in range(len(items)-1,popped[-1][-1]-1,-1):
            #add next item to knapsack
            popped[-1].append(items[i])
            stack.appendleft(popped[-1][:])
            #remove last item from knapsack
            popped[-1].pop()
    print visited
    print 'total weight:',W
    print 'total value:',V
    print 'solution:',solution
#def knapsack(visited, capacity, values):
def main():
    """Parse knapsack.txt (capacity/weights/values lines) and run DFS."""
    #Read info from file and extract integers
    f = open("knapsack.txt","r")
    #read first line
    capacity = f.readline()
    capacity = capacity[9:len(capacity)].strip("\n") #slice out leading nonintegers
    capacity = int(capacity) #set capacity to first int
    #read second line
    tmpweight = f.readline()
    tmpweight = tmpweight[8:len(tmpweight)].strip("\n") #slice out leading nonintegers
    weight = tmpweight.split(",") #split at commas
    weight = list(map(int, weight)) #map int weights to list
    #read last line
    tmpvals = f.readline()
    tmpvals = tmpvals[7:len(tmpvals)].strip("\n") #slice out leading nonintegers
    values = tmpvals.split(",") #split at commas
    values = list(map(int, values)) #map int values to list
    #close file
    f.close()
    # Item numbers are 1-based to match DFS's weight/values indexing.
    items = []
    for i in range(1, len(values)+1):
        items.append(i)
    DFS(items, weight, values, capacity)
    #knapsack(visited, weight, values)


main()
| true |
def grade(scores):
    """Return the letter grade (A-E) for the average of `scores`.

    Bug fix: the average was computed with a hard-coded divisor of 3 even
    though the rest of this program processes five scores per student;
    now divides by len(scores) so any score count is handled correctly.
    """
    average = sum(scores) / len(scores)
    if average >= 90:
        return 'A'
    elif average >= 80:
        return 'B'
    elif average >= 70:
        return 'C'
    elif average >= 60:
        return 'D'
    else:
        return 'E'
def getHighest(data):
    """Return the index of the record with the largest total score.

    Each record is [name, score, score, ...]; ties keep the earliest
    record, and an empty data set yields index 0.
    """
    if not data:
        return 0
    return max(range(len(data)), key=lambda idx: sum(data[idx][1:]))
def summary(data):
    """Print each student's letter grade plus class-wide statistics.

    NOTE(review): the percent and class-average math below hard-codes five
    scores per record (the /5 divisors) — confirm the input format.
    """
    for record in data:
        print(record[0], grade(record[1:]))
    print('==Summary==')
    print('Number of students:',len(data))
    # Best student by total score.
    highest = getHighest(data)
    print('The student with highest percent is',data[highest][0])
    percent = sum(data[highest][1:])/5
    print('The percent is {:.2f}%'.format(percent))
    # Count A grades and accumulate the grand total for the class average.
    As =0
    total= 0
    for record in data:
        if(grade(record[1:]))=='A':
            As+=1
        total += sum(record[1:])
    print('Number of A students is',As,'.')
    average = total/(5*len(data))
    print('The average is {:.1f}'.format(average))
def main():
    """Read 'name,score,score,...' rows from a user-named file, then print
    the class summary."""
    file = input('Enter the data file name > ')
    data = []
    with open(file, 'r') as infile:
        for line in infile:
            line = line.strip().split(',')
            # Scores become ints; the name goes back to the front of the row.
            row = [int(score.strip()) for score in line[1:]]
            row.insert(0, line[0])
            data.append(row)
    summary(data)


main()
| true |
0feb02214575f5f36d2d992862ed186f32be3b9d | Python | collinkatz/BotLate | /translate_test.py | UTF-8 | 1,708 | 2.78125 | 3 | [] | no_license | from Translator import Translator
from Conversation import Conversation
from Content import Content
from google.cloud import dialogflow
if __name__ == '__main__':
    # Interactive loop: keep asking translated prompts until the
    # conversation reports it is done, echoing feedback for each answer.
    trans = Translator()
    convo = Conversation(trans, "English")
    while not convo.is_done():
        prompt, hint = convo.ask()
        print(prompt)
        print(hint)
        res = input()
        print(convo.answer(res))
    # Dead code below: earlier Dialogflow and quiz experiments kept as
    # string literals.
    """
    trans = Translator()
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path("botlate", 123456)
    print(session)
    text_input = dialogflow.TextInput(text="Book a table for 5", language_code="en-US")
    query_input = dialogflow.QueryInput(text=text_input)
    response = session_client.detect_intent(
    request={"session": session, "query_input": query_input}
    )
    print("Detected intent: ")
    print(response.query_result.intent.display_name)
    print(response.query_result.fulfillment_text)
    """
    """
    text = trans.translate("es", "Doctor")
    print(trans.detected_lang)
    print(trans.name_to_code("Spanish"))
    an = "Medico"
    print(trans.translate("en", an))
    print(text)
    #trans.speak("es-ES", text, "female")
    #testing a quiz
    cont = Content(trans, "Spanish")
    cont.load_quizzes()
    occ_quiz = cont.get_quiz(1)
    print(occ_quiz.ask())
    ans = input("Answer the question: ")
    right, real_ans = occ_quiz.answer(trans, ans)
    print(right)
    print(real_ans)
    print(occ_quiz.ask())
    ans = input("Answer the question: ")
    right, real_ans = occ_quiz.answer(trans, ans)
    print(right)
    print(real_ans)
    print("Res: ")
    print(occ_quiz.percent())
    occ_quiz.reset()
    """
| true |
c04284cedf590c1fe8d9eaf84a9f4949b86183fc | Python | sirikata/sirikata | /scripts/img/imgdiff.py | UTF-8 | 1,110 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
# imgdiff.py - Computes error values for a pair of images or a set of
# images against a reference image.
import Image
import sys
def filter_name(orig):
    """Return only the digit characters of `orig`, preserving their order."""
    return ''.join(ch for ch in orig if ch.isdigit())
# --- command-line handling (Python 2) ---
args = sys.argv
opt_filter_name = False
if '--filter-name' in args:
    # Strip the flag so only file names remain in args.
    args = [x for x in args if x != '--filter-name']
    opt_filter_name = True

# The LAST argument is the reference image; all others are compared to it.
im_name_ref = args[-1]
im_name_others = args[1:-1]

im_ref = Image.open(im_name_ref)
for im_name in im_name_others:
    im_other = Image.open(im_name)
    # Pixel-wise diff requires identical dimensions.
    assert( im_ref.size[0] == im_other.size[0] and im_ref.size[1] == im_other.size[1] )
    num_wrong = 0
    ref_data = im_ref.getdata()
    other_data = im_other.getdata()
    # Count pixels that differ from the reference.
    for i in range(0, im_ref.size[0]*im_ref.size[1]):
        if ref_data[i] != other_data[i]:
            num_wrong += 1
    # Report either "<digits-of-name> <error-fraction>" or
    # "<name> <wrong-count> <error-fraction>".
    if opt_filter_name:
        print filter_name(im_name), float(num_wrong) / float(im_ref.size[0]*im_ref.size[1])
    else:
        print im_name, num_wrong, float(num_wrong) / float(im_ref.size[0]*im_ref.size[1])
| true |
1281d4e79dd37b15388d6d14add62be69258d947 | Python | ooni/pm-tools | /cycle_planner.py | UTF-8 | 5,688 | 2.5625 | 3 | [] | no_license | import os
import csv
import argparse
import json
from pprint import pprint
from github import Github
from constants import OONI_TEAMS_BY_NAME, EFFORT_MAP
g = Github(os.environ["GITHUB_TOKEN"])
total_effort = 0
efforts_by_person = {}
class IssueError(Exception):
    """Base error for a GitHub issue that fails a planning check."""

    def __init__(self, issue_title, issue_url):
        self.issue_title = issue_title
        self.issue_url = issue_url

    def print_error(self):
        """Report the problematic issue on stdout."""
        print(f"Problem with '{self.issue_title}' ({self.issue_url})")
class MissingLabel(IssueError):
    """Raised when an issue lacks a required label of a given class."""

    def __init__(self, label_class, issue_title, issue_url):
        super().__init__(issue_title, issue_url)
        self.label_class = label_class

    def print_error(self):
        """Report the missing label class and the issue it affects."""
        print(f"{self.label_class} Missing '{self.issue_title}' ({self.issue_url})")
class DuplicateLabel(IssueError):
    """Raised when an issue carries more than one label of a given class."""

    def __init__(self, label_class, issue_title, issue_url):
        super().__init__(issue_title, issue_url)
        self.label_class = label_class

    def print_error(self):
        """Report the duplicated label class and the issue it affects."""
        print(f"Duplicate {self.label_class} '{self.issue_title}' ({self.issue_url})")
def get_effort(issue):
    """Return (label_text, points) for the issue's single 'effort/...' label.

    Raises MissingLabel / DuplicateLabel when there is not exactly one such
    label. Points come from EFFORT_MAP (project constants) keyed by the
    upper-cased label suffix.
    """
    effort_list = list(filter(lambda x: x.name.startswith("effort/"), issue.labels))
    if len(effort_list) == 0:
        raise MissingLabel("Effort", issue.title, issue.html_url)
    if len(effort_list) > 1:
        raise DuplicateLabel("Effort", issue.title, issue.html_url)
    effort = effort_list[0].name.upper().split("/")[1]
    effort_num = EFFORT_MAP[effort]
    return effort, effort_num
def get_priority(issue):
    """Return the issue's priority: lowercased text after 'priority/'.

    Raises MissingLabel / DuplicateLabel when there is not exactly one
    'priority/...' label on the issue.
    """
    priorities = [lbl for lbl in issue.labels if lbl.name.startswith("priority/")]
    if not priorities:
        raise MissingLabel("Priority", issue.title, issue.html_url)
    if len(priorities) > 1:
        raise DuplicateLabel("Priority", issue.title, issue.html_url)
    return priorities[0].name.split("/")[1].lower()
class MissingAssignee(IssueError):
    """Raised when an issue has no assignees."""
    def print_error(self):
        print("Missing Assignee '{}' ({})".format(self.issue_title, self.issue_url))
def get_assignees(issue):
    """Return the logins of all assignees; raise MissingAssignee if none."""
    logins = [assignee.login for assignee in issue.assignees]
    if not logins:
        raise MissingAssignee(issue.title, issue.html_url)
    return logins
def issues_in_column(column_name):
    """Yield (project, issue) pairs for every card in `column_name` of each
    OONI team's GitHub project. Cards with no linked content yield
    issue=None (callers filter those out).

    Fix: the original crashed with AttributeError when no project matched a
    team name or no column matched `column_name`; such teams are now
    skipped. Also breaks out of the project scan on first match.
    """
    for team_name, repos in OONI_TEAMS_BY_NAME.items():
        project = None
        for p in g.get_organization("ooni").get_projects():
            if p.name == team_name:
                project = p
                break
        if project is None:
            # No GitHub project for this team — skip instead of crashing.
            continue
        column = None
        for c in project.get_columns():
            if c.name.lower() == column_name.lower():
                column = c
                break
        if column is None:
            # The requested column does not exist on this board.
            continue
        for card in column.get_cards():
            yield project, card.get_content()
def get_issues_in_column(column_name):
    """Collect one row dict per issue found in `column_name` across teams.

    Issues missing an effort or priority label are reported and skipped;
    a missing assignee is reported but the issue is still included (with
    an empty assignees field).
    """
    column = []
    for project, issue in issues_in_column(column_name):
        if issue is None:
            # Cards without linked issue content (e.g. plain notes).
            continue
        try:
            effort, effort_num = get_effort(issue)
        except IssueError as err:
            err.print_error()
            continue
        try:
            priority = get_priority(issue)
        except IssueError as err:
            err.print_error()
            continue
        all_labels = list(map(lambda x: x.name, issue.labels))
        assignee_list = []
        try:
            assignee_list = get_assignees(issue)
        except MissingAssignee as err:
            err.print_error()
        # Multi-valued fields are pipe-joined for CSV friendliness.
        column.append({
            "assignees": "|".join(assignee_list),
            "repository": issue.repository.name,
            "labels": "|".join(all_labels),
            "priority": priority,
            "effort": effort,
            "effort_points": effort_num,
            "issue_title": issue.title,
            "issue_url": issue.html_url,
            "project": project.name,
            "column": column_name.lower()
        })
    return column
def write_to_csv(dst_file, column_name):
    """Fetch the column's issues and append them to dst_file as CSV.

    NOTE(review): the file is opened in append mode but the header row is
    written on every call, so repeated runs against the same file produce
    duplicate header lines.
    """
    print("Fetching {}".format(column_name))
    column = get_issues_in_column(column_name)
    print("Writing to {}".format(dst_file))
    with open(dst_file, 'a') as out_file:
        field_names = [
            "priority",
            "effort",
            "effort_points",
            "issue_title",
            "issue_url",
            "assignees",
            "repository",
            "labels",
            "column",
            "project"
        ]
        writer = csv.DictWriter(out_file, fieldnames=field_names)
        writer.writeheader()
        for row in column:
            writer.writerow(row)
def print_summary(src_file):
    """Print per-assignee effort totals and the overall story-point sum
    read back from the CSV produced by write_to_csv."""
    total_points = 0
    points_by_person = {}
    with open(src_file) as in_file:
        for row in csv.DictReader(in_file):
            try:
                points = int(row["effort_points"])
            except:
                # Unparseable/missing effort counts as zero.
                points = 0
            for assignee in row["assignees"].split("|"):
                points_by_person[assignee] = points_by_person.get(assignee, 0) + points
            total_points += points
    for name, val in points_by_person.items():
        print("{}: {}".format(name, val))
    print("Total Story Points: {}".format(total_points))
print("Total Story Points: {}".format(total_effort))
def main():
    """CLI entry: dump a project column to CSV, then print effort totals."""
    parser = argparse.ArgumentParser(description="Cycle planner")
    parser.add_argument("--output", help="Where to write the csv file to", required=True)
    parser.add_argument("--column", help="Which column to retrieve", default="Cycle Backlog")
    args = parser.parse_args()
    write_to_csv(args.output, args.column)
    print_summary(args.output)

if __name__ == "__main__":
    main()
| true |
f052ba513894d21705a5000eb2a4d42fa1269a73 | Python | b73201020/codeingInterview | /Roman_to_Integer.py | UTF-8 | 1,350 | 3.34375 | 3 | [] | no_license | class Solution:
# @return an integer
def romanToInt(self, s):
if (s == None):
return None
counter = 0
currentNum = 0
lastNum = 0
charNum = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}
for i in range(len(s)):
index = len(s)-1-i #start from right
currentNum = charNum.get(s[index])
if (currentNum == None):
return None
if (lastNum == 0):
lastNum = currentNum
counter += currentNum
currentNum = None
continue
if (currentNum != lastNum):
if (max(currentNum, lastNum) == lastNum):
counter -= currentNum
lastNum = currentNum
currentNum = None
continue
if (max(currentNum, lastNum) == currentNum):
counter += currentNum
lastNum = currentNum
currentNum = None
continue
if (currentNum == lastNum):
counter += currentNum
currentNum = None
continue
return counter
| true |
a8fb538b4021f552a67fb0a91698a76a14e6eb73 | Python | clefever/aoc2019 | /day02.py | UTF-8 | 1,275 | 3.21875 | 3 | [
"MIT"
] | permissive | import adventofcode
def run_program(codes, noun = None, verb = None):
"""
>>> run_program([1, 0, 0, 0, 99])
[2, 0, 0, 0, 99]
>>> run_program([2, 3, 0, 3, 99])
[2, 3, 0, 6, 99]
>>> run_program([2, 4, 4, 5, 99, 0])
[2, 4, 4, 5, 99, 9801]
>>> run_program([1, 1, 1, 4, 99, 5, 6, 0, 99])
[30, 1, 1, 4, 2, 5, 6, 0, 99]
"""
ip = 0
prog = codes.copy()
prog[1] = noun if noun != None else prog[1]
prog[2] = verb if verb != None else prog[2]
while prog[ip] != 99:
if prog[ip] == 1:
prog[prog[ip+3]] = prog[prog[ip+1]] + prog[prog[ip+2]]
elif prog[ip] == 2:
prog[prog[ip+3]] = prog[prog[ip+1]] * prog[prog[ip+2]]
ip += 4
return prog
def brute_force(codes, output):
    """Search every (noun, verb) pair in 0..99 for the given *output*.

    Returns 100 * noun + verb for the first pair whose program leaves
    *output* at address 0, or -1 when no pair matches.
    """
    candidates = ((n, v) for n in range(100) for v in range(100))
    for noun, verb in candidates:
        if run_program(codes, noun, verb)[0] == output:
            return 100 * noun + verb
    return -1
def main():
    # Puzzle input is a single comma-separated line of Intcode integers.
    puzzle_input = adventofcode.read_input(2)
    codes = [int(code) for code in puzzle_input.split(',')]
    # Part 1: restore the "1202 program alarm" state (noun=12, verb=2).
    adventofcode.answer(1, 4945026, run_program(codes, 12, 2)[0])
    # Part 2: search for the noun/verb pair producing 19690720.
    adventofcode.answer(2, 5296, brute_force(codes, 19690720))
if __name__ == "__main__":
    import doctest
    # Run the doctests embedded in run_program before solving the puzzle.
    doctest.testmod()
    main()
| true |
029b3e47b3d917b5949b2892612f5c820b128d2b | Python | theromis/mlpiper | /mlcomp/parallelm/components/restful/uwsgi_cheaper_subsystem.py | UTF-8 | 1,575 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | """
For internal use only. The uwsgi cheaper sub system provides the ability to dynamically
scale the number of running workers via pluggable algorithms
Reference: https://uwsgi-docs.readthedocs.io/en/latest/Cheaper.html
"""
import math
import multiprocessing
class UwsgiCheaperSubSystem:
    """Chooses a uwsgi 'cheaper' worker-scaling profile for this host.

    The profile is picked from a small table indexed by log2 of the CPU
    count, so bigger machines keep and spawn more workers.
    Reference: https://uwsgi-docs.readthedocs.io/en/latest/Cheaper.html
    """

    CPU_COUNT = multiprocessing.cpu_count()

    # workers - maximum number of workers that can be spawned
    WORKERS = 'workers'
    # cheaper - minimum number of workers to keep at all times
    CHEAPER = 'cheaper'
    # cheaper-initial - number of workers to spawn at startup
    CHEAPER_INITIAL = 'cheaper-initial'
    # cheaper-step - how many workers should be spawned at a time
    CHEAPER_STEP = 'cheaper-step'

    CONF = [
        {WORKERS: 2, CHEAPER: 1, CHEAPER_INITIAL: 2, CHEAPER_STEP: 1},              # CPU_COUNT: 1 ~ 3
        {WORKERS: 3, CHEAPER: 2, CHEAPER_INITIAL: 2, CHEAPER_STEP: 1},              # CPU_COUNT: 4 ~ 7
        {WORKERS: CPU_COUNT - 4, CHEAPER: 2, CHEAPER_INITIAL: 3, CHEAPER_STEP: 2},  # CPU_COUNT: 8 ~ 15
        {WORKERS: CPU_COUNT - 4, CHEAPER: 5, CHEAPER_INITIAL: 5, CHEAPER_STEP: 3},  # CPU_COUNT: 16 ~ 23
        {WORKERS: CPU_COUNT - 4, CHEAPER: 5, CHEAPER_INITIAL: 5, CHEAPER_STEP: 5}   # CPU_COUNT: > 24
    ]

    @staticmethod
    def get_config():
        """Return the CONF entry matching this machine's CPU count."""
        bucket = int(math.log(UwsgiCheaperSubSystem.CPU_COUNT, 2)) - 1
        last = len(UwsgiCheaperSubSystem.CONF) - 1
        # Clamp into the table's valid index range (tiny and huge hosts).
        return UwsgiCheaperSubSystem.CONF[max(0, min(bucket, last))]
| true |
26e9be8e25b5f562b0193bcb8fb60ff677c13e1a | Python | owen94/CVI_RL | /REINFORCE.py | UTF-8 | 7,942 | 2.78125 | 3 | [] | no_license | '''
In this file, we will implement the REINFORCE algorithm: Monte-Carlo Policy Gradient with
OpenAI Gym and TensorFlow.
'''
import gym
import itertools
import matplotlib
import numpy as np
import sys, random
import tensorflow as tf
import collections
import matplotlib.pyplot as plt
# Import-time smoke test: build the CartPole environment once, render a
# frame and show the initial observation vector.
env = gym.make('CartPole-v0')
observation = env.reset()
env.render()
print(observation)
class REINFORCE(object):
    """Monte-Carlo policy-gradient (REINFORCE) agent on a TF1 static graph.

    Holds a policy network, an optimizer and per-episode buffers of
    (state, action, reward). After each episode the policy is updated by
    minimising the discounted-return-weighted cross-entropy of the taken
    actions (equivalently, maximising sum_t G_t * log pi(a_t | s_t)).
    """

    def __init__(self, session,
                 policy_network,
                 optimizer,
                 input_dim,
                 num_actions,
                 discount_factor = 0.99,
                 regularization = 0,
                 ):
        # TF session, optimizer and policy-network builder (a callable
        # mapping a states placeholder to per-action logits).
        self.sess = session
        self.optimizer = optimizer
        self.policy_network = policy_network # policy network taken as a tf graph for computation
        # openAI gym environment dimensions
        self.input_dim = input_dim
        self.num_actions = num_actions
        # RL algorithm parameters (note: `regularization` is accepted but unused)
        self.discount_factor = discount_factor
        # Per-episode rollout buffers, cleared after every policy update.
        self.state_buffer = []
        self.reward_buffer = []
        self.action_buffer = []
        self.construct_graph()
        # Initialize all graph variables in the provided session.
        var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.sess.run(tf.variables_initializer(var_lists))

    def construct_graph(self):
        """Build input placeholders, the policy output and the train op."""
        with tf.name_scope('model_input'):
            self.states = tf.placeholder(dtype=tf.float32,shape=(None,self.input_dim), name = 'states')
            self.sample_action = tf.placeholder(dtype=tf.int32,shape=(None,),name='sample_action')
            self.discount_reward = tf.placeholder(dtype=tf.float32, shape = (None,), name = 'discount_reward')
        self.policy_output = self.policy_network(self.states)
        self.all_act_prob = tf.nn.softmax(self.policy_output, name='act_prob')
        #self.action_scores = tf.identity(input=self.policy_output)
        # compute the loss and gradients; inputs: sample_action (A_t),
        # discounted reward (G_t), and states (s_t)
        with tf.name_scope('loss_gradients'):
            # The cross entropy of the taken action equals -log p(a_t|s_t),
            # so weighting it by G_t gives the REINFORCE objective.
            entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits\
                (labels=self.sample_action,logits=self.policy_output) # This cross entropy is equivalent to
            # p(a_t|s_t) in the REINFORCE algorithm.
            self.loss = tf.reduce_mean(entropy_loss * self.discount_reward)
        # train the policy network
        with tf.name_scope('train_network'):
            #self.train_op = self.optimizer.apply_gradients(self.gradients)
            self.train_op = self.optimizer.minimize(self.loss)

    def get_action(self,states):
        """Sample one action from the current policy's distribution.

        NOTE(review): assumes `states` has shape (1, input_dim) — confirm
        with callers (train_Games passes state[np.newaxis, :]).
        """
        # An epsilon-greedy exploration variant was considered and kept
        # here for reference:
        # if random.random() < self.exploration:
        #     return random.randint(0, self.num_actions-1)
        # else:
        action_scores = self.sess.run(self.all_act_prob, {self.states: states})
        action = np.random.choice(range(action_scores.shape[1]), p=action_scores.ravel())
        return action

    def get_update(self):
        """Per-step variant: one train_op run per timestep of the episode.

        NOTE(review): iterates range(T-1), so the final timestep is never
        trained on, and rewards here are NOT normalized (unlike
        get_episode_update) — confirm which variant is intended.
        """
        # Compute G_t, the discounted future return from each timestep.
        r = 0
        T = len(self.reward_buffer)
        discounted_rewards = np.zeros(T)
        for i in reversed(range(T)):
            r = self.reward_buffer[i] + self.discount_factor * r
            discounted_rewards[i] = r
        # discounted_rewards -= np.mean(discounted_rewards)
        # discounted_rewards /= np.std(discounted_rewards)
        for t in range(T-1):
            states = self.state_buffer[t][np.newaxis, :] # why need to add a newaxis here??
            actions = np.array([self.action_buffer[t]])
            rewards = np.array([discounted_rewards[t]])
            #grad = [grad for grad, var in self.gradients]
            self.sess.run(self.train_op, {
                self.states: states,
                self.discount_reward: rewards,
                self.sample_action:actions
            })
        self.cleanUp()

    def get_episode_update(self):
        """Batched variant: one train_op run over the whole episode."""
        discount_rewards = self.get_discount_reward()
        self.sess.run(self.train_op, {
            self.states: np.vstack(self.state_buffer),
            self.discount_reward: discount_rewards,
            self.sample_action: np.array(self.action_buffer)
        })
        self.cleanUp()

    def get_discount_reward(self):
        """Return the normalized discounted returns G_t for the episode.

        Normalization (zero mean, unit variance) is a standard variance
        reduction for REINFORCE.
        """
        r = 0
        T = len(self.reward_buffer)
        discounted_rewards = np.zeros(T)
        for i in reversed(range(T)):
            r = self.reward_buffer[i] + self.discount_factor * r
            discounted_rewards[i] = r
        discounted_rewards -= np.mean(discounted_rewards)
        discounted_rewards /= np.std(discounted_rewards)
        return discounted_rewards

    def cleanUp(self):
        """Reset the per-episode rollout buffers."""
        self.state_buffer = []
        self.reward_buffer = []
        self.action_buffer = []

    def store_episode(self, state, action, reward):
        """Record one (state, action, reward) transition of the episode."""
        self.state_buffer.append(state)
        self.action_buffer.append(action)
        self.reward_buffer.append(reward)
def train_Games():
    """Train the REINFORCE agent on CartPole-v0 and plot episode rewards."""
    env_name = 'CartPole-v0'
    env = gym.make(env_name)
    env.seed(1)
    #env = env.unwrapped
    sess = tf.Session()
    optimizer = tf.train.AdamOptimizer(learning_rate=0.02)
    state_dim = env.observation_space.shape[0]
    num_actions = env.action_space.n

    def policy_network(states):
        # Two-layer MLP policy: tanh hidden layer -> raw action logits.
        # A hand-rolled tf.get_variable version was kept for reference:
        # W1 = tf.get_variable("W1", [state_dim, 20],
        #                      initializer=tdamf.random_normal_initializer(mean=0,stddev=0.3))
        # b1 = tf.get_variable("b1", [20],
        #                      initializer=tf.constant_initializer(0))
        # h1 = tf.nn.tanh(tf.matmul(states, W1) + b1)
        # W2 = tf.get_variable("W2", [20, num_actions],
        #                      initializer=tf.random_normal_initializer(mean=0,stddev=0.3))
        # b2 = tf.get_variable("b2", [num_actions],
        #                      initializer=tf.constant_initializer(0))
        # p = tf.matmul(h1, W2) + b2
        layer = tf.layers.dense(
            inputs=states,
            units=10,
            activation=tf.nn.tanh, # tanh activation
            kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
            bias_initializer=tf.constant_initializer(0.1),
            name='fc1'
        )
        # fc2: linear output layer producing one logit per action
        all_act = tf.layers.dense(
            inputs=layer,
            units= num_actions,
            activation=None,
            kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
            bias_initializer=tf.constant_initializer(0.1),
            name='fc2'
        )
        return all_act

    pg_reinforce = REINFORCE(session=sess,optimizer=optimizer, input_dim= state_dim,num_actions=num_actions,
                             policy_network=policy_network)
    episode_reward = []
    # Run up to 1000 episodes of at most 1000 steps each.
    for i in range(1000):
        state = env.reset()
        total_rewards = 0
        #episode_length = 0
        for t in range(1000):
            env.render()
            action = pg_reinforce.get_action(states=state[np.newaxis,:])
            next_state, reward, done, _ = env.step(action)
            pg_reinforce.store_episode(state=state, action=action, reward=reward)
            #episode_length += 1
            state = next_state
            total_rewards += reward
            if done:
                # Log progress every 20 episodes.
                if i%20 == 0:
                    print('Episode {} runs for {} timesteps with reward {}.'.format(i, t, total_rewards))
                break
        episode_reward += [total_rewards]
        pg_reinforce.get_episode_update() # one policy update per episode
    # Plot and save the learning curve.
    plt.plot(episode_reward)
    plt.savefig('reward.png')
    plt.show()
if __name__ == '__main__':
    # Script entry point: run the full training loop.
    train_Games()
| true |
72307946f67825c3545d9baf81c96fe3879bc7fb | Python | ht5678/yzh-learn | /demo_pythond_jango/templatedemo/views.py | UTF-8 | 6,548 | 2.65625 | 3 | [] | no_license | from django.shortcuts import render,redirect
from django.template import loader,RequestContext
from django.http import HttpResponse
from templatedemo.models import BookInfo
# Create your views here.
def my_render(request, template_path, context=None):
    """Manually load, contextualise and render a template.

    Mirrors django.shortcuts.render step by step for teaching purposes.
    The signature stays backward compatible: omitting *context* renders
    with an empty context, exactly as before.
    """
    context = {} if context is None else context
    # 1. Load the template file and obtain a template object.
    temp = loader.get_template(template_path)
    # 2. Build the template context from the caller-supplied data.
    #    BUG FIX: the original passed a fresh empty dict here, silently
    #    discarding the *context* argument, so callers could never inject
    #    template variables through this helper.
    context = RequestContext(request, context)
    # 3. Render the template, producing the final HTML string.
    res_html = temp.render(context)
    # 4. Return the response.
    return HttpResponse(res_html)
#http://localhost:8000/template/index
def index(request):
    '''Render the demo index page via the manual my_render helper.'''
    return my_render(request,'templatedemo/index.html');
def temp_var(request):
    """Template-variable demo: expose a dict, a list and a model instance."""
    sample_book = BookInfo.objects.get(id=1)
    # Context handed to the template: one dict, one list, one model object.
    context = {
        'my_dict': {'title': '字典键值'},
        'my_list': [1, 2, 3],
        'book': sample_book,
    }
    return render(request, 'templatedemo/temp_var.html', context)
def temp_vars(request):
    """Template-tag demo: hand the full book list to the template."""
    all_books = BookInfo.objects.all()
    return render(request, 'templatedemo/temp_tags.html', {'books': all_books})
def temp_filter(request):
    """Template-filter demo: hand the full book list to the template."""
    all_books = BookInfo.objects.all()
    return render(request, 'templatedemo/temp_filter.html', {'books': all_books})
def temp_inherit(request):
    '''Template-inheritance demo: render a child template extending a base.'''
    return render(request,'templatedemo/child.html')
def html_escape(request):
    '''HTML-escaping demo: pass raw markup to the template as a variable.'''
    return render(request,'templatedemo/html_scape.html',{'content':'<h1>hello</h1>'})
def login(request):
    '''Render the login page.'''
    return render(request,'templatedemo/login.html')
def login_check(request):
    """Handle the login form POST: captcha check, then credential check.

    On success redirects to the change-password page, optionally
    remembering the username in a cookie; on any failure redirects back
    to the login page.
    """
    username = request.POST.get('username')
    password = request.POST.get('password')
    remember = request.POST.get('remember')
    # Captcha the user typed in.
    vcode1 = request.POST.get('vcode')
    # Captcha stored in the session by verify_code.
    vcode2 = request.session.get('verifycode')
    print(vcode1)  # debug output kept from the original
    print(vcode2)
    # 1. Validate the captcha first.
    if vcode1 != vcode2:
        # Wrong captcha: back to the login page.
        return redirect('/template/login')
    # 2. Credential check. A real app would query the database;
    #    a fixed demo account is used here.
    if username == 'smart' and password == '123':
        # Correct credentials: go on to the change-password page.
        response = redirect('/template/change_pwd')
        # Remember the username if requested.
        if remember == 'on':
            # BUG FIX: a redirect response has no .session attribute (the
            # original raised AttributeError here). The stated intent was a
            # username cookie valid for one week.
            response.set_cookie('username', username, max_age=7 * 24 * 3600)
        # Mark the session as logged in; login_required checks this key.
        request.session['isLogin'] = True
        return response
    else:
        # Wrong credentials: back to the login page.
        return redirect('/template/login')
# Decorator that enforces an authenticated session on a view.
def login_required(view_func):
    '''Redirect to the login page unless the session is authenticated.'''
    def wrapper(request,*view_args,**view_kwargs):
        # A session containing 'isLogin' marks an authenticated user.
        # NOTE(review): SessionBase.has_key() only exists on older Django;
        # modern code would write "'isLogin' in request.session" — confirm
        # the targeted Django version.
        if request.session.has_key('isLogin'):
            # Logged in: run the wrapped view.
            return view_func(request,*view_args,**view_kwargs);
        else:
            # Not logged in: bounce to the login page.
            return redirect('/template/login');
    return wrapper;
@login_required
def change_pwd(request):
    '''Render the change-password page (login required via decorator).'''
    # The inline login check was superseded by the decorator; kept for reference:
    #if not request.session.has_key('isLogin'):
    #    return redirect('/template/login');
    return render(request,'templatedemo/change_pwd.html')
@login_required
def change_pwd_action(request):
    '''Simulated password-change handler (login required via decorator).'''
    # The inline login check was superseded by the decorator; kept for reference:
    #if not request.session.has_key('isLogin'):
    #    return redirect('/template/login');
    # 1. Fetch the new password from the POST body.
    pwd = request.POST.get('pwd');
    # 2. A real app would update the corresponding database record here.
    # 3. Return a simple confirmation response.
    return HttpResponse('修改密码为:%s' % pwd);
from PIL import Image, ImageDraw, ImageFont
from django.utils.six import BytesIO
def verify_code(request):
    """Generate a 4-character CAPTCHA image and return it as image/png.

    The generated string is stored in the session under 'verifycode' so
    that login_check can validate the user's input against it.
    """
    # Random module is imported locally; only this view needs it.
    import random
    # Randomised blue-tinted background colour, and the canvas size.
    bgcolor = (random.randrange(20, 100), random.randrange(
        20, 100), 255)
    width = 100
    height = 25
    # Create the image canvas.
    im = Image.new('RGB', (width, height), bgcolor)
    # Create a drawing handle for it.
    draw = ImageDraw.Draw(im)
    # Scatter 100 random noise points to hinder automated OCR.
    for i in range(0, 100):
        xy = (random.randrange(0, width), random.randrange(0, height))
        fill = (random.randrange(0, 255), 255, random.randrange(0, 255))
        draw.point(xy, fill=fill)
    # Candidate characters for the code.
    str1 = 'ABCD123EFGHIJK456LMNOPQRS789TUVWXYZ0'
    # Pick 4 characters at random.
    rand_str = ''
    for i in range(0, 4):
        rand_str += str1[random.randrange(0, len(str1))]
    # Font object; on Ubuntu the path would be
    # /usr/share/fonts/truetype/freefont (a Windows path is used here).
    font = ImageFont.truetype('C://Windows//Fonts//arial.ttf', 23)
    # Random font colour.
    fontcolor = (255, random.randrange(0, 255), random.randrange(0, 255))
    # Draw the 4 characters at fixed horizontal offsets.
    draw.text((5, 2), rand_str[0], font=font, fill=fontcolor)
    draw.text((25, 2), rand_str[1], font=font, fill=fontcolor)
    draw.text((50, 2), rand_str[2], font=font, fill=fontcolor)
    draw.text((75, 2), rand_str[3], font=font, fill=fontcolor)
    # Release the drawing handle.
    del draw
    # Store the code in the session for later validation.
    request.session['verifycode'] = rand_str
    # In-memory file buffer.
    buf = BytesIO()
    # Save the image into the buffer as PNG.
    im.save(buf, 'png')
    # Return the PNG bytes with an image/png MIME type.
    return HttpResponse(buf.getvalue(), 'image/png')
def url_reverse(request):
    '''URL reverse-resolution demo page.'''
    return render(request,'templatedemo/url_reverse.html');
def show_args(request , a , b):
    """Echo the two positional URL parameters joined by a colon."""
    joined = ":".join((a, b))
    return HttpResponse(joined)
def show_kwargs(request , c , d):
    """Echo the two keyword URL parameters joined by a colon."""
    joined = ":".join((c, d))
    return HttpResponse(joined)
# The IDE flags this import but it works at runtime (django.core.urlresolvers
# was removed in Django 2.0; newer projects import reverse from django.urls).
from django.core.urlresolvers import reverse
def test_redirect(request):
    '''Demonstrate redirecting to a reverse-resolved named URL.'''
    # Alternate reverse() targets kept for reference:
    #url = reverse('template:show_args',args=(1,2))
    #url = reverse('template:index')
    url = reverse('template:show_kwargs', kwargs={'c':3 ,'d':4})
    return redirect(url)
| true |
f8cfed74b1d60dcff5d80f2b76dc8a0daeaef540 | Python | kmjawadurrahman/bengali-to-english-translator | /translator/datasets.py | UTF-8 | 1,823 | 2.765625 | 3 | [] | no_license | import io
import os
import utils
class SUParaDataset():
    """Loader for the SUPara English/Bengali parallel corpus."""

    def __init__(self, path_to_eng_file, path_to_beng_file, num_data_to_load):
        self.path_to_eng_file = path_to_eng_file
        self.path_to_beng_file = path_to_beng_file
        self.num_data_to_load = num_data_to_load

    def read_data(self):
        """Read both corpus files and split them into lines."""
        def lines_of(path):
            return io.open(path, encoding="UTF-8").read().strip().split("\n")
        return lines_of(self.path_to_eng_file), lines_of(self.path_to_beng_file)

    def make_sequence_pair(self, eng_lines, beng_lines):
        """Clean and token-wrap the first num_data_to_load aligned pairs."""
        limit = self.num_data_to_load

        def prep(seq):
            # Same order as before: clean first, then add start/end tokens.
            return utils.add_start_and_end_token_to_seq(utils.clean_seq(seq))

        return [[prep(eng), prep(beng)]
                for eng, beng in zip(eng_lines[:limit], beng_lines[:limit])]

    def create_dataset(self):
        """Return the (english, bengali) sides as parallel sequences."""
        word_pairs = self.make_sequence_pair(*self.read_data())
        return zip(*word_pairs)

    def load_data(self):
        """Tokenize both sides of the cleaned input/output pairs.

        Returns ((input_tensor, target_tensor),
                 (input_tokenizer, target_tokenizer)).
        """
        targ_lang_text, inp_lang_text = self.create_dataset()
        targ_lang_tokenizer = utils.get_lang_tokenizer(targ_lang_text)
        inp_lang_tokenizer = utils.get_lang_tokenizer(inp_lang_text)
        target_tensor = utils.texts_to_sequences(targ_lang_text, targ_lang_tokenizer)
        input_tensor = utils.texts_to_sequences(inp_lang_text, inp_lang_tokenizer)
        return (input_tensor, target_tensor), (inp_lang_tokenizer, targ_lang_tokenizer)
| true |
3a0616005153e303b04d561467f46ff0f3e113a6 | Python | flaminghakama/part-format | /layoutFormats.py | UTF-8 | 6,079 | 2.53125 | 3 | [
"MIT"
] | permissive | # layoutFormats.py
# Maps each layout code (page count, optional L/R side, optional X variant)
# to the sheet class it is printed on: 'half' or 'full'.
validPageFormats = {
    '1': 'half',
    '1L': 'half',
    '1R': 'half',
    '2': 'half',
    '2L': 'half',
    '2R': 'half',
    '2X': 'full',
    '3': 'half',
    '3X': 'half',
    '3XL': 'half',
    '3XR': 'half',
    '4': 'full',
    '4X': 'full',
    '5': 'half',
    '5L': 'half',
    '5R': 'half',
    '5X': 'half',
    '5XL': 'half',
    '5XR': 'half',
    '6': 'half',
    '6L': 'half',
    '6R': 'half',
    '6X': 'full',
    '7': 'half',
    '7X': 'half',
    '7XL': 'half',
    '7XR': 'half',
    '8': 'full',
    '8X': 'full',
    '9': 'half',
    '9L': 'half',
    '9R': 'half',
    '9X': 'half',
    '9XL': 'half',
    '9XR': 'half',
    '10': 'half',
    '10L': 'half',
    '10R': 'half',
    '10X': 'full',
    '11': 'half',
    '11X': 'half',
    '11XL': 'half',
    '11XR': 'half',
    '12': 'full',
    '12X': 'full'
    }
# For each layout code: 'full' is the ordered list of two-up spreads
# (each with an 'outside' and 'inside' page pair, 0 meaning blank) and
# 'half' is the single outside/inside page pair, when applicable.
# BUG FIX: every 'half' dict was missing the comma between its two items,
# so this module could not even be parsed; commas added throughout.
pageFormats = {
    '1L': {
        'full': [ None ],
        'half': { 'outside': 1, 'inside': None }
    },
    '1R': {
        'full': [ None ],
        'half': { 'outside': None, 'inside': 1 }
    },
    '2L': {
        'full': [ None ],
        'half': { 'outside': 1, 'inside': 2 }
    },
    '2R': {
        'full': [ None ],
        'half': { 'outside': 2, 'inside': 1 }
    },
    '2X': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] } ],
        'half': None
    },
    '3': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 3] } ],
        'half': None
    },
    '3XL': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] } ],
        'half': { 'outside': 0, 'inside': 3 }
    },
    '3XR': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] } ],
        'half': { 'outside': 3, 'inside': 0 }
    },
    '4': {
        'full': [ { 'outside': [4, 1], 'inside': [2, 3] } ],
        'half': None
    },
    '4X': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] } ],
        'half': None
    },
    '5L': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 5] } ],
        'half': { 'outside': 3, 'inside': 4 }
    },
    '5R': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 5] } ],
        'half': { 'outside': 4, 'inside': 3 }
    },
    '5XL': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] } ],
        'half': { 'outside': 0, 'inside': 5 }
    },
    '5XR': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] } ],
        'half': { 'outside': 5, 'inside': 0 }
    },
    '6L': {
        'full': [ { 'outside': [6, 1], 'inside': [2, 5] } ],
        'half': { 'outside': 3, 'inside': 4 }
    },
    '6R': {
        'full': [ { 'outside': [6, 1], 'inside': [2, 5] } ],
        'half': { 'outside': 4, 'inside': 3 }
    },
    '6X': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] } ],
        'half': None
    },
    '7': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 7] },
                  { 'outside': [6, 3], 'inside': [4, 5] } ],
        'half': None
    },
    '7XL': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] } ],
        'half': { 'outside': 0, 'inside': 7 }
    },
    '7XR': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] } ],
        'half': { 'outside': 7, 'inside': 0 }
    },
    '8': {
        'full': [ { 'outside': [8, 1], 'inside': [2, 7] },
                  { 'outside': [6, 3], 'inside': [4, 5] } ],
        'half': None
    },
    '8X': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] } ],
        'half': None
    },
    '9L': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 9] },
                  { 'outside': [8, 3], 'inside': [4, 7] } ],
        'half': { 'outside': 5, 'inside': 6 }
    },
    '9R': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 9] },
                  { 'outside': [8, 3], 'inside': [4, 7] } ],
        'half': { 'outside': 6, 'inside': 5 }
    },
    '9XL': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] } ],
        'half': { 'outside': 0, 'inside': 9 }
    },
    '9XR': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] } ],
        'half': { 'outside': 9, 'inside': 0 }
    },
    '10L': {
        'full': [ { 'outside': [10, 1], 'inside': [2, 9] },
                  { 'outside': [8, 3], 'inside': [4, 7] } ],
        'half': { 'outside': 5, 'inside': 6 }
    },
    '10R': {
        'full': [ { 'outside': [10, 1], 'inside': [2, 9] },
                  { 'outside': [8, 3], 'inside': [4, 7] } ],
        'half': { 'outside': 6, 'inside': 5 }
    },
    '10X': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] },
                  { 'outside': [0, 0], 'inside': [9, 10] } ],
        'half': None
    },
    '11': {
        'full': [ { 'outside': [0, 1], 'inside': [2, 11] },
                  { 'outside': [10, 3], 'inside': [4, 9] },
                  { 'outside': [8, 5], 'inside': [6, 7] } ],
        'half': None
    },
    '11XL': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] },
                  { 'outside': [0, 0], 'inside': [9, 10] } ],
        'half': { 'outside': 0, 'inside': 11 }
    },
    # BUG FIX: this entry was keyed '9XR' (a duplicate that would have
    # silently clobbered the real 9XR). Its position between 11XL and 12
    # and its 11-based page numbers show it is the 11XR layout.
    '11XR': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] },
                  { 'outside': [0, 0], 'inside': [9, 10] } ],
        'half': { 'outside': 11, 'inside': 0 }
    },
    '12': {
        'full': [ { 'outside': [12, 1], 'inside': [2, 11] },
                  { 'outside': [10, 3], 'inside': [4, 9] },
                  { 'outside': [8, 5], 'inside': [6, 7] } ],
        'half': None
    },
    '12X': {
        'full': [ { 'outside': [0, 0], 'inside': [1, 2] },
                  { 'outside': [0, 0], 'inside': [3, 4] },
                  { 'outside': [0, 0], 'inside': [5, 6] },
                  { 'outside': [0, 0], 'inside': [7, 8] },
                  { 'outside': [0, 0], 'inside': [9, 10] },
                  { 'outside': [0, 0], 'inside': [11, 12] } ],
        'half': None
    },
}
| true |
bd2e01419b8df99070a8b70c873d301159ba35a5 | Python | Inderway/MyProjects | /shells/prepare_data.py | UTF-8 | 2,021 | 3 | 3 | [] | no_license | # prepare the data: turn the raw data into json format
# created by wei
# April 13, 2023
import json
import os
from tqdm import tqdm
data=[]
def hasData(li):
    """Return True when *li* (a directory listing) contains 'source.txt'.

    A folder holding source.txt is treated as a data leaf (its sibling
    target.txt is expected alongside).
    """
    return 'source.txt' in li
def visit(path):
    """Depth-first walk of *path*, collecting sentence pairs from leaves.

    Leaf folders (those containing source.txt) have every aligned
    (source, target) line pair appended to the module-level ``data`` list.
    """
    folder = os.listdir(path)
    if hasData(folder):
        # Leaf folder: delegate to addData instead of duplicating its
        # file-reading logic verbatim (the original inlined an identical copy).
        addData(path)
    else:
        for entry in folder:
            visit(os.path.join(path, entry))
def addData(path):
    # Append every aligned (source, target) line pair found in *path*
    # to the module-level ``data`` list, with trailing newlines stripped.
    with open(os.path.join(path, 'source.txt'), 'r', encoding='utf-8') as src:
        src_text=src.readlines()
    with open(os.path.join(path, 'target.txt'), 'r', encoding='utf-8') as tgt:
        tgt_text=tgt.readlines()
    # NOTE(review): assumes source.txt and target.txt have the same number
    # of lines — confirm for the raw corpus.
    for i in range(len(src_text)):
        data.append([src_text[i].rstrip('\n'),tgt_text[i].rstrip('\n')])
if __name__ =="__main__":
    # Root of the raw classical/modern Chinese corpus on this machine.
    dir_path=r'D:\download\Classical-Modern-main\Classical-Modern-main\azh_mzh_raw'
    data_path='./all.json'
    # Step 1 (already done once, kept commented out): harvest every
    # sentence pair into all.json.
    # with open(data_path, 'w', encoding='utf-8') as f:
    #     for document in tqdm(os.listdir(dir_path)):
    #         print(f"-----------{document}-----------")
    #         doc_path=os.path.join(dir_path, document)
    #         visit(doc_path)
    #     print(f'length: {len(data)}')
    #     json.dump(data, f, ensure_ascii=False)
    # Step 2: split all.json into train / validation (2000) / test (2000).
    dataset=json.load(open(data_path, 'r'))
    with open('./train.json', 'w', encoding='utf-8') as f:
        json.dump(dataset[:-4000],f,ensure_ascii=False)
    with open('./valid.json', 'w', encoding='utf-8') as f:
        json.dump(dataset[-4000:-2000],f,ensure_ascii=False)
json.dump(dataset[-2000:],f,ensure_ascii=False) | true |
5ce68498eaa46422a6909a2f0e42e26fee448f7f | Python | Omkar02/geture_recon | /main_app.py | UTF-8 | 785 | 2.984375 | 3 | [] | no_license | import streamlit as st
import real_time_capture
import user_cust_pannel
class MultiApp:
    """Registry of sub-apps selectable from a Streamlit navigation box."""

    def __init__(self):
        # Each entry: {"title": <label>, "function": <zero-arg page renderer>}
        self.apps = []

    def add_app(self, title, func):
        """Register a page under *title*; *func* renders it when selected."""
        entry = {"title": title, "function": func}
        self.apps.append(entry)

    def run(self):
        """Show the navigation selectbox and invoke the chosen page."""
        chosen = st.selectbox('Navigation', self.apps,
                              format_func=lambda entry: entry['title'])
        chosen['function']()
# Build the app registry and describe the tool on the page.
app = MultiApp()
st.markdown("""# Gesture Recognition System.
This is an Gesture Recognition which uses CNN
to classify hand gesture to text with user customization""")
# Add all your application here
app.add_app("Gesture Reconizer", real_time_capture.app)
app.add_app("User Customization", user_cust_pannel.app)
# The main app
app.run()
| true |
4833f3c665f32f6711477b1aa794f20f65ed064a | Python | snehilk1312/Python-Progress | /python_revision/datetime_module/datetime_1.py | UTF-8 | 454 | 3.25 | 3 | [] | no_license | import datetime
import pytz
dt_utcnow = datetime.datetime.now(tz=pytz.UTC) # could use 'dt_utcnow=datetime.datetime.utcnow()' too
print(dt_utcnow)
'''
for tz in pytz.all_timezones:
print(tz)
'''
dt_indnow = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
print(dt_indnow)
print(dt_indnow.isoformat())
print(dt_indnow.strftime('%B %d, %Y'))
dt_str = 'February 13, 2020'
dt = datetime.datetime.strptime(dt_str, '%B %d, %Y')
print(dt)
| true |
91c16cb598568131195c728b375d8a01e9565a98 | Python | suchennuo/book-example | /openwx/config.py | UTF-8 | 2,274 | 3.484375 | 3 | [] | no_license | from importlib.util import module_from_spec
"""
getattr(obj, "attribute)
Get a named attribute from an object. AttributeError
eg:
class A(object):
bar=1
a = A()
getattr(a, 'bar')
http://www.cnblogs.com/pylemon/archive/2011/06/09/2076862.html
compile(source, filename, model[, flags[, dont_inherit]])
source -- 字符串或者AST(Abstract Syntax Trees)对象。。
filename -- 代码文件名称,如果不是从文件读取代码则传递一些可辨认的值。
mode -- 指定编译代码的种类。可以指定为 exec, eval, single。
flags -- 变量作用域,局部命名空间,如果被提供,可以是任何映射对象。。
flags和dont_inherit是用来控制编译源码时的标志
eg:
str = "for i in range(0, 10): print(i)"
c = compile(str, '', 'exec')
exec(c)
exec 执行存储在字符串或文件中的 python 语句
1. string
2. file obj
3. 代码对象
4. tuple
exec 等同于 if, while 等
exec(expr, globals, locals) 等效于 exec expr in globals, locals
http://www.mojidong.com/python/2013/05/10/python-exec-eval/
使用exec的时候应该总是记得,详细制定其执行的作用域
eg:
m_dic={'a':8, 'b':9}
exec("print(a, b)", m_dic)
"""
class ConfigAttribute(object):
    """Descriptor that proxies an attribute to ``obj.config[name]``.

    Lets a class expose Flask-style shortcut attributes that read and
    write entries of the instance's ``config`` mapping.
    """

    def __init__(self, name):
        self.__name__ = name

    def __get__(self, obj, type=None):
        # Accessed on the class itself: return the descriptor, as usual.
        if obj is None:
            return self
        # BUG FIX: the original computed the value but never returned it,
        # so every attribute read silently yielded None.
        return obj.config[self.__name__]

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
    """A dict subclass that can populate itself from Python config sources."""

    def from_pyfile(self, filename):
        """Execute *filename* as Python and import its UPPERCASE names.

        Returns True on success.
        """
        import types
        # BUG FIX: importlib.util.module_from_spec() takes a ModuleSpec, not
        # a name string (the original raised at runtime). All that is needed
        # here is a fresh empty module object to act as the exec namespace.
        d = types.ModuleType('config')
        d.__file__ = filename
        with open(filename) as config_file:
            exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
        self.from_object(d)
        return True

    def from_object(self, obj):
        """Copy every all-uppercase attribute of *obj* into this config."""
        for key in dir(obj):
            if key.isupper():  # uppercase names are configuration values
                self[key] = getattr(obj, key)
| true |
6fb63dbededa86772d09fc9371821dcb9eefa63f | Python | Bshel419/Spatial-DS-Shelton | /Assignments/Assignments/Program_5/Query2.py | UTF-8 | 3,975 | 2.5625 | 3 | [] | no_license | from mongo_helper import *
from map_helper import *
import pprint as pp
import sys
import pygame
# Flat script: plot MongoDB geo features near a clicked (or CLI-supplied)
# point on a 1024x512 world map and save the result as query2.png.
DIRPATH = os.path.dirname(os.path.realpath(__file__))
# Display settings.
background_colour = (255,255,255)
black = (0,0,0)
(width, height) = (1024,512)
# One draw colour per feature collection.
color_list = {'volcanos':(255,0,0),'earthquakes':(0,0,255),'meteorites':(0,255,0)}
# Pygame window setup.
pygame.init()
bg = pygame.image.load(DIRPATH+'/draw_world_map/images/1024x512.png')
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('query2: Nearest Neighbor')
screen.fill(background_colour)
pygame.display.flip()
# Query parameters (filled from argv below, or interactively by clicking).
feature = None
field = None
field_value = None
min_max = None
max_results = None
radius = None
lat,lon = (None,None)
# Command-line argument handling.
# NOTE(review): argv lengths of exactly 3 or 4 leave max_results/radius
# as None, and eval() on argv is unsafe for untrusted input — confirm
# the expected invocation forms.
if(len(sys.argv)==1):
    radius = 200
    max_results = 100
# Only radius passed; max results defaulted.
elif(len(sys.argv)>1 and (len(sys.argv))<3):
    radius = float(sys.argv[1])
    max_results = 100
# Everything passed: feature, field, value, min/max, max results, radius.
elif(len(sys.argv)>3):
    feature = sys.argv[1]
    field = sys.argv[2]
    field_value = float(sys.argv[3])
    min_max = sys.argv[4]
    max_results = int(sys.argv[5])
    radius = float(sys.argv[6])
    # Optional 7th argument: a "(lat, lon)" tuple literal.
    if(len(sys.argv) > 7):
        lat,lon = eval(sys.argv[7])
# State for the event loop below.
x_y_coords = None
result_list = []
res = []
feature_list = ['volcanos','earthquakes','meteorites']
# Holders for x,y conversions along with lat/lon conversions.
allx = []
ally = []
points = []
extremes = {}
adj = {}
picked_pt = False
# A CLI-supplied lat/lon counts as an already-picked point.
if(len(sys.argv))>7:
    picked_pt = True
converted_to_lat_lon = False
find_feature = True
drawn = False  # NOTE(review): never set True, so redraw happens each frame
# Draw the world-map background.
screen.blit(bg, (0, 0))
pygame.display.flip()
mh = MongoHelper()
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        # First click (interactive mode only) picks the query point.
        if event.type == pygame.MOUSEBUTTONDOWN and (len(sys.argv) < 3) and picked_pt == False:
            x_y_coords = (event.pos[0],event.pos[1])
            picked_pt = True
    # Convert the clicked pixel to lat/lon for the MongoDB geo query.
    if x_y_coords != None and converted_to_lat_lon == False:
        lon,lat = (event.pos[0],event.pos[1])
        lat = y_to_lat(lat,height)
        lon = x_to_lon(lon,width)
        converted_to_lat_lon = True
    # Run the nearest-neighbour query once a point is available.
    if picked_pt == True and find_feature == True:
        # No feature chosen: query every collection within the radius.
        if feature == None:
            adj = {'volcanos':None,'earthquakes':None,'meteorites':None}
            for f in feature_list:
                result_list = mh.get_features_near_me(f,(lon,lat),radius)
                extremes,points = find_extremes(result_list, width, height)
                adj[f] = (adjust_location_coords(extremes,points,width,height))
        else:
            # A specific feature was chosen: filter by field threshold.
            result_list = mh.get_features_near_me(feature,(lon,lat),radius)
            adj = {feature: None}
            for r in result_list:
                if min_max == 'min':
                    if float(r['properties'][field]) > field_value:
                        res.append(r)
                if min_max == 'max':
                    if r['properties.'+field] < field_value:
                        res.append(r)
            result_list = []
            # Narrow the filtered results down to max_results entries.
            for f in range(max_results):
                result_list.append(res[f])
            extremes,points = find_extremes(result_list, width, height)
            adj[feature] = (adjust_location_coords(extremes,points,width,height))
        find_feature = False
    # Draw every adjusted point in its feature colour.
    if picked_pt == True and drawn == False:
        for f in adj.keys():
            for pt in adj[f]:
                pygame.draw.circle(screen, color_list[f], pt, 2,0)
            pygame.display.flip()
    # Save the rendered frame to disk.
    pygame.image.save(screen, DIRPATH+'/query2.png')
f7ed7a59dc36e2226c3754a83e69d6a6b1c53e7d | Python | reverbdotcom/datarobot-2.25.1 | /datarobot/models/shap_matrix_job.py | UTF-8 | 1,879 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | from .job import Job
from .shap_matrix import ShapMatrix
class ShapMatrixJob(Job):
    """Async job tracking the computation of a SHAP matrix.

    Carries the model and dataset identifiers needed to materialise the
    resulting ShapMatrix once the job completes.
    """

    def __init__(self, data, model_id, dataset_id, **kwargs):
        # NOTE(review): super(Job, self) skips Job's own __init__ and calls
        # the next class in the MRO — confirm this is intentional and not a
        # typo for super(ShapMatrixJob, self).
        super(Job, self).__init__(data, **kwargs)
        self._model_id = model_id
        self._dataset_id = dataset_id

    @classmethod
    def get(cls, project_id, job_id, model_id=None, dataset_id=None):
        """
        Fetches one SHAP matrix job.

        Parameters
        ----------
        project_id : str
            The identifier of the project in which the job resides
        job_id : str
            The job identifier
        model_id : str
            The identifier of the model used for computing prediction explanations
        dataset_id : str
            The identifier of the dataset against which prediction explanations should be computed

        Returns
        -------
        job : ShapMatrixJob
            The job

        Raises
        ------
        AsyncFailureError
            Querying this resource gave a status code other than 200 or 303
        """
        url = cls._job_path(project_id, job_id)
        data, completed_url = cls._data_and_completed_url_for_job(url)
        return cls(
            data, model_id=model_id, dataset_id=dataset_id, completed_resource_url=completed_url,
        )

    def _make_result_from_location(self, location, params=None):
        # Build the ShapMatrix result object from the completed-resource URL.
        return ShapMatrix.from_location(
            location, model_id=self._model_id, dataset_id=self._dataset_id,
        )

    def refresh(self):
        """
        Update this object with the latest job data from the server.
        """
        data, completed_url = self._data_and_completed_url_for_job(self._this_job_path())
        # Re-run __init__ with fresh server data, preserving model/dataset ids.
        self.__init__(
            data,
            model_id=self._model_id,
            dataset_id=self._dataset_id,
            completed_resource_url=completed_url,
        )
| true |
b6a755319ce3a71e8bda8d0dd2a0233533bd8f1e | Python | willsheffler/pymol | /misc/xyzGeom.py | UTF-8 | 41,505 | 2.984375 | 3 | [] | no_license | """
Easy 3D Linear Algebra, like xyz\* in rosetta
"""
from random import gauss, uniform
from math import pi, sqrt, sin, cos, acos, asin, atan2, degrees, radians, copysign
from itertools import chain, product, izip
import operator as op
import re
# Numeric tolerances used throughout the geometry code.
EPS = 0.000000001
SQRTEPS = sqrt(EPS)
# Exact-type predicates. These deliberately use `type(x) is T` rather than
# isinstance(), so subclasses do NOT match. Point/Points/Vec/Vecs/Line/
# Plane/Mat/Xform are defined later in this module.
def isint(x):
    return type(x) is int
def isfloat(x):
    return type(x) is float
def isnum(x):
    return isint(x) or isfloat(x)
def ispoint(x):
    return type(x) is Point
def ispoints(x):
    return type(x) is Points
def isvec(x):
    return type(x) is Vec
def isvecs(x):
    return type(x) is Vecs
def isvorpt(x):
    # Vec-or-Point: either a vector or a point.
    return isvec(x) or ispoint(x)
def isline(x):
    return type(x) is Line
def isplane(x):
    return type(x) is Plane
def ismat(x):
    return type(x) is Mat
def isxform(x):
    return type(x) is Xform
def islist(x):
    return type(x) is list
def istuple(x):
    return type(x) is tuple
def isiter(x):
    # "Iterable" here means: exposes __iter__ (strings in py2 do not).
    return hasattr(x, "__iter__")
def sametype(x, y):
    return type(x) is type(y)
# Variadic "all" predicates: True iff every argument satisfies the
# corresponding singular predicate (vacuously True for no arguments).
# NOTE(review): implemented with the bare `reduce` builtin, i.e. Python 2
# (consistent with the izip import above); reduce does not short-circuit.
def allints(*X):
    return reduce(op.and_, (type(x) is int for x in X), True)
def allfloats(*X):
    return reduce(op.and_, (type(x) is float for x in X), True)
def allnums(*X):
    return reduce(op.and_, (isint(x) or isfloat(x) for x in X), True)
def allpoints(*X):
    return reduce(op.and_, (type(x) is Point for x in X), True)
def allvecs(*X):
    return reduce(op.and_, (type(x) is Vec for x in X), True)
def allvorpts(*X):
    return reduce(op.and_, (isvec(x) or ispoint(x) for x in X), True)
def alllines(*X):
    return reduce(op.and_, (type(x) is Line for x in X), True)
def allplanes(*X):
    return reduce(op.and_, (type(x) is Plane for x in X), True)
def allmats(*X):
    return reduce(op.and_, (type(x) is Mat for x in X), True)
def allxforms(*X):
    return reduce(op.and_, (type(x) is Xform for x in X), True)
def alllists(*X):
    return reduce(op.and_, (type(x) is list for x in X), True)
def alltuples(*X):
    return reduce(op.and_, (type(x) is tuple for x in X), True)
def alliters(*X):
    return reduce(op.and_, (hasattr(x, "__iter__") for x in X), True)
def anyints(*X):
return reduce(op.or_, (type(x) is int for x in X), False)
def anyfloats(*X):
return reduce(op.or_, (type(x) is float for x in X), False)
def anynums(*X):
return reduce(op.or_, (isint(x) or isfloat(x) for x in X), False)
def anypoints(*X):
return reduce(op.or_, (type(x) is Point for x in X), False)
def anyvecs(*X):
return reduce(op.or_, (type(x) is Vec for x in X), False)
def anyvorpts(*X):
return reduce(op.or_, (isvec(x) or ispoint(x) for x in X), False)
def anylines(*X):
return reduce(op.or_, (type(x) is Line for x in X), False)
def anyplanes(*X):
return reduce(op.or_, (type(x) is Plane for x in X), False)
def anymats(*X):
return reduce(op.or_, (type(x) is Mat for x in X), False)
def anyxforms(*X):
return reduce(op.or_, (type(x) is Xform for x in X), False)
def anylists(*X):
return reduce(op.or_, (type(x) is list for x in X), False)
def anytuples(*X):
return reduce(op.or_, (type(x) is tuple for x in X), False)
def anyiters(*X):
return reduce(op.or_, (hasattr(x, "__iter__") for x in X), False)
def typeerror(o, t1, t2):
    """Raise a TypeError mimicking Python's own message for an unsupported
    binary operator *o* applied to operands *t1* and *t2*."""
    msg = "unsupported operand type(s) for %s: '%s' and '%s'" % (
        o, type(t1).__name__, type(t2).__name__)
    raise TypeError(msg)
def stripfloats(s):
    """Strip cosmetic trailing zeros (and bare trailing dots) from the
    decimal numbers embedded in *s*.

    >>> stripfloats(" 1.10 100.00 0. 1.230000 3.34534500 (0.00000) ")
    ' 1.1 100 0 1.23 3.345345 (0) '
    """
    # First drop runs of trailing zeros after the decimal point...
    without_zeros = re.sub(r"(\b\d+[.]\d*?)0+\b", r"\1", s)
    # ...then drop a now-dangling decimal point before a separator.
    return re.sub(r"(\b\d+)[.]([ ,\s\)$])", r"\1\2", without_zeros)
def sin_cos_range(x):
    """Clamp *x* to [-1.0, 1.0] so tiny float drift cannot push it outside
    the domain of acos/asin; values far outside the range are a bug."""
    assert -1.001 < x < 1.001
    if x > 1.0:
        return 1.0
    if x < -1.0:
        return -1.0
    return x
class Point(object):
    """a Point like xyzVector<Real> in rosetta
    >>> p = Point(1,2,3)
    >>> p
    P(1.000000,2.000000,3.000000)
    >>> print p
    P(1,2,3)
    >>> print ispoint(p),isvec(p)
    True False
    >>> 10+p
    Traceback (most recent call last):
    TypeError: unsupported operand type(s) for +: 'int' and 'Point'
    >>> print 10*p
    P(10,20,30)
    >>> p.key()
    (1.0, 2.0, 3.0)
    elementwise mult
    >>> p*p
    Traceback (most recent call last):
    TypeError: unsupported operand type(s) for *: 'Point' and 'Point'
    >>> assert Point(1,0,-0) == Point(1,-0,0)
    >>> round(Point(1,2,3).distance(Point(3,2,1)),6)
    2.828427
    >>> r = randpoint()
    >>> assert r.distance(r) < EPS
    >>> p.angle(p)
    Traceback (most recent call last):
    AttributeError: 'Point' object has no attribute 'angle'
    >>> p.dot(p)
    Traceback (most recent call last):
    AttributeError: 'Point' object has no attribute 'dot'
    >>> p.length()
    Traceback (most recent call last):
    AttributeError: 'Point' object has no attribute 'length'
    """
    def __init__(self, x=0.0, y=None, z=None):
        # Accepts: a single number (broadcast to x=y=z), another Point/Vec
        # (copied), any 3-iterable, or three explicit numbers.
        if y is None:
            if isnum(x):
                self.x, self.y, self.z = (float(x), ) * 3
            elif isvec(x) or ispoint(x):
                self.x, self.y, self.z = x.x, x.y, x.z
            elif isiter(x):
                i = iter(x)
                # next(i) instead of i.next(): works on Python 2.6+ and 3.
                self.x, self.y, self.z = next(i), next(i), next(i)
            else:
                raise TypeError
        elif z is not None:
            assert isnum(x) and isnum(y) and isnum(z)
            self.x, self.y, self.z = float(x), float(y), float(z)
        else:
            raise TypeError
        assert allfloats(self.x, self.y, self.z)
    def distance_squared(p, q):
        # Distance is only meaningful Point-to-Point (a Vec is a direction).
        if not allpoints(p, q): raise TypeError("distance between vecs / ints doesn't make sense")
        return reduce(op.add, ((f - g)**2 for f, g in zip(p, q)))
    def distance(u, v):
        return sqrt(u.distance_squared(v))
    def __sub__(u, r):
        # Point - Point yields the displacement Vec; anything else defers
        # to addition of the negation (which may itself raise).
        if allpoints(u, r): return Vec(u.x - r.x, u.y - r.y, u.z - r.z)
        return u + -r
    def __rsub__(u, l):
        return l + -u
    def __eq__(self, other):
        # Same concrete type plus componentwise equality within EPS.
        return (type(self) is type(other) and abs(self.x - other.x) < EPS
                and abs(self.y - other.y) < EPS and abs(self.z - other.z) < EPS)
    def rounded(self, sd):
        # NOTE(review): returns a Vec even when self is a Point — preserved
        # as-is because key() only needs .tuple(); confirm before changing.
        return Vec(round(self.x, sd), round(self.y, sd), round(self.z, sd))
    def __len__(v):
        return 3
    def abs(v):
        return Vec(abs(v.x), abs(v.y), abs(v.z))
    def __getitem__(v, i):
        # BUG FIX: indices 1 and 2 previously returned v.x as well.
        if i == 0: return v.x
        if i == 1: return v.y
        if i == 2: return v.z
        raise IndexError
    def tuple(v):
        return (v.x, v.y, v.z)
    def key(v):
        # Hashable key, stable against ~1e-6 float noise.
        return v.rounded(6).tuple()
    def __iter__(p):
        yield p.x
        yield p.y
        yield p.z
    def __mul__(p, a):
        # Points scale by plain numbers only; Point*Point is a TypeError.
        if isnum(a): return Point(a * p.x, a * p.y, a * p.z)
        typeerror('*', p, a)
    def __rmul__(p, a):
        if isnum(a): return Point(a * p.x, a * p.y, a * p.z)
        typeerror('*', a, p)
    def __repr__(self):
        return "P(%f,%f,%f)" % (self.x, self.y, self.z)
    def __str__(self):
        return stripfloats(repr(self))
class Points(list):
    # A plain list of Point objects; exists so ispoints() can distinguish a
    # point collection from an ordinary list or a Vecs collection.
    pass
class Vec(Point):
    """a 3D direction
    >>> p = Point(1,1,1)
    >>> v = Vec(1,2,3)
    >>> p*v
    Traceback (most recent call last):
    TypeError: unsupported operand type(s) for *: 'Point' and 'Vec'
    >>> v*p
    Traceback (most recent call last):
    TypeError: unsupported operand type(s) for *: 'Vec' and 'Point'
    >>> v/p
    Traceback (most recent call last):
    TypeError: unsupported operand type(s) for /: 'Vec' and 'Point'
    >>> print p+v
    P(2,3,4)
    >>> print p-v
    P(0,-1,-2)
    >>> print v-p
    Traceback (most recent call last):
    TypeError: bad operand type for unary -: 'Point'
    >>> assert p-(p+v) == -v and p-p+v == v
    >>> v.distance_squared(v)
    Traceback (most recent call last):
    TypeError: distance between vecs / ints doesn't make sense
    pairwise +/-/*/div on vecs ok
    >>> print v+v
    V(2,4,6)
    >>> print v-v
    V(0,0,0)
    >>> print v*v
    V(1,4,9)
    >>> print v/v
    V(1,1,1)
    """
    def __init__(self, *args, **kwargs):
        super(Vec, self).__init__(*args, **kwargs)
    def dot(u, v):
        assert isvec(v)
        return u.x * v.x + u.y * v.y + u.z * v.z
    def cross(u, v):
        assert isvec(v)
        return Vec(u.y * v.z - u.z * v.y, u.z * v.x - u.x * v.z, u.x * v.y - u.y * v.x)
    __and__ = dot
    __or__ = cross
    def normdot(u, v):
        # Cosine of the angle, clamped into [-1, 1] against float drift.
        assert isvec(v)
        return min(1.0, max(-1.0, u.dot(v) / u.length() / v.length()))
    def angle(u, v):
        # Angle in radians in [0, pi].
        assert isvec(v)
        d = u.normdot(v)
        if d > 1.0 - EPS: return 0.0
        if d < EPS - 1.0: return pi
        return acos(d)
    def angle_degrees(u, v):
        return degrees(u.angle(v))
    def lineangle(u, v):
        # Angle between the undirected lines spanned by u and v, in [0, pi/2].
        assert isinstance(v, Vec)
        # BUG FIX: `v.length` was missing its call parentheses, so the
        # degenerate-length guard never fired for v.
        if u.length() < SQRTEPS or v.length() < SQRTEPS: return 0.0
        ang = abs(acos(u.normdot(v)))
        return ang if ang < pi / 2.0 else pi - ang
    def linemaxangle(u, v):
        # BUG FIX: referenced `math.pi`, but only `pi` is imported here.
        return pi - u.lineangle(v)
    def lineangle_degrees(u, v):
        # BUG FIX: called a nonexistent module-level lineangle().
        return degrees(u.lineangle(v))
    def linemaxangle_degrees(u, v):
        # BUG FIX: called a nonexistent module-level linemaxangle().
        return degrees(u.linemaxangle(v))
    def length(u):
        return sqrt(u.dot(u))
    def length_squared(u):
        return u.dot(u)
    def unit(v):
        # NOTE(review): scales so the first sufficiently-large component
        # becomes 1 — this is NOT unit length (see normalized()), and it
        # implicitly returns None for a near-zero vector; preserved as-is.
        if abs(v.x) > SQRTEPS: return v / v.x
        elif abs(v.y) > SQRTEPS: return v / v.y
        elif abs(v.z) > SQRTEPS: return v / v.z
    def normalize(u):
        # In-place normalization to unit length.
        l = u.length()
        u.x /= l
        u.y /= l
        u.z /= l
    def normalized(u):
        v = Vec(u)
        v.normalize()
        return v
    def outer(u, v):
        # NOTE(review): Mat is commented out in this module, so calling
        # outer() raises NameError until Mat is restored.
        assert isvec(v)
        return Mat(u.x * v.x, u.x * v.y, u.x * v.z, u.y * v.x, u.y * v.y, u.y * v.z, u.z * v.x,
                   u.z * v.y, u.z * v.z)
    def __add__(v, r):
        # Vec+Vec -> Vec; Vec+Point / Point+Vec -> Point; Point+Point invalid.
        assert isvorpt(v)
        if isnum(r): return type(v)(v.x + r, v.y + r, v.z + r)
        elif isvorpt(r):
            if isvec(v) and isvec(r): return Vec(v.x + r.x, v.y + r.y, v.z + r.z)
            if isvec(v) or isvec(r): return Point(v.x + r.x, v.y + r.y, v.z + r.z)
            raise TypeError
        return v.__radd__(v)
    def __radd__(v, r):
        return v + r
    def __mul__(p, a):
        # Scalar scaling, or elementwise Vec*Vec; Vec*Point is a TypeError.
        assert isvorpt(p)
        if isnum(a): return type(p)(f * a for f in p)
        # zip instead of izip: identical here (3 elements, consumed once)
        # and portable to Python 3.
        if allvecs(p, a): return type(p)(f * g for f, g in zip(p, a))
        if isvorpt(a): typeerror('*', p, a)
        else: return a.__rmul__(p)
    def __rmul__(u, a):
        if anypoints(u, a): typeerror('*', a, u)
        return u * a
    def __neg__(u):
        return Vec(-u.x, -u.y, -u.z)
    def __div__(u, a):
        if anypoints(u, a): typeerror('/', u, a)
        if isnum(a): return Vec(u.x / a, u.y / a, u.z / a)
        if isvec(a): return Vec(u.x / a.x, u.y / a.y, u.z / a.z)
        return a.__rdiv__(u)
    def __rdiv__(u, a):
        return a / u
    # Python 3 compatibility aliases; Python 2 keeps dispatching to __div__.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__
    def __repr__(self):
        return "V(%f,%f,%f)" % (self.x, self.y, self.z)
    def __str__(self):
        return stripfloats(repr(self))
    def proj(v, u):
        """
        >>> print Vec(1,1,1).proj(Vec(abs(gauss(0,10)),0,0))
        V(1,0,0)
        >>> print Vec(2,2,2).proj(Vec(abs(gauss(0,10)),0,0))
        V(2,0,0)
        >>> u,v = randvec(2)
        >>> puv = v.proj(u).normalized()
        >>> assert abs(abs(puv.dot(u.normalized()))-1.0) < EPS
        """
        return u.dot(v) / u.dot(u) * u
    def perp(v, u):
        """
        >>> u = Vec(1,0,0); v = Vec(1,1,1)
        >>> print v.perp(u)
        V(0,1,1)
        >>> u,v = randvec(2)
        >>> assert abs(v.perp(u).dot(u)) < EPS
        >>> assert abs( u.dot( v.perp(u) ) ) < EPS
        """
        return v - v.proj(u)
# Canonical axis unit vectors (Ux/Uy/Uz), the zero vector (V0), the unit
# axis points (Px/Py/Pz), and the origin (P0).
Ux = Vec(1, 0, 0)
Uy = Vec(0, 1, 0)
Uz = Vec(0, 0, 1)
V0 = Vec(0, 0, 0)
Px = Point(1, 0, 0)
Py = Point(0, 1, 0)
Pz = Point(0, 0, 1)
P0 = Point(0, 0, 0)
class Vecs(list):
    # A plain list of Vec objects; exists so isvecs() can distinguish a
    # vector collection from an ordinary list or a Points collection.
    pass
def randpoint(n=1):
    """Return one Point (default) or, for n > 1, a Points collection of n
    points, each coordinate drawn from a standard normal distribution."""
    # '==' rather than 'is': identity of small ints is a CPython detail.
    if n == 1: return Point(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    return Points(Point(gauss(0, 1), gauss(0, 1), gauss(0, 1)) for i in range(n))
def randvec(n=1):
    """Return one Vec (default) or, for n > 1, a Vecs collection of n
    vectors, each coordinate drawn from a standard normal distribution."""
    # '==' rather than 'is': identity of small ints is a CPython detail.
    if n == 1: return Vec(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    return Vecs(Vec(gauss(0, 1), gauss(0, 1), gauss(0, 1)) for i in range(n))
def randnorm(n=1):
    """Return one random unit-length Vec (default) or a Vecs of n of them.

    >>> assert abs(randnorm().length()-1.0) < 0.0000001
    """
    # '==' rather than 'is': identity of small ints is a CPython detail.
    if n == 1: return randvec().normalized()
    return Vecs(randvec().normalized() for i in range(n))
def coplanar(x1, x2, x3, x4):
    """
    >>> u,v,w = randpoint(3)
    >>> a,b,c = (gauss(0,10) for i in range(3))
    >>> assert coplanar(u, v, w, u + a*(u-v) + b*(v-w) + c*(w-u) )
    >>> assert not coplanar(u, v, w, u + a*(u-v) + b*(v-w) + c*(w-u) + randvec().cross(u-v) )
    """
    if not allpoints(x1, x2, x3, x4):
        raise NotImplementedError
    # Four points are coplanar iff the scalar triple product of three edge
    # vectors (near-)vanishes.
    return abs((x3 - x1).dot((x2 - x1).cross(x4 - x3))) < SQRTEPS
def rmsd(l, m):
    """Root-mean-square deviation between two equal-length Points lists.

    >>> l,m = randpoint(6),randpoint(6)
    >>> rmsd(l,l)
    0.0
    """
    assert ispoints(l)
    assert ispoints(m)
    if not l: return 0.0  # empty input: no deviation (avoids 0/0 below)
    total = 0.0
    # zip instead of izip: identical semantics here, Python 3 portable.
    for u, v in zip(l, m):
        total += u.distance_squared(v)
    # BUG FIX: RMSD requires the MEAN of the squared deviations; previously
    # this returned sqrt of the plain sum (root-sum-square).
    return sqrt(total / len(l))
def dihedral(p1, p2, p3, p4=None):
    """
    3 Vecs or 4 points
    >>> dihedral_degrees(Px,Py,P0,Pz)
    90.0
    >>> dihedral_degrees(Px,P0,Py,Pz)
    -90.0
    >>> dihedral_degrees(Uy,Uz,Ux)
    90.0
    >>> dihedral_degrees(Uy,Uz,Ux)
    90.0
    """
    # Four-point form: signed dihedral (radians, in (-pi, pi]) about the
    # p2-p3 axis, via atan2 of the standard bond-vector construction.
    if allpoints(p1, p2, p3, p4):
        a = (p2 - p1).normalized()
        b = (p3 - p2).normalized()
        c = (p4 - p3).normalized()
        x = -a.dot(c) + a.dot(b) * b.dot(c)
        y = a.dot(b.cross(c))
        return atan2(y, x)
    # Three-Vec form: treat the vecs as consecutive bond vectors from P0.
    if allvecs(p1, p2, p3) and p4 is None:
        return dihedral(P0, P0 + p1, P0 + p1 + p2, P0 + p1 + p2 + p3)
    # Mixed Points/others are rejected explicitly...
    if anypoints(p1, p2, p3, p4):
        raise NotImplementedError
    # NOTE(review): any other combination falls through and implicitly
    # returns None — confirm whether that is intended.
def dihedral_degrees(p1, p2, p3, p4=None):
    """Dihedral angle in degrees; accepts the same arguments as dihedral()."""
    radians_value = dihedral(p1, p2, p3, p4)
    return degrees(radians_value)
def angle(p1, p2, p3=None):
    """Angle in radians: between two Vecs, or the interior angle at p2 for
    three Points.

    NOTE(review): argument combinations matching neither form fall through
    and implicitly return None — preserved from the original.
    """
    if allvecs(p1, p2) and p3 is None:
        return p1.angle(p2)
    elif allpoints(p1, p2, p3):
        a = (p2 - p1).normalized()
        b = (p2 - p3).normalized()
        # BUG FIX: clamp into acos's domain so float rounding on
        # (anti)parallel directions cannot raise a math domain error.
        return acos(min(1.0, max(-1.0, a.dot(b))))
class Line(object):
    """
    from a direction and a point
    >>> print Line(Ux,P0)
    Line( P(0,0,0) + r * V(1,0,0) )
    from two points:
    >>> print Line(P0,Px)
    Line( P(1,0,0) + r * V(1,0,0) )
    >>> print Line(P0,P0)
    Traceback (most recent call last):
    assert direction.length_squared() > SQRTEPS
    AssertionError
    >>> assert Line(Ux,P0) == Line(Ux,Px)
    >>> assert Line(Ux,P0) == Line(-Ux,Px)
    >>> assert Line(Ux,P0) != Line(Ux,Py)
    """
    def __init__(self, direction, position):
        # Either (direction Vec, point) or (point, point); two coincident
        # points have no direction and fail the length assertion.
        assert ispoint(position)
        if ispoint(direction):
            direction = position - direction
        assert direction.length_squared() > SQRTEPS
        self.d = direction.normalized()
        self.p = position
    def __eq__(l1, l2):
        # Same (undirected) direction and l2's anchor lies on l1.
        return (l1.d == l2.d or l1.d == -l2.d) and l1.d.lineangle(l1.p - l2.p) < EPS
    def __str__(l):
        return "Line( %s + r * %s )" % (str(l.p), str(l.d))
    def __repr__(l):
        # BUG FIX: previously referenced undefined name 'p' instead of 'l'.
        return "Line(%s,%s)" % (repr(l.d), repr(l.p))
    def distance(l, r):
        """
        >>> l = Line(Uy,P0)
        >>> l.distance(P0)
        0.0
        >>> round(l.distance(Px+Uz),8)
        1.41421356
        >>> round(Line(Ux,Px).distance(Point(3,2,1)) , 8)
        2.23606798
        >>> Line(Ux,P0).distance(Line(Uy,P0))
        0.0
        >>> l1 = Line(Ux,Point(0,1,2))
        >>> l2 = Line(Ux,Point(3,2,1))
        >>> round(l1.distance(l2) , 8)
        1.41421356
        >>> l3 = Line(Uz,99.0*Px)
        >>> Line(Ux,10*Py).distance(l3)
        10.0
        # >>> X = randxform()
        # >>> round(Line(X.R*Ux,X*Point(0,1,2)).distance(Line(X.R*Ux,X*Point(3,2,1))) , 8)
        """
        # Point: length of the component of (r - anchor) perpendicular to d.
        if ispoint(r): return (r - l.p).perp(l.d).length()
        if isvec(r): raise TypeError("Line distance to Vec not defined")
        if isline(r):
            a1 = l.d.normalized()
            a2 = r.d.normalized()
            # Near-parallel lines: perpendicular offset of the anchors.
            if abs(a1.dot(a2)) > 0.9999: return (r.p - l.p).perp(a1).length()
            # Skew lines: |(c . (a x b))| / |a x b|.
            a = a1
            b = a2
            c = r.p - l.p
            n = abs(c.dot(a.cross(b)))
            d = a.cross(b).length()
            if abs(d) < EPS: return 0
            return n / d
# def line_line_distance(a1,c1,a2,c2):
# """
# >>> line_line_distance(Ux,V0,Uy,V0)
# 0.0
# >>> round(line_line_distance(Ux,Vec(0,1,2),Ux,Vec(3,2,1)) , 8)
# 1.41421356
# >>> line_line_distance(Ux,10*Uy,Uz,99.0*Ux)
# 10.0
# # >>> X = randxform()
# # >>> round(line_line_distance(X.R*Ux,X*Vec(0,1,2),X.R*Ux,X*Vec(3,2,1)) , 8)
# # 1.41421356
# """
# a1 = a1.normalized()
# a2 = a2.normalized()
# if abs(a1.dot(a2)) > 0.9999: return (c1-c2).perp(a1).length()
# a = a1
# b = a2
# c = c2-c1
# n = abs(c.dot(a.cross(b)))
# d = a.cross(b).length()
# if abs(d) < EPS: return 0
# return n/d
def line_plane_intersection(l, l0, n, p0):
    """
    >>> l = Ux
    >>> l0 = randvec()
    >>> n = Ux
    >>> p0 = V0
    >>> assert line_plane_intersection(l,l0,n,p0)[1] == Vec(0,l0.y,l0.z)
    >>> n = randnorm()
    >>> p0 = randvec().cross(n)
    >>> l = randvec()
    >>> l0 = p0+l*gauss(0,10)
    >>> assert line_plane_intersection(l,l0,n,p0)[1] == p0
    """
    # Intersect the line l0 + d*l with the plane through p0 with normal n.
    # Returns (d, intersection).  NOTE(review): a line parallel to the plane
    # makes l.dot(n) == 0 and raises ZeroDivisionError — confirm callers
    # never pass that case.
    n = n.normalized()
    d = (p0 - l0).dot(n) / l.dot(n)
    return d, d * l + l0
def slide_to_make_lines_intersect(dof, l, l0, m, m0):
    """
    >>> v = randvec()
    >>> assert abs(slide_to_make_lines_intersect(Ux,Uy,v,Uz,V0) + v.x ) < EPS
    >>> dof,l,l0,m,m0 = randvec(5)
    >>> d = slide_to_make_lines_intersect(dof,l,l0,m,m0)
    >>> l0 = l0 + d*dof
    >>> assert abs(Line(l,P0+l0).distance(Line(m,P0+m0))) < EPS
    """
    # Distance d to translate the line (l, l0) along direction `dof` so it
    # intersects the line (m, m0): slide l0 along dof until it hits the
    # plane that contains line m and is parallel to l.
    n = l.cross(m)
    p0 = m0
    d, i = line_plane_intersection(dof, l0, n, p0)
    # Sanity checks: the intersection point must lie along dof from l0.
    assert ((i - l0).normalized().dot(dof.normalized()) - 1.0) < EPS
    assert i - l0 == dof * d
    return d
# def slide_to_make_lines_intersect(dof,l,l0,m,m0):
# """
# >>> v = randvec()
# >>> assert abs(slide_to_make_lines_intersect(Ux,Uy,v,Uz,V0) + v.x ) < EPS
# >>> dof,l,l0,m,m0 = randvec(5)
# >>> d = slide_to_make_lines_intersect(dof,l,l0,m,m0)
# >>> l0 = l0 + d*dof
# >>> assert abs(line_line_distance(l,l0,m,m0)) < EPS
# """
# l0 = Point(l0)
# m0 = Point(m0)
# n = l.cross(m)
# d,i = Plane(n,m0).intersection(Line(dof,l0))
# assert ( (i-l0).normalized().dot(dof.normalized()) - 1.0 ) < EPS
# assert i == dof*d+l0
# return d
class Plane(object):
    """
    from normal and center
    >>> print Plane(Ux,P0)
    Plane(norm=V(1,0,0),p0=P(0,0,0))
    from 3 points:
    >>> print Plane(P0,Py,Pz)
    Plane(norm=V(1,0,0),p0=P(0,0,0))
    from line and point
    >>> print Plane( Line(Uy,Pz), P0)
    Plane(norm=V(1,0,0),p0=P(0,0,0))
    from line and vec
    >>> print Plane( Line(Uy,P0), Uz)
    Plane(norm=V(1,0,0),p0=P(0,0,0))
    >>> assert Plane( Line(Uy,P0), Uz) == Plane(-Ux,P0)
    >>> assert Plane( Line(Uy,P0), Uz) != Plane(-Ux,P0+Vec(0.0001) )
    """
    def __init__(self, a, b=None, c=None):
        # Constructor dispatch: each branch reduces its inputs to the
        # canonical (normal Vec a, anchor Point b) pair asserted below.
        if isplane(a): a, b = a.n, a.p  # copy another plane
        elif isline(a) and ispoint(b) and c is None: a, b = a.d.cross(a.p - b), b  # line + off-line point
        elif isline(a) and isvec(b) and c is None: a, b = a.d.cross(b), a.p  # line + in-plane direction
        if allpoints(a, b, c): a, b = (a - b).cross(a - c), a  # three points
        assert isvec(a) and ispoint(b)
        assert a.length_squared() > SQRTEPS  # degenerate inputs have no normal
        self.n = a.normalized()
        self.p = b
    def __eq__(p1, p2):
        # Same plane regardless of normal orientation: normals (anti)parallel
        # and the anchor offset lies in the plane.
        return (p1.n == p2.n or p1.n == -p2.n) and abs(p1.n.dot(p1.p - p2.p)) < EPS
    def __str__(p):
        return "Plane(norm=%s,p0=%s)" % (str(p.n), str(p.p))
    def __repr__(p):
        return "Plane(%s,%s)" % (repr(p.n), repr(p.p))
    def intersection(p, l):
        """
        >>> l = Ux
        >>> l0 = randpoint()
        >>> n = Ux
        >>> p0 = P0
        >>> assert Plane(n,p0).intersection(Line(l,l0))[1] == Point(0,l0.y,l0.z)
        >>> n = randnorm()
        >>> p0 = P0 + randvec().cross(n)
        >>> l = randvec()
        >>> l0 = p0+l*gauss(0,10)
        >>> assert Plane(n,p0).intersection(Line(l,l0))[1] == p0
        """
        # Returns (d, point) with point = l.p + d*l.d.
        # NOTE(review): a line parallel to the plane divides by zero —
        # confirm callers never pass that case.
        n = p.n.normalized()
        d = (p.p - l.p).dot(n) / l.d.dot(n)
        return d, d * l.d + l.p
# class Mat(object):
# """docstring for Mat
# >>> m = Mat(2,0,0,0,1,0,0,0,1)
# >>> print m
# Mat[ (2.000000,0.000000,0.000000), (0.000000,1.000000,0.000000), (0.000000,0.000000,1.000000) ]
# >>> print m*m
# Mat[ (4.000000,0.000000,0.000000), (0.000000,1.000000,0.000000), (0.000000,0.000000,1.000000) ]
# >>> print Mat(*range(1,10)) * Mat(*range(10,19))
# Mat[ (84.000000,90.000000,96.000000), (201.000000,216.000000,231.000000), (318.000000,342.000000,366.000000) ]
# >>> assert Mat(0.0,1.0,2.0,3,4,5,6,7,8) == Mat(-0,1,2,3,4,5.0,6.0,7.0,8.0)
# >>> print Mat(100,2,3,4,5,6,7,8,9).det()
# -297.0
# >>> m = Mat(100,2,3,4,5,6,7,8,9)
# >>> assert m * ~m == Imat
# """
# def __init__(self, xx=None, xy=None, xz=None, yx=None, yy=None, yz=None, zx=None, zy=None, zz=None):
# super(Mat, self).__init__()
# if xx is None: # identity default
# self.xx, self.xy, self.xz = 1.0,0.0,0.0
# self.yx, self.yy, self.yz = 0.0,1.0,0.0
# self.zx, self.zy, self.zz = 0.0,0.0,1.0
# elif xy is None and ismat(xx):
# self.xx, self.xy, self.xz = xx.xx, xx.xy, xx.xz
# self.yx, self.yy, self.yz = xx.yx, xx.yy, xx.yz
# self.zx, self.zy, self.zz = xx.zx, xx.zy, xx.zz
# elif yx is None and isvec(xx) and isvec(xy) and isvec(xz):
# self.xx, self.xy, self.xz = xx.x, xy.x, xz.x
# self.yx, self.yy, self.yz = xx.y, xy.y, xz.y
# self.zx, self.zy, self.zz = xx.z, xy.z, xz.z
# elif isnum(xx):
# self.xx, self.xy, self.xz = float(xx), float(xy), float(xz)
# self.yx, self.yy, self.yz = float(yx), float(yy), float(yz)
# self.zx, self.zy, self.zz = float(zx), float(zy), float(zz)
# else:
# assert not isnum(xx)
# assert not ismat(xx)
# assert not isvec(xx)
# raise TypeError
# assert isfloat(self.xx) and isfloat(self.xy) and isfloat(self.xz)
# assert isfloat(self.yx) and isfloat(self.yy) and isfloat(self.yz)
# assert isfloat(self.zx) and isfloat(self.zy) and isfloat(self.zz)
# def row(m,i):
# assert isint(i)
# if i is 0: return Vec(m.xx,m.xy,m.xz)
# elif i is 1: return Vec(m.yx,m.yy,m.yz)
# elif i is 2: return Vec(m.zx,m.zy,m.zz)
# else: assert 0 <= i and i <= 2
# def col(m,i):
# assert isint(i)
# if i is 0: return Vec(m.xx,m.yx,m.zx)
# elif i is 1: return Vec(m.xy,m.yy,m.zy)
# elif i is 2: return Vec(m.xz,m.yz,m.zz)
# else: assert 0 <= i and i <= 2
# def rowx(m): return m.row(0)
# def rowy(m): return m.row(1)
# def rowz(m): return m.row(2)
# def colx(m): return m.col(0)
# def coly(m): return m.col(1)
# def colz(m): return m.col(2)
# def __invert__(m): return Mat( m.zz*m.yy-m.zy*m.yz , -(m.zz*m.xy-m.zy*m.xz) , m.yz*m.xy-m.yy*m.xz ,
# -(m.zz*m.yx-m.zx*m.yz) , m.zz*m.xx-m.zx*m.xz , -(m.yz*m.xx-m.yx*m.xz) ,
# m.zy*m.yx-m.zx*m.yy , -(m.zy*m.xx-m.zx*m.xy) , m.yy*m.xx-m.yx*m.xy ) / m.det()
# def __mul__(m,r):
# if isnum(r): return Mat( r*m.xx, r*m.xy, r*m.xz, r*m.yx, r*m.yy, r*m.yz, r*m.zx, r*m.zy, r*m.zz )
# elif isvec(r): return Vec( m.rowx()*r, m.rowy()*r, m.rowz()*r )
# elif ismat(r): return Mat( m.rowx()&r.colx(), m.rowx()&r.coly(), m.rowx()&r.colz(),
# m.rowy()&r.colx(), m.rowy()&r.coly(), m.rowy()&r.colz(),
# m.rowz()&r.colx(), m.rowz()&r.coly(), m.rowz()&r.colz() )
# else: return r.__rmul__(m)
# def __rmul__(m,v):
# if isnum(v): return m*v
# elif isvec(v): return Vec( m.colx()*v, m.coly()*v, m.colz()*v )
# def __div__(m,v): return m*(1/v)
# def __add__(m,v):
# if isnum(v): return Mat(v +m.xx,v +m.xy,v +m.xz,v +m.yx,v +m.yy,v +m.yz,v +m.zx,v +m.zy,v +m.zz)
# elif ismat(v): return Mat(v.xx+m.xx,v.xy+m.xy,v.xz+m.xz,v.yx+m.yx,v.yy+m.yy,v.yz+m.yz,v.zx+m.zx,v.zy+m.zy,v.zz+m.zz)
# else: return v.__radd__(m)
# def __sub__(m,v): return m + -v
# def __neg__(m): return m * -1
# def __str__(m): return "Mat[ %s, %s, %s ]" % (str(m.rowx()),str(m.rowy()),str(m.rowz()))
# def transpose(m):
# m = Mat( m.xx, m.yx, m.zx, m.xy, m.yy, m.zy, m.xz, m.yz, m.zz )
# def transposed(m): return Mat( m.xx, m.yx, m.zx, m.xy, m.yy, m.zy, m.xz, m.yz, m.zz )
# def det(m):
# # a11 (a33 a22- a32 a23)- a21 ( a33 a12- a32 a13)+ a31( a23 a12- a22 a13)
# return m.xx*(m.zz*m.yy-m.zy*m.yz)-m.yx*(m.zz*m.xy-m.zy*m.xz)+m.zx*(m.yz*m.xy-m.yy*m.xz)
# def trace(m):
# return m.xx+m.yy+m.zz
# def add_diagonal(m,v):
# return Mat( v.x+m.xx, m.xy, m.xz, m.yx, v.y+m.yy, m.yz, m.zx, m.zy, v.z+m.zz )
# def is_rotation(m):
# return (m.colx().isnormal() and m.coly().isnormal() and m.colz().isnormal() and
# m.rowx().isnormal() and m.rowy().isnormal() and m.rowz().isnormal() )
# def __eq__(self,other): return ( abs(self.xx-other.xx) < EPS and
# abs(self.xy-other.xy) < EPS and
# abs(self.xz-other.xz) < EPS and
# abs(self.yx-other.yx) < EPS and
# abs(self.yy-other.yy) < EPS and
# abs(self.yz-other.yz) < EPS and
# abs(self.zx-other.zx) < EPS and
# abs(self.zy-other.zy) < EPS and
# abs(self.zz-other.zz) < EPS )
# def rotation_axis(R):
# """
# >>> axis ,ang = randnorm(),uniform(-pi,pi)
# >>> axis2,ang2 = rotation_matrix(axis,ang).rotation_axis()
# >>> assert abs( abs(ang) - abs(ang2) ) < EPS
# >>> assert axis == axis2 * copysign(1,ang*ang2)
# """
# cos_theta = sin_cos_range((R.trace()-1.0)/2.0);
# if cos_theta > -1.0+EPS and cos_theta < 1.0-EPS:
# x = ( 1.0 if R.zy > R.yz else -1.0 ) * sqrt( max(0.0, ( R.xx - cos_theta ) / ( 1.0 - cos_theta ) ) )
# y = ( 1.0 if R.xz > R.zx else -1.0 ) * sqrt( max(0.0, ( R.yy - cos_theta ) / ( 1.0 - cos_theta ) ) )
# z = ( 1.0 if R.yx > R.xy else -1.0 ) * sqrt( max(0.0, ( R.zz - cos_theta ) / ( 1.0 - cos_theta ) ) )
# theta = acos( cos_theta );
# assert abs( x*x + y*y + z*z - 1 ) <= 0.01
# return Vec(x,y,z),theta
# elif cos_theta >= 1.0-EPS: return Vec(1.0,0.0,0.0),0.0
# else:
# nnT = (R+Imat)/2.0
# x,y,z = 0.0,0.0,0.0;
# if nnT.xx > EPS:
# x = sqrt( nnT.xx )
# y = nnT.yx / x
# z = nnT.zx / x
# elif nnT.yy > EPS:
# x = 0
# y = sqrt(nnT.yy)
# z = nnT.zy / y
# else:
# assert( nnT.zz > EPS );
# x = 0
# y = 0
# z = sqrt( nnT.zz )
# assert abs( x*x + y*y + z*z - 1.0 ) <= 0.01
# return Vec( x, y, z ),pi
# Imat = Mat(1,0,0,0,1,0,0,0,1)
# def projection_matrix(v):
# m = Mat( v.x * v.x, v.x * v.y, v.x * v.z, v.y * v.x, v.y * v.y, v.y * v.z, v.z * v.x, v.z * v.y, v.z * v.z )
# return m / v.dot(v)
# def rotation_matrix(axis,angle):
# n = axis.normalized()
# sin_theta = sin( angle )
# cos_theta = cos( angle )
# R = projection_matrix(n)
# R *= 1.0 - cos_theta
# R.xx += cos_theta; R.xy -= sin_theta * n.z; R.xz += sin_theta * n.y
# R.yx += sin_theta * n.z; R.yy += cos_theta; R.yz -= sin_theta * n.x
# R.zx -= sin_theta * n.y; R.zy += sin_theta * n.x; R.zz += cos_theta
# return R;
# def rotation_matrix_degrees(axis,angle):
# """ get a rotation matrix
# >>> rx180 = rotation_matrix_degrees(Vec(1,0,0),180.0)
# >>> rx90 = rotation_matrix_degrees(Vec(1,0,0),90.0)
# >>> print rx90*rx90 == rx180
# True
# >>> r = rotation_matrix_degrees(Vec(1,0,0),45.0)
# >>> print r
# Mat[ (1.000000,0.000000,0.000000), (0.000000,0.707107,-0.707107), (0.000000,0.707107,0.707107) ]
# >>> assert r*r == rx90
# >>> assert r*r*r*r == rx180
# >>> assert r*r*r*r*r*r*r*r == Imat
# >>> assert ~r == r.transposed()
# >>> ang = uniform(0,1)*360.0-180.0
# >>> v = randvec()
# >>> axs = randnorm()
# >>> while(abs(v.dot(axs))>0.9): axs = randnorm()
# >>> u = rotation_matrix_degrees(projperp(v,axs),ang)*v
# >>> assert abs(u.angle_degrees(v)-abs(ang)) < SQRTEPS
# >>> test_rotation_mat()
# test_rotation_mat PASS
# """
# return rotation_matrix(axis,radians(angle))
# def test_rotation_mat():
# import random
# for i in range(100):
# a0 = randnorm()
# t0 = uniform(-pi,pi)
# a,t = rotation_matrix(a0,t0).rotation_axis()
# if t0 < 0.01: continue
# if abs(t-pi) < EPS:
# if (abs(a.x-a0.x) < 0.001 and abs(a.y-a0.y) < 0.001 and abs(a.z-a0.z) < 0.001) or \
# (abs(a.x+a0.x) < 0.001 and abs(a.y+a0.y) < 0.001 and abs(a.z+a0.z) < 0.001):
# continue
# else:
# print a0
# print a
# return False
# if not abs(t-t0) < EPS or not (a.normalized()-a0.normalized()).length() < EPS:
# print a0.normalized(), t0
# print a.normalized() , t
# print "FAIL"
# return
# print "test_rotation_mat PASS"
# def randrot(n=1):
# if n is 1: return rotation_matrix_degrees(randvec(),uniform(0,1)*360)
# return (rotation_matrix_degrees(randvec(),uniform(0,1)*360) for i in range(n))
# class Xform(object):
# """Coordinate frame like rosetta Xform, behaves also as a rosetta Stub
# >>> x = Xform(R=Imat,t=Uz)
# >>> print x
# Xform( Mat[ (1.000000,0.000000,0.000000), (0.000000,1.000000,0.000000), (0.000000,0.000000,1.000000) ], (0.000000,0.000000,1.000000) )
# >>> assert (x*x) == Xform(R=Imat,t=2*Uz)
# >>> x = Xform(R=rotation_matrix_degrees(Vec(1,0,0),90.0),t=Vec(0,0,0))
# >>> print x
# Xform( Mat[ (1.000000,0.000000,0.000000), (0.000000,0.000000,-1.000000), (0.000000,1.000000,0.000000) ], (0.000000,0.000000,0.000000) )
# >>> assert x*x*x*x == Ixform
# >>> x.t = Ux
# >>> assert x*x*x*x == Xform(R=Imat,t=4*Ux)
# >>> x.t = Uz
# >>> print x
# Xform( Mat[ (1.000000,0.000000,0.000000), (0.000000,0.000000,-1.000000), (0.000000,1.000000,0.000000) ], (0.000000,0.000000,1.000000) )
# >>> assert x == Xform(R=rotation_matrix_degrees(Ux, 90.0),t=Vec(0, 0,1))
# >>> assert x*x == Xform(R=rotation_matrix_degrees(Ux,180.0),t=Vec(0,-1,1))
# >>> assert x*x*x == Xform(R=rotation_matrix_degrees(Ux,270.0),t=Vec(0,-1,0))
# >>> assert x*x*x*x == Xform(R=rotation_matrix_degrees(Ux, 0.0),t=Vec(0, 0,0))
# >>> assert x*x*x*x*x == Xform(R=rotation_matrix_degrees(Ux, 90.0),t=Vec(0, 0,1))
# >>> assert x*x*x*x*x*x == Xform(R=rotation_matrix_degrees(Ux,180.0),t=Vec(0,-1,1))
# >>> assert x*x*x*x*x*x*x == Xform(R=rotation_matrix_degrees(Ux,270.0),t=Vec(0,-1,0))
# >>> assert x*x*x*x*x*x*x*x == Xform(R=rotation_matrix_degrees(Ux, 0.0),t=Vec(0, 0,0))
# >>> x = Xform(rotation_matrix_degrees(Vec(1,2,3),123),Vec(5,7,9))
# >>> assert ~x * x == Ixform
# >>> assert x * ~x == Ixform
# Frames / RTs are interchangable:
# >>> fr = Xform(rotation_matrix_degrees(Vec(1,2,3), 65.64),t=Vec(3,2,1))
# >>> to = Xform(rotation_matrix_degrees(Vec(7,5,3),105.44),t=Vec(10,9,8))
# >>> x = to/fr
# >>> assert to/Ixform == to
# >>> assert Ixform/fr == ~fr
# >>> assert (to * ~fr) * fr == to
# >>> assert x * fr == to
# >>> a1 = randnorm()
# >>> b1 = randnorm()
# >>> ang = uniform(0,1)*360.0-180.0
# >>> a2 = rotation_matrix_degrees(a1.cross(randnorm()),ang) * a1
# >>> b2 = rotation_matrix_degrees(b1.cross(randnorm()),ang) * b1
# >>> assert abs(angle(a1,a2) - angle(b1,b2)) < EPS
# >>> xa = Xform().from_two_vecs(a1,a2)
# >>> xb = Xform().from_two_vecs(b1,b2)
# >>> assert xa.tolocal(a1) == xb.tolocal(b1)
# >>> assert xa.tolocal(a2) == xb.tolocal(b2)
# >>> assert ~xa*a1 == ~xb*b1
# >>> assert ~xa*a2 == ~xb*b2
# >>> assert xb/xa*a1 == b1
# >>> assert xb/xa*a2 == b2
# add/sub with Vecs:
# >>> X = randxform()
# >>> u,v = randvec(2)
# >>> assert isxform(u+X) and isxform(X+u) and isxform(u-X) and isxform(X-u)
# >>> assert X*v+u == (u+X)*v
# >>> assert X*(v+u) == (X+u)*v
# >>> assert Xform(u)*X*v == (u+X)*v
# >>> assert X*Xform(u)*v == (X+u)*v
# >>> assert X*v-u == (u-X)*v
# >>> assert X*(v-u) == (X-u)*v
# mul,div with Mats:
# >>> R = randrot()
# >>> assert isxform(R*X) and isxform(X*R)
# >>> assert R*X*u == (R*X)*u == R*(X*u)
# >>> assert X*R*u == (X*R)*u == X*(R*u)
# >>> assert Xform(R)*X*u == Xform(R)*(X*u)
# >>> assert X*Xform(R)*u == X*(Xform(R,V0)*u)
# >>> assert X/X*v == v
# mul/div Xforms:
# >>> Y = randxform()
# >>> assert isxform(X/Y) and isxform(X*Y)
# >>> assert X/Y*v == X*~Y*v
# # >>> axis,ang,cen = randnorm(),uniform(-pi,pi),randvec()
# # >>> X = rotation_around(axis,ang,cen)
# # >>> axis2,ang2,cen2 = X.rotation_center()
# # >>> assert abs( abs(ang) - abs(ang2) ) < EPS
# # >>> assert axis == axis2 * copysign(1,ang*ang2)
# # >>> print cen
# # >>> print cen2
# """
# def __init__(self, R=None, t=None):
# super(Xform, self).__init__()
# if isvec(R) and t is None: R,t = Imat,R
# self.R = R if R else Imat
# self.t = t if t else V0
# assert ismat(self.R) and isvec(self.t)
# # def rotation_center(X):
# # axis,ang = X.rotation_axis()
# # cen = -(X.R-Imat).transposed()*X.t
# # return axis,ang,cen
# def from_four_points(s,cen,a,b,c):
# s.t = cen
# e1 = (a-b).normalized()
# e3 = e1.cross(c-b).normalized()
# e2 = e1.cross(e3).normalized()
# # print "from_four_points"
# # print e1
# # print e2
# # print e3
# s.R = Mat(e1.x,e2.x,e3.x,e1.y,e2.y,e3.y,e1.z,e2.z,e3.z)
# return s
# def from_two_vecs(s,a,b):
# e1 = a.normalized()
# e2 = projperp(a,b).normalized()
# e3 = e1.cross(e2)
# return Xform( Mat(e1.x,e2.x,e3.x,e1.y,e2.y,e3.y,e1.z,e2.z,e3.z),V0)
# def tolocal(s,x): return s.R.transposed() * (x - s.t)
# def toglobal(s,x): return (s.R * x) + s.t
# def __invert__(self):
# R = ~self.R
# t = R * -self.t
# return Xform(R,t)
# def __mul__(X,o):
# if isvec(o): return X.R * o + X.t
# elif isxform(o): return Xform(X.R*o.R,X.R*(o.t) + X.t)
# elif ismat(o): return Xform(X.R*o,X.t)
# elif islist(o): return [X*x for x in o]
# elif istuple(o): return tuple([X*x for x in o])
# elif isiter(o): return (X*x for x in o)
# else: return o.__rmul__(X)
# def __rmul__(X,o):
# if ismat(o): return Xform(o*X.R,o*X.t)
# raise TypeError
# def __div__(X,o):
# if isxform(o): return X*~o
# return o.__rdiv__(X)
# def __add__(X,v):
# if isvec(v): return Xform( X.R, X.t + X.R*v )
# return v.__radd__(X)
# def __radd__(X,v):
# if isvec(v): return Xform( X.R, X.t + v )
# raise TypeError
# def __sub__(X,v):
# if isvec(v): return Xform( X.R, X.t - X.R*v )
# return v.__rsub__(X)
# def __rsub__(X,v):
# if isvec(v): return Xform( X.R, X.t - v )
# raise TypeError
# def __eq__(self,other): return self.R==other.R and self.t==other.t
# def __repr__(self): return "Xform( %s, %s )" % (str(self.R),str(self.t))
# def __eq__(X,Y):
# assert isxform(Y)
# return X.R == Y.R and X.t == Y.t
# def rotation_axis(X): return X.R.rotation_axis()
# def pretty(self):
# a,r = self.rotation_axis()
# if self.t.length() > EPS: return "Xform( axis=%s, ang=%f, dir=%s, dis=%f )"%(str(a),degrees(r),str(self.t.normalized()),self.t.length())
# else: return "Xform( axis=%s, ang=%f, dir=%s, dis=%f )"%(str(a),degrees(r),str(V0),0)
# Ixform = Xform(Imat,V0)
# def stub(cen=None, a=None, b=None, c=None): return Xform().from_four_points(cen,a,b,c)
# def randxform(n=1):
# if n is 1: return Xform(randrot(),randvec())
# return (Xform(randrot(),randvec()) for i in range(n))
# def rotation_around(axs,ang,cen):
# """
# >>>
# """
# R = rotation_matrix(axs,ang)
# return Xform(R,R*-cen+cen)
# def rotation_around_degrees(axs,ang,cen): return rotation_around(axs,radians(ang),cen)
# def test():
# test_rotation_mat()
# def alignvector(a,b):
# """
# >>> u = randvec()
# >>> v = randvec()
# >>> assert v.angle(alignvector(u,v)*u) < EPS
# """
# return rotation_around(a.normalized()+b.normalized(),pi,V0)
# def alignaroundaxis(axis,u,v):
# """
# >>> axis = randnorm()
# >>> u = randvec()
# >>> angle = uniform(-pi,pi)
# >>> v = rotation_matrix(axis,angle)*u
# >>> uprime = alignaroundaxis(axis,u,v)*u
# >>> assert v.angle(uprime) < EPS
# >>> v = randvec()
# >>> uprime = alignaroundaxis(axis,u,v)*u
# >>> assert coplanar(V0,axis,v,uprime)
# """
# return rotation_around(axis, -dihedral(u,axis,V0,v), V0 )
# def alignvectors_minangle(a1,a2,b1,b2):
# """
# exact alignment:
# >>> angdeg = uniform(-180,180)
# >>> a1 = randvec()
# >>> b1 = randnorm()*a1.length()
# >>> l2 = gauss(0,1)
# >>> a2 = rotation_matrix_degrees(a1.cross(randnorm()),angdeg) * a1 * l2
# >>> b2 = rotation_matrix_degrees(b1.cross(randnorm()),angdeg) * b1 * l2
# >>> assert abs(angle(a1,a2) - angle(b1,b2)) < EPS
# >>> Xa2b = alignvectors_minangle(a1,a2,b1,b2)
# >>> assert Xa2b.t.length() < EPS
# >>> assert (Xa2b*a1).distance(b1) < EPS
# >>> assert (Xa2b*a2).distance(b2) < EPS
# if angle(a1,a2) != angle(b1,2b), minimize deviation
# >>> a1,a2,b1,b2 = randvec(4)
# >>> Xa2b = alignvectors_minangle(a1,a2,b1,b2)
# >>> assert coplanar(b1,b2,Xa2b*a1,Xa2b*a2)
# >>> assert (b1.angle(a1)+b2.angle(a2)) > (b1.angle(Xa2b*a1)+b2.angle(Xa2b*a2))
# """
# aaxis = (a1.normalized()+a2.normalized())/2.0
# baxis = (b1.normalized()+b2.normalized())/2.0
# Xmiddle = alignvector(aaxis,baxis)
# assert (baxis).angle(Xmiddle*(aaxis)) < SQRTEPS
# Xaround = alignaroundaxis(baxis, Xmiddle*a1, b1 )#
# X = Xaround * Xmiddle
# assert (b1.angle(a1)+b2.angle(a2)) > (b1.angle(X*a1)+b2.angle(X*a2))
# return X
# # not so good if angles don't match:
# # xa = Xform().from_two_vecs(a2,a1)
# # xb = Xform().from_two_vecs(b2,b1)
# # return xb/xa
# def alignvectors(a1,a2,b1,b2): return alignvectors_minangle(a1,a2,b1,b2)
# # def alignvectors_kindamindis(a1,a2,b1,b2):
# # """
# # >>> ang = uniform(0,1)*360.0-180.0
# # >>> a1 = randvec()
# # >>> b1 = randnorm()*a1.length()
# # >>> l2 = gauss(0,1)
# # >>> a2 = rotation_matrix_degrees(a1.cross(randnorm()),ang) * a1 * l2
# # >>> b2 = rotation_matrix_degrees(b1.cross(randnorm()),ang) * b1 * l2
# # >>> assert abs(angle(a1,a2) - angle(b1,b2)) < EPS
# # >>> Xa2b = alignvectors(a1,a2,b1,b2)
# # >>> assert Xa2b.t.length() < EPS
# # >>> assert (Xa2b*a1).distance(b1) < EPS
# # >>> assert (Xa2b*a2).distance(b2) < EPS
# # >>> a1 = randvec()
# # >>> b1 = randvec()
# # >>> a2 = randvec()
# # >>> b2 = randvec()
# # >>> Xa2b = alignvectors(a1,a2,b1,b2)
# # >>> assert coplanar(b1,b2,Xa2b*a1,Xa2b*a2)
# # >>> if not (b1.distance(a1)+b2.distance(a2)) > (b1.distance(Xa2b*a1)+b2.distance(Xa2b*a2)):
# # ... print b1
# # ... print b2
# # ... print a1
# # ... print a2
# # ... print Xa2b*a1
# # ... print Xa2b*a2
# # """
# # Xmiddle = alignvector(a1+a2,b1+b2)
# # assert (b1+b2).angle(Xmiddle*(a1+a2)) < SQRTEPS
# # assert (b1+b2).angle(Xmiddle*a1+Xmiddle*a2) < SQRTEPS
# # Xaround = alignaroundaxis(b1+b2, Xmiddle*a1, b1 )#
# # return Xaround * Xmiddle
# # # xa = Xform().from_two_vecs(a2,a1)
# # # xb = Xform().from_two_vecs(b2,b1)
# # return xb/xa
# def get_test_generators1():
# x1 = rotation_around_degrees(Vec(0,0,1),180,Vec(0,0,0))
# x2 = rotation_around_degrees(Vec(1,1,1),120,Vec(1,0,0))
# return x1,x2
# def expand_xforms(G,N=3,c=Vec(1,3,10)):
# """
# >>> G = get_test_generators1()
# >>> for x in expand_xforms(G): print x*Ux
# (-1.000000,0.000000,0.000000)
# (1.000000,0.000000,0.000000)
# (1.000000,-0.000000,0.000000)
# (-1.000000,0.000000,0.000000)
# (1.000000,-2.000000,0.000000)
# (1.000000,0.000000,0.000000)
# (-1.000000,2.000000,0.000000)
# (-1.000000,0.000000,0.000000)
# (1.000000,-2.000000,0.000000)
# (1.000000,-0.000000,-2.000000)
# """
# seenit = set()
# for Xs in chain(G,*(product(G,repeat=n) for n in range(2,N+1))):
# X = Xs if isinstance(Xs,Xform) else reduce(Xform.__mul__,Xs)
# v = X*c
# key = (round(v.x,3),round(v.y,3),round(v.z,3))
# if key not in seenit:
# seenit.add(key)
# yield X
# def find_identities(G,n=6,c=Vec(1,3,10)):
# """
# >>> G = get_test_generators1()
# >>> for I in find_identities(G): print I.t
# (0.000000,0.000000,0.000000)
# (-2.000000,2.000000,2.000000)
# (2.000000,-2.000000,2.000000)
# """
# for x in expand_xforms(G,n,c):
# if (abs(x.R.xx-1.0) < 0.0000001 and
# abs(x.R.yy-1.0) < 0.0000001 and
# abs(x.R.zz-1.0) < 0.0000001 ):
# yield x
# def get_cell_bounds_orthogonal_only(G,n=6,c=Vec(1,3,10)):
# """
# very slow... need to speed up
# # >>> G = get_test_generators1()
# # >>> get_cell_bounds_orthogonal_only(G[:2],12)
# # (4.0, 4.0, 4.0)
# """
# mnx,mny,mnz = 9e9,9e9,9e9
# for i in (I.t for I in find_identities(G,n)):
# if abs(i.x) > SQRTEPS and abs(i.y) < SQRTEPS and abs(i.z) < SQRTEPS: mnx = min(mnx,abs(i.x))
# if abs(i.x) < SQRTEPS and abs(i.y) > SQRTEPS and abs(i.z) < SQRTEPS: mny = min(mny,abs(i.y))
# if abs(i.x) < SQRTEPS and abs(i.y) < SQRTEPS and abs(i.z) > SQRTEPS: mnz = min(mnz,abs(i.z))
# return round(mnx,3),round(mny,3),round(mnz,3)
if __name__ == '__main__':
    import doctest
    # Run the module doctests up to 10 times (many of them use random
    # inputs, so repeated runs catch flaky numerical cases); stop as soon
    # as any attempt reports failures.
    for i in range(10):
        r = doctest.testmod()
        # print(r) parses identically in Python 2 (single argument).
        print(r)
        # '!=' rather than 'is not': small-int identity is a CPython detail.
        if r[0] != 0: break
| true |
d35d5f545241c764fb91a2eed31938812edd33a5 | Python | proevgenii/EPAM-HW-2020- | /hw2/hw4.py | UTF-8 | 744 | 3.765625 | 4 | [] | no_license | """
Write a function that accepts another function as an argument. Then it
should return such a function, so the every call to initial one
should be cached.
def func(a, b):
return (a ** b) ** 2
cache_func = cache(func)
some = 100, 200
val_1 = cache_func(*some)
val_2 = cache_func(*some)
assert val_1 is val_2
"""
import functools
from typing import Callable
def cache(func: Callable) -> Callable:
    """Memoize *func*: repeated calls with the same arguments return the
    exact same cached object instead of recomputing.

    The returned wrapper exposes a ``called`` attribute counting how many
    times the wrapped function was actually invoked (cache misses).
    """
    cached_val = {}

    @functools.wraps(func)
    def decorator(*args, **kwargs):
        # Sort kwargs so {'a': 1, 'b': 2} and {'b': 2, 'a': 1} share a key.
        key = args + tuple(sorted(kwargs.items()))
        if key not in cached_val:
            cached_val[key] = func(*args, **kwargs)
            # BUG FIX: the counter used to be reset to 0 at the start of
            # every call, so it could never accumulate across calls;
            # initialise it once (below) and only increment on a miss.
            decorator.called += 1
        return cached_val[key]

    decorator.called = 0
    return decorator
| true |
e6064ea9a48f667e99cde440be58b0b0abbad154 | Python | hafsaabbas/-saudidevorg | /52 day.py | UTF-8 | 274 | 3.296875 | 3 | [] | no_license | import datetime
# Print the full current timestamp.
x=datetime.datetime.now()
print(x)
# (the repeated imports below are redundant but harmless)
import datetime
x=datetime.datetime.now()
print(x.year)            # current year
print(x.strftime("%A"))  # weekday name, e.g. 'Monday'
import datetime
# NOTE(review): year 2202 looks like a typo for 2022 -- confirm intent.
x=datetime.datetime(2202,5,17)
print(x)
import datetime
x=datetime.datetime(2018,6,1)
print(x.strftime("%B"))  # month name, e.g. 'June'
| true |
605206875d9afea1f54adfcf6118946254de0c93 | Python | mmurch/advent2020 | /main.py | UTF-8 | 441 | 3.359375 | 3 | [] | no_license | def star_one():
print(f'star one answer: { "shrug" }')
def star_two():
    """Print the placeholder answer for puzzle part two."""
    answer = "shrug"
    print('star two answer: ' + answer)
def get_input_as_strings():
    """Read input.txt and return its lines as a list of strings
    (trailing newlines removed)."""
    with open('input.txt', 'r') as handle:
        return handle.read().splitlines()
def get_input_as_ints():
    """Read input.txt and return its lines parsed as a list of ints.

    Raises ValueError if any line is not a valid integer literal.
    """
    with open('input.txt', 'r') as fd:
        # A comprehension replaces the original list(map(lambda l: int(l), ...)).
        return [int(line) for line in fd.read().splitlines()]
# Script entry point: print the answers for both puzzle parts.
if __name__ == '__main__':
    star_one()
    star_two()
| true |
b24ebb11682eed0e8d4a6c5cac1163b01ffd8932 | Python | chelsyx/photoPreprocess | /crop_resize_sketch.py | UTF-8 | 3,064 | 2.890625 | 3 | [] | no_license | import sys
import cv2
import numpy as np
import os
"""
Using OpenCV Python interface, cv2, this script execute the following task:
1, Detect face in a photo
2, Crop the photo into a square with the face in the center
3, Resize the image
4, Create a sketch from the photo
Usage:
python crop_resize_sketch.py xdim ydim photo_dir [file ...]
Note that the edgePreservingFilter, detailEnhance and pencilSketch are only available in OpenCV 3. Installation instruction is here:
http://www.learnopencv.com/install-opencv-3-on-yosemite-osx-10-10-x/
"""
def detect(path):
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
xmlpath = os.path.dirname(__file__)
cascade = cv2.CascadeClassifier(xmlpath+'/haarcascade_frontalface_alt.xml')
rects = cascade.detectMultiScale(gray, scaleFactor=1.15, \
minNeighbors=6, minSize=(20,20))
if len(rects) == 0:
print path
return [], img
points = np.empty_like(rects)
points[:] = rects
points[:, 2:] += points[:, :2]
return rects, points, img
def facecrop_max(path):
    """Crop the image at *path* around the first detected face, expanded
    symmetrically by the largest margin that still fits in the image.

    NOTE(review): the result is square only when the detected face box is
    square -- confirm this holds for the cascade in use.
    """
    rects, points, img= detect(path)
    height, width, channels = img.shape
    # Use only the first detected face box (corner form: x1, y1, x2, y2).
    x1,y1,x2,y2 = points[0]
    # Largest margin that keeps the expanded box inside all four edges.
    final_delta = min(x1,width-x2,y1,height-y2)
    x1 = x1-final_delta
    x2 = x2+final_delta
    y1 = y1-final_delta
    y2 = y2+final_delta
    # NumPy slicing is rows (y) first, then columns (x).
    cropped = img[y1:y2, x1:x2]
    return cropped
def resize_img(img, outsize):
    """Resize *img* to *outsize* = (width, height).

    Uses cv2.INTER_AREA when shrinking and cv2.INTER_CUBIC when enlarging,
    as recommended by the OpenCV resize documentation.
    """
    height, width = img.shape[:2]
    # BUG FIX: the original compared height against the target *width*
    # (outsize is (w, h) while shape is (h, w)) and had the interpolation
    # modes swapped: INTER_AREA is the recommended filter for shrinking,
    # INTER_CUBIC for enlarging.
    if width > outsize[0] or height > outsize[1]:
        # Image is larger than the target in some dimension: shrinking.
        res = cv2.resize(img, outsize, interpolation = cv2.INTER_AREA)
    else:
        res = cv2.resize(img, outsize, interpolation = cv2.INTER_CUBIC)
    return res
def sketch_img(img):
    """Turn a BGR photo into a grayscale pencil-sketch image.

    Requires OpenCV 3+ (edgePreservingFilter / detailEnhance /
    pencilSketch are unavailable in OpenCV 2).
    """
    # A stylization alternative kept for reference:
    # outimg = cv2.stylization(img, sigma_s=60, sigma_r=0.07)
    # Smooth while keeping edges, then boost local detail before sketching.
    outimg = cv2.edgePreservingFilter(img, flags=1, sigma_s=60, sigma_r=0.4)
    outimg = cv2.detailEnhance(outimg, sigma_s=10, sigma_r=0.15)
    # pencilSketch returns both a grayscale and a color sketch; only the
    # grayscale version is used.
    dst_gray, dst_color = cv2.pencilSketch(outimg, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    return dst_gray
def main():
    """Command-line driver: crop, resize and sketch every photo in a dir.

    Usage: python crop_resize_sketch.py xdim ydim photo_dir [file ...]
    Writes resized crops to <dir>/resizedImg/ and sketches to
    <dir>/sketchImg/.
    """
    args = sys.argv[1:]
    if not args:
        print 'usage: xdim ydim photo_dir [file ...]'
        sys.exit(1)
    # Target output size as (width, height).
    outsize = (int(args[0]), int(args[1]))
    # NOTE(review): 'dir' shadows the builtin, and the concatenations
    # below assume it ends with '/' -- confirm callers pass it that way.
    dir = args[2]
    if len(args)>3:
        filenames = args[3:]
    else:
        # No explicit file list: process every non-hidden file in the dir.
        filenames = next(os.walk(dir))[2]
        filenames = [f for f in filenames if not f[0] == '.']
    for fname in filenames:
        path = dir + fname
        # Square-ish face-centred crop, saved once resized...
        cropimg = facecrop_max(path)
        resizeimg = resize_img(cropimg, outsize)
        resdir = dir + 'resizedImg/'
        if not os.path.exists(resdir):
            os.mkdir(resdir)
        cv2.imwrite(resdir+fname, resizeimg)
        # ...and a pencil-sketch version of the same crop.
        sketchimg = sketch_img(cropimg)
        sketchimg = resize_img(sketchimg, outsize)
        sktdir = dir + 'sketchImg/'
        if not os.path.exists(sktdir):
            os.mkdir(sktdir)
        cv2.imwrite(sktdir+fname, sketchimg)
    sys.exit(0)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
| true |
63c2ce47b4fff9a03418c2ab0e676c7ec22c8120 | Python | Fredpwol/AdventofCode2020 | /DAY3/PART2/day3part2.py | UTF-8 | 519 | 3.421875 | 3 | [] | no_license | data = open("input.txt", "r")
# Read the puzzle grid ('data' is the file handle opened above) and strip
# trailing newlines from every row.
path = data.readlines()
cleaned_path = [ ln.strip() for ln in path ]
# Width of one grid tile; the pattern repeats infinitely to the right.
max_window = len(cleaned_path[0])
def find_path_tree(right, down):
    """Count trees ('#') hit while descending the grid with the given
    (right, down) slope, wrapping horizontally as the pattern repeats."""
    tree_count = 0
    col = 0
    for row in range(0, len(cleaned_path), down):
        if cleaned_path[row][col % max_window] == "#":
            tree_count += 1
        col += right
    return tree_count
# Slopes to evaluate, as (right, down) pairs.
directions = [(1,1), (3, 1), (5, 1), (7, 1), (1, 2)]
# Part 2 answer: the product of the tree counts over every slope.
res = 1
for dir in directions:
    res *= find_path_tree(dir[0], dir[1])
print(res) | true |
c73405f127eda2c9d4ce492c2a4184c4a7242d1a | Python | sergogoose/ivt105 | /Гускин ЛР-1.py | UTF-8 | 1,624 | 4.125 | 4 | [] | no_license | @author: sergey
"""
# Task 1: greet the user by name.
Name = input("Как вас зовут?")
print("Привет, {}".format(Name))
# Task 2: print a multi-line message.
print('''Ага!
Я могу управлять этим компьютером!
Вот только зачем? :'C
Пойду поищу смысл жизни "__"''')
# Task 3: read three numbers and print their sum.
print("Задайте последовательно 3 числа: ")
a = int(input("Первое число: "))
b = int(input("Второе число: "))
c = int(input("Третье число: "))
print("Сумма: {}" .format(a + b + c))
# Task 4: area of a circle from an integer radius (pi approximated as 3.14).
radius = int(input("Радиус: "))
x = 3.14
pi = x
area = pi * radius **2
print(area)
# Task 5: read and echo a floating-point radius.
Radius = float(input("Введите радиус: "))
print(Radius)
# Task 6: split apples among schoolchildren (apples each, apples left over).
x = int(input("Школьников было:"))
y = int(input("Яблок было:"))
m = y // x
l = y % x
print(m,l)
# Task 7: convert minutes slept into hours and minutes (wrapping at 24h).
n = int(input("Сколько минут я спал?"))
hours = n % (60 * 24) // 60
minutes = n % 60
print("Так,я спал {} часов и {} минут" .format(hours, minutes))
# Task 8: swap two numbers without a temporary variable.
a=int(input("Введите первое число:"))
b=int(input("Введите второе число:"))
a=a+b
b=a-b
a=a-b
print("Первое число: {} ; Второе число: {};" .format(a,b))
# Task 9: perimeter and area of a square.
# BUG FIX: the side length was read into the CYRILLIC variable 'а' while
# the formulas below use the LATIN 'a' (still holding the task-8 value);
# read into the Latin 'a' so the results match the entered side.
a=float(input("Введите сторону квадрата:"))
p=(a*4)
s=(a**2)
print("Периметр: {}; Площадь: {};" .format(p,s))
# Task 10: repeat "24" fifteen times, parse the digits as an int, square it.
print(int("24"*15)**2)
| true |
107f45ec08b2b81a5167b8cf46aa544b9bcaf6e9 | Python | galipkaya/exercism-python | /tournament/tournament.py | UTF-8 | 2,096 | 3.140625 | 3 | [] | no_license | import functools
class Info:
    """Mutable per-team standing used while tallying tournament results."""
    def __init__(self, name, match_played, win, draw, loss, point):
        self.name = name                  # team name
        self.match_played = match_played  # matches played so far
        self.win = win                    # wins (worth 3 points each)
        self.draw = draw                  # draws (worth 1 point each)
        self.loss = loss                  # losses (worth 0 points)
        self.point = point                # accumulated points
def compare(item1, item2):
    """Comparator for standings: points decide first; on equal points the
    lexicographically smaller name ranks 'greater' (so a reverse sort
    yields highest points first, then names alphabetically)."""
    if item1.point != item2.point:
        return -1 if item1.point < item2.point else 1
    if item1.name != item2.name:
        return 1 if item1.name < item2.name else -1
    return 0
def get_info(table, name):
    """Return the Info entry for *name* from *table*, appending a fresh
    zeroed entry first when the team is not present yet."""
    for entry in table:
        if entry.name == name:
            return entry
    table.append(Info(name, 0, 0, 0, 0, 0))
    return table[-1]
def tally(rows):
    """Build the formatted standings table from 'HOME;AWAY;result' rows.

    A win is worth 3 points, a draw 1, a loss 0.  Teams are ranked by
    points, ties broken alphabetically.  Returns the table rows as
    strings, header first.
    """
    table = []

    def _record_win(winner, loser):
        # One decisive match: 3 points to the winner, none to the loser.
        win_info = get_info(table, winner)
        win_info.match_played += 1
        win_info.win += 1
        win_info.point += 3
        loss_info = get_info(table, loser)
        loss_info.match_played += 1
        loss_info.loss += 1

    for row in rows:
        team1, team2, status = row.split(";")
        if status == "win":
            _record_win(team1, team2)
        elif status == "loss":
            # 'loss' is from team1's perspective, so team2 is the winner.
            # (The duplicated win/loss branches of the original are now a
            # single helper called with the teams swapped.)
            _record_win(team2, team1)
        else:
            # Any other status is treated as a draw: one match played and
            # one point for each side.
            for name in (team1, team2):
                info = get_info(table, name)
                info.match_played += 1
                info.draw += 1
                info.point += 1
    table = sorted(table, key=functools.cmp_to_key(compare), reverse=True)
    result = ["Team | MP | W | D | L | P"]
    for row in table:
        name = row.name.ljust(31)
        result.append(f"{name}| {row.match_played} | {row.win} | {row.draw} | {row.loss} | {row.point}")
    return result
| true |
5933116f96bbd3e286c4b32bd0a8e4061db1afc2 | Python | dkeProjekt/LoginSignupSettingsService | /signupService/signup_server.py | UTF-8 | 1,521 | 2.53125 | 3 | [] | no_license | #!flask/bin/python
from flask import Flask, jsonify, request, abort
from flask_cors import CORS
from pymongo import MongoClient
from datetime import date
import json
# NOTE(review): 'today' is never used below -- candidate for removal.
today = date.today()
# Flask app with permissive CORS so the frontend can call these endpoints.
app = Flask(__name__)
CORS(app, support_credentials=True)
@app.route('/signup', methods=['POST'])
def signup():
    """Register a new user from a JSON body {email, username, password}.

    Responds with signup_successful=False and an error message when the
    username is already taken; 400 when the body is not JSON.

    NOTE(review): a new Mongo connection is opened per request and never
    closed, and the password is stored in plain text -- worth revisiting.
    """
    if not request.json:
        abort(400)
    email = request.json.get("email")
    username = request.json.get("username")
    password = request.json.get("password")
    client = MongoClient("localhost", 27017)
    database = client['PRDKE']
    collection = database['UserData']
    # Usernames act as the uniqueness key.
    user = collection.find_one({"name": username})
    if user is not None:
        return jsonify({'signup_successful': False, 'username': username, 'error': "Username already taken!"})
    else:
        new_user = {"name": username, "password": password, "email": email, "registration_date": str(date.today())}
        collection.insert_one(new_user)
        return jsonify({'signup_successful': True, 'username': username, 'error': ''})
@app.route('/get_all_users', methods=['GET'])
def get_all_users():
    """Return the names of every registered user.

    The name list is JSON-encoded twice (once via json.dumps into the
    'list_of_users' field, then again by jsonify) -- preserved because
    clients already decode it that way.

    NOTE(review): a new Mongo connection is opened per request and never
    closed -- worth revisiting.
    """
    client = MongoClient("localhost", 27017)
    database = client['PRDKE']
    collection = database['UserData']
    # A comprehension replaces the original cursor-and-append loop.
    user_names = [user["name"] for user in collection.find()]
    user_names_json = json.dumps(user_names)
    return jsonify({'get_all_users_successful': True, "list_of_users": user_names_json})
# Development server entry point.
if __name__ == '__main__':
    app.run(debug=True, port=5002)
| true |
00c5151a20ffe28c00fd374a776b23d551ea0cc5 | Python | yusufa84/PythonBaseCamp | /Assignments/IteratorsGeneratorsHomeAssignment.py | UTF-8 | 422 | 4.1875 | 4 | [] | no_license | # Problem 1
def gensquares(n):
    """Generate the squares of 0, 1, ..., n-1 one at a time."""
    num = 0
    while num < n:
        yield num * num
        num += 1
for x in gensquares(10):
    print(x)
# Problem 2
import random
def rand_num(low, high, n):
    """Yield n pseudo-random integers, each drawn from [low, high]."""
    produced = 0
    while produced < n:
        yield random.randint(low, high)
        produced += 1
for num in rand_num(1,10,5):
    print(num)
# Problem 3: drive an iterator by hand -- each next() yields one character.
s = 'hello'
s_iter = iter(s)
print(next(s_iter))  # h
print(next(s_iter))  # e
print(next(s_iter))  # l
print(next(s_iter))  # l
print(next(s_iter))  # o
| true |
1309186340621f4ccbbc2ee5155e02f36c51d9d9 | Python | soumilmishra/Tkinter | /sample2.py | UTF-8 | 595 | 2.8125 | 3 | [] | no_license | import tkinter
from tkinter import *
# Build a single-window welcome screen sized to the background image.
app = Tk()
app.title("Welcome")
#image2 =Image.open('sad1.png')
# Background image must exist next to the script.
image1 = PhotoImage(file="gradecap1.png")
w = image1.width()
h = image1.height()
# Size the window to the image, anchored at the top-left corner.
app.geometry('%dx%d+0+0' % (w,h))
#app.geometry("800x600")
#app.configure(background='C:\\Usfront.png')
#app.configure(background = image1)
# Label text kept in a StringVar so it could be updated later.
labelText = StringVar()
labelText.set("Welcome !!!!")
#labelText.fontsize('10')
label1 = Label(app, image=image1, textvariable=labelText,
    font=("Times New Roman", 24),
    justify=CENTER, fg="white")
label1.pack(pady=(100,400))
# Enter the Tk event loop (blocks until the window is closed).
app.mainloop()
65691836cc71b27e5f2f56028ba1a9bca26c60d1 | Python | shanbady/sent-suggest | /sent-suggest.py | UTF-8 | 3,752 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
"""
sent-suggest.py:
Generates an alernative sentence by looking up synonyms for particlar words based on their part of speech.
This was inspired by an email I received which I have used as the initial example text.
"""
__author__ = "Shankar Ambady"
__copyright__ = "Copyright 2012, sambady@sessionm.com"
from nltk import ne_chunk,pos_tag
from nltk import ne_chunk,pos_tag
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.tokenize.treebank import TreebankWordTokenizer
from nltk.corpus import stopwords
import nltk.data
from nltk.corpus import wordnet
text = '''
Hello Shankar,
My name is Walter White and I was watching your NLP in Python video on YouTube and had some questions as well as a request from you.
My first question:
Would it be possible, through NLP, to have a computer suggest changes or variants to a sentence based on certain key points in a sentence. Similar to the way Summly summarizes a full body of work by pulling out key pieces. I was wondering if this was possible by pulling out the subject, main verb and key adjectives. So, for example:
The cat drank the milk.
NLP variant: The cat (lapped, drank, sipped) (up) the milk (ferociously, with much appreciation, with a hungry intent).
So essentially, what the program should do is act as a sentence thesaurus, suggesting bonus adjective phrases to a sentence as well as building a thesaurus type response that would input phrase variations. In an extremely simplistic 100% prototype kind of way, is this possible?
My second question is: Can, through NLP, a program find flaws in a sentence? I know Microsoft Word can find fragments, and every app can basically find spelling errors and thesaurus's are already real, but is there a way for a program, running Python, to find sentence variants based on a sentence weak sound? So if a sentence just isn't strong, can a program understand that and change that?
My request now is, would you be willing to help me better understand Python and NLP through a sort of email back and forth, share code kind of way?
I have an idea and I really want to build it and I think the plausibility of it working is there.
The idea is essentially to thesaurus sentences from essays submitted by any person. So a person, without an actual account or anything (I'd rather save all the extra difficulty for down the line) can come on plug their essay into a text field, click submit and the essay comes up in a box and you click on sentence and a box next to it gives you variants to a sentence. Is that possible and if so, how would I begin going about doing it?
I hope you consider helping me,
Thanks,
Walter White
'''
def find_synonym(word, pos):
    """Return every WordNet lemma name for *word* with part of speech *pos*.

    Duplicates are preserved, in synset order, matching the original
    nested-loop behaviour.  NOTE(review): 'lemma_names' is accessed as an
    attribute here (old NLTK); modern NLTK exposes it as a method --
    confirm against the installed version.
    """
    # Flattening comprehension replaces the original nested append loops.
    return [syn
            for synset in wordnet.synsets(word, pos=pos)
            for syn in synset.lemma_names]
# NOTE(review): this rebinding shadows the imported 'stopwords' corpus
# module with a plain list; the list is never used below.
stopwords = stopwords.words('english') # common english words to filter out
# Tokenize into sentences, then words, then POS-tag each word.
sentences = nltk.sent_tokenize(text)
tokens = [nltk.wordpunct_tokenize(sentence) for sentence in sentences]
tagged = [pos_tag(token) for token in tokens]
# changedstr: text with each word replaced by its first synonym (if any).
# inlinestr: original text annotated inline with '(syn1,syn2,...)'.
changedstr = []
inlinestr = []
for taggedsentence in tagged:
    for postagged in taggedsentence:
        word = postagged[0]
        # First letter of the Penn tag approximates the WordNet POS code.
        postag = postagged[1][0].lower()
        synonyms = []
        try:
            synonyms = find_synonym(word.lower(),postag)
        except:
            # NOTE(review): bare except silently skips words whose tag is
            # not a valid WordNet POS -- intended, but it also hides bugs.
            pass
        inlinestr.append(postagged[0])
        if(len(synonyms) > 0):
            word = synonyms[0]
            inlinestr.extend([' (' ,','.join(synonyms) ,')'])
        changedstr.append(word)
changedstr = ' '.join(changedstr)
inlinestr = ' '.join(inlinestr)
print "ORIGINAL TEXT: \n" + text + "\n\n"
print "ALTERNATE TEXT: \n" + changedstr + "\n\n"
print "\n\nINLINE CHANGES: \n" + inlinestr + "\n\n"
| true |
a6a1d4257a1dabf6ff6f3ce3f57deb43674285b7 | Python | jekin000/Fluent_Python | /ch01_Python_Data_Structure_Magic_Function/deck.py | UTF-8 | 2,279 | 3.953125 | 4 | [] | no_license | ##########################################
# 1.1 The deck by __getitem__, __len__
## collections.nametuple, only property,no method's object
## for use __getitem__,__len__, the FrenchDeck get benefit like
### 1. could use python standard method, such as len,
### 2. could use python standard module, such as random.choice
### 3. list[12::13] = get list[12] then get a item for per interval 13
## other note: #doctest: +ELLIPSIS
## key:
### 0. dunder method
### 1. collections.nametuple
### 2. __getitem__, __len__
### 3. random.choice
### 5. list[12::13]
### 6. #doctest: +ELLIPSIS
### 7. reversed, sorted
### 8. in, e.g. Card('Q','hearts') in deck
### 9. order
## note:
### FrenchDeck can not Shuffle(change order), because it is fixed order & no __setitem__
##########################################
import collections
Card = collections.namedtuple('Card',['rank','suit'])
class FrenchDeck:
    """A standard 52-card deck of Card namedtuples.

    Implementing only __len__ and __getitem__ makes the deck work with
    len(), indexing, slicing, iteration, reversed(), `in`, sorted() and
    random.choice -- the point of this Fluent Python example.
    """
    # Ranks '2'..'10' plus face cards, lowest first.
    ranks = [str(n) for n in range(2,11)] +list('JQKA')
    suits = 'spades diamonds clubs hearts'.split()
    def __init__(self):
        # All 52 cards, grouped by suit in declaration order.
        self._cards = [Card(rank,suit) for suit in self.suits
            for rank in self.ranks]
    def __len__(self):
        return len(self._cards)
    def __getitem__(self,position):
        # Delegating to the list gives indexing *and* slicing for free.
        return self._cards[position]
# Suit ranking used for sorting: spades highest, clubs lowest.
suit_values = dict(spades=3,hearts=2,diamonds=1,clubs=0)
def spades_high(card):
    """Sort key: rank first, then suit (spades > hearts > diamonds > clubs)."""
    rank_value = FrenchDeck.ranks.index(card.rank)
    return rank_value * len(suit_values) +suit_values[card.suit]
# Demonstration of everything __len__/__getitem__ buy us, run as a script.
if __name__ == '__main__':
    beer_card = Card('7','diamonds')
    print(beer_card)
    # len() works via __len__.
    deck = FrenchDeck()
    print(len(deck))
    # Access by position via __getitem__.
    print(deck[0])
    print(deck[-1])
    # Standard library functions work directly, e.g. random.choice.
    from random import choice
    print (choice(deck))
    print (choice(deck))
    # In summary:
    # 1. no need to invent custom methods -- len(), sorted() etc. just work;
    # 2. the Python standard library composes with the deck for free.
    # Slicing: __getitem__ forwards [] to self._cards.
    print(deck[:2])
    print(deck[12::13])
    # __getitem__ also makes the deck iterable and reversible.
    for card in deck: #doctest: +ELLIPSIS
        print(card)
    # Without __contains__, the 'in' operator falls back to a sequential scan.
    print('-->The ordered "in" ')
    print(Card('Q','hearts') in deck)
    print(Card('7','beasts') in deck)
    for card in sorted(deck,key=spades_high):
        print(card)
| true |
a1e289edfb9ddf639c97ab945b631edf99212f6f | Python | sammersheikh/python | /numeric.py | UTF-8 | 388 | 4.375 | 4 | [] | no_license | num = 1
num += 1 #equivalent to num = num + 1 (augmented assignment)
print(abs(-3)) #absolute value (get positive number)
print(round(3.75, 1)) #round to nearest digit, second number means round to the first digit after the decimal
num_1 = '100'
num_2 = '200' #these are strings, not integers
num_1 = int(num_1) #prefacing with int() casts the string to that type
num_2 = int(num_2)
print(num_1 + num_2) # 300: integer addition, not string concatenation
| true |
3ce73f557d254d89d8050f5c11aa39438069220f | Python | gregsalvesen/diskspec | /analysis/scripts/gs_stats.py | UTF-8 | 15,629 | 3.40625 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
from scipy.integrate import quad, simps
from scipy.special import erf, erfc
from scipy.optimize import curve_fit
'''
This is a collection of handy statistics tools:
interpolated_median -
confidence_interval -
error1D -
'''
#================================================================================
def interpolated_median(x, y):
    '''
    Locate the (x, y) point where the cumulative trapezoidal area under
    the curve first reaches one half, interpolating linearly between the
    two bracketing grid points.  Assumes y is a normalized PDF sampled on
    an ascending x grid.
    '''
    half_area = 0.5
    # Walk forward until the prefix integral first reaches half the area.
    idx = 0
    prefix_area = 0.0
    while prefix_area < half_area:
        prefix_area = np.trapz(y=y[0:idx + 1], x=x[0:idx + 1])
        idx += 1
    idx -= 1  # undo the final increment of the loop
    # Bracket the crossing with the point that reached half_area (idx)
    # and its predecessor (idx - 1), then interpolate between them.
    i_hi, i_lo = idx, idx - 1
    x_lo, y_lo = x[i_lo], y[i_lo]
    x_hi, y_hi = x[i_hi], y[i_hi]
    area_lo = np.trapz(y=y[0:i_lo + 1], x=x[0:i_lo + 1])
    area_hi = np.trapz(y=y[0:i_hi + 1], x=x[0:i_hi + 1])
    # Linear interpolation factor along the bracketing segment.
    frac = (half_area - area_hi) / (area_lo - area_hi)
    x_ML = frac * (x_lo - x_hi) + x_hi
    y_ML = frac * (y_lo - y_hi) + y_hi
    return x_ML, y_ML
#================================================================================
def confidence_interval(x, y, sigma=0.682689492137, thold=0.001):
    '''
    Bisect on a horizontal cut through the PDF (x, y) until the area
    enclosed between the cut's left and right crossings equals *sigma*
    to within *thold*.  Returns
    (x_ML, y_ML, x_valm, y_valm, x_valp, y_valp): the interpolated
    median point plus the left/right interval endpoints at the final
    cut height.
    Assumes y is a normalized, unimodal PDF on an ascending x grid --
    TODO confirm at call sites.
    '''
    x_ML, y_ML = interpolated_median(x=x, y=y)
    # Split the x-array into left and right of the x-median
    iL_arr = np.where(x <= x_ML)[0]
    iR_arr = np.where(x > x_ML)[0]
    xL_arr = x[iL_arr]
    xR_arr = x[iR_arr]
    yL_arr = y[iL_arr]
    yR_arr = y[iR_arr]
    NiL = len(iL_arr)
    # NOTE(review): NiR is never used below.
    NiR = len(iR_arr)
    # Start at y = 0.5*y_ML and area = 0
    y_now = 0.5 * y_ML
    area_now = 0.0
    # y_top/y_bot bracket the cut height for the bisection below.
    y_top = y_ML
    y_bot = 0.0
    # Iterate until the enclosed area is within the desired threshold
    while (np.abs(area_now - sigma) > thold):
        # Find the xL value corresponding to y_now (scan leftward from the
        # median until the curve drops below the cut).
        yL = y_ML
        iL = NiL-1
        while (yL > y_now):
            yL = yL_arr[iL]
            iL = iL-1
        iL = iL+1
        xL = xL_arr[iL]
        yL = yL_arr[iL]
        iLp1 = iL+1
        xLp1 = xL_arr[iLp1]
        yLp1 = yL_arr[iLp1]
        # Interpolate to get xL_now
        fL_now = (y_now - yL) / (yLp1 - yL)
        xL_now = fL_now * (xLp1 - xL) + xL
        # Find the xR value corresponding to y_now (scan rightward from
        # the median until the curve drops below the cut).
        yR = y_ML
        iR = 0
        while (yR > y_now):
            yR = yR_arr[iR]
            iR = iR+1
        iR = iR-1
        xR = xR_arr[iR]
        yR = yR_arr[iR]
        iRm1 = iR-1
        xRm1 = xR_arr[iRm1]
        yRm1 = yR_arr[iRm1]
        # Interpolate to get xR_now
        fR_now = (y_now - yRm1) / (yR - yRm1)
        xR_now = fR_now * (xR - xRm1) + xRm1
        # Integrate the distribution over xL_now --> xR_now
        # (Cut off the area beyond the interpolation)
        area = np.trapz(y=y[iL:NiL+iR+1], x=x[iL:NiL+iR+1])
        # How much area are we missing to the left [xL --> x_now]?
        area_xtraL = np.trapz(y=[yL,y_now], x=[xL,xL_now])
        # How much area are we missing to the right [x_now --> xR]?
        area_xtraR = np.trapz(y=[y_now,yR], x=[xR_now,xR])
        # Find the proper enclosed area
        area_now = area - (area_xtraL + area_xtraR)
        # Bisection step on the cut height: does area_now match sigma?
        if (area_now > sigma): # Raise y_now to contain smaller area
            if (y_now > y_bot): y_bot = y_now
            y_now = 0.5 * (y_now + y_top)
        if (area_now < sigma): # Lower y_now to contain larger area
            if (y_now < y_top): y_top = y_now
            y_now = 0.5 * (y_now + y_bot)
    # Endpoints of the interval at the converged cut height.
    x_valm = xL_now
    x_valp = xR_now
    y_valm = y_now
    y_valp = y_now
    return x_ML, y_ML, x_valm, y_valm, x_valp, y_valp
#================================================================================
def error1D(x, y, sigma=0.682689492137, normal=False):
    '''
    Purpose:
    --------
    Compute the 1D (possibly asymmetric) errorbar at constant probability
    value on the input PDF.
    Inputs:
    -------
    x - Bins corresponding to the PDF
    y - Probability density function (PDF)
    sigma - Integrate out in the PDF until a fraction "sigma" of the likelihood is enclosed.
    Output:
    -------
    xpeak, errm, errp = [PDF peak, -error, +error]
    NOTE(review): the 'normal' parameter is never used.  The expansion
    assumes a unimodal PDF; behaviour on multimodal input is untested.
    '''
    # Index of the maximum likelihood value.
    iML = np.argmax(y)
    # Beginning and ending indices.
    iB = 0
    iE = y.size-1
    # Initialize the starting index.
    iR = iML+1
    if (iR > iE):
        iR = iE
    # Total enclosed area.
    #area_tot = simps(y, x=x) <-- GIVES BAD RESULT!
    area_tot = np.trapz(y=y, x=x)
    # Initialize the enclosed area.
    area = 0.0
    # Compute the area under the PDF until sigma-% is achieved. <-- This is very slow, can we do it smarter???
    # iL is found by matching the left flank height to the current right
    # edge height y[iR] (constant-probability contour).
    xL = np.interp(y[iR], y[0:iML-1], x[0:iML-1])
    iL = int(np.argmin(np.abs(xL - np.array(x))))
    while (area < sigma):
        #xL = np.interp(y[iR], y[0:iML-1], x[0:iML-1])
        #iL = int(np.argmin(np.abs(xL - np.array(x))))
        # Handle very skewed distributions
        if (iL > iR):
            iL = iB
            iR = iE
        # Interior case: grow the interval one bin to the right and re-match iL.
        if ((iL != iB) and (iR != iE)):
            #area = simps(y[iL:iR+1], x=x[iL:iR+1]) / area_tot <-- GIVES BAD RESULT!
            area = np.trapz(y[iL:iR+1], x=x[iL:iR+1]) / area_tot
            iR += 1
            xL = np.interp(y[iR], y[0:iML-1], x[0:iML-1])
            iL = int(np.argmin(np.abs(xL - np.array(x))))
        # Left edge pinned at the first bin: only the right edge can grow.
        if ((iL == iB) and (iR != iE)):
            area = np.trapz(y[iB:iR+1], x=x[iB:iR+1]) / area_tot
            iR += 1
            xL = np.interp(y[iR], y[0:iML-1], x[0:iML-1])
            iL = int(np.argmin(np.abs(xL - np.array(x))))
        # Right edge pinned at the last bin: shift the peak/left edge instead.
        if ((iL != iB) and (iR == iE)):
            area = np.trapz(y[iL:iE+1], x=x[iL:iE+1]) / area_tot
            xL = np.interp(y[iE], y[0:iML-1], x[0:iML-1])
            iL = int(np.argmin(np.abs(xL - np.array(x))))
            iML -= 1
            iL -= 1
        # Both edges pinned: the whole domain is enclosed.
        # NOTE(review): if this sets area < sigma the loop never exits --
        # confirm inputs always integrate to ~1 over [iB, iE].
        if ((iL == iB) and (iR == iE)):
            area = np.trapz(y[iB:iE+1], x=x[iB:iE+1]) / area_tot
        #else:
        #    xpeak, errm, errp = pdf_peakerr(x, y, sigma=sigma) # Compute xpeak, errm, errp with pdf_peakerr
        #    area = sigma + 1.0 # Pop out of the loop.
        #iR += 1
    # Account for the extra iR += 1 performed on the last while loop iteration.
    iR -= 1
    # Collect the results
    #if ((iL != iB) and (iR != iE)):
    #    xpeak = x[iML]
    #    errm = x[iML] - x[iL]
    #    errp = x[iR] - x[iML]
    xpeak = x[iML]
    errm = x[iML] - x[iL]
    errp = x[iR] - x[iML]
    return xpeak, errm, errp
#===============================================================================
def pdf_peakerr(x, y, sigma=0.682689492137):
    '''
    Purpose:
    --------
    Find the peak of a probability distribution function and compute the 1D
    (asymmetric) errorbar.
    Inputs:
    ------
    x - Bins corresponding to the PDF
    y - Probability distribution function (PDF)
    sigma - Integrate out in the PDF until a fraction "sigma" of the
    likelihood is enclosed (default is 1-sigma = 68%).
    Outputs:
    -------
    xpeak, errm, errp = [PDF peak, -error, +error]
    NOTE(review): the rectangle-sum below assumes uniformly spaced bins
    (dx is taken from the first pair of x values only).
    '''
    # Index of the peak (i.e., the maximum likelihood index).
    ipeak = np.argmax(y)
    # Value corresponding to the peak of the PDF.
    xpeak = x[ipeak]
    # Beginning and ending indices.
    iB = 0
    iE = y.size-1
    # Initialize the indices that will be rightward and leftward of the peak.
    iR = ipeak
    iL = ipeak
    # Total area contained in the PDF.
    #area_tot = simps(y, x=x) <-- GIVES BAD RESULT!
    area_tot = np.trapz(y, x=x)
    # Initialize the enclosed area to the peak of the distribution.
    #area = 0.0
    dx = x[1] - x[0]
    area = y[ipeak] * dx
    # Step to the left and right incrementally until you reach the desired error bar or run out of bins. <-- Use a more sophisticated integrator, this just sums stuff
    while ((area / area_tot) < sigma):
        # Grow rightward only while there is remaining mass on that side.
        if (iR != iE):
            if (np.sum(y[iR::]) != 0.0):
                iR += 1
                area += y[iR] * dx
        # Grow leftward symmetrically, under the same condition.
        if (iL != iB):
            if (np.sum(y[0:iL+1]) != 0.0):
                iL -= 1
                area += y[iL] * dx
        if ((iL == iB) and (iR == iE)): # Escape this awful loop that needs looking into...
            area = area_tot
    errm = x[ipeak] - x[iL]
    errp = x[iR] - x[ipeak]
    return xpeak, errm, errp
#===============================================================================
def distfunc(y, Nbins, xmin, xmax):
    '''
    Purpose:
    --------
    Compute the binned distribution from an unbinned distribution determined
    from a Monte Carlo simulation.
    Inputs:
    ------
    y - Unbinned distribution from a Monte Carlo simulation (array)
    Nbins - Number of bins
    xmin - Minimum value for the histogram bin range
    xmax - Maximum value for the histogram bin range
    Output:
    -------
    Binned distribution (counts array), bin center values (list)
    '''
    dist, binedges = np.histogram(y, bins=Nbins, range=(xmin,xmax))
    # PORTABILITY FIX: 'xrange' is Python-2-only; 'range' works in both.
    # Each bin center is the midpoint of its two edges.
    bincenters = [0.5 * (binedges[i] + binedges[i+1]) for i in range(Nbins)]
    return dist, bincenters
#===============================================================================
def histbound(x, x_peak, errm, errp):
    '''
    Purpose:
    --------
    Given a distribution, remove the parts that do not fall within the
    specified +/- range.
    Inputs:
    ------
    x - Unbinned distribution from a Monte Carlo simulation (NumPy array)
    x_peak - Peak as determined in the binned distribution
    errm - Minus error (lower bound = x_peak - errm)
    errp - Plus error (upper bound = x_peak + errp)
    Output:
    -------
    Input distribution with values lying outside [x_peak - errm,
    x_peak + errp] filtered out (bounds inclusive), order preserved.
    '''
    x_errm = x_peak - errm
    x_errp = x_peak + errp
    # PERF/PORTABILITY FIX: the original built an index list and removed
    # out-of-bounds entries one by one (O(n^2), and used Python-2-only
    # 'xrange'); a boolean mask keeps the same elements in the same order
    # in a single O(n) pass.
    return x[(x >= x_errm) & (x <= x_errp)]
#===============================================================================
def sigma_search(x_peak, err, prob, x_min, x_max, A=1.0, acc=1e-2):
    '''
    Purpose:
    --------
    Given a confidence interval that is not 1-sigma (e.g. 90%), find the
    corresponding 1-sigma confidence interval assuming the errors are
    Gaussian distributed.
    Inputs:
    -------
    x_peak - x-value corresponding to the peak of the Gaussian.
    (This is the data point measured value in practice)
    err - Error bar on x_peak, the measured data point
    prob - Confidence associated with err (e.g. 0.90 for 90% confidence)
    x_min - Minimum allowable value for the data point (e.g. +1 for black hole spin)
    x_max - Maximum allowable value for the data point (e.g. -1 for black hole spin)
    A - Amplitude of the peak of the Gaussian (i.e. where CDF = 0.5)
    acc - Absolute (not relative) accuracy to which the 1-sigma value is computed
    (also used to compute the number of x-bins to use)
    Output:
    -------
    Standard deviation (sigma) corresponding to the Gaussian distribution that
    encloses the confidence region 'prob' within the x-range, (x_peak +/- err).
    NOTE(review): x_min, x_max and A are only used by the disabled
    alternative implementation below, and sigma_68 is never used.
    '''
    sigma_68 = 0.682689492137 # Confidence interval corresponding to 1-sigma
    sigma_guess = err # Initial guess for the confidence interval
    x_err = x_peak + err # x-location of the error bar extent
    # Compute the magic factor for converting between sigma and prob
    dprob = acc + 1.0
    while (np.abs(dprob) > acc):
        # Compute the CDF probability given the guessed sigma
        # NOTE(review): a Gaussian CDF is 0.5*(1+erf((x-mu)/(sigma*sqrt(2))));
        # the denominator here is 2*sigma**2 instead -- confirm this is
        # intentional before relying on the returned sigma.
        prob_guess = 0.5 * (1.0 + erf((x_err - x_peak) / (2.0 * sigma_guess**2.0)))
        # Modify sigma_guess based on if prob_guess is > or < prob
        dprob = prob_guess - prob
        if (prob_guess >= prob):
            dsigma_guess = 0.5 * dprob # Need to increase sigma_guess
        else:
            dsigma_guess = 0.5 * dprob # Need to decrease sigma_guess
        # Increment the sigma guess
        sigma_guess += dsigma_guess
    # This technique below should work too, but it takes forever.
    '''
    x_err = x_peak - err # x-location of the error bar extent
    dx = acc + 1.0 # Initialize the dx value to be > the accuracy requested.
    # Make *sure* there are adequate bins to resolve the 1-sigma value.
    Nbins = int(10.0 * np.abs(x_max - x_min) / acc)
    # Iteratively compute Gaussian PDFs until converging on the proper sigma.
    x = np.linspace(x_min, x_max, Nbins)
    while (np.abs(dx) > acc):
        # Compute the PDF
        pdf = Gauss1D_pdf(x, A=A, mu=x_peak, sigma=sigma_guess)
        # Given the PDF just constructed, find the x-values that enclose a
        # fraction 'prob' of the distribution.
        x_peak_guess, errm_guess, errp_guess = error1D(x, pdf, sigma=prob)
        # +/- value from the peak of the distribution where a fraction 'prob' of the distribution is enclosed.
        x_err_guess = x_peak - errm_guess # x-location
        if (np.allclose(errm_guess, errp_guess, rtol=acc, atol=acc) == False):
            print "ERRORS ARE ASYMMETRIC -- THIS IS NOT A GAUSSIAN!"
            sys.exit(1)
        # Modify sigma_guess based on if x_prob is > or < x_err
        dx = x_err_guess - x_err
        x_sigma_guess = x_peak - sigma_guess
        if (x_err_guess >= x_err):
            dsigma_guess = 0.5 * (x_err_guess - x_err) # Need to increase sigma_guess
        else:
            dsigma_guess = 0.5 * (x_err_guess - x_err) # Need to decrease sigma_guess
        sigma_guess += dsigma_guess
    '''
    # Account for the extra +dsigma made on the final iteration.
    sigma_guess -= dsigma_guess
    return sigma_guess
#================================================================================
def dblGauss_fit(xdata, ydata):
    '''
    Purpose:
    --------
    Fit (xdata, ydata) with a "double matched" Gaussian: a single mean and
    amplitude, but independent left/right standard deviations joined at
    the mean.  A single-Gaussian fit seeds the initial guess.  Returns
    only the best-fit mean.
    '''
    # Define the Gaussian fitting function.
    def func_Gauss(x, mu, sigma, A):
        return A * np.exp(-1.0*(x-mu)**2.0 / (2.0*sigma**2.0))
    # Define the double matched Gaussian fitting function.
    def func_dblGauss(x, mu, sigmaL, sigmaR, A):
        pdf = np.zeros(len(x))
        # Split the grid at the sample closest to the mean; use sigmaL on
        # the left segment and sigmaR on the right.
        i_peak = int((np.abs(x-mu)).argmin())
        pdf[0:i_peak] = A * np.exp(-1.0*(x[0:i_peak]-mu)**2.0 / (2.0*sigmaL**2.0))
        pdf[i_peak::] = A * np.exp(-1.0*(x[i_peak::]-mu)**2.0 / (2.0*sigmaR**2.0))
        return pdf
    # Generate initial parameter guesses by fitting the distribution with a single Gaussian.
    pGauss_opt, pGauss_cov = curve_fit(func_Gauss, xdata, ydata)
    p0_guess = [pGauss_opt[0], pGauss_opt[1], pGauss_opt[1], pGauss_opt[2]]
    # Fit the distribution with a double matched Gaussian.
    p_opt, p_cov = curve_fit(func_dblGauss, xdata, ydata, p0=p0_guess)
    x_mu = p_opt[0]
    # Compute the best-fit double matched Gaussian distribution
    # NOTE(review): y_bestfit is computed but never returned or used.
    y_bestfit = func_dblGauss(xdata, p_opt[0], p_opt[1], p_opt[2], p_opt[3])
    return x_mu
| true |
8c89d30c981cb861af7b4230afe8c752c1e8648b | Python | manoelrui/python-data-structures | /test/datastructure/list/TestQ1.py | UTF-8 | 5,407 | 4.125 | 4 | [] | no_license | import unittest
from datastructure.list.LinkedList import LinkedList
class TestQ1(unittest.TestCase):
    """Unit tests for the LinkedList exercise (questions 1-10).

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual``.  Comments translated to English.
    """

    # 1. Create an empty list;
    def test_creation(self):
        l = LinkedList()
        self.assertIsNotNone(l)
        self.assertIsNone(l.head)
        self.assertEqual(len(l), 0)

    # 2. Insert an element at the beginning;
    def test_insertion(self):
        l = LinkedList()
        self.assertEqual(len(l), 0)
        self.assertEqual("", str(l))
        l.add(56)
        self.assertEqual(len(l), 1)
        self.assertEqual("56", str(l))
        l.add(342)
        self.assertEqual(len(l), 2)
        self.assertEqual("342 56", str(l))
        l.add(70)
        self.assertEqual(len(l), 3)
        self.assertEqual("70 342 56", str(l))
        l.add(60)
        self.assertEqual(len(l), 4)
        self.assertEqual("60 70 342 56", str(l))

    # 3. Print the values stored in the list;
    def test_print(self):
        l = LinkedList()
        self.assertEqual("", str(l))
        l.add(44)
        self.assertEqual("44", str(l))
        l.add(98)
        l.add(12)
        l.add(6)
        self.assertEqual("6 12 98 44", str(l))

    # 4. Print the values stored in the list using recursion;
    def test_recursive_print(self):
        l = LinkedList()
        self.assertEqual("", l.print_recursive())
        l.add(34)
        self.assertEqual("34", l.print_recursive())
        l.add(8)
        l.add(23)
        l.add(112)
        self.assertEqual("112 23 8 34", l.print_recursive())

    # 5. Print the values in reverse order (from tail to head);
    def test_print_reverse(self):
        l = LinkedList()
        self.assertEqual("", l.print_recursive())
        l.add(13)
        self.assertEqual("13", l.print_recursive())
        l.add(76)
        l.add(2)
        l.add(150)
        self.assertEqual("13 76 2 150", l.print_reverse())

    # 6. Check whether the list is empty;
    def test_is_empty(self):
        l = LinkedList()
        self.assertTrue(l.is_empty())
        l.add(676)
        self.assertFalse(l.is_empty())

    # 7. Retrieve/search for a given element of the list;
    def test_find(self):
        l = LinkedList()
        self.assertIsNone(l.find(45))
        l.add(236)
        l.add(34)
        l.add(50)
        l.add(670)
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(50, l.find(50))
        self.assertEqual(670, l.find(670))
        self.assertEqual(236, l.find(236))
        self.assertIsNone(l.find(666))

    # 8. Remove a given element from the list;
    def test_remove(self):
        l = LinkedList()
        # remove() is expected to return exactly False on a miss.
        self.assertTrue(l.remove(78) == False)
        self.assertEqual("", str(l))
        l.add(34)
        l.add(3)
        l.add(78)
        l.add(55)
        l.add(60)
        l.add(70)
        l.add(66)
        self.assertTrue(l.remove(66))
        self.assertEqual(len(l), 6)
        self.assertEqual("70 60 55 78 3 34", str(l))
        self.assertTrue(l.remove(34))
        self.assertEqual(len(l), 5)
        self.assertEqual("70 60 55 78 3", str(l))
        self.assertTrue(l.remove(60))
        self.assertEqual(len(l), 4)
        self.assertEqual("70 55 78 3", str(l))
        self.assertTrue(l.remove(70))
        self.assertEqual(len(l), 3)
        self.assertEqual("55 78 3", str(l))
        self.assertTrue(l.remove(3))
        self.assertEqual(len(l), 2)
        self.assertEqual("55 78", str(l))
        self.assertTrue(l.remove(55))
        self.assertEqual(len(l), 1)
        self.assertEqual("78", str(l))
        self.assertTrue(l.remove(3454) == False)
        self.assertEqual(len(l), 1)
        self.assertEqual("78", str(l))

    # 9. Remove a given element from the list using recursion;
    def test_remove_recursion(self):
        l = LinkedList()
        self.assertTrue(l.remove_recursive(78) == False)
        self.assertEqual("", str(l))
        l.add(34)
        l.add(3)
        l.add(78)
        l.add(55)
        l.add(60)
        l.add(70)
        l.add(66)
        self.assertTrue(l.remove_recursive(66))
        self.assertEqual(len(l), 6)
        self.assertEqual("70 60 55 78 3 34", str(l))
        self.assertTrue(l.remove_recursive(34))
        self.assertEqual(len(l), 5)
        self.assertEqual("70 60 55 78 3", str(l))
        self.assertTrue(l.remove_recursive(60))
        self.assertEqual(len(l), 4)
        self.assertEqual("70 55 78 3", str(l))
        self.assertTrue(l.remove_recursive(70))
        self.assertEqual(len(l), 3)
        self.assertEqual("55 78 3", str(l))
        self.assertTrue(l.remove_recursive(3))
        self.assertEqual(len(l), 2)
        self.assertEqual("55 78", str(l))
        self.assertTrue(l.remove_recursive(55))
        self.assertEqual(len(l), 1)
        self.assertEqual("78", str(l))
        self.assertTrue(l.remove_recursive(3454) == False)
        self.assertEqual(len(l), 1)
        self.assertEqual("78", str(l))

    # 10. Free the list;
    def test_list_clean(self):
        l = LinkedList()
        self.assertEqual(len(l), 0)
        self.assertIsNone(l.head)
        l.add(5)
        l.add(34)
        l.add(980)
        l.clean()
        self.assertEqual(len(l), 0)
        self.assertIsNone(l.head)
        # Cleaning an already-empty list must be a no-op.
        l.clean()
        self.assertEqual(len(l), 0)
        self.assertIsNone(l.head)
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
a8a63b8c7496b1eb943c677bd251964ce1472a7e | Python | foone/vs-movie-generator | /generatevs.py | UTF-8 | 312 | 3.375 | 3 | [] | no_license | import random
def initial_caps(name):
    """Return *name* with its first character upper-cased, rest untouched."""
    head, tail = name[0], name[1:]
    return head.upper() + tail
# Build a lookup of lower-cased name -> capitalised name from names.txt.
names={}
for line in open('names.txt','r'):
    name = line.strip()
    names[name.lower()]=initial_caps(name)
# Print 10 random "A vs. B" pairings.
# NOTE(review): Python 2 code -- `print` statement, and random.choice on
# dict.keys() only works on a py2 list of keys.
for i in range(10):
    selections = [random.choice(names.keys()) for _ in range(2)]
print ' vs. '.join([names[x] for x in selections]) | true |
a01673b12a33cf6de60d97dbdf629a8a4e1e2def | Python | thedavidharris/advent-of-code-2020 | /day3/3b.py | UTF-8 | 220 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
from math import prod
print((lambda m:prod(sum(line[i*dx %len(line)] == '#' for i,line in enumerate(m[::dy])) for dx,dy in [(3,1),(1,1),(5,1),(7,1),(1,2)]))(open('input.txt').read().splitlines())) | true |
b222193c5cd500c735c8ceda60014d3ad219335e | Python | awaz456/Test_feb_19 | /Q_5.py | UTF-8 | 345 | 3.75 | 4 | [] | no_license | class Student:
def __init__(self, name, sec):
    """Store the student's name and section."""
    self.name = name
    self.sec = sec
@classmethod
def gen_stu_from_string(cls, inp):
    """Build a Student from a 'Name-Sec' string.

    Raises ValueError unless *inp* contains exactly one '-'.
    """
    name, sec = inp.split("-")
    return cls(name, sec)
# Read "Name-Sec" from the user and show the resulting attribute dict.
stu1 = Student.gen_stu_from_string(input("Enter input in the form of 'Name-Sec': "))
print(stu1.__dict__)
| true |
c58ff46f6270da051758cbfbdb95e689c4c97503 | Python | Edgar-Saavedra/intro-machine-learning-python | /ch1/iris.py | UTF-8 | 4,406 | 3.171875 | 3 | [] | no_license | # these imports are assumed
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
# END: these imports are assumed
from sklearn.datasets import load_iris
# Tutorial walkthrough: load the iris dataset, inspect it, split it,
# visualise it, and fit/evaluate a 1-nearest-neighbour classifier.
iris_dataset = load_iris()
print("Keys of iris_dataset: \n", iris_dataset.keys())
print(iris_dataset['DESCR'][:193]+"\n")
print("Target names: ", iris_dataset['target_names'])
# Target names: ['setosa' 'versicolor' 'virginica']
print("Feature names:\n", iris_dataset['feature_names'])
print("Type of data:", type(iris_dataset['data']))
print("Shape of data:", iris_dataset['data'].shape)
# contains measurement for 150 flowers.
# Items are called samples
# properties Features
# the shape is the number of samples times the number of features
print("First five rows of data:\n", iris_dataset['data'][:5])
# ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
# First five rows of data:
# [[5.1 3.5 1.4 0.2]
# [4.9 3. 1.4 0.2]
# [4.7 3.2 1.3 0.2]
# [4.6 3.1 1.5 0.2]
# [5. 3.6 1.4 0.2]]
print("Type of target", type(iris_dataset['target']))
print("Shape of target", iris_dataset['target'].shape)
print("Target:\n", iris_dataset['target'])
# Target:
# [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
# 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
# 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
# 2 2]
# ['setosa' 'versicolor' 'virginica']
# 0 = setosa
# 1 = versicolor
# 2 = virginica
# will model "generalize"
# show it new data to assess performance
# training data - data used to build our machine learning model
# test set - held-out data used only for evaluation
# scikit-learn has function that shuffles dataset and splits it: train_test_split()
# scikit-learn data denoted capital X (X cuz data is 2-D)
# scikit-learn labels denoted lowercase y (lowercase 1-D)
from sklearn.model_selection import train_test_split
# random_state makes outcome deterministic, will always have same outcome
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'], iris_dataset['target'], random_state = 0)
print("X_train shape:", X_train.shape)
print("y_train shape:", y_train.shape)
print("X_test shape:", X_test.shape)
print("y_test shape:", y_test.shape)
# IMPORTANT STEP: First make sure to look at data:
# Visualize it using scatter plot
# convert Numpy array to pandas DataFrame
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# create a scattermatrix from the dataframe, color by y_train
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o', hist_kwds={'bins': 20}, s=60, alpha=.8, cmap=mglearn.cm3)
plt.show()  # blocks until the plot window is closed
# many classification algorithms in scikit-learn
# ** k-nearest neighbors classifier **
# only consists of storing the training set
# finds the point in the training set that is closest to the new point
# assigns the label of this training point to the new data point
# k in k-nearest: instead of using only the closest neighbor,
# we can consider any fixed number k of neighbors
# we can make a prediction using the majority class amongst these neighbors
# all machine learning models in scikit-learn implement their own classes
# called Estimator classes
# k-nearest uses KNeighborsClassifier class in neighbors module
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
# to build the model on the training set, we call the fit method of the
# knn object. X_train containing training data and y_train the training labels
# fit returns the knn object itself
knn.fit(X_train, y_train)
# IMPORTANT: Making predictions
# we can make predictions
# we found an iris in the wild with
# sepal length - 5 cm
# sepal width - 2.9 cm
# petal length - 1 cm
# petal width - 0.2 cm
# scikit-learn always expects 2-D arrays for the data
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape", X_new.shape)
# to make a prediction we call the predict method (variable name typo kept)
perdiction = knn.predict(X_new)
print("Perdiction:", perdiction)
print("Predicted target name:", iris_dataset['target_names'][perdiction])
# IMPORTANT: Computing accuracy
y_pred = knn.predict(X_test)
print("Test set predictions:\n", y_pred)
print(f"Test set score: {knn.score(X_test, y_test):.2f}")
| true |
25a15137c3d649f8cca1c9297ff387683880ad67 | Python | sertachak/InterviewQuestions | /project/src/crypt/aes.py | UTF-8 | 882 | 2.890625 | 3 | [] | no_license | from Crypto.Cipher import AES
from pbkdf2 import PBKDF2
import os, random, string, struct
def randomword(length):
    """Return a random alphanumeric string of *length* characters.

    Security fix: the result seeds the PBKDF2-derived AES key below, so it
    must come from a CSPRNG -- use `secrets` instead of `random`, whose
    Mersenne Twister output is predictable.
    """
    import secrets  # local import so the module-level import block is untouched
    chars = string.ascii_lowercase + string.digits + string.ascii_uppercase
    return ''.join(secrets.choice(chars) for _ in range(length))
# Derive a 32-byte AES key from a random password and an 8-byte salt.
password = randomword( 64 )
salt = os.urandom(8)
key = PBKDF2( password, salt).read(32) # AES key must be either 16, 24, or 32 bytes long
# NOTE(review): constant IV defeats CBC's semantic security when the key is
# reused; the TODO below should be addressed before production use.
IV = 16 * '\x01' # TODO IV must be 16 bytes long initialization vector for maximize security IV should be randomly generated for every encryption in can be stored with ciphered text.
mode = AES.MODE_CBC
encryptor = AES.new(key, mode, IV=IV)
text = 'j' * 64 + 'i' * 128 # Input strings must be a multiple of 16 in length
ciphertext = encryptor.encrypt(text)
print( ciphertext )
# Decrypting with the same key/IV should round-trip to the plaintext.
decryptor = AES.new( key, mode, IV=IV)
plain = decryptor.decrypt( ciphertext )
print( plain ) | true |
8f1c3e7d6fbfac072d8734001203c5cbf8b7c5a4 | Python | ihaeyong/SphereGAN-Pytorch-implementation | /ops.py | UTF-8 | 981 | 3.1875 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
class HyperSphereLoss(nn.Module):
    """Mean geodesic distance on the hypersphere between projected samples
    and the north pole N, as used by SphereGAN.

    Reference:
        Woo Park, Sung, and Junseok Kwon.
        "Sphere Generative Adversarial Network Based on Geometric Moment Matching."
        Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2019.
    """

    def forward(self, input):
        projected = self.project_to_hypersphere(input)
        sq_norm = torch.norm(projected, dim=1) ** 2
        cos_angle = (2 * projected[:, -1]) / (1 + sq_norm)
        return torch.mean(torch.acos(cos_angle))

    def project_to_hypersphere(self, v):
        """Inverse stereographic projection of *v* onto the unit hypersphere."""
        sq = torch.norm(v, dim=1, keepdim=True) ** 2
        a = 2 * v / (sq + 1)
        b = (sq - 1) / (sq + 1)
        return torch.cat([a, b], dim=1)
# Smoke test: the loss should remain finite as the input scale grows.
if __name__ == '__main__':
    # Test
    criterion = HyperSphereLoss()
    for i in range(1, 51):
        loss = criterion(torch.randn(64, 2) * i)
print(loss) | true |
b6d21889a6008de07c3eaa2b9c659d79968c4d75 | Python | daniel-reich/ubiquitous-fiesta | /Jjpou65vd6t6xGwvN_1.py | UTF-8 | 108 | 2.859375 | 3 | [] | no_license |
def get_case(txt):
    """Classify *txt* as 'lower', 'upper', or 'mixed' (anything else)."""
    return 'lower' if txt.islower() else 'upper' if txt.isupper() else 'mixed'
| true |
e4eea3a37fb680074e099af1e40b1ff92995839b | Python | Yuvanshanker/Data-Structures-and-Algorithms | /Linked List/Find the middle of a given linked list.py | UTF-8 | 1,458 | 4.375 | 4 | [] | no_license | class Node:
# Function to initialise the node object
def __init__(self, data):
    """Create a node holding *data* with no successor yet."""
    self.data = data # Assign data
    self.next = None # Initialize next as null
# Linked List class contains a Node object
class LinkedList:
    """Minimal singly linked list supporting head insertion."""

    def __init__(self):
        self.head = None

    def push(self, new_data):
        """Insert *new_data* at the front of the list."""
        fresh = Node(new_data)
        fresh.next = self.head
        self.head = fresh

    def printList(self):
        """Print the list as 'a->b->...->NULL'."""
        cursor = self.head
        while cursor:
            print(str(cursor.data) + "->", end="")
            cursor = cursor.next
        print("NULL")

    def printMiddle(self):
        """Print the middle element via the slow/fast two-pointer technique."""
        slow = fast = self.head
        # fast advances two nodes per step, so slow lands on the middle.
        while fast and fast.next:
            slow, fast = slow.next, fast.next.next
        print("The middle element is ", slow.data)
# Code execution starts here
if __name__=='__main__':
    # Push 5,4,3,2,1 at the head, yielding the list 1->2->3->4->5.
    llist = LinkedList()
    for i in range(5, 0, -1):
        llist.push(i)
    llist.printList()    # 1->2->3->4->5->NULL
    llist.printMiddle()  # middle of five elements is 3
| true |
1b753205a9184f8efafc4030322bf69e87bbf4e1 | Python | ahmedaliyahia86/mahratech-python-basics | /Mahartech24.py | UTF-8 | 1,047 | 3.953125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 20:48:18 2020
@author: ahmedali
"""
# Python Collections: 2- Lists
# 13- Lists in Memory:
# ** Storing in both different locations: two equal literals make two objects.
L1 = [1, 2, 3]
L2 = [1, 2, 3]
print(L1)
print(L2)
print('-------------------------------')
# ** Alias: L2 = L1 binds both names to the SAME list object, so a
# mutation through either name is visible through both.
L1 = [4, 5, 6]
L2 = L1
L1[1] = 10
print(L1)
print(L2) # same object as L1
L2[2] = 0
print(L2)
print(L1)
print('-------------------------------')
# ** Cloning a List: a full slice copies, so the two lists are independent.
L1 = [10, 20, 30]
L2 = L1[:]
print(L1)
print(L2)
L2[0] = 99
print(L1)
print(L2) # distinct objects: changing L2 leaves L1 untouched
print('-------------------------------')
# ** Sorting a List: Calling sort() -- sorts in place and returns None.
L1 = [5, -2, 10]
L2 = L1.sort()
print(L1)
print(L2) # None
print('-------------------------------')
# ** Sorting a List : Calling sorted() -- returns a new list; L1 unchanged.
L1 = [5, -2, 10]
L2 = sorted(L1)
print(L1)
print(L2)
print('-------------------------------')
| true |
db059237fa468128895834f12f314905d88cfae7 | Python | westbrookmd/Python-AtBS | /Python-Files/collatzSequence.py | UTF-8 | 378 | 4.21875 | 4 | [] | no_license | # Write your code here :-)
def collatz(number):
    """Print and return the Collatz successor of *number*
    (n/2 when even, 3n+1 when odd)."""
    successor = number // 2 if number % 2 == 0 else 3 * number + 1
    print(successor)
    return successor
# Loop the program
# NOTE(review): `a` is never reassigned, so this outer loop runs forever
# (Ctrl-C to quit) -- presumably intentional for the exercise.
a = 0
while a == 0:
    number = input("Enter an integer: ")
    number = int(number)
    # Repeat the Collatz step until the sequence reaches 1.
    while number != 1:
        number = collatz(number)
    print("We're restarting the loop")
| true |
c0f94401d792c1157b1babf5a0383c80dfdaf082 | Python | Shr1ftyy/casper-python-sdk | /pycspr/api/get_account_info.py | UTF-8 | 1,124 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | import typing
import jsonrpcclient as rpc_client
from pycspr.client import NodeConnectionInfo
# RPC method to be invoked.
# TODO: use new endpoint -> state_get_account_info
_API_ENDPOINT = "state_get_item"
def execute(
    connection_info: NodeConnectionInfo,
    account_hash: bytes,
    state_root_hash: typing.Union[bytes, None] = None
) -> dict:
    """Returns on-chain account information at a certain state root hash.

    :param connection_info: Information required to connect to a node.
    :param account_hash: An on-chain account identifier derived from it's associated public key.
    :param state_root_hash: A node's root state hash at some point in chain time.
    :returns: Account information in JSON format.
    """
    root_hash_hex = state_root_hash.hex() if state_root_hash else None
    response = rpc_client.request(
        connection_info.address_rpc,
        _API_ENDPOINT,
        key=f"account-hash-{account_hash.hex()}",
        path=[],
        state_root_hash=root_hash_hex
    )
    # Unwrap the JSON-RPC envelope down to the account record itself.
    return response.data.result["stored_value"]["Account"]
| true |
8a61e041368ae6c95e2a646b6adc4f979ad8b86c | Python | khushalimehta/Rock_Pepper_Scissors | /Rock_Pepper_Scissors.py | UTF-8 | 688 | 3.78125 | 4 | [] | no_license | print("Please pick one: Rock Scissors Paper")
while True:
    # Map each choice to a rank so the winner can be decided arithmetically:
    # rock(1) beats scissors(2), scissors beats paper(3), paper beats rock.
    choices = {'rock': 1, 'scissors': 2, 'paper': 3}
    playera = input("Player a:")
    playerb = input("Player b:")
    a = choices.get(playera)
    b = choices.get(playerb)
    # Robustness fix: unknown choices used to crash with a TypeError on
    # `None - None`; report them and ask again instead.
    if a is None or b is None:
        print("Invalid input")
        continue
    dif = a - b
    if dif in [-1, 2]:
        print("Player a win")
    elif dif in [-2, 1]:
        print("Player b win")
    else:
        # A tie (dif == 0) is reported the same way the original did.
        print("Invalid input")
        continue
    # Bug fix: the original rebound the builtin `str` to the user's answer,
    # which made the `str(input(...))` calls crash on the second round.
    answer = input("Do you want to play another game,yes or no?")
    if answer != "yes":
        break
| true |
7134bd1dec8aff6ced22afbed19ef8988800a194 | Python | NirmaySriharsha/ReinforcementLearning-LunarLander | /reinforce.py | UTF-8 | 5,980 | 3.1875 | 3 | [] | no_license | import torch
from torch import optim
from torch import nn
import gym
import numpy as np
class BanditEnv(gym.Env):
    '''
    Toy env to test your implementation
    The state is fixed (bandit setup)
    Action space: gym.spaces.Discrete(10)
    Note that the action takes integer values
    '''
    def __init__(self):
        self.action_space = gym.spaces.Discrete(10)
        # Single scalar observation in [-1, 1]; the state never changes.
        self.observation_space = gym.spaces.Box(low=np.array([-1]), high=np.array([1]), dtype=np.float32)
        self.state = np.array([0])

    def reset(self):
        # The bandit state is constant.
        return np.array([0])

    def step(self, action):
        assert int(action) in self.action_space
        # Bandit episode: always terminates after a single step.
        done = True
        s = np.array([0])
        # Reward peaks at 0 for action 7 and falls off quadratically.
        r = float(-(action - 7) ** 2)
        info = {}
        return s, r, done, info
class Reinforce:
    """REINFORCE (vanilla policy-gradient) trainer for a discrete-action policy."""

    def __init__(self, policy, env, optimizer):
        self.policy = policy
        self.env = env
        self.optimizer = optimizer

    @staticmethod
    def compute_expected_cost(trajectory, gamma, baseline):
        """
        Compute the expected cost of this episode for gradient backprop
        DO NOT change its method signature
        :param trajectory: a list of 3-tuple of (reward: Float, policy_output_probs: torch.Tensor, action: Int)
        NOTE: policy_output_probs will have a grad_fn, i.e., it's able to backpropagate gradients from your computed cost
        :param gamma: gamma
        :param baseline: a simple running mean baseline to be subtracted from the total discounted returns
        :return: a 2-tuple of torch.tensor([cost]) of this episode that allows backprop and updated baseline
        """
        cost = 0
        rewards, probs, actions = list(zip(*trajectory))
        T = len(rewards)
        # Discounted returns G_t, accumulated back-to-front.
        discounted_reward = 0
        G = []
        for t in reversed(range(T)):
            discounted_reward = rewards[t] + gamma*discounted_reward
            G.insert(0, discounted_reward)
        G = torch.FloatTensor(G)
        # Baseline normalisation is deliberately disabled for now, so
        # `baseline` is returned unchanged.
        #p = 0.9
        #G = (G - baseline)/torch.std(G, unbiased = False)
        #baseline = p*baseline + (1-p)*torch.mean(G)
        # REINFORCE loss: -sum_t G_t * log pi(a_t | s_t)
        cost = 0
        for t in range(T):
            cost = cost - G[t]*torch.log(probs[t][actions[t]])
        return cost, baseline

    def train(self, num_episodes, gamma):
        """
        train the policy using REINFORCE for specified number of episodes
        :param num_episodes: number of episodes to train for
        :param gamma: gamma
        :return: self
        """
        baseline = 0
        total_reward_per_episode = []
        trajectory_lengths = []
        running_average_reward = 0
        running_average_cost = 0
        for episode_i in range(num_episodes):
            self.optimizer.zero_grad()
            trajectory, trajectory_length, total_reward = self.generate_episode()
            loss, baseline = self.compute_expected_cost(trajectory, gamma, baseline)
            loss.backward()
            self.optimizer.step()
            total_reward_per_episode.append(total_reward)
            print(episode_i)
            if episode_i%200 == 0 and episode_i!=0:
                # NOTE(review): this divides by the episode index, not by the
                # 200-episode window size -- verify the intended average.
                running_average_reward = np.sum(total_reward_per_episode[episode_i - 200:episode_i])/episode_i
                print("Episode: %d Reward: %5d " % (episode_i, total_reward))
            if episode_i%200 == 0 and episode_i!=0:
                # Periodic checkpoint of the policy weights.
                torch.save(self.policy.state_dict(), 'mypolicy.pth')
                #print("Checkpoint created at Episode %d" % (episode_i))
        return self

    def generate_episode(self):
        """
        run the environment for 1 episode
        NOTE: do not limit the number
        :return: whatever you need for training
        """
        state = torch.FloatTensor(self.env.reset())
        total_reward = 0
        trajectory = []
        while True:
            probs = self.policy.forward(state)
            action = torch.distributions.Categorical(probs).sample()
            # BUG FIX: step this instance's environment; the original called
            # the module-level global `env`, which only exists when the file
            # is run as a script (NameError when the class is imported).
            state, reward, finished, __ = self.env.step(action.item())
            total_reward = total_reward + reward
            state = torch.FloatTensor(state)
            trajectory.append([reward, probs, action.item()])
            if finished:
                break
        trajectory_length = len(trajectory)
        return trajectory, trajectory_length, total_reward
# Do NOT change the name of the class.
# This class should contain your policy model architecture.
# Please make sure we can load your model with:
# policy = MyPolicy()
# policy.load_state_dict(torch.load("mypolicy.pth"))
# This means you must give default values to all parameters you may wish to set, such as output size.
class MyPolicy(nn.Module):
    """Feed-forward policy: 8-dim LunarLander state -> 4 action probabilities."""
    def __init__(self):
        super(MyPolicy, self).__init__()
        self.net_stack = nn.Sequential(
            nn.Linear(8, 25),
            nn.ReLU(),
            nn.Linear(25, 16),
            nn.ReLU(),
            nn.Linear(16, 16),
            nn.ReLU(),
            nn.Linear(16, 4),
            # NOTE(review): Softmax over dim 0 assumes a 1-D (unbatched)
            # input, which matches how generate_episode calls forward --
            # confirm before feeding batched input.
            nn.Softmax(dim = 0),
        )
        # Kaiming-uniform initialisation for every Linear layer's weights.
        def kaim_init(m):
            if type(m) == nn.Linear:
                torch.nn.init.kaiming_uniform_(m.weight)
        self.net_stack.apply(kaim_init)

    def forward(self, x):
        ### YOUR CODE HERE AND REMOVE `pass` below ###
        result = self.net_stack(x)
        return result
# Train a policy on LunarLander-v2 and save it for grading.
if __name__ == '__main__':
    policy = MyPolicy()
    optimizer = optim.Adam(policy.parameters(), lr = 0.0001)
    env = gym.make('LunarLander-v2')
    learner = Reinforce(policy, env, optimizer)
    learner.train(10000, 0.99)
    # NOTE(review): the next line of the original file calls
    # `model.state_dict()`, but `model` is never defined -- it should be
    # `policy` (train() already checkpoints to the same path).
torch.save(model.state_dict(), 'mypolicy.pth') | true |
e43a086cbbb6d0ed399e1c35492e7a45a0210418 | Python | Yakub-B/PR-DDB | /L1_OOP/models.py | UTF-8 | 380 | 3.03125 | 3 | [] | no_license | from dataclasses import dataclass, asdict
@dataclass
class UserModel:
    """Plain user record with convenience accessors."""
    pk: int
    email: str
    first_name: str
    last_name: str

    @property
    def full_name(self) -> str:
        """First and last name separated by a single space."""
        return ' '.join((self.first_name, self.last_name))

    @property
    def as_dict(self) -> dict:
        """All dataclass fields as a plain dictionary."""
        return asdict(self)

    def __str__(self) -> str:
        return 'User: {}'.format(self.email)
| true |
5589d056bf4c58c08d556719e2aa5511dad6e63d | Python | marcinpgit/Python_days | /day11/metody_klas4.py | UTF-8 | 1,375 | 3.640625 | 4 | [] | no_license | class Pracownik(object):
roczna_podwyzka = 5  # annual raise percentage, shared by all employees
ilosc_pracownikow = 0  # running count of live Pracownik instances
def __init__(self, imie, stanowisko):
    """Create an employee with a name and position; salary starts unset."""
    self.imie = imie
    self.stanowisko = stanowisko
    self.wynagrodzenie = None
    # Keep the class-wide instance counter in sync (see __del__).
    Pracownik.ilosc_pracownikow += 1
def ustaw_pensje(self, kwota):
    """Set the salary, capping it at the 10000 maximum."""
    self.wynagrodzenie = min(kwota, 10000)
def daj_podwyzke(self):
    """Grow the current salary by the class-wide annual raise percentage."""
    increase = self.wynagrodzenie * (self.roczna_podwyzka / 100)
    self.wynagrodzenie += increase
def __del__(self):
    # Mirror of __init__: drop this instance from the class-wide counter.
    # NOTE(review): __del__ timing is up to the garbage collector, so the
    # count is best-effort only.
    Pracownik.ilosc_pracownikow -= 1
def __str__(self):
    # Human-readable summary: "<name> - <position> ma wynagrodzenie: <salary>".
    return"{} - {} ma wynagrodzenie: {}".format(self.imie, self.stanowisko, self.wynagrodzenie)
@classmethod
def ustaw_roczna_podwyzke(cls, ilosc_p_proc):
    """Set the class-wide annual raise percentage (affects all employees)."""
    cls.roczna_podwyzka = ilosc_p_proc
@classmethod
def pracownik_wynagr(cls, imie, stanowisko, pensja):
    """Alternative initializer: build an employee and set the salary in one call."""
    prac = Pracownik(imie, stanowisko)
    prac.ustaw_pensje(pensja)
    return prac
@staticmethod
def sprawdz_pesel(pesel):
    """Print a warning when *pesel* does not consist of exactly 11 digits."""
    if len(str(pesel)) != 11:
        print("Pesel nieprawidłowy")
# Demo: a 16-digit PESEL is rejected (a valid PESEL has 11 digits).
Pracownik.sprawdz_pesel(1123456789101112)
prac1 = Pracownik("adam", "kowal")  # salary left unset (None)
prac2 = Pracownik.pracownik_wynagr("jakub", "spawacz", 5000)
print(prac1)
print(prac2)
| true |
7586cb6694df0b15990b73da7734dfa1d5b6f0d6 | Python | raufmca/pythonCodes | /leadingzeros.py | UTF-8 | 513 | 4.0625 | 4 | [] | no_license | # leadingzeros
# Request input from the user
num = int ( input ( 'Enter the number between 0 - 9999 : ' ) )
# Clamp out-of-range values into 0..9999.
if num < 0:
    num = 0
if num > 9999:
    num = 9999
print ( end=' [ ' )
# Extract and print thousands-place digit
digit = num // 1000 # extract 1000s-place digit
print ( digit, end='')
num %= 1000
digit = num // 100 # extract 100s-place digit
print ( digit, end='')
num %= 100
digit = num // 10 # extract 10s-place digit (comment fixed: was labelled 1000s)
print ( digit, end='')
num %= 10
print (num, end='') # what remains is the ones digit
print(' ] ')
| true |
e5029462714ef9e8e1c5d96acab06a15459570f0 | Python | wonggamggik/algorithm_solving | /dongbin_book/chap7_binary_search/iterative_binary_search.py | UTF-8 | 763 | 3.96875 | 4 | [] | no_license | """
# Input Data 1
7
1 3 5 7 9 11 13 15 17 19
# Output 1
4
# Input Data 2
7
1 3 5 6 9 11 13 15 17 19
# Output 2
Cannot find 7 in list
"""
import sys
def readline():
    """Read one line from stdin, stripped of trailing whitespace/newline.

    Converted from an assigned lambda (PEP 8 E731) to a named function;
    the public name and call signature are unchanged.
    """
    return sys.stdin.readline().rstrip()
def binary_search(array, target, start, end):
    """Iteratively search sorted *array* for *target* within [start, end].

    Returns the index of *target*, or None when it is absent.
    """
    while start <= end:
        # Overflow-safe midpoint (equivalent to (start + end) // 2 for ints).
        mid = start + (end - start) // 2
        value = array[mid]
        if value == target:
            return mid
        if value > target:
            end = mid - 1
        else:
            start = mid + 1
    return None
if __name__ == '__main__':
    # Read the target, then the sorted sequence, from stdin.
    target = int(readline())
    array = list(map(int, readline().split()))
    result = binary_search(array, target, 0, len(array) - 1)
    # NOTE(review): PEP 8 prefers `result is None` over `== None`.
    if result == None:
        print('Cannot find {} in list'.format(target))
    else:
print(result + 1) | true |
fccbd2b2c54494dbd9da41dde28ccd85668a9729 | Python | teacupfull/python_system | /programming/ex1.py | UTF-8 | 115 | 3.5 | 4 | [] | no_license | x = 34 - 23
# Demonstrates simple variables and a compound condition (x is 11 here).
y = "Hello"
z = 3.45
# Both tests hold, so the branch always runs: x becomes 12, y "Hello World".
if y == "Hello" or z == 3.45:
    x += 1
    y += " World"
print(x)
print(y)
| true |
961716580451524186426a87171683b937116ebe | Python | HashCodeINSA/hashcode2016 | /simu.py | UTF-8 | 2,151 | 2.625 | 3 | [] | no_license | from drone import DRONE_STATUS
#
# DM = DroneManager
# OM = OrderManager
# WM = WarehouseManager
#
def load_drone(drone, order, WM):
    """Fill *drone* with the remaining items of *order* from the closest
    warehouses, then dispatch the delivery (or cancel the order if an item
    is unavailable everywhere)."""
    item_id, item_qty = order.item_left()  # fetch the next outstanding item
    # While the drone is not full and the order is not complete
    while item_id is not None and not drone.is_full(item_id, item_qty):
        # If at least one warehouse holds this item
        warehouse = WM.get_closest_having(item_id, item_qty, drone)
        if warehouse is not None:
            # Ask the drone to load it
            drone.load(warehouse, item_id, item_qty, order)
        else:  # no warehouse holds this item
            # The order has to be abandoned
            order.cancel()
            drone.status = DRONE_STATUS.AVAILABLE
            break
        # Fetch the next outstanding item
        item_id, item_qty = order.item_left()  # remaining item of the order
    # At this point the order is cancelled OR fully loaded OR the drone is full.
    # If the drone is full OR the order is completely loaded
    if item_id is None or drone.is_full(item_id, item_qty):
        # Tell the drone to perform the delivery
        drone.deliver_all(order)  # , item_id, item_qty)
def update_drones(DM, OM, WM):
    """Assign every currently available drone to the next unhandled order."""
    # While an available drone remains
    drone = DM.next_available_drone()
    # NOTE(review): `max_it` and `i` are never used to bound the loop --
    # looks like an intended safety cap that was never wired in; verify.
    max_it = 100
    i = 0
    while drone is not None:
        # If an unhandled order remains
        order = OM.next_unhandled_order()
        if not order is None:
            load_drone(drone, order, WM)
        else:  # no order exists
            # The drone waits
            break#drone.wait()
        # Fetch the next available drone
        drone = DM.next_available_drone()
        i += 1
def simu(DM, OM, WM):
    """Run the whole simulation until the drone manager reports completion."""
    # Until the end of the simulation is reached
    while not DM.simu_finished():
        # Bring the drones up to date, issuing orders where necessary
        update_drones(DM, OM, WM)
        # Advance the simulation by one step
        DM.next_turn()
| true |
624bb95440202df6934ec3011d2e14214b0b2456 | Python | 17craigiec/Connect_Four_AI | /Bomberman/group02/testcharacter.py | UTF-8 | 9,761 | 3.390625 | 3 | [] | no_license | # This is necessary to find the main code
import sys
sys.path.insert(0, '../bomberman')
# Import necessary stuff
from entity import CharacterEntity
from colorama import Fore, Back
class TestCharacter(CharacterEntity):
char = CharacterEntity
char_x = 0
char_y = 0
def do(self, wrld):
# Your code here
# print("I am Located at ("+str(self.char_x)+", "+str(self.char_y)+")")
print("\n\n")
# next_move = self.nextMoveToExit(wrld)
next_move = self.nextMoveHeuristic(wrld)
self.moveChar(wrld, next_move[0], next_move[1])
print("Monsters Located at: "+str(self.locateMonsters(wrld)))
print("distance to EXIT is: "+str(self.distanceToExit(wrld, self.char_x, self.char_y)))
def nextMoveHeuristic(self, wlrd):
best_option = ((0,0), -1)
for neigh in self.getNeighbors(wlrd, self.char_x, self.char_y):
h = self.calcHeuristic(wlrd, neigh[0], neigh[1])
if h > best_option[1]:
best_option = (neigh, h)
return (best_option[0][0]-self.char_x, best_option[0][1]-self.char_y)
def calcHeuristic(self, wrld, x, y):
monsters = self.distanceToMonsters(wrld, x, y)
if monsters:
dist_from_monsters = 0
else:
dist_from_monsters = 1
for m in monsters:
dist_from_monsters += m[1]
print(-20/dist_from_monsters)
return (-10/dist_from_monsters)+(100-self.distanceToExit(wrld, x, y))
def nextMoveToExit(self, wrld):
# Simple BFS to poll the exit
q = [(self.char_x, self.char_y)]
visited = []
path = {}
while q:
cur = q.pop(0)
visited.append(cur)
if wrld.exit_at(cur[0],cur[1]):
print("EXIT FOUND: (" + str(cur[0])+", "+str(cur[1])+")")
# Find next movement
tmp = cur
while path[tmp] != (self.char_x, self.char_y):
tmp = path[tmp]
return (tmp[0]-self.char_x, tmp[1]-self.char_y)
for neigh in self.getNeighbors(wrld, cur[0], cur[1]):
if neigh not in visited and neigh not in q:
q.append(neigh)
path[neigh] = cur
print("!!! No path to EXIT detected !!!")
return (0,0)
def distanceToExit(self, wrld, x, y):
# Simple BFS to poll the exit
q = [(x, y)]
visited = []
path = {}
while q:
cur = q.pop(0)
visited.append(cur)
if wrld.exit_at(cur[0],cur[1]):
# Find next movement
dist = 1
tmp = cur
while tmp != (x, y):
dist = dist + 1
tmp = path[tmp]
return dist
for neigh in self.getNeighbors(wrld, cur[0], cur[1]):
if neigh not in visited and neigh not in q:
q.append(neigh)
path[neigh] = cur
print("!!! No path to EXIT detected !!!")
return -1
def locateMonsters(self, wrld):
# Simple BFS to poll the monsters
monsters = []
q = [(self.char_x, self.char_y)]
visited = []
path = {}
while q:
cur = q.pop(0)
visited.append(cur)
# If cur holds a monster log its location
if wrld.monsters_at(cur[0], cur[1]) != None:
dist = 1
tmp = cur
while tmp != (self.char_x, self.char_y):
dist = dist + 1
tmp = path[tmp]
monsters.append( ((cur[0], cur[1]), dist) )
# break early if both monsters are found
if len(monsters) >= 2:
return monsters
for neigh in self.getNeighbors(wrld, cur[0], cur[1]):
if neigh not in visited and neigh not in q:
q.append(neigh)
path[neigh] = cur
return monsters
def distanceToMonsters(self, wrld, x, y):
# Simple BFS to poll the monsters
monsters = []
q = [(x, y)]
visited = []
path = {}
while q:
cur = q.pop(0)
visited.append(cur)
# If cur holds a monster log its location
if wrld.monsters_at(cur[0], cur[1]) != None:
dist = 1
tmp = cur
while tmp != (x, y):
dist = dist + 1
tmp = path[tmp]
monsters.append( ((cur[0], cur[1]), dist) )
# break early if both monsters are found
if len(monsters) >= 2:
return monsters
for neigh in self.getNeighbors(wrld, cur[0], cur[1]):
if neigh not in visited and neigh not in q:
q.append(neigh)
path[neigh] = cur
return monsters
def isCoordinateValid(self, wrld, x, y):
is_valid = True
# Bounds check
if x >= wrld.width() or x < 0:
return False
if y >= wrld.height() or y < 0:
return False
if wrld.wall_at(x, y):
is_valid = False
return is_valid
def getNeighbors(self, wrld, x, y):
offsets = [-1, 0, 1]
neighbors = []
for dx in offsets:
for dy in offsets:
# dx,dy == 0,0 is the searchng node
if not (dx == 0 and dy == 0):
if self.isCoordinateValid(wrld, x+dx, y+dy):
neighbors.append((x+dx, y+dy))
return neighbors
def moveChar(self, wrld, dx, dy):
# X check
if dx > 1:
dx = 1
elif dx < -1:
dx = -1
# Y check
if dy > 1:
dy = 1
elif dy < -1:
dy = -1
# Bounds check
if not self.isCoordinateValid(wrld, self.char_x+dx, self.char_y+dy):
print("Invalid Move Detected")
dx = 0
dy = 0
self.char_x = self.char_x + dx
self.char_y = self.char_y + dy
print("I am now at ("+str(self.char_x)+", "+str(self.char_y)+")")
self.move(dx, dy)
def moveSouth2(self, wrld):
# Simple BFS move to a south most point
q = [(self.char_x, self.char_y)]
visited = []
path = {}
while q:
cur = q.pop(0)
visited.append(cur)
for neigh in self.getNeighbors(wrld, cur[0], cur[1]):
if neigh not in visited and neigh not in q:
q.append(neigh)
path[neigh] = cur
# Find next movement
south_most = (0, 0)
for v in visited:
if v[1] > south_most[1]:
south_most = v
print("SOUTH MOST: " + str(south_most))
if south_most[1] > self.char_y:
tmp = south_most
while path[tmp] != (self.char_x, self.char_y):
tmp = path[tmp]
return (tmp[0] - self.char_x, tmp[1] - self.char_y)
else:
return (0, 0)
# def __ge__(self, other):
# if other > self:
def findWalls(self, wrld):
# Simple BFS to poll the walls
walls = []
q = [(self.char_x, self.char_y)]
visited = []
path = {}
while q:
current = q.pop(0)
visited.append(current)
# If cur holds a wall log its location
if wrld.walls_at(current[0], current[1]) != None:
dist = 1
tmp = current
while tmp != (self.char_x, self.char_y):
dist = dist + 1
tmp = path[tmp]
walls.append(((current[0], current[1]), dist))
# break early if both walls are found
if len(walls) >= 4:
return walls
for neigh in self.getNeighbors(wrld, current[0], current[1]):
if neigh not in visited and neigh not in q:
q.append(neigh)
path[neigh] = current
return monsters
    def moveThruWall(self, wrld):
        """Attempt to bomb through the wall nearest the exit, wait out the
        blast, then resume moving south.

        NOTE(review): this method cannot run as written — see inline notes;
        it needs a rewrite before being called.
        """
        # find all the walls based on current location
        walls = self.findWalls(wrld)
        # identify wall closest to exit - valid moveable point, lowest y val (south)
        wallToBomb = []
        for wall in walls:
            # NOTE(review): `walls` is a list of ((x, y), dist) tuples, so
            # `walls[wall]` indexes a list with a tuple (TypeError) and
            # `.y` / `.x` attribute access does not exist on tuples.
            # Also `wallToBomb[1]` IndexErrors while the list is empty.
            if walls[wall].y > wallToBomb[1]:
                wallToBomb = [wall.x, wall.y]
        # move to said wall
        self.move(wallToBomb[0], wallToBomb[1])
        # drop bomb against wall
        self.char.drop_bomb()
        # move diagonally while bomb explodes
        # NOTE(review): `x` and `y` are undefined here (NameError).
        self.move(x - 1, y + 1)
        # wait for explosion - change to bomb time
        while wrld.explosion_at(self.char_x, self.char_y):
            # don't move
            # NOTE(review): bare `wait` is an undefined name, not a no-op;
            # `pass` was probably intended.
            wait
        # move back to bomb space
        self.move(x + 1, y - 1)
        nextMove = self.moveSouth2(wrld)
        self.move(nextMove[0], nextMove[1])
        return
    def moveThruWall2(self, wrld):
        """Variant of moveThruWall: step toward the southernmost reachable
        cell, bomb, dodge diagonally, and return after the blast.

        NOTE(review): contains undefined names (see inline notes); it will
        raise NameError if executed.
        """
        # identify wall closest to exit - valid moveable point, lowest y val (south)
        moveToWall = self.moveSouth2(wrld)
        # move to said wall
        # NOTE(review): `wallToWall` is undefined — `moveToWall[1]` was
        # probably intended.
        self.move(moveToWall[0], wallToWall[1])
        # drop bomb against wall
        self.char.drop_bomb()
        # move diagonally while bomb explodes
        # NOTE(review): `x` and `y` are undefined here (NameError).
        self.move(x - 1, y + 1)
        # wait for explosion - change to bomb time
        while wrld.explosion_at(moveToWall[0], moveToWall[1]):
            # don't move
            # NOTE(review): bare `wait` is an undefined name; `pass` intended.
            wait
        # move back to bomb space
        self.move(x + 1, y - 1)
        return
| true |
93af56a6439b3027ddbd7f4d24aa632f095e0043 | Python | Pandani07/Scripting-Languages-Lab | /Assignment7/Odd number range/python.py | UTF-8 | 223 | 3.734375 | 4 | [] | no_license | def OddRange(num1,num2):
    # Collect every odd value in the inclusive range [num1, num2].
    list1=[]
    for n in range(num1,num2+1):
        if n%2!=0:
            list1.append(n)
    # Prints the result rather than returning it.
    print(list1)
# Driver: read the inclusive bounds from the user and show the odd numbers.
a=int(input("Enter the lower base"))
b=int(input("Enter the upper base"))
OddRange(a,b)
| true |
d948bd63136e06a6e9c6edb97c81404f264b7dcd | Python | AndreaSalmaso/segnalazioniProduzioneGUI | /segnalazioni_prod_GUI.py | UTF-8 | 11,068 | 2.609375 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk, font
import ctypes, center_tk_window, keyboard, math
import pandas as pd
import mytkinter as mytk
import excel_handler as eh
from popup import warning_msg
class InserimentoSegnalazioniGUI:
    """Tkinter window for entering production-issue reports ("segnalazioni")
    and appending them to a shared Excel workbook.

    Layout: a data-entry form (operator id, production order, item code,
    report subject, serial number, free-text description), a row of action
    buttons, and a Treeview showing the most recent saved records.
    """
    def __init__(self, master, main_color, path_excel):
        """Build the full UI on *master* using *main_color* as background;
        *path_excel* is the workbook all reports are read from / written to."""
        self.master = master
        self.main_color = main_color
        self.path_excel = path_excel
        # ***************************************************
        # TOP SECTION (DATA ENTRY)
        # ***************************************************
        self.root0 = Frame(self.master, bg=self.main_color)
        self.root0.pack(pady=20)
        # OPERATOR ID
        self.frm0 = Frame(self.root0, bg=self.main_color)
        self.frm0.pack(padx=10, pady=10, fill=BOTH)
        mytk.create_label(self.frm0, 'ID operatore', 12)
        self.opID = StringVar()
        self.opID_ent = mytk.create_entry(self.frm0, self.opID, 7)
        self.opID_ent.pack()
        self.opID_ent.focus()
        #---------------------------------------------
        self.root1 = Frame(self.master, bg=self.main_color)
        self.root1.pack()
        #----- LEFT SIDE --------------------------------------
        self.container1 = Frame(self.root1, bg=self.main_color)
        self.container1.pack(side=LEFT, anchor=NW, padx=30, pady=15)
        # PRODUCTION ORDER
        self.frm1 = Frame(self.container1, bg=self.main_color)
        self.frm1.pack(padx=10, pady=20, fill=BOTH)
        mytk.create_label(self.frm1, 'Ordine di produzione')
        self.ordine = StringVar()
        self.ordine_ent = mytk.create_entry(self.frm1, self.ordine, 9)
        self.ordine_ent.bind('<FocusOut>', mytk.to_uppercase)
        self.ordine_ent.pack(side=LEFT)
        # ITEM CODE (typing filters the listbox below via update_listbox)
        self.frm2 = Frame(self.container1, bg=self.main_color)
        self.frm2.pack(padx=10, pady=20, fill=BOTH)
        mytk.create_label(self.frm2, 'Codice articolo')
        self.codice = StringVar()
        self.codice_ent = mytk.create_entry(self.frm2, self.codice, 20)
        self.codice_ent.bind('<KeyRelease>', self.update_listbox)
        self.codice_ent.pack(side=LEFT)
        # ITEM-CODE LIST (double-click copies the code into the entry)
        self.frm3 = Frame(self.container1, bg=self.main_color)
        self.frm3.pack(padx=10, pady=20, fill=BOTH)
        self.frm31 = Frame(self.frm3)
        self.frm31.pack(side=LEFT)
        mytk.create_label(self.frm31)
        self.frm32 = Frame(self.frm3, bg='white')
        self.frm32.pack(side=LEFT, fill=BOTH)
        self.lb = Listbox(self.frm32, bd=0, highlightthickness=0, highlightbackground='white', takefocus=0, selectmode=SINGLE)
        self.scrollb = Scrollbar(self.frm32, width=20, command=self.lb.yview)
        self.lb.config(yscrollcommand=self.scrollb.set)
        self.lb.bind('<Double-Button-1>', self.enter_item_code)
        self.scrollb.pack(side=RIGHT, fill=Y)
        self.lb.pack(padx=15, pady=15, fill=BOTH, expand=True)
        # Populate the listbox with the product codes from the workbook.
        self.item_list = eh.get_item_codes(self.path_excel)
        self.fill_listbox(self.item_list)
        # -------------------------------------------------------
        #----- RIGHT SIDE ---------------------------------------
        self.container2 = Frame(self.root1, bg=self.main_color)
        self.container2.pack(side=LEFT, anchor=NW, padx=30, pady=15, fill=X)
        # SUBJECT OF THE REPORT (finished product / semi-finished / other)
        self.frm5 = Frame(self.container2, bg=self.main_color)
        self.frm5.pack( padx=10, pady=20, anchor=NW, fill=BOTH)
        mytk.create_label(self.frm5, 'Oggetto della rilevazione')
        self.ogg_rilev = ttk.Combobox(self.frm5, width=15, values=['prodotto finito', 'semilavorato', 'altro'])
        self.ogg_rilev.pack(side=LEFT, padx=50)
        mytk.create_label(self.frm5, '', width=10)
        # SERIAL NUMBER, plus a "serial not available" checkbox
        self.frm6 = Frame(self.container2, bg=self.main_color)
        self.frm6.pack(padx=10, pady=20, fill=BOTH)
        mytk.create_label(self.frm6, 'Numero seriale', 15)
        self.seriale = StringVar()
        self.seriale_ent = mytk.create_entry(self.frm6, self.seriale)
        self.seriale_ent.bind('<FocusOut>', mytk.to_uppercase)
        self.seriale_ent.pack(side=LEFT)
        self.checkVar = IntVar()
        mytk.create_label(self.frm6, '', 5)
        # NOTE(review): tkinter's Checkbutton option is `variable=`; confirm
        # `var=` is accepted by this tkinter version.
        Checkbutton(self.frm6, var=self.checkVar, bg=self.main_color, activebackground=self.main_color, takefocus=0, relief=FLAT).pack(side=LEFT, padx=5)
        self.chkbtn_lbl = Label(self.frm6, text='Seriale non disponibile', anchor='w', fg='white', bg=self.main_color, font=('ubuntu', 12))
        self.chkbtn_lbl.pack(side=LEFT)
        # PROBLEM DESCRIPTION (multi-line Text widget)
        self.frm7 = Frame(self.container2, bg=self.main_color)
        self.frm7.pack(padx=10, pady=4, fill=BOTH)
        mytk.create_label(self.frm7, 'Descrizione')
        self.frm8 = Frame(self.container2, bg='white')
        self.frm8.pack(padx=10, pady=10, fill=BOTH)
        self.descr = Text(self.frm8, height=9, width=63, bd=0)
        self.descr.pack(side=LEFT, padx=15, pady=15)
        # -------------------------------------------------------
        # ***************************************************
        # BOTTOM SECTION (BUTTONS)
        # ***************************************************
        self.root2 = Frame(self.master, bg=self.main_color)
        self.root2.pack(padx=250, fill=X)
        # "Clear all" and "Add report" image buttons.
        self.canc_btn = mytk.create_img_button(self.root2, 'media/button_cancella-tutto.png', 'media/button_cancella-tutto_hov.png', self.clear_all)
        self.canc_btn.pack(side=RIGHT, padx=10)
        self.agg_segn_btn = mytk.create_img_button(self.root2, 'media/button_aggiungi-segnalazione.png', 'media/button_aggiungi-segnalazione_hov.png', self.add_report)
        self.agg_segn_btn.pack(side=RIGHT)
        # ***************************************************
        # BOTTOM SECTION (EXCEL RECORDS)
        # ***************************************************
        self.root3 = Frame(self.master, bg=self.main_color)
        self.root3.pack(padx=250, pady=30, fill=X)
        self.root4 = Frame(self.master, bg=self.main_color)
        self.root4.pack()
        self.df = pd.read_excel(self.path_excel)
        self.records_frm = Frame(self.root4, bg=self.main_color)
        self.records_frm.pack(padx=20)
        self.tree_font = font.Font(family='ubuntu', size=12)
        self.last_records = ttk.Treeview(self.records_frm, height=7)
        ttk.Style().configure('Treeview', rowheight=30, font=('ubuntu', 12))
        ttk.Style().configure('Treeview.Heading', font=('ubuntu', 12, 'bold'))
        self.column_list = list(self.df.columns)
        self.last_records['column'] = self.column_list
        # Format columns: wider for columns 1 and 3, fixed 400px for the
        # description column (index 4), default width otherwise.
        for i, col in enumerate(self.column_list):
            if i == 1 or i == 3:
                width = self.tree_font.measure(col) + 70
            elif i == 4:
                width = 400
            else:
                width = self.tree_font.measure(col) + 30
            self.last_records.column(col, width=width, minwidth=width, anchor=CENTER)
        # Specify the labels to be displayed (thanks to the next
        # definition of the headings, the #0 column won't be shown).
        self.last_records['show'] = 'headings'
        # Define the headings.
        for column in self.last_records['column']:
            self.last_records.heading(column, text=column, anchor=CENTER)
        self.show_last_records()
        self.last_records.pack()
        center_tk_window.center_on_screen(self.master)
        self.update_treeview()
    # Refresh the treeview so entries saved by other operators appear.
    def update_treeview(self):
        """Schedule a records refresh in 20 s.

        NOTE(review): this schedules a single refresh, not a periodic one —
        show_last_records() does not reschedule itself.
        """
        self.master.after(20000, self.show_last_records)
    def show_last_records(self):
        """Reload the workbook and display its last 7 rows in the treeview."""
        # Clear the treeview.
        self.last_records.delete(*self.last_records.get_children())
        self.df = pd.read_excel(self.path_excel)
        df_rows = self.df.to_numpy().tolist()
        # Loop through the most recent rows.
        for row in df_rows[-7:]:
            # Truncate the problem description (column 4) if too long.
            probl_maxlen = 50
            if len(row[4]) > probl_maxlen:
                i = 0
                # NOTE(review): `i -= 1` makes probl_maxlen - i grow, so this
                # scans FORWARD for a space after index 50; if the text has no
                # space past that point this loop raises IndexError.
                while True:
                    if row[4][probl_maxlen - i] == ' ':
                        row[4] = row[4][:probl_maxlen - i] + ' [...]'
                        break
                    else:
                        i -= 1
            row[4] = row[4].capitalize() # capitalize the 1st letter of the problem description
            self.last_records.insert('', 'end', values=row)
    def add_report(self):
        """Validate the form; warn on missing fields, otherwise append the
        report to the workbook and reset the form."""
        d = self.descr.get('1.0',END)[:-1]
        if self.seriale.get()=='' and self.checkVar.get()==0:
            # Serial number missing and "not available" unchecked.
            win = Toplevel(self.master)
            msg = 'Il numero seriale non è stato inserito!'
            warning_msg(win, msg)
        elif self.ordine.get()=='' or self.codice.get()=='' or self.ogg_rilev.get()=='' or d=='' or self.opID.get()=='':
            # Any other empty field: all fields are mandatory.
            win = Toplevel(self.master)
            msg = 'Tutti i campi devono essere riempiti!'
            warning_msg(win, msg)
        else:
            d = self.descr.get('1.0',END)[:-1]
            if self.checkVar.get() == 1:
                s = 'Non disponibile'
            else:
                s = self.seriale.get()
            report_data = [self.ordine.get(), self.codice.get(), self.ogg_rilev.get(), s, d, self.opID.get()]
            eh.add_data_to_sheet(self.path_excel, report_data)
            self.clear_all()
            self.show_last_records()
    def clear_all(self):
        """Reset every form field and restore the full item-code list."""
        self.ordine.set('')
        self.codice.set('')
        self.ogg_rilev.set('')
        self.seriale.set('')
        self.checkVar.set(0)
        self.descr.delete('1.0', END)
        self.fill_listbox(self.item_list)
        self.ordine_ent.focus()
    def fill_listbox(self, items):
        """Replace the listbox contents with *items*."""
        self.lb.delete(0, END)
        for item in items:
            self.lb.insert(END, item)
    def enter_item_code(self, event):
        """Copy the double-clicked listbox entry into the item-code field."""
        self.codice_ent.delete(0, END)
        self.codice_ent.insert(0, self.lb.get(self.lb.curselection()[0]))
    def update_listbox(self, event):
        """Filter the listbox to item codes containing the typed text."""
        partial = self.codice.get().upper()
        new_list = []
        for item in eh.get_item_codes(self.path_excel):
            if partial in item:
                new_list.append(item)
        self.lb.delete(0, END)
        self.fill_listbox(new_list)
def main():
    """Create the Tk root, apply global styling, and run the report GUI."""
    fileExcel_PATH = 'Dati/SegnalazioniProduzione.xlsx'
    BG_COLOR = '#545451'
    root = Tk()
    myfont = font.Font(family='ubuntu', size=16)
    root.title('Segnalazioni Produzione')
    root.iconbitmap(root, 'media/sheet.ico')
    root.option_add('*font', myfont)
    root['bg'] = BG_COLOR
    app = InserimentoSegnalazioniGUI(root, BG_COLOR, fileExcel_PATH)
    # maximize window at opening
    # NOTE(review): simulating Alt+Space+N is a Windows-specific hack; the
    # windll call below also only works on Windows.
    keyboard.press('alt+space+n')
    keyboard.release('alt+space+n')
    ctypes.windll.shcore.SetProcessDpiAwareness(1)
    root.mainloop()
if __name__ == "__main__":
main() | true |
d624344c7e2f1ccfeef37475a5b7a76bb795257c | Python | JakeStubbs4/CMPE-365 | /Assignment 1/connecting_flights.py | UTF-8 | 5,824 | 3.8125 | 4 | [] | no_license | # CMPE 365 Week 2 Lab Problem: Connecting Flights
# Jake Stubbs
# 20005204
# I certify that this submission contains my own work, except as noted.
import sys
INFINITY = sys.maxsize  # sentinel for "no route found yet" in the Dijkstra estimates
# Prompts user for flight schedule input in the form of a text file.
def readInput():
filename = input("Enter a file name representing a flight schedule, including the file type: ")
data = []
with open(filename, "r") as fileIn:
# Parse each line as a list of seperated integers and append to a list of all data.
for line in fileIn:
data.append(list(map(int, line.split())))
return data
# Display results given the path that was determined using getPath().
def displayResult(path_array, cost_array, starting_city, destination_city):
    """Print the chosen route leg by leg, then the arrival time."""
    print(f"The optimal route from {starting_city} to {destination_city} is:")
    # Walk consecutive (origin, next) pairs along the path.
    for origin, nxt in zip(path_array, path_array[1:]):
        print(f"Fly from {origin} to {nxt}")
    print(f"Arriving at {destination_city} at time {cost_array[destination_city]}")
# Returns the path by tracing through the predecessor_array from the destination city to the starting city.
def getPath(predecessor_array, starting_city, destination_city):
    """Walk predecessor links back from the destination; return the
    forward path from starting_city to destination_city."""
    route = [destination_city]
    node = destination_city
    while node != starting_city:
        node = predecessor_array[node]
        route.append(node)
    route.reverse()
    return route
# Dijkstra's algorithm based on the pseudo-code provided in the week 1 lab. This version is modified to allow multiple paths between any two nodes.
def dijkstrasAlgorithm(num_vertices, starting_city, weights):
    """Time-aware Dijkstra over the flight table.

    weights[i][j] is a list of [departure, arrival] pairs from city i to
    city j ([[0, 0]] marks "no flight").  Returns (cost, predecessor):
    the earliest arrival time per city (None if unreached) and each
    city's predecessor on that route.
    """
    # Initialize arrays to their starting positions setting the appropriate starting city for the process.
    cost = [None for i in range(num_vertices)]
    cost[starting_city] = 0
    reached = [False for i in range(num_vertices)]
    reached[starting_city] = True
    estimate = [INFINITY for i in range(num_vertices)]
    estimate[starting_city] = 0
    candidates = [False for i in range(num_vertices)]
    predecessor = [None for i in range(num_vertices)]
    # Update candidates, estimates, and predecessors based on the starting position and its available paths.
    for i in range(num_vertices):
        if (weights[starting_city][i][0] != [0,0]):
            for flight in weights[starting_city][i]:
                if flight[1] < estimate[i]:
                    estimate[i] = flight[1]
                    candidates[i] = True
                    predecessor[i] = starting_city
    # Iterate through all possible nodes to ensure that the optimal cost for each node was calculated.
    for count in range(num_vertices):
        best_candidate_estimate = INFINITY
        # Choose current node and best candidate estimate to proceed with process.
        # NOTE(review): if no candidates remain, `current_node` keeps its
        # previous value (or is undefined on the first pass) and the stale
        # node is re-finalized — consider breaking out when nothing is found.
        for i in range(num_vertices):
            if (candidates[i] == True) and (estimate[i] < best_candidate_estimate):
                current_node = i
                best_candidate_estimate = estimate[i]
        cost[current_node] = estimate[current_node]
        reached[current_node] = True
        candidates[current_node] = False
        # Iterate through each node and update variables accordingly.
        for j in range(num_vertices):
            # Check that the flight path exists (greater than 0) and that it has not yet been reached.
            if ((weights[current_node][j][0][1] > 0) and (reached[j] == False)):
                for flight in weights[current_node][j]:
                    # Check that the flight arrives after the previous flight has landed, that the flight path exists, and that the estimate is better than the best_current_estimate.
                    if ((flight[0] > cost[current_node]) and (flight[1] > 0) and (flight[1] < estimate[j])):
                        estimate[j] = flight[1]
                        candidates[j] = True
                        predecessor[j] = current_node
    return cost, predecessor
def Main():
    """Read the schedule file, build the flight table, prompt for the two
    cities, run Dijkstra, and display the resulting route (or its absence)."""
    # Read data for input file and parse the given data into an appropriate data structure.
    data = readInput()
    num_vertices = data[0][0]
    data.pop(0)
    # The data will be stored in a 2-dimensional list of lists of 2-tuples, ie. [[departure_time_1, arrival_time_1], .., [departure_time_n, arrival_time_n]]
    # In this 2-dimensional list, the ith row and jth column represents a list of flights from city i to city j.
    weights = [[[[0,0]] for i in range(num_vertices)] for j in range(num_vertices)]
    for flight in data:
        # Replace the [0, 0] placeholder on the first flight for a pair,
        # append further flights for the same pair after that.
        if(weights[flight[0]][flight[1]][0] == [0,0]):
            weights[flight[0]][flight[1]][0] = [flight[2], flight[3]]
        else:
            weights[flight[0]][flight[1]].append([flight[2],flight[3]])
    # Prompt user for input to determine starting and destination city.
    starting_city = int(input(f"Enter an integer greater than or equal to 0 and less than {num_vertices} representing a starting city: "))
    destination_city = int(input(f"Enter an integer greater than or equal to 0 and less than {num_vertices} representing a destination city: "))
    cost_array = [0]*num_vertices
    predecessor_array = [None]*num_vertices
    cost_array, predecessor_array = dijkstrasAlgorithm(num_vertices, starting_city, weights)
    # Display output according to user's input. If there is no available path, display this fact.
    if(starting_city == destination_city):
        print("Please enter a destination city other than the starting city.")
    elif(cost_array[destination_city] == None):
        print(f"There is no valid route from {starting_city} to {destination_city}")
    else:
        path_array = getPath(predecessor_array, starting_city, destination_city)
        displayResult(path_array, cost_array, starting_city, destination_city)
# Call Main funtion to initialize process:
Main() | true |
9012f8360cba879e2f466b903eba194ac5b9de2e | Python | saki45/CodingTest | /py/CLRS1_9/findNearestKth.py | UTF-8 | 1,112 | 3.609375 | 4 | [] | no_license | from findKth import findKth
def findNearestKth(a, n, k):
    """Return the 2k+1 elements of *a* nearest in value to the element
    selected by findKth(a, n), or just that element when k == 0.

    NOTE(review): relies on findKth (imported from the sibling module) to
    partially reorder *a* so the selected value v sits at a known position;
    confirm its exact selection semantics ("nth largest" per the comment).
    """
    # This method returns the nearest k elements along with the nth largest element
    if n >= len(a):
        print('illegal n')
        return None
    p, v = findKth(a, n)
    print(v)
    if k == 0:
        return v
    # Negative k is treated the same as its absolute value.
    if k < 0:
        k = -k
    # p -- the kth nearest element to a[n]
    # a[:p+1] all the needed (2k+1) elements including a[n]
    p = findKthNear(a, v, 2*k+1)
    return a[:p+1]
def findKthNear(a, v, k):
    """Quickselect-style search: partition *a* by distance to *v* until the
    pivot lands at index k-1, i.e. the k nearest elements occupy a[:k].

    NOTE(review): if the while loop ever exits with st > ed the function
    falls off the end and returns None implicitly.
    """
    st, ed, ck = 0, len(a)-1, k
    while st <= ed:
        if st == ed:
            return st
        # Compares the distance of each element to v against the pivot's.
        p = partitionNearest(a, v, st, ed)
        if p == ck-1:
            return p
        elif p > ck-1:
            ed = p-1
        else:
            st = p+1
def partitionNearest(a, v, st, ed):
    """Partition a[st..ed] in place around pivot a[st], keyed by distance
    to *v*: elements strictly closer to v than the pivot end up before it.
    Returns the pivot's final index."""
    pivot_dist = abs(a[st] - v)
    boundary = st + 1  # first slot not yet holding a "closer" element
    for scan in range(st + 1, ed + 1):
        if abs(a[scan] - v) < pivot_dist:
            a[boundary], a[scan] = a[scan], a[boundary]
            boundary += 1
    # Swap the pivot into place just after the "closer" region.
    a[st], a[boundary - 1] = a[boundary - 1], a[st]
    return boundary - 1
if __name__ == '__main__':
    # Ad-hoc demo: pick the 2 nearest neighbours of the 6th-selected element
    # in a random list of 12 ints, then print the sorted list for comparison.
    import random
    N = 12
    n = 6
    k = 2
    a = [int(random.uniform(0, 4*N)) for i in range(0, N)]
    print(a)
    b = findNearestKth(a, n, k)
    print('ours:',b)
    a.sort()
    print(a)
| true |
532dffe85d680dcee8dbbcd7adb6b0a66201ca63 | Python | INKWWW/NLP | /preprocess_server.py | UTF-8 | 4,628 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''直接匹配算法'''
import os
import copy
import jieba
import csv
# Log the working directory, keeping the value: the previous
# `dirname = print(os.getcwd())` always bound dirname to None,
# because print() returns None.
dirname = os.getcwd()
print(dirname)
# 加载停词
def getStopwords(filepath):
    """Load the stopword list from *filepath*, one entry per line.

    (Original note, translated: when training the model the full stopword
    file is not needed; stopwords that only strip punctuation are used
    directly.)
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        words = f.read()
    # The previous version printed the entire list on every call —
    # leftover debug output, now removed.
    return words.split('\n')
def processSen(test_sentence_1, test_sentence_2, stopwords):
    """Strip every stopword from both sentences, tokenize them with jieba,
    and return (shorter, longer) token lists ordered by the stripped
    strings' lengths.  On a length tie, sentence 1 counts as the longer."""
    for stop in stopwords:
        test_sentence_1 = test_sentence_1.replace(stop, '')
        test_sentence_2 = test_sentence_2.replace(stop, '')
    tokens_1 = [tok for tok in jieba.lcut(test_sentence_1) if tok not in stopwords]
    tokens_2 = [tok for tok in jieba.lcut(test_sentence_2) if tok not in stopwords]
    if len(test_sentence_1) < len(test_sentence_2):
        return (tokens_1, tokens_2)
    return (tokens_2, tokens_1)
def compare(shorter_sen, longer_sen):
    """Decide whether two token lists describe the same name.

    Pass 1 removes tokens that appear verbatim in both lists; pass 2 handles
    the first remaining pair where one token is a substring of the other.
    Returns True when either working list is exhausted, False otherwise.
    """
    shorter_sen_copy = copy.deepcopy(shorter_sen)
    longer_sen_copy = copy.deepcopy(longer_sen)
    for item in shorter_sen:
        if item in longer_sen:
            indexs_s = [i for i, v in enumerate(shorter_sen_copy) if v == item]
            indexs_l = [i for i, v in enumerate(longer_sen_copy) if v == item]
            # BUGFIX: pop from the highest index down.  Popping ascending
            # indices shifts later positions, removing the wrong elements
            # (and raising IndexError) whenever a token occurs twice.
            for index in sorted(indexs_s, reverse=True):
                shorter_sen_copy.pop(index)
            for index in sorted(indexs_l, reverse=True):
                longer_sen_copy.pop(index)
    if len(shorter_sen_copy) == 0:
        # Every token of the shorter name matched exactly.
        return True
    for item in shorter_sen_copy:
        for item_l in longer_sen_copy:
            if item in item_l or item_l in item:
                indexs_s = [i for i, v in enumerate(shorter_sen_copy) if v == item]
                indexs_l = [i for i, v in enumerate(longer_sen_copy) if v == item_l]
                for index in sorted(indexs_s, reverse=True):
                    shorter_sen_copy.pop(index)
                for index in sorted(indexs_l, reverse=True):
                    longer_sen_copy.pop(index)
                if len(shorter_sen_copy) == 0 or len(longer_sen_copy) == 0:
                    return True
                else:
                    return False
    # No exact or partial overlap remained: the names differ.
    # (BUGFIX: the original fell off the end and returned None here.)
    return False
def predict(input_file, out_file, stopwords):
    """Run the matcher over a CSV of (base_name, input_name, label) rows.

    Writes one output line per pair: the two names, the expected label, the
    predicted label ('1'/'0'), and the raw boolean.  Returns the expected
    labels and the predictions.
    """
    base_name = []
    input_name = []
    base_result = []
    predict_result = []
    with open(input_file, 'r') as f:
        fread = f.read()
        lines = fread.split()
        for line in lines:
            line_split = line.split(',')
            base_name.append(line_split[0])
            input_name.append(line_split[1])
            base_result.append(line_split[2])
    length = len(input_name)
    with open(out_file, 'w') as fw:
        for i in range(0, length):
            test_sentence_1 = base_name[i]
            test_sentence_2 = input_name[i]
            shorter_sen, longer_sen = processSen(test_sentence_1, test_sentence_2, stopwords)
            result = compare(shorter_sen, longer_sen)
            # BUGFIX: was `predict_result[i].append(...)`, which raised
            # IndexError on the first iteration (the list starts empty);
            # each prediction is simply appended.
            if result:
                predict_result.append('1')
            else:
                predict_result.append('0')
            content = test_sentence_1 + ',' + test_sentence_2 + ',' + base_result[i] + ',' + predict_result[i] + ',' + str(result) + '\n'
            fw.write(content)
    return base_result, predict_result
def main():
    """Demo entry point: load the stopword file and compare two sample
    company names."""
    filepath = 'stopwords_words.txt'
    stopwords = getStopwords(filepath)
    # BUGFIX: the sample sentences were commented out, leaving
    # test_sentence_1/2 undefined so main() raised NameError; one pair is
    # restored (the alternatives stay available below).
    test_sentence_1 = '黑龙江红兴隆农垦民乐农业生产资料有限公司'
    # test_sentence_2 = '黑龙江红兴隆农垦民乐农业生产资料公司'
    # test_sentence_2 = '黑龙江兴隆农垦民乐生产资料有限公司'
    test_sentence_2 = '四川省红兴隆农垦民乐生产资料有限公司'
    shorter_sen, longer_sen = processSen(test_sentence_1, test_sentence_2, stopwords)
    compare(shorter_sen, longer_sen)


if __name__ == '__main__':
    main()
| true |
5ee8d884e30afa4e091ebb9ada425c0c0a856bb2 | Python | l5d1l5/CCRCexamples | /markdown_thesis/plot_wordcount.py | UTF-8 | 1,709 | 2.53125 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
# Plot the thesis word-count log against a linear target line and save it
# as plot_wordcount.png.
print('plotting wordcount from log_wordcount.txt...')
# Columns 0/1/3 of the log are date, time, and word count; date+time are
# merged into the index while parsing.
data = pd.read_csv('log_wordcount.txt',delim_whitespace=True,header=None, usecols=[0,1,3],
                   names=['date','time','words'],parse_dates=[['date','time']],index_col=[0])
# Straight 0 -> 60k-words target line from January to June 2018.
estimate = pd.DataFrame(data=[0,60000],index=[pd.Timestamp('2018-01-01'),pd.Timestamp('2018-06-01')])
plt.close('all')
fig, ax = plt.subplots(nrows=1,ncols=1,figsize=(8,5))
data.plot(ax=ax,marker='+',ms=3,mec='k',title='Words in commited thesis files',legend=False)
estimate.plot(ax=ax,legend=False,color='r')
# ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2))
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%d'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.set_xlim(pd.Timestamp('2018-01-01'), pd.Timestamp('2018-09-01'))
# Leave 5% headroom above the highest recorded count.
xmax = data.max()[0]*1.05
ax.set_ylim(0, xmax)
# Vertical deadline markers with rotated labels.
ax.vlines(pd.Timestamp('2018-06-01'),ymin=0, ymax=xmax, color='0.6')
ax.text(pd.Timestamp('2018-06-01'),data.max()[0]*0.05, 'draft',
        color='0.6',rotation=90,va='bottom',ha='right')
ax.vlines(pd.Timestamp('2018-08-01'),ymin=0, ymax=xmax, color='0.6')
ax.text(pd.Timestamp('2018-08-01'),data.max()[0]*0.05, 'final',
        color='0.6',rotation=90,va='bottom',ha='right')
ax.vlines(pd.Timestamp('2018-08-31'),ymin=0, ymax=xmax, color='0.6')
ax.text(pd.Timestamp('2018-08-31'),data.max()[0]*0.05, 'deadline',
        color='0.6',rotation=90,va='bottom',ha='right')
plt.xticks(ax.get_xticks(), rotation=0,ha='center')
plt.xlabel('')
plt.tight_layout()
plt.savefig('plot_wordcount.png',dpi=300)
print('done') | true |
379da3e121157f9eafbb1f2ae18cf5b11298b807 | Python | Deveshshukla4/Insta_bot | /insta_bot.py | UTF-8 | 8,385 | 3.203125 | 3 | [] | no_license | import requests # Requests library imported to perform different queries such as get , post , delete ,put
# NOTE(review): a live access token is hard-coded in source control — move
# it to an environment variable or config file and revoke this one.
APP_ACCESS_TOKEN = "3068983250.94b2134.006490b178c24fefa74dba819dddaa1c" # access token
BASE_URL = "https://api.instagram.com/v1/"
# Function to print the logged-in user's info.
def self_info():
    """Fetch and print the authenticated user's profile, then return to
    the username prompt."""
    requests_url = (BASE_URL + 'users/self/?access_token=%s') % (APP_ACCESS_TOKEN) # request to fetch the user info
    my_info = requests.get(requests_url).json() # response from api
    print "Name :" + str(my_info['data']['full_name'])
    print "Bio :" + str(my_info['data']['bio'])
    print "Followers :" + str(my_info['data']['counts']['followed_by'])
    get_user_by_username() # back to the main prompt
# Function to search a username and hand off to the per-user operations menu.
def get_user_by_username():
    """Main interactive loop: search a username, or S for own profile,
    or Q to quit.

    NOTE(review): the S/Q branches are only reached when the search for
    the literal text "s"/"q" returns no results, because the non-empty
    result branch is checked first.
    """
    user_name = raw_input("Enter Username you wants to Search OR S to Check your profile OR Q to quit : ")
    requests_url = (BASE_URL + 'users/search?q=%s&access_token=%s') % (user_name , APP_ACCESS_TOKEN)
    search_results = requests.get(requests_url).json()
    if len(search_results['data']) :
        # Show the first match and offer to operate on it.
        print "User id : " + str(search_results['data'][0]['id'])
        print "Full name : " + str(search_results['data'][0]['full_name'])
        print "Bio : " + str(search_results['data'][0]['bio'])
        response = raw_input("Enter Y to perform operations or N to continue search : ").upper()
        if response == "Y":
            operations(search_results['data'][0]['id'])
        else:
            get_user_by_username()
    elif user_name == "s" or user_name == "S":
        self_info()
    elif user_name == "q" or user_name == "Q":
        exit()
    else:
        print "User doesn't exists !"
        get_user_by_username()
# List a user's recent posts and let the operator pick one.
def operations(user_id):
    """Print the selected user's recent posts (1-based ids) and dispatch
    the chosen post to select_operation()."""
    requests_url = (BASE_URL + 'users/%s/media/recent/?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
    recent_posts =requests.get(requests_url).json()
    # Index 0 holds a placeholder so displayed post ids start at 1.
    post_list = ["x"]
    for likes in recent_posts['data']:
        post_list.append(likes['id'])
        print "User_id : " + str(user_id) + " Post id: " + str(post_list.index(likes['id'])) + " likes :" + str(likes['likes']['count']) + " Comments : " + str(likes['comments']['count'])
    post_id = raw_input("Enter Post Id you wan't to access OR B to go back: ")
    if post_id == "b" or post_id == "B":
        get_user_by_username()
    else:
        x = int(post_id) # converting post_id into an integer index
        # NOTE(review): `post_list[x] not in post_list` is always False, so
        # this validation never fires; an out-of-range x raises IndexError
        # on the lookup itself instead.
        if post_list[x] not in post_list:
            print "Invalid Post id !"
            operations(user_id)
        else:
            post_id = post_list[x]
            select_operation(user_id, post_id)
# Menu: choose which operation to run on the selected post.
def select_operation(user_id , post_id):
    """Dispatch to like/comment/delete/average based on the operator's choice.

    NOTE(review): the lowercase comparisons are dead code because the input
    is already .upper()-ed, and an unrecognised choice silently falls
    through with no else branch.
    """
    opr = raw_input("Enter L to like a Post \n OR C to comment on a Post \n OR D to Delete comment \n "
                    "OR A to find average number of words per comment \n OR B to go back : ").upper()
    if opr == "L" or opr == "l":
        like_post(user_id , post_id)
    elif opr == "C" or opr == "c":
        comment_post(user_id , post_id)
    elif opr == "B" or opr == "b":
        operations(user_id)
    elif opr == "D" or opr == "d":
        select_the_way_to_delete_comment(user_id,post_id)
    elif opr == "A" or opr == "a":
        Average_number_of_words(user_id,post_id)
# Function to like the selected post.
def like_post(uid, post_id):
    """POST a like for *post_id*, report the outcome, return to the menu."""
    payload = {'access_token':APP_ACCESS_TOKEN}
    requests_url = (BASE_URL + 'media/%s/likes' % (post_id))
    response_to_like = requests.post(requests_url, payload).json() # post request to send data
    if len(response_to_like):
        print "Post liked successfully!"
        operations(uid)
    else:
        print "Something went wrong! Can't like the post"
        operations(uid)
# Choose between word-based and manual comment deletion.
def select_the_way_to_delete_comment(user_id,post_id):
    """Ask D (delete by word), M (manual), or B (back) and dispatch.

    NOTE(review): the lowercase comparisons are dead code — the input is
    already .upper()-ed.
    """
    response = raw_input("Enter D to delete comment by word OR M to delete comments manually OR B to go back :").upper()
    if response == "D" or response == "d":
        delete_comment_by_word(user_id,post_id)
    elif response == "M" or response == "m":
        delete_comment_manually(user_id,post_id)
    elif response == "B" or response == "b":
        operations(user_id)
    else:
        print "Please Select the correct Method."
        select_the_way_to_delete_comment(user_id,post_id)
# Post a comment on the selected post.
def comment_post(uid , post_id):
    """Read a comment from the operator, POST it, report the outcome."""
    comment = raw_input("Your Comment here : ")
    payload = {'access_token': APP_ACCESS_TOKEN , 'text':comment}
    requests_url = (BASE_URL + 'media/%s/comments' % (post_id))
    response_to_comments = requests.post(requests_url, payload).json()
    if len(response_to_comments):
        print "Comment posted successfully."
        operations(uid)
    else:
        print "Something went wrong! Cant post comment."
        operations(uid)
# Delete every comment on the post that contains a given word.
def delete_comment_by_word(user_id , post_id):
    """Collect ids of comments whose whitespace-split text contains *word*,
    then DELETE each of them."""
    word = raw_input("Enter the word you want to search in comments :")
    requests_url = (BASE_URL + 'media/%s/comments?access_token=%s' % (post_id, APP_ACCESS_TOKEN))
    result = requests.get(requests_url).json()
    result2 = result['data']
    comment_list = []
    # Loop for storing matching comment ids.
    for i in range(len(result2)):
        split = result2[i]['text'].split()
        if word in split:
            comment_list.append(result2[i]['id'])
    if len(comment_list):
        for i in comment_list:
            requests_url2 = (BASE_URL + 'media/%s/comments/%s?access_token=%s' % (post_id, i, APP_ACCESS_TOKEN))
            response = requests.delete(requests_url2).json()
        print str(len(comment_list)) + " Hurray! Comment deleted successfully."
        operations(user_id)
    else:
        print "No comments found for " + str(word)
        operations(user_id)
# List the post's comments and delete the one the operator picks.
def delete_comment_manually(user_id,post_id):
    """Show each comment with a 1-based id, then DELETE the chosen one.

    NOTE(review): the "No comments to delete !" message only fires for a
    comment with empty text, not when the post has no comments at all
    (in that case the loop simply never runs).
    """
    requests_url = (BASE_URL + 'media/%s/comments?access_token=%s' % (post_id,APP_ACCESS_TOKEN))
    fetch = requests.get(requests_url).json()
    c = 1
    for comments in fetch['data']:
        if len(comments['text']):
            print "comment id : " + str(c) + " " + "text : " + str(comments['text'])
        else:
            print "No comments to delete !"
            operations(user_id)
        c = c + 1
    comment_y = raw_input("Enter the comment id you want to delete OR B to go Back :")
    if comment_y == "b" or comment_y == "B":
        operations(user_id)
    else:
        comment_y = int(comment_y)
        # Placeholder at index 0 so displayed ids map 1:1 onto this list.
        comment_id =["x"]
        for x in fetch['data']:
            comment_id.append(x['id'])
        x = comment_id[comment_y]
        requests_url2 = (BASE_URL + 'media/%s/comments/%s?access_token=%s' % (post_id, x , APP_ACCESS_TOKEN))
        response = requests.delete(requests_url2).json()
        if response['data'] == None:
            print "comment deleted successfully."
            delete_comment_manually(user_id, post_id)
        else:
            print "Something Went wrong! Can't Delete the comment."
            delete_comment_manually(user_id,post_id)
# Function to find the average number of words per comment on a post.
def Average_number_of_words(user_id , post_id):
    """Print each comment, then the average word count per comment.

    NOTE(review): under Python 2 `total_words / total_comments` is integer
    division, so the average is truncated; comments with empty text still
    count toward the denominator.
    """
    requests_url = (BASE_URL + 'media/%s/comments?access_token=%s' % (post_id, APP_ACCESS_TOKEN))
    fetch = requests.get(requests_url).json()
    c = 1
    av = 0
    if len(fetch['data']) > 0:
        for comments in fetch['data']:
            if len(comments['text']):
                print "comment id : " + str(c) + " " + "text : " + str(comments['text'])
                # Count the words in this comment and add to the total.
                x1 = comments['text'].split()
                k = 0
                for i in x1:
                    k = k +1
                av = av + k
            c = c + 1
        total_words = av
        total_comments = c - 1
        Average = total_words / total_comments
        print "Average number of words per comment is : " + str( Average)
        operations(user_id)
    else:
        print "No comments found !"
        operations(user_id)
# Kick off the interactive loop as soon as the module is run/imported.
get_user_by_username()
| true |
c74e5fb4b803deae21bb089003689f9b2730bb6e | Python | fitai/fitai_controller | /FitAI/php_process_data.py | UTF-8 | 3,781 | 2.9375 | 3 | [] | no_license | import sys, os
import getopt
import json
from pandas import DataFrame
# Make this file's directory importable when run as a script; in some
# embedded contexts __file__ is undefined, hence the NameError fallback.
try:
    print 'Adding {} to sys.path'.format(os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
except NameError:
    print 'working in Dev mode.'
from processing.functions import calc_vel2, calc_rms, calc_power
from util import parse_data, extract_weight, extract_sampling_rate
# Expects a dataframe with known fields
# Timepoint, a_x, (a_y, a_z), lift_id
def process_data(header, content):
    """Compute acceleration, velocity, and power series from lift data.

    With one acceleration axis the raw series is used; with several axes
    they are combined into an RMS series first.  Returns three series
    (accel, velocity, power).  Exits the process when no acceleration
    columns are present.
    """
    if not isinstance(content, DataFrame):
        print 'Content (type {}) is not a dataframe. Will try to convert...'.format(type(content))
        content = DataFrame(content)
    accel_headers = [x for x in content.columns if x in ['a_x', 'a_y', 'a_z']]
    # Sampling rate and lift weight come from the header metadata.
    fs = extract_sampling_rate(header)
    weight = extract_weight(header)
    if len(accel_headers) == 0:
        print 'Could not find acceleration field(s). Cannot process'
        sys.exit(10)
    elif len(accel_headers) == 1:
        print 'Found single axis of data'
        vel = calc_vel2(content[accel_headers[0]], fs)
        pwr = calc_power(content[accel_headers[0]], vel, weight)
        return content[accel_headers[0]], vel, pwr
    else:
        print 'Found multiple axes of data. Will combine into RMS.'
        a_rms = calc_rms(content, accel_headers)
        v_rms = calc_vel2(a_rms, fs)
        p_rms = calc_power(a_rms, v_rms, weight)
        return a_rms, v_rms, p_rms
# This function should get called by calling the file
# If any flags are passed, use getopt to get them
def main(argv):
    """CLI entry point: parse -d/--data JSON, process it, print results.

    NOTE(review): if no -d/--data flag is supplied, `json_data` is never
    assigned and the first use below raises NameError instead of the
    intended "Did not capture JSON data" exit.
    """
    print 'received args {}'.format(argv)
    try:
        opts, args = getopt.getopt(argv, 'd:h:', 'data=')
    except getopt.GetoptError:
        print 'test.py -d (--data) <JSON string>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'test.py -d (--data) <JSON string>'
            sys.exit()
        elif opt in ("-d", "--data"):
            json_data = arg
    print 'received data {}'.format(json_data)
    if json_data is None:
        print 'Did not capture JSON data. Will exit..'
        sys.exit(100)
    header, content = parse_data(json_data)
    if header is None:
        # Missing header: offer to continue with default acquisition values.
        print 'Could not read header data'
        response = raw_input('Assume default values? (fs = 20Hz, weight = 22.5 kg) - [Y]/n')
        if (response is None) or (response in ('y', 'Y')):
            print 'Moving forward with default values.'
            header = {'lift_sampling_rate': 20, 'lift_weight': 22.5, 'lift_weight_units': 'kg'}
        elif response in ('N', 'n'):
            print 'Exiting..'
            sys.exit(20)
        else:
            print 'Unexpected response {}. Exiting..'.format(response)
            sys.exit(20)
    if content is None:
        print 'Could not read content. Exiting...'
        sys.exit(30)
    a, v, p = process_data(header, content)
    # Assemble the processed series into one frame aligned on the input index.
    data_out = DataFrame(data={'lift_id': [header['lift_id']]*len(a),
                               'timepoint': content['timepoint'],
                               'a_rms': a,
                               'v_rms': v,
                               'p_rms': p},
                         index=a.index)
    print 'Processed headers into:\n{}'.format(json.dumps(list(data_out.columns)))
    print 'Processed data into:\n{}'.format(data_out.head().to_json(orient='values'))
    # PHP doesnt capture any return statements. Only what is sent to stdout (e.g. print statements)
    # return data_out.to_json()
# Receives initial ping to file
if __name__ == '__main__':
    # Forward the CLI arguments (minus the script name) to main().
    main(sys.argv[1:])
# json_data = '{"header":{"lift_id":0,"lift_sampling_rate":50,"lift_weight":100,"lift_weight_units":"lbs"},"content":{"timepoint":["0,0.02,0.04"],"a_x":["230,773,169"]}}'
| true |
026285701f24e270dfcc394a4a81b533583c7564 | Python | doanc4/SalaryPrediction | /preprocessing.py | UTF-8 | 1,979 | 3.140625 | 3 | [] | no_license | import numpy as np
import pandas as pd
from nlp import clean_text
def get_bin(salary, percentiles):
    """Bins salaries into groups based on percentile. Used for fine-tuning BERT."""
    last = len(percentiles) - 1
    # Default to the final bucket; this also catches salaries at or below the
    # first percentile, mirroring the original fall-through behaviour.
    bin_label = last
    for idx in range(last):
        if percentiles[idx] < salary <= percentiles[idx + 1]:
            bin_label = idx
            break
    return bin_label
def preprocess_job(job_object, dummy_cols, categorical_vars, vectorizer, lemmatizer):
    """Data preprocessing function to get input data for a novel job posting into the format required by the regression model.

    Params:
        job_object: a dict whose key-value pairs are property names and values of a given job posting.
        dummy_cols: a list specifying the column names associated with the trained model.
        categorical_vars: a list specifying the categorical variables used for the model.
        vectorizer: the text vectorizer object used to transform the text variables into vectors.
        lemmatizer: lemmatizer passed through to clean_text for title normalisation.

    Returns:
        pd.Series concatenating the one-hot dummy indicators with the
        vectorized job-title features.
    """
    assert all(var in job_object for var in categorical_vars), f"All categorical variables must be present in the Job Object. See: {categorical_vars}"
    assert 'Title' in job_object, "Job object must contain a 'Title' feature"
    dummy_dict = {dummy_col: [] for dummy_col in dummy_cols}
    # Set each dummy column "<var>_<value>" to 1 when the job's value matches,
    # 0 for the other values of the same variable.
    # NOTE(review): dummy columns whose "<var>_" prefix matches no key of
    # job_object keep their [] placeholder; also `endswith(v)` can over-match
    # when one category value is a suffix of another -- confirm upstream.
    for k, v in job_object.items():
        for dummy in dummy_dict:
            if dummy.startswith(f'{k}_'):
                if dummy.endswith(v):
                    dummy_dict[dummy] = 1
                else:
                    dummy_dict[dummy] = 0
    # Vectorize the cleaned title into a dense feature vector indexed by the
    # vectorizer's vocabulary.
    clean_title = clean_text(job_object['Title'], lemmatizer=lemmatizer)
    title_vec = vectorizer.transform([clean_title])
    title_vec = np.array(title_vec.todense()).flatten()
    features = vectorizer.get_feature_names_out()
    title_vec = pd.Series(title_vec, index=features)
    features = pd.concat([pd.Series(dummy_dict), title_vec], axis=0)
    return features
| true |
e90f11ebbb184e36510ddab112c86c064f9d7b6f | Python | SanyaBoroda4/Hillel_Homeworks | /LESSON_06(LISTS, TUPLES)/HM_01.py | UTF-8 | 208 | 3.1875 | 3 | [] | no_license | matrix = [int(input()) for i in range(6)]
# Remove the element at index k: bubble it to the end of the list with
# successive swaps, then pop the (now duplicated) last element.
k = int(input("Please enter the index number from 0 to 5: "))
for i in range(k, len(matrix) - 1):
    matrix[i], matrix[i+1] = matrix[i+1], matrix[i]
matrix.pop()
| true |
f3a607e100863b26c49e950a0624fd23b4142f24 | Python | MaryamGambo/PythonTest | /functions.py | UTF-8 | 2,798 | 3.578125 | 4 | [] | no_license | import sqlite3
import queries
try:
    with sqlite3.connect("students.sqlite3") as conn:
        cur = conn.cursor()
        # Create the students table up front if it does not exist yet.
        cur.execute(queries.CREATE_TABLE)
        conn.commit()

    class Connectivity:
        """Interactive console menu for CRUD operations on the students table."""

        def start(self):
            """Loop the menu until the user answers 'n' to the continue prompt."""
            while True:
                self.options()
                prompt = input("Do you want to continue [Y/N] ?").lower()
                quit(0) if prompt == 'n' else ''

        def options(self):
            """Show the menu and dispatch to the chosen CRUD handler."""
            print('1) Display all students')
            print('2) Insert student record')
            print('3) Update student record')
            print('4) Delete student record')
            option = int(input('Select an option [1/2/3/4] =>'))
            if option == 1:
                self.display_record()
            elif option == 2:
                self.insert_record()
            elif option == 3:
                self.update_record()
            elif option == 4:
                self.delete_record()
            else:
                print('Invalid option')
            return

        def display_record(self):
            """Print every row of the students table."""
            students = cur.execute(queries.FETCH_ALL)  # fetch all records from db
            for i in students:
                print(i)
            return

        def insert_record(self):
            """Prompt for a new student's fields and insert the row."""
            first_name = input('Enter your first name:')
            last_name = input('Enter your last name:')
            age = int(input('Enter your age:'))
            gender = input('Enter your gender:')
            email = input('Enter your email address:').lower()
            cur.execute(queries.INSERT, (first_name, last_name, age, gender, email))
            conn.commit()
            print('Student record successfully added')
            return

        def delete_record(self):
            """Prompt for a student id and delete the matching row."""
            student_id = input('Enter your student id:')
            # Parameters must be a sequence; the previous bare string was
            # treated as one parameter per character for multi-digit ids.
            cur.execute(queries.DELETE, (student_id,))
            conn.commit()
            print('Student record deleted successfully')
            return

        def update_record(self):
            """Prompt for a student id plus new field values and update the row."""
            student_id = int(input('Enter your student id:'))
            first_name = input('Enter first name:')
            last_name = input('Enter last name:')
            age = input('Enter age:')
            gender = input('Enter gender:')
            email = input('Enter email address:')
            cur.execute(queries.UPDATE, (first_name, last_name, age, gender, email, student_id))
            conn.commit()
            print('Student record updated successfully')
            return

except sqlite3.Error as e:
    # sqlite3.Error is the base of all sqlite3 exceptions; the original
    # caught sqlite3.Connection, which is not an exception type at all.
    print('Error while connecting to sqlite database')
| true |
dd8b1aae651472e687a4c165afbee7cae23b8f0e | Python | 88daxiong/leetcode | /Interview/yanfudao/1.py | UTF-8 | 1,003 | 3.078125 | 3 | [] | no_license | '''
@Description: Group conversation (role-grouping) problem
@Author: daxiong
@Date: 2019-08-24 15:59:56
@LastEditors: daxiong
@LastEditTime: 2019-08-24 17:37:01
'''
import sys
if __name__ == "__main__":
    # Input: first line is the number of test cases C; each following line
    # holds the role count followed by the number of students per role.
    C = int(sys.stdin.readline().strip())
    ans = 0  # NOTE(review): never used below
    nums = list()
    for i in range(C):
        line = sys.stdin.readline().strip()
        students = list(map(int, line.split())) # students choosing each role
        nums.append(students)
    for i in range(C):
        numsOfstu = nums[i][1:]
        numOfchar = nums[i][0] # number of roles
        if numOfchar < 3: # fewer than 3 roles: no group can be formed
            print(0)
        else:
            resNum = 0 # accumulated number of groups formed
            # Greedy: repeatedly spend the third-largest pile against the two
            # largest piles. (The author's own note on this line said sorting
            # first "is wrong" -- verify against the expected output.)
            numsOfstu.sort()
            while numsOfstu[-3] != 0:
                resNum += numsOfstu[-3]
                numsOfstu[-2] -= numsOfstu[-3]
                numsOfstu[-1] -= numsOfstu[-3]
                del numsOfstu[-3]
                numsOfstu.sort()
            print(resNum)
| true |
290343068d830a282f6209c4642a38f43df8043e | Python | bot-createor/ios | /calculator.py | UTF-8 | 975 | 3.34375 | 3 | [] | no_license | import tkinter as tk
root = tk.Tk()
root.title("calculator")
class Screen:
    """Builds the calculator UI: sizes the window, then creates the result
    display and (eventually) the number buttons.

    NOTE(review): the original referenced ``root.winfo_width`` without calling
    it, built the geometry string from floats, used the undefined name
    ``DISABLED`` and called ``create_input``/``create_number_buttons`` with
    mismatched signatures -- all of which crashed at construction time.
    """

    def __init__(self):
        # winfo_* report real sizes only after the window has been laid out.
        root.update_idletasks()
        # winfo_width / winfo_height are methods and must be called.
        self.screen_width = root.winfo_width() / 7
        self.screen_height = root.winfo_height() / 4
        # geometry() expects a "WIDTHxHEIGHT" string of whole pixels.
        root.geometry("%dx%d" % (self.screen_width, self.screen_height))

        # Display size: leave a 5% margin on each side.
        input_width = self.screen_width - (self.screen_width / 20 * 2)
        input_height = self.screen_height - (self.screen_height / 20 * 2)

        # Centre the display horizontally near the top of the window.
        self.create_input((self.screen_width - input_width) / 2,
                          self.screen_height / 20,
                          input_width,
                          input_height)
        self.create_number_buttons()

    def create_input(self, x, y, width, height):
        """Create the (disabled) result display at the given position/size."""
        self.input = tk.Entry(root, bg="#e0dede", fg="#383838",
                              state=tk.DISABLED)
        self.input.place(x=x, y=y, width=width, height=height)

    def create_number_buttons(self):
        """Placeholder: the digit buttons are not implemented yet."""
        pass


screen = Screen()
| true |
dba3205d74d24345d1547efda68cd0338d836cd5 | Python | Aasthaengg/IBMdataset | /Python_codes/p03814/s646616164.py | UTF-8 | 244 | 3.15625 | 3 | [] | no_license | def main():
    # Track the index of the first 'A' (a) and the last 'Z' (z) in the input.
    s = input()
    a = len(s); z = 0
    for i, t in enumerate(s):
        if t == 'A':
            a = min(a, i)
        if t == 'Z':
            z = max(z, i)
    # Length of the substring from the first 'A' through the last 'Z'
    # (the problem statement guarantees such a substring exists).
    ans = z-a+1
    print(ans)

if __name__ == "__main__":
    main()
| true |
59b93d66d83c0ade1ca731d1a3cca6f3ff58ff2e | Python | nhmishaq/Python-Assignments | /python_platform_assignments/stringsLists.py | UTF-8 | 1,740 | 4.75 | 5 | [] | no_license | #This is version 2.0 of the same python platform assignments that I worked on in my first attempt.
#The goal is to implement better coding practices and sharpen my command over the language.
# Find and Replace
# In this string: words = "It's thanksgiving day. It's my birthday,too!" print the
# position of the first instance of the word "day". Then create a new string where
# the word "day" is replaced with the word "month".
def find(sentence, wordToFind, newWord):
    """Print the index of the first occurrence of wordToFind in sentence,
    then a copy of sentence with every occurrence replaced by newWord.
    """
    firstInstance = sentence.find(wordToFind)
    newSentence = sentence.replace(wordToFind, newWord)
    print firstInstance, newSentence

# Demo call (the 'birdhday' typo is preserved from the exercise input).
find("It's thanksgiving day. It's my birdhday,too!", "day", "month")
# Min and Max
# Print the min and max values in a list like this one: x = [2,54,-2,7,12,98].
# Your code should work for any list.
def minAndMax(list):
    """Print the smallest and largest values of the given list.

    NOTE(review): the parameter name shadows the built-in ``list``.
    """
    print min(list)
    print max(list)

minAndMax([2,54,-2,7,12,98])
# First and Last
# Print the first and last values in a list like this one: x = ["hello",2,54,-2,7,12,98,"world"].
# Now create a new list containing only the first and last values in the original list.
# Your code should work for any list.
def firstAndLast(list):
print list[0]
print list[len(list)-1]
firstAndLast(["hello",2,54,-2,7,12,98,"world"])
# New List
# Start with a list like this one: x = [19,2,54,-2,7,12,98,32,10,-3,6].
# Sort your list first. Then, split your list in half.
# Push the list created from the first half to position 0 of the list created from the second half.
# The output should be: [[-3, -2, 2, 6, 7], 10, 12, 19, 32, 54, 98].
# Stick with it, this one is tough!
def newList(list):
list.sort()
list.append(list[len(list)/2])
print list
newList([19,2,54,-2,7,12,98,32,10,-3,6]) | true |
19ff7f2cabc027cc30fce6c7120b868af5908b1f | Python | tagyro/GCoM-Cities-Action-Explorer | /mainScript.py | UTF-8 | 9,289 | 2.96875 | 3 | [] | no_license | import json
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
## only needed for plots:
# import seaborn as sns
# import matplotlib.pyplot as plt
# import random
### parameters
# population cutoff for cities:
pop_cutoff = 50000
# similarity score cutoff for matching:
sim_cutoff = 0.7
# list of variables that should be log transformed
log_transform = ["GDP", "Population", "GDP_per_cap_method1"]
# list of variables for feature scaling
# (subtracting the mean and dividing by std deviation of distribution)
feature_scaling = ["GDP", "Population", "GDP_per_cap_method1",
"HDD", "CDD", "HDI_2", "Fuel_price"]
# ID of city to run tests on
# check gcom_cities file for id's
test_city = 'BR0003'
### utilites
# utility to find the list of actions from the CDP actions data base with a given gcom ID
# and data frame of actions
def findListOfActions(actions_df, gcomId):
    """Return the list of CDP action dicts for the city with GCoM id *gcomId*.

    Each dict carries 'reporting year', 'category' (reclassified via the
    module-level categoryDict) and 'activity'. Relies on the module globals
    citiesDict and categoryDict having been populated first.
    """
    # pull out the relevant actions for the city from the data base,
    # reset the index
    city_actions_df = actions_df.loc[actions_df['Account number'] ==
                                     citiesDict[gcomId]['cdp_id']].reset_index(drop=True)
    # create placeholder list of actions
    actionsList = [{} for x in range(len(city_actions_df))]
    # make key list for each action dict
    action_keys = ['reporting year', 'category', 'activity'] # , 'description']
    for index, action in city_actions_df.iterrows():
        # translate CDP categories to reclassified ones
        action_values = [action['Reporting Year'], categoryDict[action['Sector']],
                         action['Emissions reduction activity']]
        # , action['Action description']]
        # zip keys and values together to create the dict
        actionsList[index] = dict(zip(action_keys, action_values))
    return actionsList
def distance(city1_dict, city2_dict):
    """Return the per-characteristic distance vector between two city dicts.

    Element 0 is 1 when the cities lie in different countries (country code
    'cc') and 0 otherwise; the remaining elements are the signed differences
    of the (already feature-scaled) numeric characteristics.
    """
    vec = [int(city1_dict['cc'] != city2_dict['cc'])]
    vec.extend(city1_dict[feat] - city2_dict[feat] for feat in feature_scaling)
    return vec
def similarity(city1_dict, city2_dict):
    """Map the distance between two cities to a similarity score in (0, 1].

    Computes exp(-||d|| / len(d)): the exponential of the negative Euclidean
    norm of the distance vector, normalised by the number of characteristics.
    """
    dist_vec = distance(city1_dict, city2_dict)
    return np.exp(-np.linalg.norm(dist_vec) / len(dist_vec))
### set up categories
# read category matching from file
categories = pd.read_csv(r"reclassification_files\cdp_sector_reclass.csv", encoding="utf-8").fillna('(blank)')
# build dict
categoryDict = dict(zip(list(categories['CDP Sector']), list(categories['reclass_sector'])))
print(categoryDict)
### prepare dictionary of GCoM cities
# read gcom cities into pandas dataframe
cities = pd.read_csv(r"input_data\gcom_cities.csv", encoding="utf-8").fillna('(blank)')
# select cities with population > pop_cutoff, reset index:
cities = cities.loc[cities['Population'] > pop_cutoff].reset_index(drop=True)
# x = pd.Series(cities["GDP"], name = 'GDP')
# sns.distplot(x).set_title("Distribution of GDP of GCOM cities "
# "with population > {}".format(pop_cutoff))
# plt.show()
# apply log transformation
cities[log_transform] \
= cities[log_transform].apply(np.log)
# perform feature scaling:
cities[feature_scaling] \
= StandardScaler().fit_transform(cities[feature_scaling])
# x = pd.Series(cities["GDP"], name = 'Scaled log of GDP')
# sns.distplot(x).set_title("Distribution of GDP of GCOM cities with population > {}"
# " after a log transform and feature scaling".format(pop_cutoff))
# plt.show()
# initialize dictionary with city id's as keys
citiesDict = dict.fromkeys(list(cities['new_id']))
# fill dictionary values by looping over rows in dataframe
# take keys from headers, add placeholders for actions and matches
keys = list(cities) + ['cdp_id', 'actions', 'matches']
for index, row in cities.iterrows():
# take value for each city dict from the row and add NA/empty
# list placeholders for cdp_id/actions and matches
values = list(row) + ['NA', [], []]
# zip keys and values together to create the dict
citiesDict[row[0]] = dict(zip(keys, values))
### prepare CDP cities for matching
# read cdp actions into pandas dataframe
actions_df = pd.read_csv(r"input_data\Actions_cdp_2012-2017.csv",
encoding="utf-8").fillna('(blank)')
# change USA to United States of America
actions_df = actions_df.replace(to_replace='USA',
value='United States of America')
# find set of city names used in the CDP actions file
# (stripping leading and trailing whitespace)
cdp_cities = set(actions_df['City'].str.strip())
# print(cdp_cities)
### match GCoM cities to cdp action data base
counter = 0
for entry in citiesDict.values():
    gcomCityName = entry['city']
    # match GCOM city to cdp_cities if possible and add list of cdp actions
    if gcomCityName in cdp_cities:
        counter += 1
        cdpId = set(actions_df.loc[actions_df['City'].str.strip() ==
                                   gcomCityName]['Account number'])
        ### TO-DO:
        # some city names appear multiple times.
        # have to match on add'l characteristics like country or coordinates
        if len(cdpId) > 1:
            print("Found several matches for " + gcomCityName)
            print(cdpId)
        # update GCoM cities list entry with cdp Id found
        # NOTE(review): set.pop() removes an arbitrary element, so when
        # several account numbers match, the chosen cdp_id is effectively
        # random -- see the TO-DO above.
        entry['cdp_id'] = cdpId.pop()
        # update list of actions with the matched city in cdp data base
        entry['actions'] = findListOfActions(actions_df, entry['new_id'])
print("# of matched cities between GCOM and CDP: " + str(counter))
# find matches for cities
# O(n^2) pairwise comparison over all city pairs; every qualifying pair is
# recorded symmetrically in both cities' 'matches' lists.
for i, city1_id in enumerate(cities['new_id']):
    for j, city2_id in enumerate(cities['new_id'][i + 1:]):
        city1 = citiesDict[city1_id]
        city2 = citiesDict[city2_id]
        sim = similarity(city1, city2)
        if sim > sim_cutoff:
            city1['matches'] += [{'city_id': city2_id, 'score': sim}]
            city2['matches'] += [{'city_id': city1_id, 'score': sim}]
# save data in json file
# NOTE(review): unlike the raw string used below, 'output\cities_data.json'
# is not a raw string; '\c' happens to survive as a literal backslash here,
# but a raw string (or os.path.join) would be safer.
with open('output\cities_data.json', 'w') as f:
    json.dump(citiesDict, f) #, sort_keys=True, indent=4)
# get info for only cities above test city's population to keep the file smaller:
bigCitiesDict = {k: citiesDict[k] for k in citiesDict.keys() if
                 citiesDict[k]['Population'] >= citiesDict[test_city]['Population']}
with open(r'output\big_cities_data.json', 'w') as f:
    json.dump(bigCitiesDict, f, sort_keys=True, indent=4)
# # calculate top ten matches for test city and print cities with match scores
# top_ten = sorted(citiesDict[test_city]['matches'], key=lambda k: k['score'],
# reverse=True)[:10]
# print("Top Ten Matches for {}, {}:".format(citiesDict[test_city]['city'],
# citiesDict[test_city]['country']))
# for entry in top_ten:
# print(citiesDict[entry['city_id']]['city'] + ", " + \
# citiesDict[entry['city_id']]['country'] + \
# ". Score: {:05.4f}".format(entry['score']))
# # print all actions that matched cities have taken
# print("City's actions: ")
# for action in citiesDict[entry['city_id']]['actions']:
# print(action['reporting year'], ":", action['category'], ":", action['activity'])
# # look at 10 random scores for other cities with test city:
# print("Random Ten Scores for {}, {}:".format(citiesDict[test_city]['city'], citiesDict[test_city]['country']))
# for i in range(10):
# random_match = random.choice(list(citiesDict))
# print(citiesDict[random_match]['city'] + ", " + citiesDict[random_match]['cc'] + ". Score: {:05.4f}".format(
# similarity(citiesDict[random_match], citiesDict[test_city])))
# # some fun seaborn plots
# sns.lmplot(x='Population', y='GDP', data=cities)
# fig, axes = plt.subplots()
# sns.violinplot(data=cities[['HDD','CDD','TDD']], ax=axes)
# sns.lmplot(x='HDI_country', y='Fuel_price', data=cities)
# x = pd.Series(cities["GDP"], name = 'Normalized log of GDP')
# sns.distplot(x)
# plt.show()
# y = pd.Series(cities["Population"], name = 'Normalized log of Population, cutoff at {}'.format(pop_cutoff))
# sns.distplot(y)
# plt.show()
# creating a visual for test city's similarity scores with other cities
# sim_scores = list(range(len(cities.index)))
# for i, entry in enumerate(citiesDict.values()):
# sim = similarity(citiesDict[test_city], entry)
# sim_scores[i] = sim
# if sim > sim_cutoff:
# # print(entry['city'] + ", similarity: {}".format(sim))
# entry['matches'] += [test_city]
#
# plt.plot(list(reversed(sorted(sim_scores)[:-1])))
# plt.ylabel('Similarity Score')
# plt.xlabel('Cities with population > {}'.format(pop_cutoff))
# plt.title('Matches for {}, {}:'.format(citiesDict[test_city]['city'],
# citiesDict[test_city]['country']))
# plt.show()
| true |
73f83739a22c6760eedaa05992275b32b06a1153 | Python | cin-derella/python_datascience | /YC_DataAnalysis/code/numpyTest/13.broadcast.py | UTF-8 | 204 | 2.921875 | 3 | [] | no_license | import numpy as np
x = np.array([[1],[2],[3]])
y = np.array([4,5,6])
# Broadcast a (3, 1) array against a (3,)-shaped one: iterating the result
# yields the 3x3 grid of paired (x, y) elements.
b = np.broadcast(x,y)
b.index  # NOTE(review): no-op expression; reads (and discards) the current flat index
print(b.__next__()) # advance the iterator: next broadcast (x, y) pair
print(b.__next__())
print(b.__next__())
print(b.index) #索引位置 | true |
e2abe6cd74c9fb2f11e65914f6fd9eb522fde2e7 | Python | WebucatorTraining/classfiles-actionable-python | /advanced-python-concepts/Demos/with_filter.py | UTF-8 | 166 | 3.828125 | 4 | [
"MIT"
] | permissive | def is_odd(num):
    return num % 2  # 1 (truthy) when num is odd, 0 (falsy) when even
def main():
    """Print every odd number in 0..9, one per line."""
    for value in filter(is_odd, range(0, 10)):
        print(value)
main() | true |
e958045f1c59d27c8e1650c0c317da519c09e9fc | Python | gabminamedez/leetcode | /easy/1108.py | UTF-8 | 141 | 2.71875 | 3 | [] | no_license | # [1480] Defanging an IP Address
class Solution:
    """LeetCode 1108 -- Defanging an IP Address."""

    def defangIPaddr(self, address: str) -> str:
        """Return *address* with every '.' replaced by '[.]'."""
return address.replace('.', '[.]') | true |
2242b0b9e41a2936b012eb1a2d71f3b59132f5c2 | Python | brakdag/cursoBasicopython | /src/invertir.py | UTF-8 | 59 | 3.234375 | 3 | [
"MIT"
] | permissive | numero = input("ingrese un numero:")
# Print the characters of the input in reverse order (slice with step -1).
print(numero[-1::-1])
| true |
a1189d1cd5386efc0077da22bed68641af789f65 | Python | aisichenko/gdsfactory | /gdsfactory/components/cutback_bend.py | UTF-8 | 6,579 | 2.875 | 3 | [
"MIT"
] | permissive | from numpy import float64
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.bend_circular import bend_circular, bend_circular180
from gdsfactory.components.bend_euler import bend_euler, bend_euler180
from gdsfactory.components.component_sequence import component_sequence
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.types import ComponentFactory, ComponentOrFactory
def _get_bend_size(bend90: Component) -> float64:
    """Return the larger of the x- and y-spans between the bend's first two ports."""
    port_a, port_b = list(bend90.ports.values())[:2]
    span_x = abs(port_b.x - port_a.x)
    span_y = abs(port_b.y - port_a.y)
    return max(span_x, span_y)
@cell
def cutback_bend(
    bend90: ComponentOrFactory = bend_euler,
    straight_length: float = 5.0,
    rows: int = 6,
    columns: int = 5,
    straight: ComponentFactory = straight_function,
    **kwargs
) -> Component:
    """Deprecated! use cutback_bend90 instead, which has a smaller footprint.

    Returns a serpentine cutback of 90-degree bends for bend-loss measurement.

    Args:
        bend90: bend component or factory for the 90 degree bend.
        straight_length: length of the straight between consecutive bends.
        rows: bend pairs per column.
        columns: number of serpentine columns.
        straight: function for straight.
        kwargs: cross_section settings.

    keyword args:
        cross_section:

    .. code::
        this is a column
            _
          _|
        _|
        _ this is a row
    """
    bend90 = bend90(**kwargs) if callable(bend90) else bend90
    straightx = straight(length=straight_length, **kwargs)
    # Define a map between symbols and (component, input port, output port)
    symbol_to_component = {
        "A": (bend90, "o1", "o2"),
        "B": (bend90, "o2", "o1"),
        "S": (straightx, "o1", "o2"),
    }
    # Generate the sequence of staircases
    s = ""
    for i in range(columns):
        s += "ASBS" * rows
        # Alternate the turnaround direction so the path snakes back.
        s += "ASAS" if i % 2 == 0 else "BSBS"
    s = s[:-4]
    c = component_sequence(
        sequence=s, symbol_to_component=symbol_to_component, start_orientation=90
    )
    # Total number of bends traversed; used to normalise the measured loss.
    c.info["n_bends"] = rows * columns * 2 + columns * 2 - 2
    return c
@cell
def cutback_bend90(
    bend90: ComponentOrFactory = bend_euler,
    straight_length: float = 5.0,
    rows: int = 6,
    columns: int = 6,
    spacing: int = 5,
    straight: ComponentFactory = straight_function,
    **kwargs
) -> Component:
    """Returns a cutback circuit for measuring bend90 loss.

    Args:
        bend90: bend component or factory for the 90 degree bend.
        straight_length: length of the short straights between bends.
        rows: repetitions of the bend group per column.
        columns: number of columns, joined by vertical straights.
        spacing: extra length added to the vertical (column) straights.
        straight: function for straight.
        kwargs: cross_section settings.

    .. code::
        _
       |_| |
    """
    bend90 = bend90(**kwargs) if callable(bend90) else bend90
    straightx = straight(length=straight_length, **kwargs)
    # Vertical straights must clear two bends plus the requested spacing.
    straight_length = 2 * _get_bend_size(bend90) + spacing + straight_length
    straighty = straight(length=straight_length, **kwargs)
    # Define a map between symbols and (component, input port, output port)
    symbol_to_component = {
        "A": (bend90, "o1", "o2"),
        "B": (bend90, "o2", "o1"),
        "-": (straightx, "o1", "o2"),
        "|": (straighty, "o1", "o2"),
    }
    # Generate the sequence of staircases
    s = ""
    for i in range(columns):
        if i % 2 == 0: # even row
            s += "A-A-B-B-" * rows + "|"
        else:
            s += "B-B-A-A-" * rows + "|"
    s = s[:-1]
    # Create the component from the sequence
    c = component_sequence(
        sequence=s, symbol_to_component=symbol_to_component, start_orientation=0
    )
    c.info["n_bends"] = rows * columns * 4
    return c
@cell
def staircase(
    bend90: ComponentOrFactory = bend_euler,
    length_v: float = 5.0,
    length_h: float = 5.0,
    rows: int = 4,
    straight: ComponentFactory = straight_function,
    **kwargs
) -> Component:
    """Returns a staircase of alternating 90-degree bends and straights.

    Args:
        bend90: bend component or factory for the 90 degree bend.
        length_v: length of the vertical straights.
        length_h: length of the horizontal straights.
        rows: number of staircase steps.
        straight: function for straight.
        kwargs: cross_section settings.
    """
    bend90 = bend90(**kwargs) if callable(bend90) else bend90
    wgh = straight(length=length_h, **kwargs)
    wgv = straight(length=length_v, **kwargs)
    # Define a map between symbols and (component, input port, output port)
    symbol_to_component = {
        "A": (bend90, "o1", "o2"),
        "B": (bend90, "o2", "o1"),
        "-": (wgh, "o1", "o2"),
        "|": (wgv, "o1", "o2"),
    }
    # Generate the sequence of staircases
    s = "-A|B" * rows + "-"
    c = component_sequence(
        sequence=s, symbol_to_component=symbol_to_component, start_orientation=0
    )
    c.info["n_bends"] = 2 * rows
    return c
@cell
def cutback_bend180(
    bend180: ComponentOrFactory = bend_euler180,
    straight_length: float = 5.0,
    rows: int = 6,
    columns: int = 6,
    spacing: int = 3,
    straight: ComponentFactory = straight_function,
    **kwargs
) -> Component:
    """Return cutback to measure u bend (180 degree) loss.

    Args:
        bend180: bend component or factory for the 180 degree bend.
        straight_length: length of the straights between bends.
        rows: bend pairs per column.
        columns: number of columns, joined by vertical straights.
        spacing: extra clearance added to the vertical straights.
        straight: function for straight.
        kwargs: cross_section settings.

    .. code::
          _
        _| |_ this is a row
        _ this is a column
    """
    bend180 = bend180(**kwargs) if callable(bend180) else bend180
    straightx = straight(length=straight_length, **kwargs)
    # Vertical straights must clear two u-bends plus straight and spacing.
    wg_vertical = straight(
        length=2 * bend180.size_info.width + straight_length + spacing, **kwargs
    )
    # Define a map between symbols and (component, input port, output port)
    symbol_to_component = {
        "D": (bend180, "o1", "o2"),
        "C": (bend180, "o2", "o1"),
        "-": (straightx, "o1", "o2"),
        "|": (wg_vertical, "o1", "o2"),
    }
    # Generate the sequence of staircases
    s = ""
    for i in range(columns):
        if i % 2 == 0: # even row
            s += "D-C-" * rows + "|"
        else:
            s += "C-D-" * rows + "|"
    s = s[:-1]
    c = component_sequence(
        sequence=s, symbol_to_component=symbol_to_component, start_orientation=0
    )
    c.info["n_bends"] = rows * columns * 2 + columns * 2 - 2
    return c
# Convenience variants that use circular instead of Euler bends.
cutback_bend180circular = gf.partial(cutback_bend180, bend180=bend_circular180)
cutback_bend90circular = gf.partial(cutback_bend90, bend90=bend_circular)
if __name__ == "__main__":
    # Ad-hoc visual checks; uncomment the variant to inspect.
    # c = cutback_bend()
    # c = cutback_bend90()
    # c = cutback_bend_circular(rows=7, columns=4, radius=5) #62
    # c = cutback_bend_circular(rows=14, columns=4) #118
    # c = cutback_bend90()
    # c = cutback_bend180(rows=3, columns=1)
    # c = cutback_bend(rows=3, columns=2)
    # c = cutback_bend90(rows=3, columns=2)
    c = cutback_bend180(rows=2, columns=2)
    # c = cutback_bend(rows=3, columns=2)
    c.show()
| true |
0170773e410ad6644ba934e7fc5102d8a00a72f6 | Python | martewegger/fys4150 | /project2/main.py | UTF-8 | 5,996 | 2.6875 | 3 | [] | no_license | import os
import sys
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.size'] = 16
#IMPORTANT: The two first functions use the potential for 1 electron. The last function use the potential for 2 electrons. In addition to turning the function on in this python script, the corresponding call to the «initialize» function on main.cpp has to be unhashed.
# Candidate rho_max values scanned by find_rho_max.
rho_max = np.linspace(3,6,10)
# Per-rho_max error arrays filled in by find_rho_max.
acum_rel_err = np.zeros(len(rho_max))
lambda1 = np.zeros(len(rho_max))
lambda2 = np.zeros(len(rho_max))
lambda3 = np.zeros(len(rho_max))
lambda4 = np.zeros(len(rho_max))
# C++ build/run configuration used by all analysis functions below.
cpp_codes = "main.cpp class_code.cpp"
compiler_flags = "-larmadillo -O2"
executeable = "main.out"
run = "./main.out"
# Analytical reference eigenvalues.
lambda_list = np.array((3,7,11,15))
# The call «my_solver.rel_err_rho_max(outfilename);» in main.cpp has to be unhashed before running this function.
def find_rho_max(run=False):
    """Scan the rho_max candidates, run the C++ solver for each, and plot the
    relative errors; saves the best rho_max per eigenvalue to rho_values.npy.

    Args:
        run: when True, (re)compile the C++ sources before running.
    """
    n = 350
    # The parameter `run` shadows the module-level command string `run`
    # ("./main.out"); joining the boolean parameter into the command raised a
    # TypeError, so fetch the executable command explicitly.
    main_cmd = globals()["run"]
    if run:
        os.system("echo compiling...")
        os.system(" ".join(["c++", "-o", executeable, cpp_codes, compiler_flags]))
    for i in range(len(rho_max)):
        filename = "max_rel_err.txt"
        print('Running for rho_max= %f' % rho_max[i])
        os.system(" ".join([main_cmd, str(rho_max[i]), str(filename), str(n)]))
        # The solver writes one error per reference eigenvalue plus the
        # accumulated error as the last entry.
        data = np.array((np.loadtxt(filename)))
        lambda1[i] = data[0]
        lambda2[i] = data[1]
        lambda3[i] = data[2]
        lambda4[i] = data[3]
        acum_rel_err[i] = data[4]
    # Accumulated relative error vs rho_max, with its minimum highlighted.
    indx = np.argmin(acum_rel_err)
    plt.figure(); plt.title('Acumulative relative error as a function of $\\rho_{max}$')
    plt.plot(rho_max, acum_rel_err, c='k', lw=0.8,ls='solid')
    plt.plot(rho_max[indx], acum_rel_err[indx], 'ro', label='(%.2f, %.2f)' % (rho_max[indx],acum_rel_err[indx]))
    plt.xlabel('$\\rho_{max}$')
    plt.ylabel('Relative error')
    plt.legend(fontsize=16)
    plt.savefig('max_rel_err.png')
    # Per-eigenvalue relative error vs rho_max.
    # NOTE(review): the legend says lambda_2 = 5 while lambda_list holds 7 --
    # confirm which is correct against the C++ program.
    plt.figure();plt.title('Relative error as a function of $\\rho_{max}$')
    indx1 = np.argmin(lambda1)
    plt.plot(rho_max,lambda1, 'r', label='$\\lambda_1 = 3$')
    plt.plot(rho_max[indx1], lambda1[indx1], 'ro', label='(%.2f, %.1e)' % (rho_max[indx1],lambda1[indx1]))
    indx2 = np.argmin(lambda2)
    plt.plot(rho_max,lambda2,'b', label='$\\lambda_2 = 5$')
    plt.plot(rho_max[indx2], lambda2[indx2], 'bo', label='(%.2f, %.1e)' % (rho_max[indx2],lambda2[indx2]))
    indx3 = np.argmin(lambda3)
    plt.plot(rho_max,lambda3,'g', label='$\\lambda_3 = 11$')
    plt.plot(rho_max[indx3], lambda3[indx3], 'go', label='(%.2f, %.1e)' % (rho_max[indx3],lambda3[indx3]))
    indx4 = np.argmin(lambda4)
    plt.plot(rho_max,lambda4, 'm',label='$\\lambda_4 = 15$')
    plt.plot(rho_max[indx4], lambda4[indx4], 'mo', label='(%.2f, %.1e)' % (rho_max[indx4],lambda4[indx4]))
    plt.xlabel('$\\rho_{max}$')
    plt.ylabel('Relative error')
    plt.legend()
    plt.savefig('rel_err_lmbda.png')
    os.system("rm max_rel_err.txt")
    # Persist the best rho_max per eigenvalue (plus the accumulated-error one)
    # for find_optimal_N / vary_w_r.
    final = np.array((rho_max[indx1], rho_max[indx2], rho_max[indx3], rho_max[indx4], rho_max[indx]))
    np.save('rho_values.npy', final)
#find_rho_max(run=True)
# For the two next functions, the «my_solver.rel_err(outfilename, indx);» call in main.cpp has to be unhashed.
def find_optimal_N(run_cpp = False): # find the optimal n for each lambda
    """For each saved rho_max, grow the matrix size N in steps of 10 until the
    relative error drops below tol (or N reaches 650); plots error vs N.
    Requires rho_values.npy produced by find_rho_max.
    """
    omega_r = 0
    plt.figure();plt.title('Relative error as a function of matrix size, $N$')
    rho_vals = np.load('rho_values.npy') # rho_max values for the different lambdas and the accumulated error
    filename = "n_analysis.txt"
    if run_cpp==True:
        os.system("echo compiling...")
        os.system(" ".join(["c++", "-o", executeable, cpp_codes, compiler_flags]))
    err = 1
    tol = 1e-4
    n_max = 0
    for i in range(len(rho_vals)-1): # skip the last entry (the accumulated-error rho_max)
        print("\n")
        print("--------------------------------------------------------")
        n_list = []
        err_list = []
        n_min = 450
        n = n_min
        err = 1
        rho_max = rho_vals[i]
        os.system("echo evaluating rho_max= %f for lambda%d" % (rho_max, i+1))
        while err >tol:
            n_list.append(n)
            os.system("echo running for n= %d" % n)
            os.system(" ".join([run, str(rho_max), str(filename), str(n), str(i), str(omega_r)]))
            err = np.loadtxt(filename)
            err_list.append(err)
            print("Relative error= ", err)
            n += 10
            if n>=650:
                print(" No success, but break cause n too big..")
                break
        if n > n_max:
            n_max = n
        print("Possible success at n = %d" % (n-10))
        print("relative error= %g" % err)
        plt.plot(n_list,err_list, '.', label='$\\lambda_%d$' % (i+1))
    plt.plot([n_min,n_max], [1e-4,1e-4], c='k', label='tolerance')
    plt.xlabel('$N$')
    plt.ylabel('Relative error')
    plt.legend(fontsize=16)
    plt.savefig('n_analysis.png')
    os.system(" ".join(["rm", str(filename)]))
#find_optimal_N(run_cpp=True)
def vary_w_r():
    """Run the two-electron solver for several oscillator frequencies omega_r
    and plot the relative error per lambda. Requires rho_values.npy.
    """
    n=350
    colors = np.array(('r', 'b', 'g', 'orange'))
    plt.figure(figsize=(10,10));plt.title('Relative error for different $\\omega_r$')
    w_r = np.array((0.01, 0.5, 1, 5))
    os.system("echo compiling...")
    os.system(" ".join(["c++", "-o", executeable, cpp_codes, compiler_flags]))
    filename = 'omega_test.txt'
    rho_vals = np.load('rho_values.npy')
    os.system("echo running omega_r analysis")
    for i in range(len(lambda_list)):
        rho_max = rho_vals[i]
        err_list = []
        for j in range(len(w_r)):
            os.system(" ".join([run, str(rho_max), str(filename), str(n), str(i), str(w_r[j])]))
            err_list.append(np.loadtxt(filename))
        # Plot twice: once with a label for the legend, once plain.
        plt.plot(w_r,err_list, c = colors[i], label='$\\lambda_%d$' % (i+1))
        plt.plot(w_r,err_list, c = colors[i])
    plt.xlabel('$\\omega_r$')
    plt.ylabel('Relative error')
    plt.legend(fontsize=16)
    plt.savefig('test_w_r.png')
    os.system(" ".join(["rm", str(filename)]))
#vary_w_r()
| true |
51831de3dc08c2fa241e37f29c8ed670ee79dafb | Python | sekil9529/django-demo | /libs/error_code/enum.py | UTF-8 | 1,498 | 2.875 | 3 | [] | no_license | # coding: utf-8
"""错误码枚举类"""
from __future__ import annotations
from typing import NamedTuple
from enum import Enum, EnumMeta, unique
from types import DynamicClassAttribute
__all__ = (
'ECData',
'BaseECEnum',
)
class ECData(NamedTuple):
    """Error-code payload: a code plus its human-readable message."""

    code: str  # error code
    message: str  # error message

    def __eq__(self, other: object) -> bool:
        """Two ECData compare equal when their codes match; messages are ignored."""
        if not isinstance(other, ECData):
            # Let Python try the reflected operation / fall back to identity.
            # (The original `raise NotImplemented` raised a TypeError because
            # NotImplemented is not an exception type.)
            return NotImplemented
        return self.code == other.code

    def __hash__(self) -> int:
        # Defining __eq__ alone would set __hash__ to None; keep the eq/hash
        # contract by hashing on the code.
        return hash(self.code)
class _ECEnumMeta(EnumMeta):
def __new__(mcs, *args, **kwargs):
enum_class = super(_ECEnumMeta, mcs).__new__(mcs, *args, **kwargs)
# 全部code
enum_class._member_codes_ = tuple(enum.value.code for enum in enum_class)
# code唯一
return unique(enum_class)
@property
def codes(cls) -> tuple[str, ...]:
return cls._member_codes_
class BaseECEnum(Enum, metaclass=_ECEnumMeta):
    """Base class for error-code enumerations.

    Usage::

        class ECEnum(BaseECEnum):
            ServerError = ECData(code="500", message="服务异常,请稍后重试")
    """
    @DynamicClassAttribute
    def code(self):
        """Error code carried by this member's ECData value."""
        return self._value_.code
    @DynamicClassAttribute
    def message(self):
        """Human-readable message carried by this member's ECData value."""
        return self._value_.message
    @DynamicClassAttribute
    def error(self):
        """Symbolic error identifier: the member's name."""
        return self._name_
| true |
df3affb773c2f476a87da94aa138f61b1a099c20 | Python | KamilBabayev/Scripts | /aws_s3_boto_daily_backuper.py | UTF-8 | 1,332 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python3
import os
import boto3
from datetime import datetime
day=str(datetime.now())[:10]
access_key = '***************'
secret_key = '************************'
subfolder = day + '/'
#subfolder='2017-09-05/'
records = '/var/lib/freeswitch/recordings'
actual_day = records + '/' + day
#actual_day = records + '/' + '2017-09-05'
print(subfolder)
print(actual_day)
print('------------------')
# NOTE(review): credentials are hard-coded above (values redacted); they
# should come from the environment or an AWS credentials profile instead.
# The put_object below creates today's key prefix and runs at import time.
client = boto3.client('s3', aws_access_key_id=access_key,
                      aws_secret_access_key=secret_key)
client.put_object(Bucket='backupfs01', Key=subfolder)
def upload_files(path, subfolder):
    """Upload every file under ``path`` to the ``backupfs01`` bucket.

    Each file is stored under the S3 key ``subfolder + <path relative to
    path>``, preserving the local directory layout.

    Args:
        path: local directory that is walked recursively.
        subfolder: S3 key prefix for this upload (should end with '/').
    """
    session = boto3.Session(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    s3 = session.resource('s3')
    bucket = s3.Bucket('backupfs01')
    for subdir, dirs, files in os.walk(path):
        for file in files:
            full_path = os.path.join(subdir, file)
            # Key = prefix + path relative to the walked root; relpath is
            # robust to a trailing separator on `path`, unlike slicing.
            key = subfolder + os.path.relpath(full_path, path)
            with open(full_path, 'rb') as data:
                bucket.put_object(Key=key, Body=data)
if __name__ == "__main__":
    # Back up today's recordings directory under today's S3 key prefix.
    upload_files(actual_day, subfolder)
| true |
beeab924f7d139eafe3fc58557a3798554d35331 | Python | aleedom/DjangularMessaging | /authentication/models.py | UTF-8 | 988 | 2.5625 | 3 | [] | no_license | from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
class AccountManager(BaseUserManager):
    """Manager with creation helpers for the custom Account model."""
    def create_user(self, username, password=None,):
        """Create, hash the password for, and persist a regular account."""
        if not username:
            raise ValueError('Users must have a valid username.')
        new_account = self.model(username=username)
        new_account.set_password(password)
        new_account.save()
        return new_account
    def create_superuser(self, username, password):
        """Create an account and promote it to admin."""
        superuser = self.create_user(username, password)
        superuser.is_admin = True
        superuser.save()
        return superuser
class Account(AbstractBaseUser):
    """Custom user model that authenticates with a username only."""
    # Unique login identifier.
    username = models.CharField(max_length=40, unique=True)
    # Simple admin flag (set by AccountManager.create_superuser).
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager providing create_user / create_superuser.
    objects = AccountManager()
    # Field Django treats as the unique identifier for authentication.
    USERNAME_FIELD = 'username'
    def __str__(self):
        """Return the username as the model's string representation."""
        return self.username
| true |
d16696c1bdeb9318c1bec92b083818fb8d7e9919 | Python | art-vybor/twnews | /core/twnews/recommend.py | UTF-8 | 3,200 | 2.6875 | 3 | [] | no_license | import heapq
import logging
from scipy import sparse
from twnews.utils.extra import progressbar_iterate
def get_index_of_correct_news(tweet, news_list):
    """Return the 1-based rank of the tweet's true news within the ranking.

    ``news_list`` is a list of (news, score) pairs.  It is ranked by score
    in descending order and the first news whose ``link`` appears among the
    tweet's URLs determines the rank.  Raises if no news matches.
    """
    ranked = sorted(news_list, key=lambda pair: pair[1], reverse=True)
    for rank, (candidate, _score) in enumerate(ranked, start=1):
        if candidate.link in tweet.urls:
            return rank
    raise Exception('Correct news not founded')
def recommend(news, tweets, top_size=10, evaluate=False):
    """Rank news items for each tweet by cosine similarity of compare vectors.

    Args:
        news: documents exposing a sparse ``compare_vector`` attribute.
        tweets: documents exposing a sparse ``compare_vector`` attribute.
        top_size: number of best-matching news items kept per tweet.
        evaluate: when True, also record (via get_index_of_correct_news)
            the 1-based rank of each tweet's true news in the full ranking.

    Returns:
        ``(recommendation, correct_news_idxs)`` where recommendation is a
        list of ``(tweet, [(news, score), ...])`` pairs and
        correct_news_idxs is empty unless ``evaluate`` is True.
    """
    # Local import: cosine_similarity is only needed inside this function.
    from sklearn.metrics.pairwise import cosine_similarity
    def convert_to_compare_matrix(documents):
        # Stack each document's sparse compare_vector as one column of a
        # (dim x len(documents)) CSR matrix.
        dim = documents[0].compare_vector.shape[0]
        data, row_idxs, column_idxs = [], [], []
        for column_idx, document in enumerate(documents):
            # sparse.find yields the (row, col, value) triplets of the vector.
            rows, _, values = sparse.find(document.compare_vector)
            for i, value in enumerate(values):
                data.append(values[i])
                row_idxs.append(rows[i])
                column_idxs.append(column_idx)
        compare_matrix = sparse.csr_matrix((data, (row_idxs, column_idxs)), shape=(dim, len(documents)))
        return compare_matrix
    logging.info('convert tweets to compare matrix')
    tweets_matrix = convert_to_compare_matrix(tweets)
    logging.info('convert news to compare matrix')
    news_matrix = convert_to_compare_matrix(news)
    logging.info('build cosine similarity matrix')
    # Rows of `mat` correspond to tweets, columns to news items (transposed
    # because documents were stored column-wise above).
    mat = cosine_similarity(tweets_matrix.T, news_matrix.T)
    logging.info('build recommendation')
    recommendation = []
    correct_news_idxs = []
    for tweet_idx, tweet in enumerate(progressbar_iterate(tweets)):
        # Pair every news item with its similarity score for this tweet.
        news_list = [(single_news, mat[tweet_idx][news_idx]) for news_idx, single_news in enumerate(news)]
        news_list_top = heapq.nlargest(top_size, news_list, key=lambda x: x[1])
        if evaluate:
            correct_news_idxs.append(get_index_of_correct_news(tweet, news_list))
        recommendation.append((tweet, news_list_top))
    return recommendation, correct_news_idxs
def set_compare_vector(documents, Q):
    """Attach column ``i`` of the sparse matrix ``Q`` to ``documents[i]``."""
    logging.info('Start setting of compare vector for {NUM} documents'.format(NUM=len(documents)))
    n_columns = Q.shape[1]
    for col in progressbar_iterate(range(n_columns)):
        documents[col].set_compare_vector(Q[:, col])
def dump_to_csv(recommendation, filename, score_threshold=0.4):
    """Write tweets whose best news score exceeds ``score_threshold``.

    Args:
        recommendation: list of ``(tweet, [(news, score), ...])`` pairs,
            with each tweet's news list ordered best-first.
        filename: path of the text report to write.
        score_threshold: minimum top score for a tweet to be included; the
            same threshold also filters individual news lines.
    """
    # Materialize the filtered list: a lazy ``filter`` object (Python 3)
    # has no len() and would be exhausted before the loop below ran.
    recommendation_filtered = [pair for pair in recommendation
                               if pair[1][0][1] > score_threshold]
    with open(filename, 'w') as f:
        f.write('total_tweets: %s\nrecommended_tweets: %s\n' % (len(recommendation), len(recommendation_filtered)))
        for i, (tweet, news_list) in enumerate(recommendation_filtered):
            f.write('%d) %s\n' % (i, tweet.text.replace('\n', ' ').encode('utf-8')))
            f.write('%s\n' % tweet.tweet_id)
            if tweet.urls:
                f.write('%s\n' % ' '.join(tweet.urls))
            for news, score in news_list:
                if score > score_threshold:
                    f.write('\t%s %s\n' % (news, score))
                    f.write('\t%s\n' % news.link)
            f.write('\t---------------------\n')
6d03bf60deb6075e95e436fef76552c89655e841 | Python | Zihaokong/DeepLearning | /CSE151B_PA3/datasets.py | UTF-8 | 1,953 | 2.953125 | 3 | [] | no_license | import torch
from torch.utils.data import Dataset
import torch
from PIL import Image
import os
from torchvision import transforms
import numpy as np
# Dataset class to preprocess your data and labels
# You can do all types of transformation on the images in this class
class bird_dataset(Dataset):
    """Dataset of bird images listed in an index file under ``root``.

    Each line of the index file is ``<relative image path> <integer label>``;
    images live in ``<root>/images``.
    """
    def __init__(self, root, file_path):
        """Read the index file and record absolute image paths and labels.

        Args:
            root: dataset directory (relative to the CWD, or absolute),
                containing an ``images`` subdirectory.
            file_path: name of the index file inside ``root``.
        """
        # Directory that holds the actual image files.
        self.pic_dir = os.path.join(os.getcwd(), root, "images")
        # Read the index file; the context manager closes the handle
        # (the original leaked it).
        with open(os.path.join(root, file_path), 'r') as f_index:
            self.lines = f_index.readlines()
        self.labels = []
        # Replace each index line with the image's absolute path and
        # collect its integer label.
        for i in range(len(self.lines)):
            line = self.lines[i].split()
            self.lines[i] = os.path.join(self.pic_dir, line[0])
            self.labels.append(int(line[1]))
        # Sanity check: one label per image path.
        if len(self.lines) != len(self.labels):
            raise Exception("y size aren't match x size")
        # NOTE(review): the original computed an unused
        # np.random.permutation here; it only advanced the global NumPy RNG
        # state, so it has been removed.
    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.lines)
    def __getitem__(self, item):
        """Load image ``item``, center-crop to 224x224, normalize, and
        return ``(image_tensor, label)``."""
        # ToTensor + normalization with ImageNet channel statistics.
        train_transforms = transforms.Compose([transforms.ToTensor(), \
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        # Images are read lazily, one per access.
        image = Image.open(self.lines[item]).convert("RGB")
        image = transforms.CenterCrop(224)(image)
        image = train_transforms(image)
        return image, self.labels[item]
| true |
23fa5c2b847fd15208908402f7da0b6414ad70cd | Python | mishav78/conversational-summarization | /src/data/make_dataset.py | UTF-8 | 4,424 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from glob import glob
import json
import torch
from torch.utils.data.dataset import TensorDataset, random_split
from transformers import BartTokenizer
import numpy as np
def text_cleaner(text: str):
"""
Removes \r and \n from a text string.
:param text: (str) the text you want cleaned.
:returns: (str) the cleaned text
"""
text = text.replace("\r", "")
text = text.replace("\n", " ")
return text
@click.command()
@click.argument("input_filepath", type=click.Path(exists=True))
@click.argument("output_filepath", type=click.Path())
def main(input_filepath: str, output_filepath: str):
"""
Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
:param input_filepath: (str) the directory that contains your json files.
:param output_filepath: (str) the directory you want to store your train/test/val data in.
:returns: None
"""
logger = logging.getLogger(__name__)
logger.info("making final data set from raw data")
files = glob(f"{input_filepath}/*.json")
data = []
for file in files:
with open(file, "r") as in_file:
data += json.load(in_file)
in_file.close()
dialogues = [text_cleaner(d["dialogue"]) for d in data]
summaries = [text_cleaner(d["summary"]) for d in data]
logger.info("seperated dialogues and summaries")
tokenizer = BartTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
dialogue_tokens = tokenizer.prepare_seq2seq_batch(
dialogues, padding="longest", truncation=True, return_tensors="np"
)
summary_tokens = tokenizer.prepare_seq2seq_batch(
summaries, padding="longest", truncation=True, return_tensors="np"
)
logger.info("Tokenized Texts")
num_dialogues, longest_dialogue = dialogue_tokens["input_ids"].shape
num_summaries, longest_summary = summary_tokens["input_ids"].shape
assert num_dialogues == num_summaries
dialogue_lens = np.sum(dialogue_tokens["attention_mask"], axis=1)
summary_lens = np.sum(summary_tokens["attention_mask"], axis=1)
ratio = summary_lens / dialogue_lens
dialogues = np.array(dialogues)[ratio < 1]
summaries = np.array(summaries)[ratio < 1]
logger.info(
"Removed instances where the summary was equal to or longer than the dialogue."
)
dialogue_tokens = tokenizer.prepare_seq2seq_batch(
dialogues.tolist(), padding="longest", truncation=True, return_tensors="pt"
)
summary_tokens = tokenizer.prepare_seq2seq_batch(
summaries.tolist(), padding="longest", truncation=True, return_tensors="pt"
)
logger.info("Tokenized into PyTorch Tensors")
num_dialogues, longest_dialogue = dialogue_tokens["input_ids"].shape
num_summaries, longest_summary = summary_tokens["input_ids"].shape
assert num_dialogues == num_summaries
dataset = TensorDataset(
dialogue_tokens["input_ids"],
dialogue_tokens["attention_mask"],
summary_tokens["input_ids"],
)
train_size = int(dialogue_tokens["input_ids"].shape[0] * 0.80)
test_size = int(dialogue_tokens["input_ids"].shape[0] * 0.10)
val_size = int(dialogue_tokens["input_ids"].shape[0]) - train_size - test_size
assert train_size + test_size + val_size == int(
dialogue_tokens["input_ids"].shape[0]
)
train, test, val = random_split(
dataset=dataset, lengths=(train_size, test_size, val_size)
)
torch.save(train, f"{output_filepath}/train_dataset.pt")
logger.info("Saved train_dataset.pt...")
torch.save(test, f"{output_filepath}/test_dataset.pt")
logger.info("Saved test_dataset.pt...")
torch.save(val, f"{output_filepath}/val_dataset.pt")
logger.info("Saved val_dataset.pt...")
logger.info("Done!")
if __name__ == "__main__":
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| true |
0a0c58460240e79d1ee745c5af032efa963a2dfa | Python | joshua-lai/ETNP_TMAO_metagenomics | /formatBlastHits_v6.py | UTF-8 | 8,926 | 2.84375 | 3 | [] | no_license | import re
import sys
import os
from Bio import SeqIO
def formatAccession(preAccession):
"""gets accesion from within the |'s or the entirety
this was made with first seeing clara's prokdb but i think most things are fine w/o it"""
if '|' in preAccession:
start = preAccession.find('|')
end = preAccession.rfind('|')
postAccession = preAccession[start + 1:end]
else:
postAccession = preAccession
return(postAccession)
def parseBlastHits(blastHitFilepath, dbName):
"""takes in a string of a filepath to a fasta formatted document
returns three lists, one as a list of unique acession numbers
a corresponding list of minimum e-values
and a corresponding list of names formatted to remove non-alphanumeric characters"""
accessionList = []
eValList = []
nameList = []
with open (blastHitFilepath, 'r') as blastHits:
for entry in blastHits:
# split the line by whitespace
line = entry.split("\t")
if len(line) > 1:
# sequence id as the second item, possibly surrounded by '|'s
try:
if dbName == "prokdb":
accession = formatAccession(line[1])
else:
accession = line[1]
except:
print("error in accession with line", line)
accession = "fake_someErrorHappened"
break
# e-value as the third item
eVal = float(line[2])
# name as fourth item, non-alphanumeric replaced w/ '_'
name = re.sub('[^0-9a-zA-Z]+', '_', line[3])
#adds sequence id, eval, name to a list
if accession not in accessionList:
accessionList += [accession]
eValList += [eVal]
nameList += [name]
# if name already included, replaces e-value with the smaller e-value
else:
indexOfMatch = accessionList.index(accession)
if eValList[indexOfMatch] > eVal:
eValList[indexOfMatch] = min(eValList[indexOfMatch], eVal)
nameList[indexOfMatch] = name
return(accessionList, eValList, nameList)
def copyQuerySequences(querySequenceFilepath, accessionList, sequenceList):
"""takes in the filepath of the query sequences
or 'na' to not add it.
returns three lists:
a list of accession numbers / sequence ids,
a list of corresponding names,
a list of corresponding sequences"""
accessionListQ = []
nameListQ = []
sequenceListQ = []
if querySequenceFilepath != "na":
with open (querySequenceFilepath, 'r') as querySeq:
for record in SeqIO.parse(querySeq, "fasta"):
#format accession
accession = record.id
#format name from description
name = record.description.split(maxsplit = 1)[1]
name = [re.sub('[^0-9a-zA-Z]+', '_', name)]
if accession not in accessionList:
if record.seq not in sequenceList:
accessionListQ += [accession]
nameListQ += [name]
sequenceListQ += [record.seq]
return(accessionListQ, nameListQ, sequenceListQ)
def crossReferenceSequences (blastDatabaseAsFastaFilepath, accessionList, minSeqLength, errorOutfile):
"""takes in the filepath of initial sequence that were blasted
takes in a list of accession numbers / sequence id's
returns a list of the corresponding sequences"""
sequenceList = ["nothing_found"] * len(accessionList)
numU = 0
numDup = 0
numFrag = 0
fastaFilesToLookIn = []
# searches through a variety of fasta files if needed, kinda assumes exclusive of each other
# prefers later copies if not
if blastDatabaseAsFastaFilepath[-1] == "/":
filenames = os.listdir(blastDatabaseAsFastaFilepath)
for file in filenames:
if ((file[-3:] == ".fa") or (file[-4:] == ".faa")) or ((file[-6:] == ".fasta") or (file[-4:] == ".txt")):
fastaFilesToLookIn += [blastDatabaseAsFastaFilepath + file]
else:
fastaFilesToLookIn = [blastDatabaseAsFastaFilepath]
# opens fasta files to search
print(accessionList)
for fastaFile in fastaFilesToLookIn:
with open (fastaFile, 'r') as bdFasta:
with open (errorOutfile, 'w') as errorOutput:
#print("opening ",bdFasta)
for record in SeqIO.parse(bdFasta, "fasta"):
#format accession of record.id for comparison
accession = record.id
#print(accession, accession in accessionList)
if accession in accessionList:
#if accession in the accession list, copy the corresponding sequence
accessionMatchIndex = accessionList.index(accession)
sequenceToCopy = str(record.seq)
# if sequence already in the list or contains a U, adjusts it, and copies the sequence to list
if sequenceToCopy in sequenceList:
sequenceMatchIndex = sequenceList.index(sequenceToCopy)
sequenceList[accessionMatchIndex] = "!dupOf" + str(accessionList[sequenceMatchIndex])
errorOutput.write(f"{accession} is a duplicate of {accessionList[sequenceMatchIndex]}\n")
numDup += 1
# trim short sequences
elif len(sequenceToCopy) < minSeqLength:
errorOutput.write(f"{accession} was too short with only {len(sequenceToCopy)} aa's\n")
sequenceList[accessionMatchIndex] = "!tooShort" + str(len(sequenceToCopy))
numFrag += 1
elif "U" in sequenceToCopy:
errorOutput.write(f"selenocysteine (U) in {record}, not removed\n")
#print("AAHHH! why is there a 'U' in " + str(record) + ", U removed")
sequenceList[accessionMatchIndex] = sequenceToCopy#.replace('U','')
numU += 1
# copies sequence into sequence list
else:
sequenceList[accessionMatchIndex] = sequenceToCopy
errorOutput.write(f"{accession} added\n")
errorOutput.write(f"There were {numDup} duplicates\n")
errorOutput.write(f"There were {numFrag} sequences shorter than {minSeqLength}\n")
errorOutput.write(f"There were {numU} sequences with a U in them\n")
return(sequenceList)
def writeFasta(accessionList, nameList, sequenceList, eValList, outputFilepath, dbName):
"""takes in a list of accession numbers, a corresponding list of names
and a corresponding sequence of sequences
takes in a filepath to write a fasta formatted file to"""
with open (outputFilepath, 'w') as outfile:
for ii in range(len(accessionList)):
if sequenceList[ii][0] != "!":
outfile.write(f">{accessionList[ii]} {nameList[ii]} {dbName}_{eValList[ii]}\n")
temp = str(sequenceList[ii])
outfile.write(f"{temp}\n")
def main(args):
ARGS_EXPECTED = 8
name, blastHitFilepath, querySequenceFilepath, blastDatabaseAsFastaFilepath, outputFilepath, errorOutfile, dbName, minSeqLength = args[0:ARGS_EXPECTED]
for num, otherArgs in enumerate(args[ARGS_EXPECTED:]):
print("argument",num+ARGS_EXPECTED,": \"",otherArgs,"\" not used")
# reads a fasta file with the blast hits
accessionList, eValList, nameList = parseBlastHits(blastHitFilepath, dbName)
# cross references the accessions agianst the sequences for the blast database
sequenceList = crossReferenceSequences (blastDatabaseAsFastaFilepath, accessionList, int(minSeqLength), errorOutfile)
# could add the original query sequences
accessionListQ, nameListQ, sequenceListQ = copyQuerySequences(querySequenceFilepath, accessionList, sequenceList)
accessionList += accessionListQ
eValList += ["0.0"]*len(accessionList)
nameList += nameListQ
sequenceList += sequenceListQ
# writes results and sequences to a fasta file
writeFasta(accessionList, nameList, sequenceList, eValList, outputFilepath, dbName)
if __name__ == "__main__":
main(sys.argv) | true |
de54dc804b8eb00884dc7a222ca3b2812358a212 | Python | cdalvara/Mini-Python-Interpreter | /Testcases/tc10.py | UTF-8 | 173 | 3.109375 | 3 | [] | no_license | var1=230
var2=450
var3=var1+var2
v4=var3+100
if var1>=var2:
if var3==500:
s="A"
else:
s="B"
else:
if v4<=500:
s="C"
else:
s="d"
print(v4)
print(s) | true |
ee45d4a46b3555c103e1d088663e0a5145505f65 | Python | arpitdixit445/Leetcode-30-day-challenge | /Day_7__Counting_Elements.py | UTF-8 | 777 | 3.65625 | 4 | [] | no_license | '''
Problem Statement -> Given an integer array arr, count element x such that x + 1 is also in arr.
If there're duplicates in arr, count them seperately.
Example 1 -> Input: arr = [1,2,3]
Output: 2
Explanation: 1 and 2 are counted cause 2 and 3 are in arr.
Example 2 -> Input: arr = [1,1,3,3,5,5,7,7]
Output: 0
Explanation: No numbers are counted, cause there's no 2, 4, 6, or 8 in arr.
'''
#Solution - Using A Set : Time O(n), Space O(n)
class Solution:
def countElements(self, arr: List[int]) -> int:
ss = set(arr)
count = 0
for i in arr:
if i+1 in ss:
count += 1
return count
| true |
b75329a1458a240ecb8122b4cfba2892b4c07422 | Python | JoeyNeidigh/robo_cleanup | /scripts/robo_cleanup.py | UTF-8 | 6,035 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
import map_utils
import tf
import actionlib
import numpy as np
from move_base_msgs.msg import MoveBaseGoal, MoveBaseAction
from geometry_msgs.msg import Pose, Point
from geometry_msgs.msg import PoseWithCovarianceStamped
from nav_msgs.msg import OccupancyGrid
from actionlib_msgs.msg import *
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool, Float32MultiArray
class RoboCleanupNode(object):
    """ROS node for a cleanup robot.

    The robot starts in a 'searching' state, driving to exploration goals
    received on the ``new_goal`` topic.  When mess coordinates arrive on
    ``messes_to_clean`` it switches to a 'cleaning' state, drives to each
    mess and ferries it back to the safe zone (the start location).
    """
    def __init__(self):
        rospy.init_node('robo_cleanup')
        # Subscribers/Publishers
        self.map_msg = None
        rospy.Subscriber('map', OccupancyGrid, self.map_callback)
        rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped,
                         self.position_callback)
        rospy.Subscriber('new_goal', MoveBaseGoal, self.new_goal_callback)
        rospy.Subscriber('messes_to_clean', Float32MultiArray, self.mess_arr_callback)
        self.mv_base = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=10)
        self.goal_reached = rospy.Publisher('/goal_reached', Bool, queue_size=10)
        # Other globals
        self.ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        self.position = None   # latest pose (set by position_callback)
        self.goal = None       # current exploration goal (set by new_goal_callback)
        self.searching = True  # state flag: exploring for messes
        self.cleaning = False  # state flag: ferrying messes to the safe zone
        # NOTE(review): cur_mess, mess_id and old_marker are set here but
        # never read elsewhere in this class.
        self.cur_mess = None
        self.mess_id = 1
        self.tf_listener = tf.TransformListener()
        self.old_marker = Point()
        self.old_marker.x = 0
        self.old_marker.y = 0
        # Wait for the map and the robot's position to be initialized
        while self.map_msg is None and not rospy.is_shutdown():
            rospy.loginfo("Waiting for map...")
            rospy.sleep(1)
        self.map = map_utils.Map(self.map_msg)
        while (self.position is None and not rospy.is_shutdown()):
            rospy.loginfo("Waiting for position...")
            rospy.sleep(.1)
        # Initially set the safezone to the start location
        self.safezone = self.position
        counter = 0
        # Main execution loop
        while not rospy.is_shutdown():
            if (self.searching and self.goal is not None): # if in searching state AND has a valid goal
                # if first goal
                if counter == 0:
                    goal = self.goal_message(self.goal.x, self.goal.y, 0)
                    self.ac.send_goal(goal)
                    counter += 1
                # if close enough to the goal OR the goal has been reached OR the goal has been abandoned
                # NOTE(review): `is 4` / `is 3` compares identity against the
                # state codes and only works because CPython caches small
                # ints; `==` would be the safe comparison.
                if (self.close_enough(self.position.position.x,self.position.position.y, self.goal.x, self.goal.y, .7)
                    or self.ac.get_state() is 4 or self.ac.get_state() is 3):
                    goal = self.goal_message(self.goal.x, self.goal.y, 0)
                    self.ac.send_goal(goal)
                    self.goal = None
            if self.cleaning: # if in cleaning state
                # mess_arr is a flat [x0, y0, x1, y1, ...] array; length is
                # the number of (x, y) pairs.
                # NOTE(review): `/` here yields an int only on Python 2;
                # range() would fail on Python 3.
                length = len(self.mess_arr)/2
                count = 0
                # retreive each mess in self.mess_arr
                for i in range(0, length):
                    # i + count == 2*i, so this walks the flat array two
                    # slots (one x/y pair) per iteration.
                    self.drive_to_mess(self.mess_arr[i+count], self.mess_arr[i+1+count])
                    self.take_to_safezone()
                    count += 1
                self.cleaning = False
    def take_to_safezone(self):
        """ Return to safezone with the mess and drop it off """
        mv_back = Twist()
        mv_back.linear.x = -.5
        goal = self.goal_message(self.safezone.position.x, self.safezone.position.y, 0)
        self.go_to_point(goal)
        self.ac.wait_for_result()
        # Back up to release the mess from the plow.
        self.mv_base.publish(mv_back)
    def drive_to_mess(self, mess_x, mess_y):
        """ Drive to the mess located at (mess_x, mess_y) and gather it in the plow """
        goal = self.goal_message(mess_x, mess_y, 0)
        self.go_to_point(goal)
        self.ac.wait_for_result()
    def go_to_point(self, goal):
        """ Sends the robot to a given goal point """
        rospy.loginfo("Waiting for server.")
        self.ac.wait_for_server()
        self.ac.send_goal(goal)
        rospy.loginfo("Goal Sent.")
    def goal_message(self, x_target, y_target, theta_target):
        """ Create a goal message in the base_link coordinate frame """
        # NOTE(review): the docstring says base_link, but frame_id below is
        # "map" — confirm which frame is intended.
        # Convert the yaw angle into a quaternion for the pose orientation.
        quat = tf.transformations.quaternion_from_euler(0, 0, theta_target)
        # Create a goal message ...
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.header.stamp = rospy.get_rostime()
        goal.target_pose.pose.position.x = x_target
        goal.target_pose.pose.position.y = y_target
        goal.target_pose.pose.orientation.x = quat[0]
        goal.target_pose.pose.orientation.y = quat[1]
        goal.target_pose.pose.orientation.z = quat[2]
        goal.target_pose.pose.orientation.w = quat[3]
        return goal
    def map_callback(self, map_msg):
        """ map_msg will be of type OccupancyGrid """
        self.map_msg = map_msg
    def position_callback(self, pos):
        """ Saves the current position of the robot """
        self.position = pos.pose.pose
    def close_enough(self, x_one, y_one, x_two, y_two, distance):
        """ Checks to see if (x_one, y_one) is within the given distance of (x_two, y_two) """
        # Euclidean distance test.
        return (np.sqrt((x_one - x_two)**2 + (y_one - y_two)**2) < distance)
    def new_goal_callback(self, new_goal):
        """ Continuously set self.goal to be a valid goal """
        self.goal = new_goal.target_pose.pose.position
    def mess_arr_callback(self, messes_msg):
        """ Sets self.mess_arr to the array of goals that this robot is responsible for """
        """ and switches the state from searching to cleaning """
        self.mess_arr = messes_msg.data
        self.searching = False
        self.cleaning = True
        # Abort any outstanding exploration goal before cleaning starts.
        self.ac.cancel_goal()
if __name__ == "__main__":
    # Constructing the node runs the full behavior loop until ROS shutdown.
    RoboCleanupNode()
| true |