blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8c1cf791e79842e43470ba37c34c1d7c8ae0be84 | Python | EveryoneHappyAI/ComputerVision_Learning | /OpenCV3_Py_Examples/ConvolveTest.py | UTF-8 | 1,690 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
OpenCV 3 学习示例
卷积特性试验
Created on Fri Jul 21 12:49:40 2017
@author: yf
"""
import cv2
import time
# NOTE(review): `time` and `skimage.io` are imported but never used below.
#help(cv2.namedWindow)
import numpy as np
from skimage import io
from scipy import ndimage
# Laplacian-style 3x3 edge kernel.
# NOTE(review): this first kernel_3x3 is immediately overwritten by the next
# assignment, so it never takes effect -- presumably kept for experimentation.
kernel_3x3 = np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
# Milder 4-neighbour sharpening kernel; this is the one actually used.
kernel_3x3 = np.array([[0, -0.25, 0],
[-0.25, 1, -0.25],
[0, -0.25, 0]])
# 5x5 band-pass style kernel (negative ring, positive centre).
kernel_5x5 = np.array([[-1, -1, -1, -1, -1],
[-1, 1, 2, 1, -1],
[-1, 2, 4, 2, -1],
[-1, 1, 2, 1, -1],
[-1, -1, -1, -1, -1]])
# Input image path; alternates below are left commented for quick switching.
fileStr = "C:\\Users\\yj_7u\\Pictures\\Icon.bmp"
#fileStr = "C:\\Users\\yj_7u\\Pictures\\p2303767259.jpg"
#fileStr = "C:\\Users\\006\\Pictures\\tx.jpg"
#fileStr = "C:\\Users\\006\\Pictures\\half.jpg"
#fileStr = "E:\\Pics\\Mask.jpg"
image = cv2.imread(fileStr)
# Second read with flag 0 loads the same file as single-channel grayscale.
imageGrey = cv2.imread(fileStr, 0)
# OpenCV stores channels as BGR, so index 0 is blue, 2 is red.
imageB = image[:,:,0]
imageG = image[:,:,1]
imageR = image[:,:,2]
# Convolve each channel (and the grayscale image) with the kernels.
k3B = ndimage.convolve(imageB, kernel_3x3)
k3G = ndimage.convolve(imageG, kernel_3x3)
k3R = ndimage.convolve(imageR, kernel_3x3)
k3 = ndimage.convolve(imageGrey, kernel_3x3)
k5 = ndimage.convolve(imageGrey, kernel_5x5)
blurred = cv2.GaussianBlur(image, (11, 11), 0)
# High-frequency residual (unsharp-mask style).
# NOTE(review): both operands are uint8, so the subtraction wraps around
# instead of clamping at zero -- verify this is the intended visual effect.
bluredImg = image - blurred
# NOTE(review): this waitKey blocks BEFORE any window is shown, so the
# script appears to hang until a key is pressed -- likely misplaced.
cv2.waitKey(-1)
cv2.imshow("3x3", k3)
cv2.imshow("5x5", k5)
cv2.imshow("bluredImg", bluredImg)
#image[:,:,0] = k3B
#image[:,:,1] = k3G
#image[:,:,2] = k3R
cv2.imshow("image", image)
cv2.imshow("3x3B", k3B)
cv2.imshow("3x3G", k3G)
cv2.imshow("3x3R", k3R)
cv2.waitKey(-1)
#cv2.destroyWindow()
cv2.destroyAllWindows() | true |
9007ecbd795e61d5837d31586b04f178d2c4c33b | Python | TimoKubera/instagram_monitor | /instagram/testing/test.py | UTF-8 | 1,642 | 2.578125 | 3 | [] | no_license | from urllib.parse import urljoin
from selenium import webdriver
from selenium.webdriver import ActionChains
from bs4 import BeautifulSoup
# Scrape one Instagram profile page with Selenium/Firefox, rewrite all
# relative URLs to absolute ones, then save the page as insta.html.
url = "https://www.instagram.com/jimmyfallon/"
base_url = "https://www.instagram.com/"
geckodriver = "/Users/timo/node_modules/geckodriver/geckodriver"
driver = webdriver.Firefox(executable_path = geckodriver)
driver.get(url)
# In English the button may be labelled "Accept" (German UI: "Akzeptieren").
driver.find_element_by_xpath("//*[text()='Akzeptieren']").click()
driver.find_element_by_class_name("dCJp8").click()
content = driver.page_source
# NOTE(review): no parser argument -- bs4 will warn and pick a default;
# pass e.g. BeautifulSoup(content, "html.parser") to make this deterministic.
soup = BeautifulSoup(content)
driver.close()
# Make every img src absolute (src=True skips <img> tags without a src).
for img in soup.find_all("img", src=True):
    if not img["src"].startswith('http'):
        img["src"] = urljoin(base_url, img["src"])
for img in soup.find_all("img", srcset=True):
    if not img["srcset"].startswith('http'):
        img["srcset"] = urljoin(base_url, img["srcset"])
# NOTE(review): unlike the img loops, these two do not filter on the
# attribute being present -- an <a> or <link> without href raises KeyError.
for a in soup.find_all("a"):
    if not a["href"].startswith("http"):
        a["href"] = urljoin(base_url, a["href"])
for link in soup.find_all("link"):
    if not link["href"].startswith('http'):
        link["href"] = urljoin(base_url, link["href"])
i = 0
for script in soup.find_all("script", src=True):
    if not script["src"].startswith('http'):
        i += 1
        if i == 4:
            # This script is excluded because it keeps refreshing the page:
            #<script crossorigin="anonymous" src="/static/bundles/es6/Vendor.js/c911f5848b78.js" type="text/javascript"></script>
            continue
        script["src"] = urljoin(base_url, script["src"])
# NOTE(review): consider a with-block and an explicit encoding for the output.
f = open("insta.html", "w")
f.write(soup.prettify())
f.close()
f.close() | true |
40e50b8da09610b3245692f42b87fa7360d2282e | Python | mumtazcem/Amazon-meta-graph | /main.py | UTF-8 | 12,470 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import networkx as nx
import plot_creator as pv
import networkx.algorithms.community as nx_comm
import random
import time
# Seed value for betweenness centrality and for random choices
seed = 900
# Most Crowded Modules would be saved to..
g1_modules_file = "most_crowded_modules/g1_modules.csv"
g2_modules_file = "most_crowded_modules/g2_modules.csv"
# Page Ranks would be saved to..
g1_pagerank_file = "page_ranks/g1_pagerank_file.csv"
g2_pagerank_file = "page_ranks/g2_pagerank_file.csv"
# Database
g1_pd = "saved_dataframes/g1Db_clean.csv"
g2_pd = "saved_dataframes/g2Db_clean.csv"
g1_db = pd.read_csv(g1_pd)
g2_db = pd.read_csv(g2_pd)
# Fix nodeId column
num_of_nodes1, col1 = g1_db.shape
g1_db['nodeId'] = np.full((num_of_nodes1,), range(num_of_nodes1))
num_of_nodes2, col2 = g2_db.shape
g2_db['nodeId'] = np.full((num_of_nodes2,), range(num_of_nodes2))
# Generate networkx version of our graphs here
def generate_g1_g2():
    """Load the two thresholded adjacency matrices from CSV and return them
    as networkx graphs (G1, G2), printing node/edge counts for each."""
    # Thresholded
    adj1_file = "saved_adj_matrices/adj1_min.csv"
    adj2_file = "saved_adj_matrices/adj2_min.csv"
    adj_1_pd = pd.read_csv(adj1_file)
    adj_2_pd = pd.read_csv(adj2_file)
    # need to drop index column that is generated by pandas
    adj_1_pd.drop(adj_1_pd.columns[0], axis=1, inplace=True)
    adj_2_pd.drop(adj_2_pd.columns[0], axis=1, inplace=True)
    # convert pd to numpy
    adj_1 = adj_1_pd.to_numpy()
    adj_2 = adj_2_pd.to_numpy()
    # generate nx graphs from adj matrices
    # NOTE(review): nx.from_numpy_matrix was removed in NetworkX 3.0; this
    # code requires NetworkX < 3 (use from_numpy_array on newer versions).
    G1 = nx.from_numpy_matrix(adj_1)
    G2 = nx.from_numpy_matrix(adj_2)
    num_of_nodes, num_of_edges = pv.get_nodes_and_edges_number(G1)
    print("Number of nodes :", num_of_nodes, "number of edges :", num_of_edges)
    # pv.plot_degree_dist(G1)
    num_of_nodes, num_of_edges = pv.get_nodes_and_edges_number(G2)
    print("Number of nodes :", num_of_nodes, "number of edges :", num_of_edges)
    # pv.plot_degree_dist(G2)
    return G1, G2
# Modified Girvan-Newman algorithm from HW3
def modified_girvan_newman_algorithm(g):
    """Girvan-Newman community detection with deterministic tie-breaking.

    Repeatedly removes the edge with highest betweenness centrality and
    tracks the edge-removal state with the best modularity seen so far.
    Returns (max_modularity, components_at_max, graph_copy_at_max).

    WARNING: destructive -- *g* has all of its edges removed by the time
    this returns (the caller's graph is mutated in place).
    """
    initial = nx_comm.modularity(g, [set(g.nodes)], weight='weight')
    max_modularity = initial
    saved_components = []
    saved_graph = nx.Graph()
    while g.number_of_edges() != 0:
        centralities = nx.edge_betweenness_centrality(g, weight='weight', seed=seed)
        # max() returns one of the edges with maximum centrality
        u, v = max(centralities, key=centralities.get)
        # Checking for same maximum centrality score below.
        # NOTE(review): the `> 2` guard skips the tie check when exactly two
        # edges remain -- `>= 2` looks like the intent; confirm before changing.
        if len(sorted(centralities.values(), reverse=True)) > 2:
            centrality_max1 = sorted(centralities.values(), reverse=True)[0]
            centrality_max2 = sorted(centralities.values(), reverse=True)[1]
            if centrality_max1 == centrality_max2:
                # At least two equal max centrality measure detected!
                same_scores = []
                for centrality in centralities:
                    if centralities[centrality] == centrality_max1:
                        same_scores.append(centrality)
                # Pick an edge "randomly" among same scores.
                # NOTE(review): a fresh random.Random(seed) is built each time,
                # so the choice is fully deterministic for a given list.
                u, v = random.Random(seed).choice(same_scores)
            # same score check finishes.
        components = sorted(nx.connected_components(g), key=len, reverse=True)
        # Modularity is only defined for a real partition (2+ components).
        if len(components) > 1:
            fragmented_modularity = nx_comm.modularity(g, components, weight='weight')
            if fragmented_modularity > max_modularity:
                max_modularity = fragmented_modularity
                saved_components = components
                saved_graph = g.copy()
        g.remove_edge(u, v)
    return max_modularity, saved_components, saved_graph
def most_crowded_module(all_components):
    """Return (size, modules): the largest component size and every
    component of exactly that size. For an empty input: (0, [])."""
    biggest = max((len(component) for component in all_components), default=0)
    winners = [component for component in all_components if len(component) == biggest]
    return biggest, winners
# Gets two graphs, runs Girvan Newman algorithm
# Finds connected components. Among connected components,
# it would print out the most crowded connected components
# to csv files under most_crowded_modules folder.
def modularity_calculations(G1, G2, filename1, filename2):
    """Run Girvan-Newman on both graphs, map node ids back to ASINs, and
    write one CSV of modules per graph (one module per row, plus a
    NumberOfNodes column). Returns the two module DataFrames (df1, df2)."""
    start_time = time.time()
    print("****** Modularity Calculation Started ******")
    print("Running G1")
    result_modularity, g1_result_components, result_graph = modified_girvan_newman_algorithm(G1)
    print("Final modularity: ", result_modularity)
    print("Connected components of the graph with maximum modularity: ", g1_result_components)
    g1_max_len, g1_most_crowded_modules = most_crowded_module(g1_result_components)
    print("most_crowded_module include : ", g1_max_len, " nodes.")
    print("most_crowded_modules: ", g1_most_crowded_modules)
    g1_time = time.time()
    print("G1 modularity is finished in --- %s seconds ---" % (g1_time - start_time))
    print("Running G2")
    result_modularity, g2_result_components, result_graph = modified_girvan_newman_algorithm(G2)
    print("Final modularity: ", result_modularity)
    print("Connected components of the graph with maximum modularity: ", g2_result_components)
    g2_max_len, g2_most_crowded_modules = most_crowded_module(g2_result_components)
    print("most_crowded_module include : ", g2_max_len, " nodes.")
    print("most_crowded_modules: ", g2_most_crowded_modules)
    g2_time = time.time()
    print("G2 is finished --- %s seconds ---" % (g2_time - g1_time))
    print("Modularity finished in %s seconds." % (time.time() - start_time))
    print("****** Modularity Calculation Ended ******")
    print("Printing modules of G1..")
    # Translate each module's node ids into the products' ASIN strings.
    g1_modules_asin = []
    for module_ in g1_result_components:
        module_asin = []
        for product in module_:
            module = g1_db[g1_db['nodeId'] == product]
            module_asin.append(module['ASIN'].iat[0])
        g1_modules_asin.append(module_asin)
    # Rows have unequal lengths; pandas pads the short ones with None,
    # which is what the is-not-None count below relies on.
    df1 = pd.DataFrame(g1_modules_asin)
    # the number of nodes in the corresponding module
    df1['NumberOfNodes'] = ""
    for row_index, row in df1.iterrows():
        node_counter = 0
        for column in row:
            if column is not None:
                node_counter += 1
        # minus 1 compensates for the NumberOfNodes column itself ("")
        df1.at[row_index, 'NumberOfNodes'] = node_counter - 1
    # NOTE(review): the opened handle is unused -- to_csv(filename1) opens
    # the path itself; the with-block only pre-truncates the same file.
    with open(filename1, 'w', newline='') as myfile:
        df1.to_csv(filename1)
    print("Printing modules of G2..")
    g2_modules_asin = []
    for module_ in g2_result_components:
        module_asin = []
        for product in module_:
            module = g2_db[g2_db['nodeId'] == product]
            module_asin.append(module['ASIN'].iat[0])
        g2_modules_asin.append(module_asin)
    df2 = pd.DataFrame(g2_modules_asin)
    # the number of nodes in the corresponding module
    df2['NumberOfNodes'] = ""
    for row_index, row in df2.iterrows():
        node_counter = 0
        for column in row:
            if column is not None:
                node_counter += 1
        df2.at[row_index, 'NumberOfNodes'] = node_counter - 1 # minus 1 because of NumberOfNodes column
    with open(filename2, 'w', newline='') as myfile:
        df2.to_csv(filename2)
    return df1, df2
def page_rank_calculations(G1, G2):
    """Compute weighted PageRank for both graphs and return two DataFrames
    (sorted by descending score) indexed by nodeId, with the score in
    column 0 and the product's ASIN looked up from g1_db / g2_db."""
    # G1
    pr = nx.pagerank(G1, alpha=0.9, max_iter=1000, weight='weight')
    # Sort node->score pairs by score, highest first.
    sorted_pr = {k: v for k, v in sorted(pr.items(), key=lambda item: item[1], reverse=True)}
    # print(sorted_pr)
    # orient='index' makes the node id the row index and the score column 0.
    g1_pr_df = pd.DataFrame.from_dict(sorted_pr, orient='index')
    g1_pr_df["ASIN"] = ""
    for index, row in g1_pr_df.iterrows():
        product = g1_db[g1_db['nodeId'] == index]
        g1_pr_df.at[index, 'ASIN'] = product['ASIN'].iat[0]
    # G2
    pr = nx.pagerank(G2, alpha=0.9, max_iter=1000, weight='weight')
    sorted_pr = {k: v for k, v in sorted(pr.items(), key=lambda item: item[1], reverse=True)}
    # print(sorted_pr)
    g2_pr_df = pd.DataFrame.from_dict(sorted_pr, orient='index')
    g2_pr_df["ASIN"] = ""
    for index, row in g2_pr_df.iterrows():
        product = g2_db[g2_db['nodeId'] == index]
        g2_pr_df.at[index, 'ASIN'] = product['ASIN'].iat[0]
    return g1_pr_df, g2_pr_df
# Searches over modules, finds the corresponding module for the input ASIN
def search_modules(modules_dataframe, input_asin):
    """Locate the module (row) that contains *input_asin*.

    Scans the frame row by row and returns a tuple
    (row_index, NumberOfNodes) for the first row in which any cell equals
    *input_asin*. Implicitly returns None when the ASIN is absent.
    """
    for row_id, row in modules_dataframe.iterrows():
        if any(cell == input_asin for cell in row):
            return row_id, modules_dataframe.at[row_id, 'NumberOfNodes']
# Given pagerank and modularity, this method would create the R space that includes
# node's
# ID
# PageRank
# ASIN
# Module degree
# Module that it belongs to
def create_relationship_space(graph_pagerank, graph_modularity):
    """Annotate the pagerank frame, in place, with module membership.

    Adds two columns per node:
      ModuleDegree -- number of nodes in the module this product belongs to
      BelongsTo    -- row index (module id) of that module in graph_modularity

    Returns the same (mutated) pagerank frame.
    """
    graph_pagerank["ModuleDegree"] = ""
    graph_pagerank["BelongsTo"] = ""
    for row_id in graph_pagerank.index:
        asin = graph_pagerank.at[row_id, 'ASIN']
        # search_modules yields (module row index, module size) for this ASIN
        module_id, module_size = search_modules(graph_modularity, asin)
        graph_pagerank.at[row_id, 'BelongsTo'] = module_id
        graph_pagerank.at[row_id, 'ModuleDegree'] = module_size
    return graph_pagerank
# Calculate pagerank sum of the most popular module and return the module degree of the graph
def morphospace_values(graph_pagerank, graph_modularity, is_reading_from_file):
    """Sum the pagerank of every product in the first (most crowded) module
    and return (pagerank_sum, NumberOfNodes of that module).

    *is_reading_from_file* switches column addressing: CSVs round-trip the
    integer column labels as strings ('0', '1', ...) while in-memory frames
    keep them as ints.
    """
    sum_pagerank = 0
    # NOTE(review): `- 2` matches the file case (extra unnamed index column
    # + NumberOfNodes), but for the in-memory frame (no index column) it
    # appears to skip the last product column -- TODO confirm off-by-one.
    for i in range(len(graph_modularity.columns) - 2):
        if is_reading_from_file:
            product_asin = graph_modularity.at[0, str(i)]
        else:
            product_asin = graph_modularity.at[0, i]
        # For file reading, use this below
        # product_asin = graph_modularity.at[0, str(i)]
        # graph_modularity.loc[graph_modularity.index[0], 6]
        # get its pagerank
        for index, row in graph_pagerank.iterrows():
            if product_asin == row['ASIN']:
                # 0 is the column name unfortunately for the pagerank
                if is_reading_from_file:
                    sum_pagerank += row['0']
                else:
                    sum_pagerank += row[0]
                break
    return sum_pagerank, graph_modularity.at[0, 'NumberOfNodes']
def do_calculations_using_file(file1_pr, file2_pr, file1_mod, file2_mod):
    """Recompute the morphospace values from previously saved CSVs
    (pagerank files + module files) instead of re-running the algorithms.

    Prints average pagerank (sum / module degree) and the degree for each
    graph, then returns (sum1, degree1, sum2, degree2).
    NOTE(review): do_all_calculations prints the raw sums, not averages --
    the two code paths report different quantities.
    """
    g1_pagerank = pd.read_csv(file1_pr)
    g2_pagerank = pd.read_csv(file2_pr)
    g1_modularity = pd.read_csv(file1_mod)
    g2_modularity = pd.read_csv(file2_mod)
    # Get morphospace values for graphs
    sum_page_rank1, mod_degree1 = morphospace_values(g1_pagerank, g1_modularity, is_reading_from_file=True)
    sum_page_rank2, mod_degree2 = morphospace_values(g2_pagerank, g2_modularity, is_reading_from_file=True)
    print(sum_page_rank1/mod_degree1)
    print(mod_degree1)
    print(sum_page_rank2/mod_degree2)
    print(mod_degree2)
    return sum_page_rank1, mod_degree1, sum_page_rank2, mod_degree2
# Provide file names to be written to
def do_all_calculations(G1, G2, file_mod1, file_mod2, file_pr1, file_pr2):
    """Run the full pipeline: pagerank + modularity for both graphs, build
    the relationship (R) space, print the morphospace values, and persist
    the annotated pagerank frames.

    Module CSVs are written to file_mod1/file_mod2 (inside
    modularity_calculations); the R-space frames go to file_pr1/file_pr2.
    Returns (sum_page_rank1, mod_degree1, sum_page_rank2, mod_degree2).
    """
    # Calculate pagerank and modules
    g1_pagerank, g2_pagerank = page_rank_calculations(G1, G2)
    g1_modularity, g2_modularity = modularity_calculations(G1, G2, file_mod1, file_mod2)
    # Create the R space (annotates the pagerank frames in place)
    g1_relationship = create_relationship_space(g1_pagerank, g1_modularity)
    g2_relationship = create_relationship_space(g2_pagerank, g2_modularity)
    # Get morphospace values for graphs
    sum_page_rank1, mod_degree1 = morphospace_values(g1_pagerank, g1_modularity, is_reading_from_file=False)
    sum_page_rank2, mod_degree2 = morphospace_values(g2_pagerank, g2_modularity, is_reading_from_file=False)
    print(sum_page_rank1)
    print(mod_degree1)
    print(sum_page_rank2)
    print(mod_degree2)
    # Write to the paths the caller asked for.
    # BUG FIX: the original opened file_pr1/file_pr2 but then called
    # to_csv(g1_pagerank_file)/to_csv(g2_pagerank_file), silently ignoring
    # the parameters and always writing to the module-level constants.
    g1_relationship.to_csv(file_pr1)
    g2_relationship.to_csv(file_pr2)
    return sum_page_rank1, mod_degree1, sum_page_rank2, mod_degree2
# Generate real G1 G2
# G1, G2 = generate_g1_g2()
# do_all_calculations(G1, G2, g1_modules_file, g2_modules_file, g1_pagerank_file, g2_pagerank_file)
# do_calculations_using_file(g1_pagerank_file, g2_pagerank_file, g1_modules_file, g2_modules_file)
| true |
0c1724d19fc377d20511d6f3292fa96459cdb276 | Python | haha1808656980/study_data | /python基础用法/dir方法和getattr.py | UTF-8 | 221 | 3.703125 | 4 | [] | no_license | '''
dir方法和getattr的使用
'''
from datetime import datetime
print(dir(datetime)) # list every method and attribute name of the object
print('*'*50)
print(getattr(datetime,'ctime')) # fetch an attribute of the object by name
| true |
9d0bc48947a332e9ef38fdbac4c527b1a30c03c3 | Python | vipulshah31120/PythonDataStructures | /OccurInSortedArray.py | UTF-8 | 326 | 3.765625 | 4 | [] | no_license | def occurrence(arr, n, x) :
    """Return how many times x occurs in arr[0..n-1] (linear scan)."""
    # NOTE(review): despite the file name, this does not exploit sortedness;
    # a binary search over the first/last occurrence would be O(log n).
    res = 0
    for i in range(n) : # Returns number of times x
        if x == arr[i] : # occurs in arr[0..n-1]
            res += 1
    return res
arr = [1, 2, 2, 2, 2, 3, 4, 2 ,8 ,8]
n = len(arr)
x = 2
print(occurrence(arr, n, x))
| true |
9a8169deca1cd26e5d116f0f5b50568a68546c2f | Python | shahrukh00789/PythonBasics | /tryExceptionalHandling.py | UTF-8 | 215 | 3.96875 | 4 | [] | no_license | print("Enter Number 1")
num1 = input()
print("Enter Number 2")
num2 = input()
# Only the int() conversions / addition can raise, so just that line is
# inside the try; a non-numeric entry prints the ValueError instead of crashing.
try:
    print("The sum of two numbers are ",int(num1)+int(num2))
except Exception as e:
    print(e)
# Always runs, whether or not the conversion above failed.
print("This is Important number")
| true |
26b62c68807a86477ed286c4edf97129ab1ee10f | Python | JoDongHyuen/AI-Study | /정보전산원 수업/Sklearn/Reg/boston_linear.py | UTF-8 | 1,215 | 3.53125 | 4 | [] | no_license | # --------------------------------------
# Boston house-price data
# --------------------------------------
# Module loading ----------------------------------------------
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn import datasets
import matplotlib.pyplot as plt
# Data preparation ---------------------------------------------
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 (ethical concerns about the dataset); this script needs sklearn < 1.2.
bdata = datasets.load_boston()
x_data = bdata.data
y_data = bdata.target
# Split features/labels into training and test sets (30% held out).
x_train, x_test, y_train, y_test = model_selection.train_test_split(x_data, y_data, test_size = 0.3)
print(f"x_train = {len(x_train)}, x_test = {len(x_test)}")
# Modeling ---------------------------------------------------
# (1) create the learning model object
model = LinearRegression()
# (2) fit => learns the slope(s) and intercept
model.fit(x_train, y_train)
# (3) predict y = ax + b on the held-out test features
y_predict = model.predict(x_test)
# (4) evaluate performance (coefficient of determination)
score = metrics.r2_score(y_test, y_predict)
print(f"r2 score => {score}")
# # Plotting (left disabled) ----------------------------------------------
# plt.plot(x_test, y_test)
# plt.grid()
# plt.show() | true |
d57418a4b33fe0c261af4a31dcd70eaa040374f7 | Python | Aasthaengg/IBMdataset | /Python_codes/p03609/s093221150.py | UTF-8 | 79 | 3.28125 | 3 | [] | no_license | a,b=map(int, input().split())
if a < b:
print(0)
if a >= b:
print(a-b) | true |
ca7d66ff83a4d54ff376c625b2e6e1085fc3f1de | Python | trinhgliedt/Algo_Practice | /2020_11_20_parens_valid.py | UTF-8 | 677 | 4.5625 | 5 | [] | no_license | # Page 67 Algo:
# Parens Valid
# Create a function that, given an input string,
# returns a boolean whether parentheses in that
# string are valid. Given input "y(3(p)p(3)r)s" ,
# return true. Given "n(0(p)3" , return false .
# Given "n)0(t(0)k" , return false .
def parensValid(str):
    """Return True when the parentheses in *str* are balanced: every ')'
    closes an earlier '(' and every '(' is eventually closed."""
    depth = 0
    for ch in str:
        if ch == "(":
            # An opener after depth went negative means some earlier ')'
            # had no match -- the string can never recover.
            if depth < 0:
                return False
            depth += 1
        elif ch == ")":
            depth -= 1
    # Balanced only if every opener was closed and we never ended short.
    return depth == 0
print(parensValid("y(3(p)p(3)r)s")) #true
print(parensValid("n(0(p)3")) # false
print(parensValid("n)0(t(0)k")) #false
print(parensValid("()(()(())))")) #false | true |
dc888975e4227c21f67b4899b0f76e5c228052f3 | Python | guerrerobertrand/python | /tests/searchAndReplaceFiles.py | UTF-8 | 1,069 | 3.140625 | 3 | [] | no_license | '''
Created on 19 mai 2015
@author: Bertrand
'''
import fileinput
import sys, os
if __name__ == '__main__':
    print("Search and Replace on multiple files")
    # The top argument for walk
    topdir = "C:\\Users\\Bertrand\\Desktop\\stage\\"
    # The extension to search for
    exten = ".txt"
    # Loop recursively into folders
    for dirpath, dirnames, files in os.walk(topdir):
        for name in files:
            if name.lower().endswith(exten):
                print(os.path.join(dirpath, name))
                file=os.path.join(dirpath, name)
                try:
                    print("Opening file : " + file)
                    # Strip every double-quote character from the file.
                    searchExp = "\""
                    replaceExp = ""
                    # inplace=True redirects stdout into the file being
                    # edited, so the writes below rewrite the file itself.
                    for line in fileinput.input(file, inplace=True):
                        if searchExp in line:
                            line = line.replace(searchExp,replaceExp)
                        sys.stdout.write(line)
                finally:
                    # NOTE(review): fileinput is never explicitly closed
                    # (fileinput.close()); presumably stdout is restored once
                    # the file is exhausted -- verify the 'Done' line does not
                    # end up appended to the edited file on early exit.
                    print('Done, file closed')
#file.close() | true |
2559a736ffab93a4b421a178529d73d54fe1160e | Python | jennifersong/dailyprogrammer | /easy/115-guessthatnumbergame.py | UTF-8 | 988 | 3.96875 | 4 | [] | no_license | #####################################################################
#
# ORIGINAL PROBLEM:
# Write a program that prompts the user to guess a randomly
# chosen integer between 1 and 100, inclusive.
#
# For more information, see the original prompt at
# http://www.reddit.com/r/dailyprogrammer/comments/15ul7q/122013_challenge_115_easy_guessthatnumber_game/
#
#####################################################################
import random
random.seed()
# Secret number, stored as a STRING: the later `guess == num` comparison is
# therefore a string compare, so an entry like "07" will not match "7" and
# falls through to the numeric high/low branch instead.
num = str(random.randint(1, 100))
print "C> Please make a guess of a number in the range of 1 to 100 (inclusive)."
while True:
guess = raw_input("U> ")
if guess == "exit":
print ":'("
break
else:
if guess == num:
print "C> Correct!"
break
else:
try:
print "C> Wrong. That number is too {adjective}.".format \
(adjective="high" if int(guess) > int(num) else "low")
except ValueError:
print "C> That is not a number! Please guess only integers between 1 and 100!" | true |
fae5053bf7d98068ce8301b2d55c4053b346f542 | Python | steadily-worked/July | /DataScience/silicon_valley.py | UTF-8 | 420 | 2.75 | 3 | [] | no_license | %matplotlib inline
import pandas as pd
df = pd.read_csv('data/silicon_valley_summary.csv')
# Boolean masks: male managers, excluding the 'All' race/ethnicity rollup row.
boolean1 = df['gender'] == 'Male'
boolean2 = df['job_category'] == 'Managers'
boolean3 = df['race_ethnicity'] != 'All'
# Bar chart of the race/ethnicity distribution for that filtered group.
df[boolean1 & boolean2 & boolean3].plot(kind='bar', x='race_ethnicity', y='count')
#실리콘 밸리에서 일하는 남자 관리자(Managers)에 대한 인종 분포를 막대 그래프로 그리는 코드. | true |
c43896293aa6adf4ce774bc96e1f209b98972e3d | Python | ZazAndres/Ejercicios_Taller_Lab24 | /punto5.py | UTF-8 | 574 | 3.859375 | 4 | [] | no_license | from typing import Sized
cond="si"
def frecuencia(numero,digito):
    """Count how many times *digito* (0-9) appears among the decimal
    digits of *numero*.

    BUG FIXES vs. the original:
      * negative inputs looped forever (in Python, -1 // 10 == -1), so the
        sign is now stripped first;
      * 0 reported zero digits; it is now treated as the single digit 0.
    """
    numero = abs(numero)  # ignore the sign so the // 10 loop terminates
    if numero == 0:
        # 0 is written with exactly one digit: '0'
        return 1 if digito == 0 else 0
    cantidad = 0
    while numero != 0:
        ultDigito = numero % 10
        if ultDigito == digito:
            cantidad += 1
        numero = numero // 10
    return cantidad
while cond=="si":
num=int(input("ingrese un numero: "))
un_digito=int(input("ingrese un digito: "))
print("frecuencia del digito en el numero:",frecuencia(num,un_digito))
cond=input("¿Quieres volver a ingresar un numero y un digito?\n¿Si o no?\n")
if cond=="no":
print("vuelve pronto amigo")
| true |
754870964b41aa1001bcb37afe6c57fee68d4935 | Python | Fabritsi/Python-labs | /Python-labs/-5/Завдання 3.py | UTF-8 | 432 | 3.78125 | 4 | [] | no_license | x=float(input("Введіть змінну x="))
e=float(input("Введіть точність е="))
import math
# Partial product for Euler's formula sin(x) = x * prod(1 - x^2/(n^2 * pi^2)).
d=x
n=2
# NOTE(review): operator precedence makes this x**2/((n-1)**2) * pi**2, i.e.
# it MULTIPLIES by pi^2 instead of dividing by ((n-1)^2 * pi^2) as the sine
# product formula requires -- confirm the intended expression.
while math.fabs(1-(x**2/((n-1)**2)*(math.pi**2)))>e:
    d*=(1-(x**2/((n-1)**2)*(math.pi**2)))
    n+=1
print("Добуток:{0}".format(d))
# NOTE(review): a signed difference is compared against e; fabs(sin(x)-d)
# is presumably what was meant.
if math.sin(x)-d<e:
    print("Рівність справедлива d=sin(x)")
else:
    print("Рівнсть не справедлива")
| true |
7b19d18fef02443a6eb5979d62d471f8ca0f06c4 | Python | adi0808/setuproject | /Security/hashing.py | UTF-8 | 443 | 3.34375 | 3 | [] | no_license | import hashlib
# Hashing class and methods
class Hashing:
    """Thin dispatcher over the module's hashing helper functions."""

    def hash(self, info, format):
        """Hash *info* with the algorithm named by *format* (e.g. 'sha1')
        and return the resulting checksum string."""
        hasher = get_hashing_format(format)
        return hasher(info)
def get_hashing_format(format):
    """Return the hashing function registered for *format*.

    Raises ValueError for an unknown format name.
    BUG FIX: the original *returned* the ValueError class instead of
    raising, so Hashing.hash would then call ValueError(info) and hand the
    exception instance back as if it were a checksum.
    """
    if format == 'sha1':
        return _sha1_hashing
    raise ValueError('unsupported hashing format: %r' % (format,))
def _sha1_hashing(info):
result = hashlib.sha1(info)
checksum = str(result.hexdigest())
return checksum
| true |
5d44d7fa7a1064c3613c5ee89665bc915883b329 | Python | psavery/hexrdgui | /hexrd/ui/image_file_manager.py | UTF-8 | 4,789 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import tempfile
import yaml
from PySide2.QtWidgets import QMessageBox
from hexrd import imageseries
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.load_hdf5_dialog import LoadHDF5Dialog
class Singleton(type):
    """Metaclass that gives each class using it a single shared instance.

    BUG FIX: the original kept one ``_instance`` attribute on the metaclass
    itself, so every class sharing this metaclass would also have shared a
    single instance (the first one ever built). A dict keyed by class keeps
    each class's singleton separate.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Construct lazily, once per class; later calls return the cached one.
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
class ImageFileManager(metaclass=Singleton):
    """Singleton that loads image series into HexrdConfig().imageseries_dict,
    dispatching on file extension (HDF5, frame-cache .npz, .yml, or raw
    image files wrapped in a temporary 'image-files' YAML description)."""
    # NOTE(review): only referenced from a commented-out branch in open_file.
    IMAGE_FILE_EXTS = ['.tiff', '.tif']
    def __init__(self):
        # Clear any previous images
        HexrdConfig().imageseries_dict.clear()
        # Whether the chosen HDF5 path should be remembered across loads.
        self.remember = True
        # Last remembered [group, dataname] HDF5 path.
        self.path = []
    def load_images(self, detectors, file_names):
        """Open one file per detector and store each series under the
        detector's name; pops a warning dialog and aborts on failure."""
        HexrdConfig().imageseries_dict.clear()
        for name, f in zip(detectors, file_names):
            try:
                if isinstance(f, list):
                    f = f[0]
                ims = self.open_file(f)
                HexrdConfig().imageseries_dict[name] = ims
            # NOTE(review): IOError is a subclass of Exception, so listing
            # both is redundant -- except Exception alone is equivalent.
            except (Exception, IOError) as error:
                msg = ('ERROR - Could not read file: \n' + str(error))
                QMessageBox.warning(None, 'HEXRD', msg)
                return
        # Save the path if it should be remembered
        if self.remember:
            self.path = HexrdConfig().hdf5_path
        else:
            HexrdConfig().hdf5_path = self.path
    def load_aps_imageseries(self, detectors, directory_names):
        """Like load_images, but each detector maps to a directory of
        image files opened as one series."""
        HexrdConfig().imageseries_dict.clear()
        for name, d in zip(detectors, directory_names):
            try:
                ims = self.open_directory(d)
                HexrdConfig().imageseries_dict[name] = ims
            except (Exception, IOError) as error:
                msg = ('ERROR - Could not read file: \n' + str(error))
                QMessageBox.warning(None, 'HEXRD', msg)
                return
    def open_file(self, f):
        """Open a single file as an imageseries, choosing the loader from
        the file extension; anything unrecognized is treated as a plain
        image file via a temporary 'image-files' YAML descriptor."""
        ext = os.path.splitext(f)[1]
        if self.is_hdf5(ext):
            ims = imageseries.open(f, 'hdf5',
                                   path=HexrdConfig().hdf5_path[0],
                                   dataname=HexrdConfig().hdf5_path[1])
        elif ext == '.npz':
            ims = imageseries.open(f, 'frame-cache')
        elif ext == '.yml':
            # Peek at the YAML's first top-level key to pick the format.
            # NOTE(review): open(f) is never closed here, and yaml.load
            # without a Loader is unsafe/deprecated in newer PyYAML --
            # consider yaml.safe_load with a with-block.
            data = yaml.load(open(f))
            form = next(iter(data))
            ims = imageseries.open(f, form)
        else:
            # elif ext in self.IMAGE_FILE_EXTS:
            # Build a minimal 'image-files' description for this one file.
            input_dict = {
                'image-files': {}
            }
            input_dict['image-files']['directory'] = os.path.dirname(f)
            input_dict['image-files']['files'] = os.path.basename(f)
            input_dict['options'] = {}
            input_dict['meta'] = {}
            # delete=False so imageseries can reopen it by name on Windows.
            temp = tempfile.NamedTemporaryFile(delete=False)
            try:
                data = yaml.dump(input_dict).encode('utf-8')
                temp.write(data)
                temp.close()
                ims = imageseries.open(temp.name, 'image-files')
            finally:
                # Ensure the file gets removed from the filesystem
                os.remove(temp.name)
        # else:
        #     ims = imageseries.open(f, 'array')
        return ims
    def open_directory(self, d, files=None):
        """Open a directory (or an explicit file list) as one imageseries
        via a temporary space-separated 'image-files' YAML descriptor."""
        if files is None:
            files = os.listdir(d)
        input_dict = {
            'image-files': {}
        }
        input_dict['image-files']['directory'] = d
        # Join the base names with single spaces (no trailing separator).
        file_str = ''
        for i, f in enumerate(files):
            file_str += os.path.basename(f)
            if i != len(files) - 1:
                file_str += ' '
        input_dict['image-files']['files'] = file_str
        input_dict['options'] = {}
        input_dict['meta'] = {}
        temp = tempfile.NamedTemporaryFile(delete=False)
        try:
            data = yaml.dump(input_dict).encode('utf-8')
            temp.write(data)
            temp.close()
            ims = imageseries.open(temp.name, 'image-files')
        finally:
            # Ensure the file gets removed from the filesystem
            os.remove(temp.name)
        return ims
    def is_hdf5(self, extension):
        """Return True if *extension* (with leading dot) denotes HDF5."""
        hdf5_extensions = ['.h5', '.hdf5', '.he5']
        if extension in hdf5_extensions:
            return True
        return False
    def path_exists(self, f):
        """True if *f* opens as HDF5 at the currently configured path."""
        try:
            imageseries.open(f, 'hdf5', path=HexrdConfig().hdf5_path[0],
                             dataname=HexrdConfig().hdf5_path[1])
            return True
        # NOTE(review): bare except also swallows KeyboardInterrupt /
        # SystemExit; except Exception would be safer.
        except:
            return False
    def path_prompt(self, f):
        """Ask the user for the HDF5 group/dataset path via a dialog.
        Stores the result on success; returns False if the dialog is
        cancelled (and implicitly None on success)."""
        path_dialog = LoadHDF5Dialog(f)
        if path_dialog.ui.exec_():
            group, data, remember = path_dialog.results()
            HexrdConfig().hdf5_path = [group, data]
            self.remember = remember
        else:
            return False
| true |
1c3b85d3a6c083520df8c95a3f8e20ca68788354 | Python | kingwersen/CS-178-Project | /Classifiers/AClassifier.py | UTF-8 | 1,749 | 3.546875 | 4 | [] | no_license | import numpy as np
class AClassifier:
    """
    Abstract Classifier Type. Supports Training and Predicting.
    Subclasses must implement predict_soft(); predict() and error() are
    derived from it.
    """
    def __init__(self):
        # Regularization / smoothing weight used by subclasses.
        self.alpha = 1
        # Known class labels; filled in by train().
        self.classes = np.zeros(0)
    def train(self, x: np.ndarray, y: np.ndarray, classes: np.ndarray = None) -> None:
        """
        Train the classifier based on a set of features and their respective classes.
        :param x: [MxN] Features for each Data for the classifier to train on.
        :param y: [Mx1] Actual Classes for the given Features/Data.
        :param classes: Optional explicit class labels; defaults to the
            unique values of y.
        :return: None
        """
        if classes is not None:
            self.classes = classes
        else:
            self.classes = np.unique(y)
    def predict(self, x: np.ndarray) -> np.ndarray:
        """
        Predicts the most likely Class for each Data.
        :param x: [MxN] Features for each Data.
        :return: [Mx1] Highest probability Class for each Data.
        """
        # argmax over the soft probabilities selects each row's best class.
        return self.classes[np.argmax(self.predict_soft(x), axis=1)]
    def predict_soft(self, x: np.ndarray) -> np.ndarray:
        """
        Returns a matrix of probabilities of each Class for each Data.
        Abstract: concrete classifiers must override this.
        :param x: [MxN] Features for each Data.
        :return: [MxK] Probabilities of each Class for each Data.
        """
        raise NotImplementedError()
    def error(self, x: np.ndarray, y: np.ndarray) -> float:
        """
        Returns the "Magnitude" of incorrect predictions.
        :param x: [MxN] Features for each Data.
        :param y: [Mx1] Actual Classes for each Data.
        :return: The fraction of predictions that do not match y.
        """
        yh = self.predict(x)
        return np.mean(y != yh)
def auc(self, x: np.array, y: np.array, alpha: float=1) -> float:
# TODO
pass | true |
82e117ae8eb1fb452ddd2091c9b42289d2b9f49f | Python | goodsoulkor/python3_fastcampus | /section04-4.py | UTF-8 | 1,016 | 4.5 | 4 | [] | no_license | # section04-4
# 딕셔너리, 집합 자료형
# 딕셔너리(Dict) : 순서 X, 중복 X, 수정 O, 삭제 O
# Key, Value
# 선언
a = {'name': 'Kim', 'Phone': '010-1111-2222', 'birth': 800612}
b = {0: 'Hello Python', 1: 'Hello Coding'}
c = {'arr': [1, 2, 3, 4, 5]}
print(type(a))
# 출력
print(a['name'])
print(a.get('name1'))
print(c['arr'][1:2])
# 딕셔너리 추가
a['address'] = 'Seoul'
print(a)
a['rank'] = [1, 2, 3]
a['rank2'] = (1, 2, 3)
print(a)
# keys, values, items
print(a.keys())
print(list(a.keys()))
temp = list(a.keys())
print(temp[1:3])
print(a.values())
print(a.items())
print(1 in b)
# 집합(set)
# 순서 x, 중복 x
a = set()
b = set([1, 2, 3, 4])
c = set([1, 4, 5, 6, 6])
print(c)
print()
print()
s1 = set([1, 2, 3, 4, 5, 6])
s2 = set([4, 5, 6, 7, 8, 9])
print(s1.intersection(s2))
print(s1 & s2)
print(s1 | s2)
print(s1.union(s2))
print(s1 - s2)
print(s1.difference(s2))
# 추가 & 제거
s3 = set([7, 8, 10, 15])
s3.add(18)
print(s3)
s3.remove(15)
print(s3)
print(type(s3))
| true |
7d9d66b2c46331d30b45216e1e1c31b45f806cf9 | Python | mrdrozdov/pubmed-demo | /examples/tfidf.py | UTF-8 | 3,736 | 3.0625 | 3 | [] | no_license | """
The scikit-learn tfidf tool removes stop words by default. The list of stop words is here:
https://github.com/scikit-learn/scikit-learn/blob/b194674c42d54b26137a456c510c5fdba1ba23e0/sklearn/feature_extraction/_stop_words.py
"""
import os
import collections
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import pubmed_parser as pp
def walk(path='./sample'):
    """Yield the path of every file under *path* whose name ends in 'xml'
    (note: matches the bare suffix, not only '.xml')."""
    for parent, _, names in os.walk(path):
        yield from (os.path.join(parent, n) for n in names if n.endswith('xml'))
if __name__ == '__main__':
corpus = []
# Read text.
for path in walk():
doc = pp.parse_pubmed_xml(path)
text = doc['abstract']
corpus.append(text)
################################################
# Example with n-grams for n in [1, 2, 3].
################################################
print('\n\n\nExample with n-grams for n in [1, 2, 3].')
vectorizer = TfidfVectorizer(ngram_range=(1, 3))
X = vectorizer.fit_transform(corpus)
ngrams = vectorizer.get_feature_names()
print('# of n-grams:')
print(collections.Counter([len(x.split()) for x in ngrams]))
# Counter({3: 618, 2: 550, 1: 295})
################################################
# Example with custom tokenizer.
################################################
print('\n\n\nExample with custom tokenizer.')
import spacy
nlp = spacy.load('en', disable=['ner', 'parser', 'tagger'])
    def spacy_tokenizer(doc):
        """Tokenize *doc* with spaCy and return a list of cleaned lemmas:
        alphabetic, non-stopword, non-punctuation tokens only.

        Warning: The spacy tokenizer might convert words like "don't" into
        two words: "do", "n't".
        """
        doc = nlp(doc)
        # TODO: All filter should be done in one pass for speed.
        lst = [x for x in doc if x.is_alpha and not x.is_stop and not x.is_punct]
        # import ipdb; ipdb.set_trace()
        # Lemmatization.
        lst = [x.lemma_ for x in lst]
        # Remove anomolous words (includes space or is empty).
        lst = [x for x in lst if len(x) > 0 and len(x.split()) == 1]
        return lst
vectorizer = TfidfVectorizer(ngram_range=(1, 3), tokenizer=spacy_tokenizer)
X = vectorizer.fit_transform(corpus)
ngrams = vectorizer.get_feature_names()
print('# of n-grams:')
print(collections.Counter([len(x.split()) for x in ngrams]))
# Counter({3: 370, 2: 334, 1: 214})
num_docs = len(corpus)
num_terms = len(ngrams)
assert X.shape == (num_docs, num_terms)
print('num-docs = {}, num-terms = {}'.format(num_docs, num_terms))
# Find terms with highest avergage tfidf.
weighted_X = np.asarray(X.mean(axis=0)).reshape(-1)
assert weighted_X.shape == (num_terms,)
index = np.argsort(weighted_X)[::-1] # Sort descending.
terms_to_show = 20
print('TOP TERMS')
for i in range(terms_to_show):
term_idx = index[i]
term = ngrams[term_idx]
avg_tfidf = weighted_X[term_idx]
print('{:>10}\t{:>40}\t{:>10}'.format(i, term, avg_tfidf))
print('')
print('BOTTOM TERMS')
for i in range(terms_to_show):
i = -(i+1)
term_idx = index[i]
term = ngrams[term_idx]
avg_tfidf = weighted_X[term_idx]
print('{:>10}\t{:>40}\t{:>10}'.format(i, term, avg_tfidf))
print('')
np.random.seed(121)
print('RANDOM TERMS')
for i in sorted(np.random.choice(np.arange(num_terms), size=terms_to_show, replace=False)):
term_idx = index[i]
term = ngrams[term_idx]
avg_tfidf = weighted_X[term_idx]
print('{:>10}\t{:>40}\t{:>10}'.format(i, term, avg_tfidf))
print('')
print('min-avg-tfidf = {}, max-avg-tfidf = {}'.format(weighted_X.min(), weighted_X.max()))
| true |
e756034e9b18d2fed3c1ef3da52a222c0d497f72 | Python | Sreelakshmi393/learn.py | /weight_converter.py | UTF-8 | 286 | 4.375 | 4 | [] | no_license | weight = int(input("Enter your weight : "))
unit = input("Unit in which you entered the weight [(L)bs or (K)g ]: ")
if unit.upper() == "L":
converted = weight*0.45
print(f"You are {converted} kilograms")
else:
converted = weight/0.45
print(f"You are {converted} pounds") | true |
3116808fe6a4b2a445f6a9272ab7c7de20f5a34a | Python | grimario-andre/Python | /exercicios/desafio3.py | UTF-8 | 132 | 3.828125 | 4 | [
"MIT"
] | permissive | num1 = int(input('Primeiro número'))
# Read the second integer and print the sum of the two inputs.
num2 = int(input('segundo número'))
print('A soma dos números é, {}.'.format(num1+num2))
| true |
5c0e91888f0d3ec8266f1866a2a299b19a37a739 | Python | qvpiotr/ASD | /Dynamic and greedy/6_1_oil_station.py | UTF-8 | 1,753 | 3.71875 | 4 | [] | no_license | # Zadanie 1. (problem stacji benzynowych) Pewien podróznik chce przebyc trase z punktu A do punktu
# B. Niestety jego samochód spala dokładnie jeden litr paliwa na jeden kilometr trasy (mozna powiedziec, ze
# jedzie czołgiem... znaczenie punktów A i B w ramach obecnej sytuacji geopolitycznej wybierzcie sobie sami).
# W baku miesci sie dokładnie D litrów paliwa. Trasa z A do B to prosta, na której znajduja sie stacje
# benzynowe. Mamy dwa rózne zadania (rozwiazywane osobno):
# (1) wyznaczyc trase, na której tankujemy minimalna liczbe razy.
# (2) wyznaczyc trase, której koszt jest minimalny (wówczas znamy jeszcze dla kazdej stacji cene za litr
# paliwa, nie musimy zawsze tankowac do pełna).
# (3) Bonus: j.w., ale jesli na stacji tankujemy, to musimy zatankowac do pełna.
def station1(A,D):
result = 0
# A[0] = A = 0 A[len(A)-1] = B = x kilometrów
n = len(A)-1
pos = A[1]
i = 1
L = D - pos
while pos != A[n] and i < n+1:
if L >= A[i+1] - pos:
L = L - A[i+1] + pos
pos = A[i+1]
i += 1
else:
result += 1
L = D
continue
return result
A = [0,30,40,45,80,100,120,150,185]
print(station1(A, 40))
def station2(A,D):
cost = 0
n = len(A)
pos = A[0][0]
L = D
# trasa krótsza niż zasięg
if A[n-1][0] <= D: return 0
# szukam pierwszej najtanszej stacji w zasiegu D
i = 1
spos = 0
sprice = float ('inf')
while A[i][0] <= D and i < n-1:
if A[i][1]<sprice:
sprice = A[i][1]
spos = i
i += 1
L -= A[spos][0]
j = spos + 1
while j <= n-1 and A[j][0] <= D:
B = [[0,0],[10,3],[40,4],[54,3]]
print(station2(B,50))
| true |
fe319c7d5992dc87c22f27d67648de0f8bed7b6c | Python | sudhamshrama/Adventure-game | /Desktop/adv_game.py | UTF-8 | 2,579 | 4.375 | 4 | [] | no_license | import time
import random
def print_pause(message, wait_time):
print(message)
time.sleep(wait_time)
def start():
print_pause("Help John to reach his home, which is 3 streets away.", 1)
print_pause("Its late night,John should reach his home asap!", 1)
print_pause("John is walking on the road alone.", 1)
print_pause("walking alone, John has two options either left or right", 1)
home()
def home():
print_pause("1.Left \t 2.Right .", 2)
while True:
option = input("Enter a number.")
if option == '1':
left()
elif option == '2':
right()
else:
print("Please enter a valid input.")
home()
def left():
print_pause("After a while. John saw stray dogs staring at him.", 1)
print_pause("Little john is scared of them.", 1)
print_pause("He decided to either walk along the road or runaway.", 1)
print_pause("1.Walk along the road \t 2.Runaway .", 1)
while True:
decision = input("Enter a number.")
if decision == '1':
walkalong()
elif decision == '2':
runaway()
else:
print("Please enter a valid input.")
def walkalong():
print_pause("As John moved forward. Dogs started to bark at him. ", 1)
print_pause("Little John is very scared and started to run.", 1)
print_pause("Dogs chased him for few streets and finally bite John.", 1)
print_pause("John fell unconsious.", 1)
print_pause("GAME OVER! Retry.", 1)
restart()
def runaway():
print_pause("John ran few streets away from dogs.", 1)
print_pause("he was deeply tensed about his location now.", 1)
print_pause("He finds his neighbours searching for him.", 1)
print_pause("They brought John home safe.", 1)
print_pause("Hurrayy! you won the game. Better decision.", 1)
restart()
def restart():
while True:
a = input("Do you want to play again (y/n) ? ")
if a == 'y':
start()
elif a == 'n':
print("Bye! Thank you for playing")
else:
print("Bye! Thank you for playing")
y = ['Uncle', 'Nephew', 'Neighbour', 'Father']
z = random.choice(y)
def right():
print_pause("It was dark here. There were no street lights.", 2)
print_pause("with lots of fear John walked along the road.", 2)
print_pause("He turned left and walked for a while.", 2)
print_pause(f"He saw his {z} .", 2)
print_pause(f"His {z} safely took him home.", 2)
print_pause("YOU WON!", 1)
restart()
start()
| true |
17338ad80515a4a9d47b8b8dee85223ac624b48a | Python | theoneandnoely/FYP_15144798 | /FYP/PlayerAgent.py | UTF-8 | 13,278 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 13:40:31 2020
@author: Noel
"""
from mesa import Agent
import numpy as np
class PlayerAgent(Agent):
def __init__(self, unique_id, model, goalkeeper = False, possession = False):
super().__init__(unique_id, model)
if (unique_id % 2 == 0):
self.teamID = 1
else:
self.teamID = 2
self.maxDisp = 0
self.stepsX = []
self.stepsY = []
self.dispPerStep = []
self.avgDisp = 0
self.goalkeeper = goalkeeper
self.possession = possession
self.state = ""
self.shotThreshold = 0.1 #The barrier to taking a shot. Potential to analyse effect of changing threshold
self.passThreshold = 0.01
self.passTarget = -1
self.tackleTarget = -1
self.stepChoice = ""
def checkPossession(self):
if self.model.newPossession == self.unique_id:
self.possession = True
self.model.newPossession = -1
if (self.possession == True and self.model.newPossession != -1):
self.possession = False
def checkState(self):
if self.possession == True:
if self.goalkeeper == True:
self.state = "GKP"
else:
self.state = "BP"
else:
if self.goalkeeper == False:
changedState = False
for content, x, y in self.model.grid.coord_iter():
if len(content)!=0:
for i in content:
if (i.possession == True and i.teamID == self.teamID):
self.state = "PO"
changedState = True
if changedState == False:
self.state = "DF"
else:
self.state = "GK"
def choice(self):
choice = ""
targetIDs = {}
if self.possession == True:
xG = self.shotProb()
if xG > self.shotThreshold:
choice = "Shoot"
else:
for content, x, y in self.model.grid.coord_iter():
if len(content) == 0:
pass
else:
for i in content:
if i.teamID == self.teamID:
xGTarget = i.shotProb()
if xGTarget > xG:
targetIDs[i.unique_id] = xGTarget
baseVP = 0
target = -1
if len(targetIDs) != 0:
for key in targetIDs.keys():
target = int(key)
xP = self.passProb(target)
vP = xP*targetIDs[target]
if vP > baseVP:
baseVP = vP
if baseVP > self.passThreshold:
choice = "Pass"
self.passTarget = target
else:
choice = "Move"
else:
neighbours = self.model.grid.get_neighborhood(self.pos, moore = True, include_center=True)
for i in range(len(neighbours)):
content = self.model.grid.get_cell_list_contents(neighbours[i])
if len(content) != 0:
for i in content:
if (i.teamID != self.teamID and i.possession == True):
self.tackleTarget = i.unique_id
choice = "Tackle"
else:
choice = "Move"
return choice
def move(self):
possibleSteps = self.model.grid.get_neighborhood(
self.pos,
moore = False,
include_center = False
)
movePotentials = []
for x,y in possibleSteps:
if self.teamID == 1:
if self.state == "GK":
movePotentials.append(self.model.movePotentialGK1[x][y])
elif self.state == "GKP":
movePotentials.append(self.model.movePotentialGKP1[x][y])
elif self.state == "DF":
movePotentials.append(self.model.movePotentialDF1[x][y])
elif self.state == "PO":
movePotentials.append(self.model.movePotentialPO1[x][y])
elif self.state == "BP":
movePotentials.append(self.model.movePotentialBP1[x][y])
else:
print("Error in move: Player has no state")
else:
if self.state == "GK":
movePotentials.append(self.model.movePotentialGK2[x][y])
elif self.state == "GKP":
movePotentials.append(self.model.movePotentialGKP2[x][y])
elif self.state == "DF":
movePotentials.append(self.model.movePotentialDF2[x][y])
elif self.state == "PO":
movePotentials.append(self.model.movePotentialPO2[x][y])
elif self.state == "BP":
movePotentials.append(self.model.movePotentialBP2[x][y])
else:
print("Error in move: Player has no state")
minPotentials = []
minPotential = movePotentials[0]
for i in range(len(movePotentials)):
if movePotentials[i] < minPotential:
minPotenital = movePotentials[i]
minPotentials = [i]
elif movePotentials[i] == minPotential:
minPotentials.append(i)
newPosition = possibleSteps[self.model.random.choice(minPotentials)]
(x0,y0) = self.pos
(x,y)=newPosition
xDiff = x - x0
self.stepsX.append(xDiff)
yDiff = y - y0
self.stepsY.append(yDiff)
if newPosition != self.pos:
self.model.grid.move_agent(self, newPosition)
else:
pass
def shoot(self):
xG = self.shotProb()
g = self.model.random.random()
if xG > g:
self.possession = False
if self.teamID == 1:
self.model.justConceded = 2
else:
self.model.justConceded = 1
else:
self.possession = False
for agent, x, y in self.model.grid.coord_iter():
if len(agent) == 0:
pass
else:
for i in agent:
if (i.goalkeeper == True and i.teamID != self.teamID):
self.model.newPossession = i.unique_id
else:
pass
def displacement(self):
disp = 0
sumX = sum(self.stepsX)
sumY = sum(self.stepsY)
disp = ((sumX)**2 + (sumY)**2)**0.5
self.dispPerStep.append(disp)
def averageDisp(self):
if len(self.dispPerStep) > 0:
return sum(self.dispPerStep)/len(self.dispPerStep)
else:
return 0
def maxDisplacement(self):
maxDisp = self.maxDisp
for i in range(len(self.dispPerStep)):
if self.dispPerStep[i] > maxDisp:
maxDisp = self.dispPerStep[i]
return maxDisp
def shotProb(self):
goalStart = (self.model.grid.width/2)-5
goalEnd = (self.model.grid.width/2)+3
(x,y) = self.pos
if self.teamID == 1:
thetaG2 = np.arctan((goalEnd -x)/(self.model.grid.height-(y)))
thetaG1 = np.arctan((goalStart -x)/(self.model.grid.height-(y)))
else:
thetaG2 = np.arctan((x-goalStart)/(y+1))
thetaG1 = np.arctan((x-goalEnd)/(y+1))
thetaG = thetaG2 - thetaG1
thetaOpen = thetaG
for cellContent, i, j in self.model.grid.coord_iter():
if len(cellContent) == 0:
pass
else:
if self.teamID == 1:
if j > y:
thetaI = np.arctan((i-x)/(j-y))
if (thetaI > thetaG1 and thetaI < thetaG2):
thetaI2 = np.arctan(((i+1)-x)/((j+1)-y))
thetaI1 = np.arctan(((i-1)-x)/((j+1)-y))
thetaIt = thetaI2-thetaI1
else:
thetaIt = 0
else:
thetaIt = 0
else:
if j < y:
thetaI = np.arctan((x-i)/(y-j))
if (thetaI > thetaG1 and thetaI < thetaG2):
thetaI2 = np.arctan((x-(i-1))/(y-(j-1)))
thetaI1 = np.arctan((x-(i+1))/(y-(j-1)))
thetaIt = thetaI2-thetaI1
else:
thetaIt = 0
else:
thetaIt = 0
thetaOpen = thetaOpen - thetaIt
xG = np.sin(thetaOpen/2)-(np.cos(thetaOpen/2)/25)
return xG
def passProb(self, targetID):
(x,y) = self.pos
team = self.teamID
xTarget = 0
yTarget = 0
for agent, i,j in self.model.grid.coord_iter():
if len(agent) == 0:
pass
else:
for k in agent:
if k.unique_id == targetID:
xTarget = i
yTarget = j
r = ((x-i)**2+(y-j)**2)**(0.5)
rNeighbours = []
for agent, i, j in self.model.grid.coord_iter():
if len(agent) == 0:
pass
else:
for k in agent:
if k.teamID != team:
d = ((xTarget-i)**2+(yTarget-j)**2)**(0.5)
if len(rNeighbours) < 3:
rNeighbours.append(d)
else:
for l in range(len(rNeighbours)):
if d < rNeighbours[l]:
rNeighbours[l] = d
avgR = sum(rNeighbours)/len(rNeighbours)
prob = (avgR/10)*(1-(r/250))
return prob
def passBall(self, target):
xP = self.passProb(target)
p = self.model.random.random()
if xP > p:
self.possession = False
self.model.newPossession = target
else:
self.possession = False
dMin = 10000000
for content, x, y in self.model.grid.coord_iter():
if len(content) != 0:
for k in content:
if k.unique_id == target:
i = x
j = y
for content, x, y in self.model.grid.coord_iter():
if len(content) != 0:
for k in content:
if k.teamID != self.teamID:
d = ((x-i)**2+(y-j)**2)**(0.5)
if d < dMin:
dMin = d
self.model.newPossession = k.unique_id
def tackle(self, target):
'''
getNeighborhood
if ballAgent is in nextCell:
take possession of ballAgent
'''
v = self.model.random.random()
if v > 0.5:
self.model.newPossession = self.unique_id
for content, x, y in self.model.grid.coord_iter():
if len(content) != 0:
for k in content:
if k.unique_id == target:
k.possession = False
def bugTest(self):
'''
idS = str(self.unique_id)
print("Unique ID: " + idS)
team = str(self.teamID)
print("Team: " + team)
(x,y) = self.pos
xStr = str(x)
yStr = str(y)
print("X: " + xStr)
print("Y: " + yStr)
if self.goalkeeper is True:
print("(GK)")
'''
if self.possession == True:
idS = str(self.unique_id)
print("Unique ID: " + idS)
xG = str(self.shotProb())
print("xG: " + xG)
def step(self):
self.checkPossession()
self.checkState()
self.stepChoice = self.choice()
self.displacement()
self.avgDisp = self.averageDisp()
self.maxDisp = self.maxDisplacement()
self.bugTest()
def advance(self):
if self.stepChoice == "Shoot":
self.shoot()
elif self.stepChoice == "Pass":
self.passBall(self.passTarget)
elif self.stepChoice == "Tackle":
self.tackle(self.tackleTarget)
else:
self.move() | true |
ab27b6ad8591f4fada51089c11783205c2c60579 | Python | abjose/surfsim3 | /old_tests.py | UTF-8 | 16,711 | 2.875 | 3 | [] | no_license |
from rule import Constraint as C, ExecStep as E
import random
#from node import Node
from context import Context
import matplotlib.pyplot as plt
import numpy as np
""" NOTES
TODO: put useful Es and Cs into a file somewhere
maybe make them into functions so you can modify their insides :O
NOTE: Problem that with *args can only do ALL conjunctions or ALL disjunctions?
NOTE: Note that for most things that need to be accessed from parents, easy
to just do self.(whatever)!! so $parent.rule = $rule if don't overwrite
TODO: Come up with 'rules' on how to use things. For example, seeming like
you must re-initialize everything before expecting it to work correctly,
and you (maybe) can only do reference by name before initializing, and
(maybe) must force things to only be 'dependent' on things higher in
their own hierarchy (so don't initialize based on some random other
thing's position or something), and should initialize variables before
referencing them...and don't connect things before initializing...
TODO: If going to have many things that take a (list of) input vectors but need
to operate on only a single output vector...better way of doing?
TODO: Would be nice to add something that catches exceptions when ExecSteps
or Constraints don't work and tells you what the string is.
TODO: Have warnings when variables are made without being prepended by $ or
other?
TODO: Why is nothing shown for initialization during copies?
TODO: Appending to numpy arrays is bad, do some other way
"""
# create context
s = Context()
# add stimulus sizes to root node...would be nicer if they went in stimulus node
s.add_rule('init',
'$kernel_length = 10',
'$output_length = 50',
'$bcm_radius = 4',
'$stim_size = 20',
'$time_delay = 5')
# NOTE: these are one longer than you think - fix?
# add a container for stimulus and 'focus' on it
s.add_node('$name = "stimulus"')
s.set_focus('$name == "stimulus"')
# add a distribution rule for stimulus points
s.add_rule('init',
'$child_grid = Grid(xl=$stim_size, yl=$stim_size, dx=2, dy=2)',
'print $child_grid.positions')
# also maintain a matrix of stimulus values for stimulus points to access
s.add_rule('init',
#'$stim = SinusoidStim($stim_size, $stim_size)', # why two?
'$stim = JigglySinusoidStim($stim_size, 10)',
#'$stim = InvertingSinusoidStim($stim_size, 5)',
#'$stim = SquareWaveStim($stim_size, 5)',
'$stim.step()',
'$stim_data = $stim.output')
s.add_rule('update',
'$stim.step()',
'$stim_data = $stim.output')
# add a point of stimulus and change focus
s.add_node('$name = "stim_point"')
s.set_focus('$name == "stim_point"')
# make stim_point read from its associated position in parent's stimulus matrix
s.add_rule('init',
'$x, $y = $child_grid.get_next()',
'$init_data($output_length)')
s.add_rule('interact',
'$temp_data = $stim_data[$x][$y]')
s.add_rule('update',
#'print "TEMP_DATA: ", $temp_data',
'$append_data($temp_data)',
#'print $data',
'$clean_data($output_length)')
# make some stim_point copies...should technically make lots more than 10...
#s.set_focus('parent')
# TODO: want to change copy_node so that it takes constraints?
s.copy_node(N=99)
# Add another node to root to act as the Ganglion Cell Module
s.set_focus('parent')
s.set_focus('parent')
s.add_node('$name = "GCM"')
s.set_focus('$name == "GCM"')
# Add a grid-positioning rule for BCMs (grid same size as stimulus)
s.add_rule('init',
'$child_grid = Grid(x0=5, y0=5, dx=5, dy=5, xl=$stim_size, yl=$stim_size)')
# Add a node to act as a Bipolar Cell Module
s.add_node('$name = "BCM"')
s.set_focus('$name == "BCM"')
# Grab position from parent
s.add_rule('init',
'$x, $y = $child_grid.get_next()')
# Add a node to act as a biphasic filter
s.add_node('$name = "biphasic"')
s.set_focus('$name == "biphasic"')
# need to change this so positioned on everything...
# Position randomly in a square centered on parent
s.add_rule('init',
"$x=rand_centered($parent().x, $bcm_radius)",
"$y=rand_centered($parent().y, $bcm_radius)",
'$init_data($output_length)')
# Add a biphasic irf with amplitude proportional to distance from parent
s.add_rule('init',
'$irf = biphasic($kernel_length, ' +
'1./flip_dist(($parent().x, $parent().y), ($x, $y), 3))')
# need to make this negative past a certain threshold...
# use irf to update output vector
s.add_rule('interact',
'$temp_data = $dot_input()')
s.add_rule('update',
#'print $temp_data',
'$append_data($temp_data)',
'$clean_data($output_length)')
# Get connections from nearest input node
# could put something in parent to help?
# for now just connect if close, limit to one connection
s.add_rule('incoming',
"other.name == 'stim_point'",
"dist((other.x, other.y), ($x, $y)) < 10",
"len($get_predecessors()) < 1") # ugly-ish
# want to make connection to BCM's sum node
s.add_rule('outgoing',
'other.name == "sum"',
"$parent() == other.parent()")
# make some more biphasics
s.copy_node(N=5)
# set up sum
s.set_focus('parent')
s.add_node('$name = "sum"')
s.set_focus('$name == "sum"')
s.add_rule('init', '$init_data($output_length)')
# On every step, sum inputs, push sum to end of output vector
s.add_rule('interact',
#'print $get_inputs()',
'$temp_data = sum($get_inputs())')
s.add_rule('update',
'$set_data($temp_data)',
'$clean_data($output_length)')
# want to make connections to thresh
s.add_rule('outgoing',
'other.name == "thresh"',
'$parent() == other.parent()') # want to verify shared parents?
# Don't have to worry about getting connections from biphasics - already handled
# set up thresh
s.set_focus('parent')
s.add_node('$name = "thresh"')
s.set_focus('$name == "thresh"')
s.add_rule('init', '$init_data($output_length)')
# threshold input vector
s.add_rule('interact',
# TODO: This is an ugly way of doing this
'$temp_data = threshold(verify_single($get_inputs())[0], 0.)')
s.add_rule('update',
#'print $temp_data',
'$set_data($temp_data)',
'$clean_data($output_length)')
# add rule to connect to GCM's sum node
s.add_rule('outgoing',
'other.name == "sum"',
'other.parent().name == "GCM"',
'other.parent() == $parent().parent()')
# go back to BCM to make exponential feedback between thresh and sum units
s.set_focus('parent')
s.add_node('$name = "feedback"')
s.set_focus('$name == "feedback"')
s.add_rule('init', '$init_data($output_length)')
# add exponential IRF
s.add_rule('init',
'$irf = exponential($kernel_length)')
# use irf to update output vector
s.add_rule('interact',
'$temp_data = $dot_input()')
s.add_rule('update',
'$append_data($temp_data)',
'$clean_data($output_length)')
# get input from thresh
s.add_rule('incoming',
'other.name == "thresh"',
'$parent() == other.parent()')
# send output to sum
s.add_rule('outgoing',
'other.name == "sum"',
"$parent() == other.parent()")
# make some more BCMs
s.set_focus('parent')
s.copy_node(N=8)
# finish out GCM
s.set_focus('parent')
# add sum to GCM
s.add_node('$name = "sum"')
s.set_focus('$name == "sum"')
s.add_rule('init', '$init_data($output_length)')
# On every step, sum inputs, push sum to end of output vector
s.add_rule('interact',
#'print $get_inputs()',
'$temp_data = sum($get_inputs())')
s.add_rule('update',
'$set_data($temp_data)',
'$clean_data($output_length)')
# want to make connections to thresh
s.add_rule('outgoing',
'other.name == "thresh"',
'$parent() == other.parent()')
# add thresh to GCM
s.set_focus('parent')
s.add_node('$name = "thresh"')
s.set_focus('$name == "thresh"')
s.add_rule('init', '$init_data($output_length)')
# threshold input vector
s.add_rule('interact',
# TODO: This is an ugly way of doing this
'$temp_data = threshold(verify_single($get_inputs())[0], 0.)')
s.add_rule('update',
#'print $temp_data',
'$set_data($temp_data)',
'$clean_data($output_length)')
# add feedback to GCM
s.set_focus('parent')
s.add_node('$name = "feedback"')
s.set_focus('$name == "feedback"')
s.add_rule('init', '$init_data($output_length)')
# add exponential IRF
s.add_rule('init',
'$irf = exponential($kernel_length)')
# use irf to update output vector
s.add_rule('interact',
'$temp_data = $dot_input()')
s.add_rule('update',
'$append_data($temp_data)',
'$clean_data($output_length)')
# get input from thresh
s.add_rule('incoming',
'other.name == "thresh"',
'$parent() == other.parent()')
# send output to sum
s.add_rule('outgoing',
'other.name == "sum"',
"$parent() == other.parent()")
# Re-initialize entire circuit
s.init_simulation()
# make connections between necessary populations
# connect stim_points to biphasics
s.connect(['$name == "stimulus"'],
['$name == "BCM"'])
# connect biphasics to sums
s.connect(['$name == "biphasic"'],
['$name == "sum"'])
# connect thresh to feedback ..better way of doing both this and next one?
s.connect(['$name == "thresh"'],
['$name == "feedback"'])
# connect feedback to sums
s.connect(['$name == "feedback"'],
['$name == "sum"'])
# connect sums to thresh
s.connect(['$name == "sum"'],
['$name == "thresh"'])
# connect BCM thresh to GCM sum
# TODO: this is maybe where relative names would be nice...
s.connect(['$name == "thresh"'],
['$name == "sum"'])
#s.focus.show_cg()
# prepare plotting stuff
s.set_focus('root')
s.set_focus('$name == "stimulus"')
stim = s.focus
s.set_focus('root')
# NOTE: (for convolution) only need extension on one side - because IRF makes
# point at only one side
# TODO: biphasic should 'value' recent time more
bcms = s.focus.filter_nodes(C(['$name == "BCM"']))
biphasics = [list(s.focus.filter_nodes(C(['$name == "biphasic"',
'id($parent()) == ' + str(id(bcm))])))
for bcm in bcms]
# select things for easier plotting
chosen_bcm = random.sample(bcms,1)[0]
chosen_biphasics = list(s.focus.filter_nodes(C(['$name == "biphasic"',
'id($parent()) == ' +
str(id(chosen_bcm))])))
bcm_sum = list(s.focus.filter_nodes(C(['$name == "sum"',
'id($parent()) == ' +
str(id(chosen_bcm))])))[0]
bcm_thresh = list(s.focus.filter_nodes(C(['$name == "thresh"',
'id($parent()) == ' +
str(id(chosen_bcm))])))[0]
gcm_sum = list(s.focus.filter_nodes(C(['$name == "sum"',
'$parent().name == "GCM"'])))[0]
gcm_thresh = list(s.focus.filter_nodes(C(['$name == "thresh"',
'$parent().name == "GCM"'])))[0]
colors = ['green', 'blue', 'yellow', 'red', 'magenta', 'orange', 'beige', 'LimeGreen', 'aqua']
bcm_xs = [b.x for b in bcms]
bcm_ys = [b.y for b in bcms]
bph_xs = [[b.x for b in bph] for bph in biphasics]
bph_ys = [[b.y for b in bph] for bph in biphasics]
chosen_xs = [b.x for b in chosen_biphasics]
chosen_ys = [b.y for b in chosen_biphasics]
# step the network a few times to 'prime' things
prime_steps = 150
for i in range(prime_steps):
print 'priming:', i+1, '/', prime_steps
s.step_simulation()
# initialize mins/maxes
stim_min = np.min(stim.stim_data)
stim_max = np.max(stim.stim_data)
bph_min = min([min(b.get_output()) for b in chosen_biphasics])
bph_max = max([max(b.get_output()) for b in chosen_biphasics])
bcm_sum_min = min(bcm_sum.get_output())
bcm_sum_max = max(bcm_sum.get_output())
bcm_thresh_min = min(bcm_thresh.get_output())
bcm_thresh_max = max(bcm_thresh.get_output())
gcm_sum_min = min(gcm_sum.get_output())
gcm_sum_max = max(gcm_sum.get_output())
gcm_thresh_min = min(gcm_thresh.get_output())
gcm_thresh_max = max(gcm_thresh.get_output())
print stim_min, stim_max
print bph_min, bph_max
# now step some times to get better mins/maxes
range_steps = 150
for i in range(range_steps):
print 'ranging:', i+1, '/', range_steps
s.step_simulation()
stim_min = min(stim_min, np.min(stim.stim_data))
stim_max = max(stim_max, np.max(stim.stim_data))
bph_min = min(bph_min, min([min(b.get_output()) for b in chosen_biphasics]))
bph_max = max(bph_max, max([max(b.get_output()) for b in chosen_biphasics]))
bcm_sum_min = min(bcm_sum_min, min(bcm_sum.get_output()))
bcm_sum_max = max(bcm_sum_max, max(bcm_sum.get_output()))
bcm_thresh_min = min(bcm_thresh_min, min(bcm_thresh.get_output()))
bcm_thresh_max = max(bcm_thresh_max, max(bcm_thresh.get_output()))
gcm_sum_min = min(gcm_sum_min, min(gcm_sum.get_output()))
gcm_sum_max = max(gcm_sum_max, max(gcm_sum.get_output()))
gcm_thresh_min = min(gcm_thresh_min, min(gcm_thresh.get_output()))
gcm_thresh_max = max(gcm_thresh_max, max(gcm_thresh.get_output()))
#print stim_min, stim_max
#print bph_min, bph_max
#print bcm_sum_min, bcm_sum_max
#print bcm_thresh_min, bcm_thresh_max
#print gcm_sum_min, gcm_sum_max
#print gcm_thresh_min, gcm_thresh_max
#raw_input()
"""
plt.ion()
#plt.axis('off')
for i in range(500):
#plt.ion()
print 'plotting:', prime_steps+range_steps+i
s.step_simulation()
plt.cla()
plt.subplot2grid((11,6), (0,1), colspan=4, rowspan=4)
plt.xlim([0,19])
plt.ylim([0,19])
plt.axis('off')
plt.title('Input and node locations')
plt.imshow(stim.stim_data, cmap='Greys', vmin=stim_min, vmax=stim_max)
for i in range(len(bcm_xs)):
plt.plot(bcm_xs[i], bcm_ys[i], marker='x', markersize=20,
color=colors[i], markeredgewidth=2)
for i,(x,y) in enumerate(zip(bph_xs,bph_ys)):
plt.plot(x, y, marker='o', linestyle='none', markersize=8,
color=colors[i])
# highlight chosen biphasics
plt.plot(chosen_bcm.x, chosen_bcm.y, marker='x', markersize=20,
color='pink', markeredgewidth=2)
plt.plot(chosen_xs, chosen_ys, marker='o', markersize=15,
color='pink', linestyle='none')
for i in range(len(chosen_biphasics)):
b = chosen_biphasics[i]
plt.subplot2grid((11,7), (4,i))
plt.ylim([-1,1])
plt.title('biphasic irf')
plt.plot([0]*len(b.irf))
plt.plot(b.irf)
plt.subplot2grid((11,7), (5,i))
plt.axis('off')
plt.title('biphasic input')
plt.imshow(np.resize(b.get_sources()[0].get_output(),
(10, len(b.get_output()))),
cmap='Greys', vmin=stim_min, vmax=stim_max)
plt.subplot2grid((11,7), (6,i))
plt.axis('off')
plt.title('biphasic output')
plt.imshow(np.resize(b.get_output(), (10, len(b.get_output()))),
cmap='Greys', vmin=bph_min, vmax=bph_max)
plt.subplot2grid((11,7), (7, 0))
plt.axis('off')
plt.title('bcm sum')
plt.imshow(np.resize(bcm_sum.get_output(),
(10, len(bcm_sum.get_output()))),
cmap='Greys', vmin=bcm_sum_min, vmax=bcm_sum_max)
plt.subplot2grid((11,7), (8, 0))
plt.axis('off')
plt.title('bcm thresh')
plt.imshow(np.resize(bcm_thresh.get_output(),
(10, len(bcm_thresh.get_output()))),
cmap='Greys', vmin=bcm_thresh_min, vmax=bcm_thresh_max)
plt.subplot2grid((11,7), (9, 0))
plt.axis('off')
plt.title('gcm sum')
plt.imshow(np.resize(gcm_sum.get_output(),
(10, len(gcm_sum.get_output()))),
cmap='Greys', vmin=gcm_sum_min, vmax=gcm_sum_max)
plt.subplot2grid((11,7), (10, 0))
plt.axis('off')
plt.title('gcm thresh')
plt.imshow(np.resize(gcm_thresh.get_output(),
(10, len(gcm_thresh.get_output()))),
cmap='Greys', vmin=gcm_thresh_min, vmax=gcm_thresh_max)
plt.draw()
#plt.ioff()
raw_input()
plt.ioff()
"""
| true |
b093df8b8410d21eaae63bb65a6e81b2afcb7e52 | Python | Williamdayu/PythonCodeLibrary | /sort/HeapSort.py | UTF-8 | 1,224 | 3.6875 | 4 | [] | no_license | def swap(nums, i, j):
nums[i], nums[j] = nums[j], nums[i]
def sift_up(nums, i, comp):
# assert nums[1:i] is a heap
while i != 1:
if comp(nums[i], nums[i/2]):
swap(nums, i, i / 2)
i /= 2
else:
break
def sift_down(nums, i, comp):
# assert heap[1:i] is a heap
j = 1
while j*2 < i:
t = j*2
if t+1 < i and comp(nums[t+1], nums[t]):
t += 1
if comp(nums[t], nums[j]):
swap(nums, j, t)
j = t
else:
break
def heap_sort(nums, reverse=False):
if nums is None or len(nums) < 2:
return nums
# build heap
heap = [" "]
heap.extend(nums)
for i in range(2, len(heap)):
if reverse is True:
sift_up(heap, i, lambda a, b: a < b) # build minimal heap
else:
sift_up(heap, i, lambda a, b: a > b) # build maximal heap
# sort
for i in range(len(heap)-1, 1, -1):
swap(heap, 1, i)
# rebuild heap
if reverse is True:
sift_down(heap, i, lambda a, b: a < b) # select smaller
else:
sift_down(heap, i, lambda a, b: a > b) # select bigger
return heap[1:]
| true |
4ad7a97226d183511726d45a590113096b6e991a | Python | annagriffin/LEGpOe | /limit_finder_edges.py | UTF-8 | 1,524 | 2.671875 | 3 | [] | no_license | import numpy as np
import cv2
from matplotlib import pyplot as plt
def nothing(x):
pass
def main():
cap = cv2.VideoCapture(1)
window_name = 'color range parameter'
cv2.namedWindow(window_name)
cb = cv2.imread('lamb.jpg')
cv2.createTrackbar('min', window_name, 0,500, nothing)
cv2.createTrackbar('max', window_name, 0,500, nothing)
while(True):
ret, frame = cap.read()
blurred_frame = cv2.GaussianBlur(frame.copy(), (7,7), 0)
hsv = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)
lower_blue = np.array([110, 50, 50])
upper_blue = np.array([130, 255, 255])
mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
mask_blue = cv2.erode(mask_blue, None, iterations=2)
mask_blue = cv2.dilate(mask_blue, None, iterations=2)
res_blue = cv2.bitwise_and(frame,frame, mask=mask_blue)
gray = cv2.cvtColor(res_blue, cv2.COLOR_HSV2BGR)
gray = cv2.cvtColor(res_blue, cv2.COLOR_BGR2GRAY)
min1 = cv2.getTrackbarPos('min', window_name)
max1 = cv2.getTrackbarPos('max', window_name)
edges = cv2.Canny(gray, min1, max1)
edges = cv2.dilate(edges, None, iterations=1)
edges = cv2.erode(edges, None, iterations=1)
cv2.imshow('image',frame)
cv2.imshow('frame',edges)
cv2.imshow(window_name,cb)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__" :
main()
| true |
32e27e70590c60639e5244a12f84bd8d90035fe6 | Python | dietriro/int_agents_project | /scripts/Test.py | UTF-8 | 1,147 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
from numpy import (array, dot, arccos, clip, pi)
from numpy.linalg import norm
from Transformations import quat_to_euler, tf_world_to_robot
from tf.transformations import euler_from_quaternion
from threading import Thread, Lock
from SimulationEnvironment import SimulationEnvironment
import rospy
from time import sleep
# v = array([1, 0])
# u = array([-1.0, -0.1])
# c = dot(u, v)/norm(u)/norm(v) # -> cosine of the angle
# angle = arccos(clip(c, -1, 1)) # if you really want the angle
#
# # print(c)
# print (angle)
# #
# # print(euler_from_quaternion([0.0, 0.0, 0.0, 1.0]))
# #
# # r = array([2, 1, -1.57])
# # p = array([3, 2])
# #
# # print(tf_world_to_robot(r, p))
#
#
# m = Lock()
# m.
sim = SimulationEnvironment()
r = rospy.Rate(1)
while(not rospy.is_shutdown()):
sim.step()
print(sim.get_reward())
state = sim.get_state()
if state is not None:
print(state.shape)
sleep(2)
# world = np.zeros((20, 20))
#
# pose = np.array([0.4, 0.4, 0.0])
#
# map = Map((20, 20), values=world)
#
# map.set_robot_position(pose)
# map.set_value_cart(0.4, 0.4, 0)
#
# misc.imshow(map.values)
#
#
| true |
1ee33629f97b77d69347854d83e7cb3268bfa0e7 | Python | ntomita/superres | /data.py | UTF-8 | 5,995 | 2.59375 | 3 | [] | no_license | import sys
from os.path import join, basename, exists
from os import makedirs, remove
import tarfile
import zipfile
from io import BytesIO
from six.moves.urllib.request import urlopen
from PIL import Image
from utils.utils import is_image, filename_wo_ext
def download_aplus(dest='dataset'):
    """ Download the A+ super-resolution dataset and extract its images.

    Fetches the AplusCodes_SR archive (Timofte et al.) and re-encodes the
    91 CVPR'08 training images plus the Set5/Set14 benchmark images as
    JPEGs.  (The previous docstring incorrectly said BSDS300.)
    Resulting folders are following:
    -[dest]-Aplus-images-train
           -Set5-test
           -Set14-test

    The whole download/extract step is skipped when the output directory
    already exists, so repeated calls are cheap no-ops.  Returns the path
    to the extracted image directory, i.e. [dest]/Aplus/images.
    """
    def in_set5(file_path):
        # Archive member belongs to the Set5 benchmark set.
        return file_path.find('Set5') != -1
    def in_set14(file_path):
        # Archive member belongs to the Set14 benchmark set.
        return file_path.find('Set14') != -1
    def in_train(file_path):
        """ The 91 training images (CVPR08-SR 'Training' folder).
        """
        return file_path.find('Training') != -1 and file_path.find('CVPR08-SR') != -1
    url = "http://www.vision.ee.ethz.ch/~timofter/software/AplusCodes_SR.zip"
    output_dir = join(dest, 'Aplus', 'images')
    if not exists(output_dir):
        makedirs(output_dir)
        tmp_file = join(dest, basename(url))
        if not exists(tmp_file):
            # Stream the archive to disk in fixed-size chunks so the whole
            # download is never held in memory at once.
            response = urlopen(url)
            buf_size = 16 * 1024
            with open(tmp_file, 'wb') as f:
                while True:
                    buf = response.read(buf_size)
                    if not buf:
                        break
                    f.write(buf)
        # Re-encode each image member of the archive as JPEG into the folder
        # matching the subset it belongs to.  (A leftover no-op `pass`
        # statement was removed from the top of this block.)
        with zipfile.ZipFile(tmp_file) as f:
            train_dir = join(output_dir, 'train')
            set5_dir = join(output_dir, 'Set5', 'test')
            set14_dir = join(output_dir, 'Set14', 'test')
            makedirs(train_dir)
            makedirs(set5_dir)
            makedirs(set14_dir)
            for item in f.infolist():
                if is_image(item.filename):
                    if in_train(item.filename):
                        image = Image.open(BytesIO(f.read(item)))
                        image.save(join(
                            train_dir,
                            filename_wo_ext(item.filename)+'.jpg'))
                    elif in_set5(item.filename):
                        image = Image.open(BytesIO(f.read(item)))
                        image.save(join(
                            set5_dir,
                            filename_wo_ext(item.filename)+'.jpg'))
                    elif in_set14(item.filename):
                        image = Image.open(BytesIO(f.read(item)))
                        image.save(join(
                            set14_dir,
                            filename_wo_ext(item.filename)+'.jpg'))
        # Delete the downloaded archive once everything has been extracted.
        remove(tmp_file)
    return output_dir
def download_bsds500(dest='dataset'):
    """Download BSDS500 and extract images under train/val/test folders.

    The whole download/extract step is skipped when the output directory
    already exists, so repeated calls are cheap no-ops.  Returns the path
    to the extracted image directory, i.e. [dest]/BSDS500/images.
    """
    def in_test(file_path):
        # Archive member is an image from the test split.
        return file_path.find('test') != -1 and file_path.find('images') != -1
    def in_val(file_path):
        # Archive member is an image from the validation split.
        return file_path.find('val') != -1 and file_path.find('images') != -1
    def in_train(file_path):
        # Archive member is an image from the training split.
        return file_path.find('train') != -1 and file_path.find('images') != -1
    url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
    output_dir = join(dest, 'BSDS500', 'images')
    if not exists(output_dir):
        makedirs(output_dir)
        tmp_file = join(dest, basename(url))
        if not exists(tmp_file):
            # Stream the archive to disk in fixed-size chunks so the whole
            # download is never held in memory at once.
            response = urlopen(url)
            buf_size = 16 * 1024
            with open(tmp_file, 'wb') as f:
                while True:
                    buf = response.read(buf_size)
                    if not buf:
                        break
                    f.write(buf)
        with tarfile.open(tmp_file) as f:
            train_dir = join(output_dir, 'train')
            val_dir = join(output_dir, 'val')
            test_dir = join(output_dir, 'test')
            makedirs(train_dir)
            makedirs(val_dir)
            # Fix: test_dir was never created explicitly; extraction only
            # worked because TarFile.extract creates missing parent
            # directories implicitly.  Create it here for consistency
            # with the train/val directories.
            makedirs(test_dir)
            for item in f.getmembers():
                if is_image(item.name):
                    # Flatten the archive layout: rename each member to its
                    # basename so images land directly in the split folder.
                    if in_train(item.name):
                        item.name = basename(item.name)
                        f.extract(item, train_dir)
                    elif in_val(item.name):
                        item.name = basename(item.name)
                        f.extract(item, val_dir)
                    elif in_test(item.name):
                        item.name = basename(item.name)
                        f.extract(item, test_dir)
        # Delete the downloaded archive once everything has been extracted.
        remove(tmp_file)
    return output_dir
def download_bsds300(dest='dataset'):
    """Download BSDS300 and extract its images under train/ and val/ folders.

    Archive members whose path contains 'train' land in
    [dest]/BSDS300/images/train; those containing 'test' are used as the
    validation split under .../val. Returns the image directory and does
    nothing when it already exists; the tarball is deleted after extraction.
    """
    url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz"
    output_dir = join(dest, 'BSDS300', 'images')
    if exists(output_dir):
        return output_dir

    makedirs(output_dir)
    archive_path = join(dest, basename(url))
    if not exists(archive_path):
        # Pull the tarball down in 16 KiB chunks to bound memory use.
        response = urlopen(url)
        with open(archive_path, 'wb') as out:
            for chunk in iter(lambda: response.read(16 * 1024), b''):
                out.write(chunk)

    train_dir = join(output_dir, 'train')
    val_dir = join(output_dir, 'val')
    makedirs(train_dir)
    makedirs(val_dir)
    with tarfile.open(archive_path) as tar:
        for member in tar.getmembers():
            if not is_image(member.name):
                continue
            # NOTE: two independent checks, matching the original behaviour
            # exactly (the second test runs against the possibly-flattened name).
            if member.name.find('train') != -1:
                member.name = basename(member.name)
                tar.extract(member, train_dir)
            if member.name.find('test') != -1:
                member.name = basename(member.name)
                tar.extract(member, val_dir)
    remove(archive_path)
    return output_dir
if __name__ == '__main__':
    # Manual smoke-test entry point: uncomment a call to fetch that dataset.
    #download_bsds500()
    #download_aplus()
    pass
| true |
775462f13674a4c7ddb388672e9ba3e9f4edd6e8 | Python | joeycarr/misc | /pfft | UTF-8 | 2,583 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import skimage
from scipy.fftpack import fft2, fftn, fftshift
from skimage.io import imread, imsave
from skimage import exposure
def parse_args():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description='''Runs a 2D Fast Fourier Transform (FFT) on the given input image. Depends on SciPy.''')
    parser.add_argument('infile',
                        metavar='infile',
                        type=argparse.FileType('r'),
                        help='The image file to ingest.')
    parser.add_argument('outfile',
                        metavar='outfile',
                        type=argparse.FileType('w'),
                        help='A writeable filename that can accept 16 bit output.')
    parser.add_argument('--func',
                        metavar='func',
                        type=str,
                        default='fft2',
                        choices=['fft2', 'fftn'],
                        help='Which FFT function to call; can currently choose fft2 or fftn.')
    return parser.parse_args()
def log_adjust(nparray, adjustment):
    '''A useful adjustment that kicks up the dark end without disturbing the white point http://www.imagemagick.org/Usage/transform/#evaluate_log.'''
    boosted = np.log1p(adjustment * nparray)
    return boosted / np.log1p(adjustment)
# Perform a per-channel fft2
def fft2RGB(RGB):
    """Return the per-channel magnitude of the 2D FFT of an H x W x C image.

    Fix: the output is always allocated as float64. The previous
    ``np.empty_like(RGB)`` inherited the input dtype, so an integer image
    would silently truncate the FFT magnitudes. The channel loop also
    generalizes the hard-coded three channels to any channel count.
    """
    out = np.empty(RGB.shape, dtype=np.float64)
    for c in range(RGB.shape[2]):
        out[:, :, c] = np.absolute(fft2(RGB[:, :, c]))
    return out
def pfft(RGB, func='fft2'):
    """Return a displayable, log-scaled FFT magnitude image of *RGB*.

    func selects 'fft2' (per-channel 2D FFT) or 'fftn' (n-dimensional FFT);
    any other value leaves ``fft`` unassigned and raises UnboundLocalError —
    the CLI restricts --func to these two choices.
    """
    if func=='fft2':
        fft = fft2RGB
    elif func=='fftn':
        fft = fftn
    RGB = skimage.img_as_float(RGB)
    # Get the magnitude (abs value) of the complex output. The
    # fftshift call centres the zero-frequency component along the
    # spatial axes (channels are left in place).
    out = np.absolute(fftshift(fft(RGB), axes=(0,1)))
    # Do a linear scaling to map out.max() to 1.
    disp = exposure.rescale_intensity(out, out_range=(0.0, 1.0))
    # Apply a log transformation as per the ImageMagick documentation.
    # The heuristic for the ideal adjustment is given here:
    # http://www.imagemagick.org/Usage/fourier/
    adjustment = np.exp(np.log(disp.mean())/np.log(0.5))
    disp = log_adjust(disp, adjustment)
    return disp
def main():
    """CLI entry point: read the image, transform it, write 16-bit output."""
    args = parse_args()
    result = pfft(imread(args.infile.name), args.func)
    #TODO: maybe make the bit-depth etc. into an option
    imsave(args.outfile.name, skimage.img_as_uint(result))

if __name__ == '__main__':
    main()
| true |
3c217567204b855679443e8739da55cea744142e | Python | bitsbuffer/Clustering | /process_santander_data.py | UTF-8 | 2,170 | 2.875 | 3 | [] | no_license | import argparse
import os
import pandas as pd
from sklearn.impute import SimpleImputer
from feature_engine.encoding import RareLabelEncoder, CountFrequencyEncoder
from feature_engine.imputation import CategoricalImputer
from sklearn.preprocessing import MinMaxScaler
from feature_engine.selection import (
DropFeatures,
DropConstantFeatures,
DropDuplicateFeatures,
DropCorrelatedFeatures
)
from feature_engine.outliers import OutlierTrimmer, Winsorizer
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
import seaborn as sns
import matplotlib.pyplot as plt
def process_data(X):
    """Clean and reduce the Santander training frame.

    Pops TARGET, winsorises outliers, undersamples the majority class, then
    drops the ID column, quasi-constant, duplicated and highly-correlated
    features. Returns the reduced frame with the label re-attached as
    'target'. NOTE: X is mutated in place (pop / replace) by the caller's
    reference before the resampled copy takes over.
    """
    # Separate the label from the features.
    y = X.pop("TARGET")
    # NOTE(review): -999999 is the var3 sentinel; replacing the *string*
    # "-999999" is a no-op if the column is numeric — confirm the dtype.
    X['var3'].replace("-999999", -1, inplace=True)
    # Cap extreme values at the 0.5% / 99.5% quantiles (outlier trimming).
    trimmer = Winsorizer(capping_method='quantiles', tail='both', fold=0.005)
    X = trimmer.fit_transform(X)
    # Undersample the majority class to a 0.7 minority/majority ratio.
    undersampler = RandomUnderSampler(sampling_strategy=0.7, random_state=1234)
    X, Y = undersampler.fit_resample(X, y)
    drop_features = DropFeatures(features_to_drop=['ID'])
    X = drop_features.fit_transform(X)
    # Drop features that are (nearly) constant: same value in >=99.8% of rows.
    quasi_constant = DropConstantFeatures(tol=0.998)
    X = quasi_constant.fit_transform(X)
    print(f"Quasi Features to drop {quasi_constant.features_to_drop_}")
    # Remove duplicated features.
    duplicates = DropDuplicateFeatures()
    X = duplicates.fit_transform(X)
    print(f"Duplicate feature sets {duplicates.duplicated_feature_sets_}")
    print(f"Dropping duplicate features {duplicates.features_to_drop_}")
    # Remove one of each pair of features with |Pearson r| > 0.9.
    drop_corr = DropCorrelatedFeatures(method="pearson", threshold=0.9, missing_values="ignore")
    X = drop_corr.fit_transform(X)
    print(f"Drop correlated feature sets {drop_corr.correlated_feature_sets_}")
    print(f"Dropping correlared features {drop_corr.features_to_drop_}")
    X['target'] = Y
    return X
if __name__ == '__main__':
    # Load the zipped training CSV, clean it, and write the reduced dataset.
    df = pd.read_csv("./dataset/santander/train.csv.zip", compression="zip")
    print(f"Train shape {df.shape}")
    df_filtered = process_data(df)
    print(f"Processed Train shape {df_filtered.shape}")
    df_filtered.to_csv("./dataset/santander/data_cleaned.csv.zip", index=False, compression="zip")
| true |
6386a60636c8cc1c27992eb6df28b795edd2083c | Python | sekei3/MatchingSubtitle | /MatchingSubTitle.py | UTF-8 | 1,593 | 2.921875 | 3 | [] | no_license | import os
from enum import Enum
import tkinter
from tkinter import filedialog
class VideoFileEnds(Enum):
    """File extensions recognised as video files (lower-case, with dot)."""
    AVI = '.avi'
    # Fix: Matroska files use '.mkv'; the previous '.mvk' typo meant every
    # .mkv file in the chosen folder went unrecognised.
    MKV = '.mkv'
    MP4 = '.mp4'
class SubtitleFileEnds(Enum):
    """File extensions recognised as subtitle files (lower-case, with dot)."""
    SMI = '.smi'
    SRT = '.srt'
def isVideoFile(filename):
    """Return True when *filename* ends with a known video extension
    (comparison is case-insensitive)."""
    lowered = filename.lower()
    return any(lowered.endswith(ext.value) for ext in VideoFileEnds)
def isSubTitleFile(filename):
    """Return True when *filename* ends with a known subtitle extension
    (comparison is case-insensitive)."""
    lowered = filename.lower()
    return any(lowered.endswith(sub.value) for sub in SubtitleFileEnds)
# Ask the user for a folder, then rename every subtitle file so its base
# name matches the video file it is paired with (pairing is by sorted order).
root = tkinter.Tk()
root.withdraw()  # hide the empty root window; only the dialog is shown
path_dir = filedialog.askdirectory(parent=root,initialdir="/",title='Please select a directory')
if not path_dir:
    print('failed getting path')
    exit()
fileList = os.listdir( path_dir )
videoFileList = [ file for file in fileList if isVideoFile(file) ]
subFileList = [ file for file in fileList if isSubTitleFile(file) ]
if not videoFileList or not subFileList:
    print('empty file list')
    exit()
# Refuse to guess when the counts differ; the zip below assumes a 1:1 match.
if len(videoFileList) != len(subFileList):
    print('check not match count videofile and subtitle')
    exit()
# NOTE(review): pairing relies on videos and subtitles sorting into the same
# relative order — confirm this holds for the target naming scheme.
videoFileList.sort()
subFileList.sort()
#print ("videoFileList: {}".format(videoFileList))
#print ("subFileList: {}".format(subFileList))
merge_list = tuple(zip(videoFileList, subFileList))
for vdo, sub in merge_list:
    srcExt = os.path.splitext(sub)[1]  # keep the subtitle's own extension
    src = os.path.join( path_dir, sub )
    dstfileName = os.path.splitext(vdo)[0]  # video name without extension
    dst = os.path.join( path_dir, dstfileName + srcExt )
    os.rename(src, dst )
#print ("videoFileList: {}".format(videoFileList)) | true |
da90410829f66f6168589165737cbc1f288b005e | Python | MoyTW/Zappy | /Python_Zappy/entity/tool/ToolHoloprojector.py | UTF-8 | 3,084 | 2.734375 | 3 | [] | no_license | __author__ = 'Travis Moy'
import Tool
import entity.actor.Actor as Actor
import entity.actor.effects.EffectDeath as EffectDeath
import warnings
import level.commands.CompoundCmd as cmpd
from level.commands.command_fragments import LevelPlaceAndAssignEntityID
# How does the holoprojector work?
# We will need to change how the targeting works for the Adversaries.
# First off, they can't just look for the player. They have to look for objects not aligned to themselves. That means
# adding in a faction-style system.
# Secondly, they have to be able to evaluate targets over other targets. That is, have a method for choosing targets
# in a deterministic (sp?) manner. That would be something like a "targeting priority" embedded in Actor objects, or
# a "threat" counter that can go up and down based on actions.
# The way the holoprojector would work would be to create a new Actor with a higher priority than Zappy, thereby
# distracting the enemies.
class ToolHoloprojector(Tool.Tool):
    """A tool that projects a short-lived holographic decoy Actor.

    Per the design notes above: the hologram is created with the user's
    faction and a high base threat so enemies that pick targets by threat
    are drawn to it instead of the user. An EffectDeath removes it after
    5 turns.
    """
    def __init__(self, _eid, _level, _holo_name='Hologram', _hp=999, _threat=9, **kwargs):
        """
        :type _eid: int
        :type _level: level.LevelView.LevelView
        :type _holo_name: str
        :type _hp: int
        :type _threat: int
        """
        self._holo_name = _holo_name
        self._hp = _hp
        self._threat = _threat
        # This tool always targets a location and requires line of sight.
        kwargs['_list_target_types'] = [self.TYPE_LOCATION]
        kwargs['_requires_LOS'] = True
        super(ToolHoloprojector, self).__init__(_eid=_eid, _level=_level, **kwargs)

    # Design note (entity creation): the hologram Actor is built locally with
    # a placeholder id (-1); LevelPlaceAndAssignEntityID is responsible for
    # assigning a real entity id and placing it, so this class never touches
    # the Level's id counter directly. (Chosen over letting the tool
    # manipulate Level ids, or a CreateAndPlaceActor command.)
    def _effects_of_use_on_location(self, _x, _y):
        """
        :type _x: int
        :type _y: int
        :rtype: bool
        """
        # NOTE(review): despite ':rtype: bool' above, there is no return
        # statement, so this returns None — confirm what callers expect.
        holo = Actor.Actor(-1, self._level, _entity_name=self._holo_name, _max_hp=self._hp, _faction=self.user.faction,
                           _base_threat=self._threat)
        # The hologram expires after 5 turns via a timed death effect.
        holo.apply_status_effect(EffectDeath.EffectDeath(5, holo))
        cmd_desc = "The holoprojector creates an image at ({0}, {1})!".format(_x, _y)
        command = cmpd.CompoundCmd(cmd_desc, LevelPlaceAndAssignEntityID(holo, _x, _y))
        self._level.add_command(command)
548b904a5c8ac1f888b9bcb8d908f19a76c99ff4 | Python | saumya470/python_assignments | /.vscode/Polymorphism/Plusoperator.py | UTF-8 | 150 | 4.03125 | 4 | [] | no_license | # + operator is overloaded and polymorphic
x,y = 10,20
print(x+y)
s1='Hello'
s2=' How are you'
print(s1+s2)
l1= [1,2,3,4]
l2 = [4, 5,6,7,8]
print(l1+l2) | true |
3f2765f203341af362fc351af4da4557b5dbb1e6 | Python | engelmi/pyoddgen | /pyoddgen/manager.py | UTF-8 | 1,793 | 2.859375 | 3 | [
"MIT"
] | permissive | from pyoddgen.tools.directory import import_on_runtime
from pyoddgen.config import ProjectConfiguration, GeneratorConfiguration
class GeneratorManager(object):
    """Wires a validated project configuration to its concrete generator.

    On construction the manager validates the project configuration, creates
    the project and generator directory structures, and instantiates the
    generator class named in the configuration via a runtime import.
    """

    def __init__(self, project_config):
        if not isinstance(project_config, ProjectConfiguration):
            raise Exception("Configuration of project must be of type '" + str(ProjectConfiguration) + "'! Got '" + str(type(project_config)) + "' instead.")
        is_valid, msg = project_config.check_validity()
        if not is_valid:
            raise Exception("Project configuration is not valid: " + msg)
        self.project_config = project_config
        self.project_config.setup_project_directory_structure()
        self.generator_config = self.project_config.fields["generator_config"]
        if not isinstance(self.generator_config, GeneratorConfiguration):
            # Fix: this message previously named ProjectConfiguration, which
            # is not the type being checked here.
            raise Exception("Configuration of generator must be of type '" + str(GeneratorConfiguration) + "'! Got '" + str(type(self.generator_config)) + "' instead.")
        self.generator_config.setup_generator_directory(self.project_config.fields["project_dir"])
        self.generator = self.setup_project_generator()

    def setup_project_generator(self):
        """Instantiate the generator class declared in the configuration.

        fields["generator"] appears to be a (module_name, class_name) pair
        under pyoddgen.generators — confirm against the config schema.
        Returns None when the generator configuration has the wrong type.
        """
        if isinstance(self.generator_config, GeneratorConfiguration):
            generator_module_import = "pyoddgen.generators." + self.generator_config.fields["generator"][0]
            generator_class_import = self.generator_config.fields["generator"][1]
            generator_class = import_on_runtime(generator_module_import, generator_class_import)
            return generator_class(self.generator_config, self.project_config.fields["project_dir"])
        return None

    def start_generation(self):
        """Placeholder entry point: currently only logs start/end markers."""
        print("starting generation...")
        print("end of generation")
| true |
395065bc87b67d5da3a225bfff56127d20a94570 | Python | jong1-alt/Lin | /demo76.py | UTF-8 | 584 | 3.21875 | 3 | [] | no_license | def variable_key_value_function(fix, **kwargs):
print(f'fix part={fix}')
for k, v in kwargs.items():
print(f"parameter name={k}, value={v}")
# Demonstrations: the fixed argument alone, then with growing keyword sets.
variable_key_value_function("parameter alone")
variable_key_value_function('POOP',name='python programming' )
variable_key_value_function('POOP',name='python programming',
                            duration=35,level='beginning')
# A dict can be expanded into keyword arguments with the ** operator.
course = {'code':'POOP',
          'name':'Python programming',
          'duration':35,
          'level':'beginning',
          'seasion':'winter'}
variable_key_value_function('POOP',**course)
b2b544d19af6125534200095519ed9f3854cfae7 | Python | liggettla/FERMI | /paperGeneration/baseChangeAnalysis | UTF-8 | 17,568 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
def runArgparse():
    """Parse the command line; returns (inFiles, ref, inDir, output, previous).

    The save/load options come back as False (not None) when unset so the
    callers' truth tests read naturally.
    """
    print('Reading Argparse...')
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--inFiles', '-i', type=str, nargs='*', help='Specifies the input vcf file(s).')
    parser.add_argument('--inDir', '-d', type=str, help='Points to the input directory.')
    parser.add_argument('--reference', '-r', type=str, help='Point to the reference genome to be used for flanking sequence.')
    parser.add_argument('--savedata', '-s', type=str, help='Name of savefile if output of data structure is desired.')
    parser.add_argument('--previousdata', '-p', type=str, help='Point to a saved data structure to be loaded.')
    args = parser.parse_args()
    output = args.savedata if args.savedata else False
    previous = args.previousdata if args.previousdata else False
    return args.inFiles, args.reference, args.inDir, output, previous
def getData(samples, ref, inDir, output, previous):
    """Load the variant DataFrame, either from a previously saved structure
    or freshly from the VCFs; optionally persist a freshly built frame.

    previous: path to a saved data structure (takes precedence when truthy).
    output: save path; only honoured when the data was built fresh.
    """
    probes = defineProbes()
    if previous:
        from util import loadData
        allVariants = loadData(previous)
    else:
        allVariants = populatePandasDataframe(inDir, samples, probes, ref)
    if output and not previous:
        from util import saveData
        saveData(allVariants, output)
    return allVariants
# this plots average VAFs by the six possible base changes
def vafsSixChanges(allVariants, samples):
    """Plot the mean VAF for each of the six substitution types, averaged
    across individuals (per-individual means, then a NaN-ignoring mean/std).

    Assumes allVariants has columns 'Individual', 'ConvChange' and 'VAF'
    — TODO confirm against populatePandasDataframe.
    """
    print('Computing avg VAFs by six variant types...')
    changes = ['C>A', 'C>G','C>T','T>A','T>C','T>G']
    means = []
    std = []
    colors = ['cyan','black','red','grey','green','magenta']
    xlabel = 'Substitution Type'
    ylabel = 'VAF'
    title = 'Mean VAF by Substitution Type'
    for change in changes:
        tempmeans = []
        # NOTE(review): iterates 0..len(samples); presumably individuals are
        # numbered from 1, so index 0 contributes a NaN — confirm.
        for sample in range(len(samples)+1):
            x = allVariants[allVariants['Individual'] == sample]
            y = x[x['ConvChange'] == change]['VAF'].mean()
            tempmeans.append(y)
        # compute stats ignoring nan values
        means.append(np.nanmean(tempmeans))
        std.append(np.nanstd(tempmeans))
    plotData(means, std, colors, changes, xlabel, ylabel, title)
# this plots the overall averaged VAFs between exon and TIII regions
def vafExonIntron(allVariants, samples):
    """Plot mean VAF of exonic vs. TIII variants, averaged per individual
    first and then across individuals with NaN-ignoring statistics.

    Assumes an 'IntEx' column with values 'Exon' / 'TIII' — TODO confirm.
    """
    print('Computing overall exon/TIII average VAFs...')
    means = []
    std = []
    colors = ['grey','black']
    xlabel = 'Region'
    ylabel = 'VAF'
    title = 'Mean VAF by Region'
    labels = ['Exon', 'Intron']
    tempexon = []
    tempintron = []
    for sample in range(len(samples)+1):
        x = allVariants[allVariants['Individual'] == sample]
        y = x[x['IntEx'] == 'Exon']['VAF'].mean()
        z = x[x['IntEx'] == 'TIII']['VAF'].mean()
        tempexon.append(y)
        tempintron.append(z)
    means.append(np.nanmean(tempexon))
    std.append(np.nanstd(tempexon))
    means.append(np.nanmean(tempintron))
    std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# this plots the mean VAFs by 6 base changes by region of the genome
def mutabilityByGenomeRegion(allVariants, samples):
    """Plot mean VAF for each substitution type split by region (Exon/TIII),
    giving 12 bars; averages are per-individual first, then NaN-ignoring.

    NOTE(review): filters on a 'Change' column while vafsSixChanges uses
    'ConvChange' — confirm which column holds the collapsed substitution.
    """
    print('Computing mean VAFs by six variant types and region...')
    changes = ['C>A', 'C>G','C>T','T>A','T>C','T>G']
    means = []
    std = []
    colors = ['cyan','cyan','black','black','red','red','grey','grey','green','green','magenta','magenta']
    xlabel = 'Substitution Type'
    ylabel = 'VAF'
    title = 'Mean VAF by Substitution Type and Genomic Region'
    labels = ['C>A Exon','C>A TIII','C>G Exon','C>G TIII','C>T Exon','C>T TIII','T>A Exon','T>A TIII','T>C Exon','T>C TIII','T>G Exon','T>G TIII']
    for change in changes:
        tempexon = []
        tempintron = []
        for sample in range(len(samples)+1):
            q = allVariants[allVariants['Individual'] == sample]
            x = q[q['Change'] == change]
            y = x[x['IntEx'] == 'Exon']['VAF'].mean()
            z = x[x['IntEx'] == 'TIII']['VAF'].mean()
            tempexon.append(y)
            tempintron.append(z)
        means.append(np.nanmean(tempexon))
        std.append(np.nanstd(tempexon))
        means.append(np.nanmean(tempintron))
        std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# the purpose of this is to get the mutability by genome region
# but normalize this as a fraction of mutations
def mutabilityByGenomeRegionNormalized(allVariants, samples):
    """Same split as mutabilityByGenomeRegion, but each individual's
    exon/TIII mean VAFs are normalized to sum to 1 before averaging, so the
    plot shows the relative (fractional) mutability of each region.
    """
    print('Computing normalized mean VAFs by six variant types and region...')
    changes = ['C>A', 'C>G','C>T','T>A','T>C','T>G']
    means = []
    std = []
    colors = ['cyan','cyan','black','black','red','red','grey','grey','green','green','magenta','magenta']
    xlabel = 'Substitution Type'
    ylabel = 'VAF'
    title = 'Mean VAF by Substitution Type and Genomic Region'
    labels = ['C>A Exon','C>A TIII','C>G Exon','C>G TIII','C>T Exon','C>T TIII','T>A Exon','T>A TIII','T>C Exon','T>C TIII','T>G Exon','T>G TIII']
    for change in changes:
        tempexon = []
        tempintron = []
        for sample in range(len(samples)+1):
            q = allVariants[allVariants['Individual'] == sample]
            x = q[q['Change'] == change]
            y = x[x['IntEx'] == 'Exon']['VAF'].mean()
            z = x[x['IntEx'] == 'TIII']['VAF'].mean()
            # Fraction of the combined signal carried by each region.
            normy = y / (y + z)
            normz = z / (y + z)
            tempexon.append(normy)
            tempintron.append(normz)
        means.append(np.nanmean(tempexon))
        std.append(np.nanstd(tempexon))
        means.append(np.nanmean(tempintron))
        std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# same analysis as above, but uses AO instead of VAF
def mutabilityByGenomeRegionNormalizedAO(allVariants, samples):
    """Same per-region normalized split as above, but averaging the AO
    (alternate observation count) column instead of VAF."""
    print('Computing normalized mean AO by six variant types and region...')
    changes = ['C>A', 'C>G','C>T','T>A','T>C','T>G']
    means = []
    std = []
    colors = ['cyan','cyan','black','black','red','red','grey','grey','green','green','magenta','magenta']
    xlabel = 'Substitution Type'
    ylabel = 'AO'
    title = 'Mean Normalized AO by Substitution Type and Genomic Region'
    labels = ['C>A Exon','C>A TIII','C>G Exon','C>G TIII','C>T Exon','C>T TIII','T>A Exon','T>A TIII','T>C Exon','T>C TIII','T>G Exon','T>G TIII']
    for change in changes:
        tempexon = []
        tempintron = []
        for sample in range(len(samples)+1):
            q = allVariants[allVariants['Individual'] == sample]
            x = q[q['Change'] == change]
            y = x[x['IntEx'] == 'Exon']['AO'].mean()
            z = x[x['IntEx'] == 'TIII']['AO'].mean()
            # Fraction of the combined signal carried by each region.
            normy = y / (y + z)
            normz = z / (y + z)
            tempexon.append(normy)
            tempintron.append(normz)
        means.append(np.nanmean(tempexon))
        std.append(np.nanstd(tempexon))
        means.append(np.nanmean(tempintron))
        std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# this looks at overall VAFs only in the CpG context to see if CpG sites
# are more mutable within exons or introns
def cpgMutability(allVariants, samples):
    """Plot mean VAF of CpG-context sites (reference C whose first
    downstream base is G) in exons vs. TIII regions."""
    print('Computing mean VAFs within CpG context...')
    means = []
    std = []
    colors = ['grey','black']
    xlabel = 'Position'
    ylabel = 'VAF'
    title = 'Mean VAF of CpG Sites Within TIII/Exons'
    labels = ['Exon','TIII']
    tempexon = []
    tempintron = []
    for sample in range(len(samples)+1):
        # CpG filter: downstream sequence starts with G and the reference
        # ('WT') base is C.
        a = allVariants[allVariants.Downstream.str[0].eq('G')]
        b = a[a['WT'] == 'C']
        c = b[b['Individual'] == sample]
        d = c[c['IntEx'] == 'Exon']['VAF'].mean()
        e = c[c['IntEx'] == 'TIII']['VAF'].mean()
        tempexon.append(d)
        tempintron.append(e)
    means.append(np.nanmean(tempexon))
    std.append(np.nanstd(tempexon))
    means.append(np.nanmean(tempintron))
    std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# the purpose of this method is to plot any differences in mutability in CpG
# regions by normalize these to 1 within an individual
def cpgMutabilityNormalized(allVariants, samples):
    """As cpgMutability, but each individual's exon/TIII CpG mean VAFs are
    normalized to sum to 1 before the across-individual average."""
    print('Computing mean normalized VAFs within CpG context...')
    means = []
    std = []
    colors = ['grey','black']
    xlabel = 'Position'
    ylabel = 'VAF'
    title = 'Normalized Mean VAF of CpG Sites Within TIII/Exons'
    labels = ['Exon','TIII']
    tempexon = []
    tempintron = []
    for sample in range(len(samples)+1):
        # CpG filter: reference C followed immediately by G.
        a = allVariants[allVariants.Downstream.str[0].eq('G')]
        b = a[a['WT'] == 'C']
        c = b[b['Individual'] == sample]
        d = c[c['IntEx'] == 'Exon']['VAF'].mean()
        e = c[c['IntEx'] == 'TIII']['VAF'].mean()
        # Per-individual fractions so regions are comparable across people.
        normd = d / (d + e)
        norme = e / (d + e)
        tempexon.append(normd)
        tempintron.append(norme)
    means.append(np.nanmean(tempexon))
    std.append(np.nanstd(tempexon))
    means.append(np.nanmean(tempintron))
    std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# same type of analysis as above, but instead of using VAF, this uses AO
def cpgMutabilityNormalizedAO(allVariants, samples):
    """As cpgMutabilityNormalized, but averaging the AO column (alternate
    observation counts) instead of VAF."""
    print('Computing mean normalized VAFs using AO within CpG context...')
    means = []
    std = []
    colors = ['grey','black']
    xlabel = 'Position'
    ylabel = 'AO'
    title = 'Normalized Mean AO of CpG Sites Within TIII/Exons'
    labels = ['Exon','TIII']
    tempexon = []
    tempintron = []
    for sample in range(len(samples)+1):
        # CpG filter: reference C followed immediately by G.
        a = allVariants[allVariants.Downstream.str[0].eq('G')]
        b = a[a['WT'] == 'C']
        c = b[b['Individual'] == sample]
        d = c[c['IntEx'] == 'Exon']['AO'].mean()
        e = c[c['IntEx'] == 'TIII']['AO'].mean()
        # Per-individual fractions so regions are comparable across people.
        normd = d / (d + e)
        norme = e / (d + e)
        tempexon.append(normd)
        tempintron.append(norme)
    means.append(np.nanmean(tempexon))
    std.append(np.nanstd(tempexon))
    means.append(np.nanmean(tempintron))
    std.append(np.nanstd(tempintron))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# the purpose of this analysis is to look at C>T changes by their
# trinucleotide sequences
def CtoTbyTrinucleotide(allVariants, samples):
    """Plot mean VAF of C>T changes for each of the 16 trinucleotide
    contexts NCN (last upstream base + first downstream base).

    NOTE(review): `sample` is never used inside the inner loop, so the same
    cohort-wide mean is appended len(samples)+1 times (std collapses to ~0);
    an 'Individual' filter is probably missing — confirm intent.
    """
    print('Computing C>T changes by trinucleotide...')
    means = []
    std = []
    colors = ['cyan','cyan','cyan','cyan','black','black','black','black','red','red','red','red','grey','grey','grey','grey']
    xlabel = 'Context'
    ylabel = 'VAF'
    title = 'C>T Changes by Context'
    labels = ['TCT','CCT','GCT','ACT','TCC','CCC','GCC','ACC','TCG','CCG','GCG','ACG','TCA','CCA','GCA','ACA']
    for triplet in labels:
        tempmean = []
        for sample in range(len(samples)+1):
            a = allVariants[allVariants['Change'] == 'C>T']
            # Match the base immediately before and after the mutated C.
            b = a[a.Upstream.str[-1].eq(triplet[0])]
            c = b[b.Downstream.str[0].eq(triplet[-1])]['VAF'].mean()
            tempmean.append(c)
        means.append(np.nanmean(tempmean))
        std.append(np.nanstd(tempmean))
    plotData(means, std, colors, labels, xlabel, ylabel, title)
# the purpose here is to look at the 10 flanking bp changes
# in the upper and lower populations within the VAF comparison
# plots to see if this predicts the mutability of a particular
# base change
def contextUpperLower(allVariants, samples, regions='split'):
    """For each substitution type, plot the per-position nucleotide makeup
    of the 10 bases up- and downstream of the variant.

    regions='split' analyses high-VAF (>0.0004) and low-VAF populations
    separately; 'combined' analyses all variants (VAF > 0) together.
    Fractions are computed per individual and position, then averaged
    across individuals and handed to plotStacked.
    """
    print('Computing upper and lower population contexts...')
    from collections import defaultdict
    from util import plotStacked
    colors = ['cyan','cyan','cyan','cyan','black','black','black','black','red','red','red','red','grey','grey','grey','grey']
    xlabel = 'Context'
    ylabel = 'VAF'
    title = 'C>T Changes by Context'
    labels = ['TCT','CCT','GCT','ACT','TCC','CCC','GCC','ACC','TCG','CCG','GCG','ACG','TCA','CCA','GCA','ACA']
    vafcutoff = ['Upper','Lower']
    if regions == 'combined':
        vafcutoff = ['']
    changes = ['C>A', 'C>G','C>T','T>A','T>C','T>G']
    letters = ['T','C','G','A']
    for region in vafcutoff:
        for change in changes:
            # Per-base lists of 10 positional means/stds, upstream and downstream.
            allmeansup = {'T':[],'C':[],'G':[],'A':[]}
            allstdsup = {'T':[],'C':[],'G':[],'A':[]}
            allmeansdown = {'T':[],'C':[],'G':[],'A':[]}
            allstdsdown = {'T':[],'C':[],'G':[],'A':[]}
            for position in range(10):
                uppositionmakeup = {'T':[],'C':[],'G':[],'A':[]}
                downpositionmakeup = {'T':[],'C':[],'G':[],'A':[]}
                # Individuals are numbered from 1 here (unlike the +1 loops above).
                for indiv in range(len(samples)):
                    sample = indiv + 1
                    fourbasesup = []
                    fractionup = []
                    fourbasesdown = []
                    fractiondown = []
                    for base in letters:
                        l = allVariants[allVariants['Change'] == change]
                        # Select the VAF population for this pass.
                        if region == 'Upper' and regions == 'split':
                            m = l[l['VAF'] > 0.0004]
                        if region == 'Lower' and regions == 'split':
                            m = l[l['VAF'] < 0.0004]
                        if regions == 'combined':
                            m = l[l['VAF'] > 0]
                        n = m[m['Individual'] == sample]
                        # Count variants whose flanking base at this offset matches.
                        o = n[n.Upstream.str[position].eq(base)]['WT'].count()
                        fourbasesup.append(o)
                        p = n[n.Downstream.str[position].eq(base)]['WT'].count()
                        fourbasesdown.append(p)
                        #print base, o
                    #print sample, fourbasesup
                    # convert counts to fractions
                    # NOTE(review): this can divide by zero when an individual
                    # has no variants in the selected population — needs a guard.
                    for i in fourbasesup:
                        fractionup.append(i / float(sum(fourbasesup)))
                    for i in fourbasesdown:
                        fractiondown.append(i / float(sum(fourbasesdown)))
                    #print sample, fractionup
                    # add to master dictionary (order matches `letters`)
                    uppositionmakeup['T'].append(fractionup[0])
                    uppositionmakeup['C'].append(fractionup[1])
                    uppositionmakeup['G'].append(fractionup[2])
                    uppositionmakeup['A'].append(fractionup[3])
                    downpositionmakeup['T'].append(fractiondown[0])
                    downpositionmakeup['C'].append(fractiondown[1])
                    downpositionmakeup['G'].append(fractiondown[2])
                    downpositionmakeup['A'].append(fractiondown[3])
                #print position, uppositionmakeup
                # average across individuals and record at this position
                for base in letters:
                    allmeansup[base].append(np.nanmean(uppositionmakeup[base]))
                    allstdsup[base].append(np.nanstd(uppositionmakeup[base]))
                    allmeansdown[base].append(np.nanmean(downpositionmakeup[base]))
                    allstdsdown[base].append(np.nanstd(downpositionmakeup[base]))
            #print allmeansup, allstdsup
            # Python 2 print: report the bases adjacent to the variant.
            for i in allmeansup:
                print i, allmeansup[i][9], allstdsup[i][9], allmeansdown[i][0], allstdsdown[i][0]
            # combine upstream and downstream into one 20-position series
            for base in allmeansup:
                allmeansup[base] = allmeansup[base] + allmeansdown[base]
                allstdsup[base] = allstdsup[base] + allstdsdown[base]
            # plot
            if regions == 'split':
                plotStacked(allmeansup, allstdsup, 'Base Position', 'Nuceotide Fraction', '%s Region %s Changes' % (region, change))
            if regions == 'combined':
                plotStacked(allmeansup, allstdsup, 'Base Position', 'Nuceotide Fraction', '%s Changes' % (change))
# this will find the average percent of probed region in
# a given individual that is mutated
def percentVariant(allVariants, samples):
    """Print mean/std of per-individual unique variant-position counts and
    the fraction of probed bases carrying a variant (Python 2 prints)."""
    totvar = []
    percent = []
    totalProbed = 4838 # this is experiment specific
    for i in range(len(samples)):
        indiv = i + 1
        a = allVariants[allVariants['Individual'] == indiv]
        b = a.drop_duplicates(['Loc'])  # count each genomic position once
        count = b['Loc'].count()
        totvar.append(count)
        percent.append(float(count) / totalProbed)
    print np.mean(totvar), np.std(totvar), totvar
    print np.mean(percent), np.std(percent), percent
if __name__ == '__main__':
    # These imports land in the module namespace, so the functions above can
    # reference np/pd/plotData as globals once execution reaches them.
    from util import populatePandasDataframe
    from util import defineProbes
    from util import plotData
    import pandas as pd
    import numpy as np
    samples, ref, inDir, output, previous = runArgparse()
    allVariants = getData(samples, ref, inDir, output, previous)
    vafsSixChanges(allVariants, samples)
    # Uncomment individual analyses as needed.
    #vafExonIntron(allVariants, samples)
    #mutabilityByGenomeRegion(allVariants, samples)
    #mutabilityByGenomeRegionNormalized(allVariants, samples)
    #mutabilityByGenomeRegionNormalizedAO(allVariants, samples)
    #cpgMutability(allVariants, samples)
    #cpgMutabilityNormalized(allVariants, samples)
    #cpgMutabilityNormalizedAO(allVariants, samples)
    #CtoTbyTrinucleotide(allVariants, samples)
    #contextUpperLower(allVariants, samples)
    #contextUpperLower(allVariants, samples, 'combined')
    #percentVariant(allVariants, samples)
| true |
24bfbc6a728e1ba65b95535441cca33fe27c9ab9 | Python | rheidenreich139/Heidenreich_Rebecca | /Heidenreich_Rebecca_Question5.py | UTF-8 | 1,268 | 3.09375 | 3 | [] | no_license | #Question 5. Create a geodatabase. Then, using the following list, generate feature classes for each of the
#elements in the list: featureList = [‘CapitalCities’, ‘Landmarks’, ‘HistoricPlaces’, ‘StateNames’, ‘Nationalities’,‘Rivers’]
import arcpy
# Create the file geodatabase and point the arcpy workspace at it.
out_folder_path = r"C:\gisclass\GIS610_Exercise3"
out_name = "exercise3GDB.gdb"
arcpy.env.overwriteOutput = True  # allow re-runs to replace existing outputs
arcpy.CreateFileGDB_management(r'C:\gisclass\GIS610_Exercise3', 'exercise3GDB.gdb')
current_workspace = r'C:\gisclass\GIS610_Exercise3\exercise3GDB.gdb'
geometry_type = 'POINT'  # every feature class below is created as points
# Factory code 102100 — presumably WGS 1984 Web Mercator (Auxiliary Sphere);
# confirm against the Esri projected-coordinate-system list.
spatial_reference = arcpy.SpatialReference(102100)
featureClassNamesList = ['CapitalCities', 'Landmarks', 'HistoricPlaces', 'StateNames', 'Nationalities','Rivers']
arcpy.env.workspace = current_workspace
def createFeatureClass(in_fc_name):
    """Create a point feature class named *in_fc_name* in the geodatabase
    workspace (M/Z values disabled) and report success."""
    arcpy.CreateFeatureclass_management(current_workspace, in_fc_name, geometry_type, "", "DISABLED", "DISABLED", spatial_reference)
    print('Feature Class ' + in_fc_name + ' was sucessfully created.')
# NOTE(review): none of the names in featureClassNamesList start with "A",
# so this filter is always empty — confirm the intended prefix.
filteredFeatureClassNameList = [fc for fc in featureClassNamesList if fc.startswith("A")]
print(filteredFeatureClassNameList)
# Side-effect comprehension: creates each feature class (list holds Nones).
createFC = [createFeatureClass(fc) for fc in featureClassNamesList]
print('All Done')
| true |
8b3dcce11560042136265ec8ba6328547352d77e | Python | AleksandrMedvedev9000/pythonForTesters | /tests/test_new_group.py | UTF-8 | 867 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
from model.group import Group
def test_new_group(app):
    """Creating a group must grow the list by one and preserve all others."""
    old_groups = app.group.get_group_list()
    group = Group(table_name="Preved!", table_header="Bonjour!", table_footer="Zdarova!")
    app.group.create_new(group)
    new_groups = app.group.get_group_list()
    # Cheap length check first via count() before the full list comparison.
    assert len(old_groups) + 1 == app.group.count()
    old_groups.append(group)
    # Order-independent comparison using Group.id_or_max as the sort key.
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
#def test_new_empty_group(app):
# old_groups = app.group.get_group_list()
# group = Group(table_name="", table_header="", table_footer="")
# app.group.create_new(group)
# new_groups = app.group.get_group_list()
# assert len(old_groups) + 1 == len(new_groups)
# old_groups.append(group)
# assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
| true |
4a6ac6c5e7c55226e8ff6752c77a0ac5eb42f08f | Python | hyc121110/ucrcs172_ps2 | /main.py | UTF-8 | 609 | 2.96875 | 3 | [] | no_license | # client for user to type query
# requirements: support for complex queries using VSM
import vsm
import create_index
# prompt user for a term
print("Please enter a query: ", end="")
query = input()
new_query = []
# preprocess query
for q in query.split():
q = vsm.word_preprocessing(q)
if q:
new_query.append(q)
# calculate cosine similarity score
v = vsm.generate_tfidf_vector(new_query)
scores = vsm.cos_sim_score(v)
# print scores
print("Using Cosine Siliarity...")
for i in range(len(scores)):
print("The similarity between \"", query, "\" and ", scores[i][1], " is ", scores[i][0], sep='') | true |
37f8e2f61427632b95ca551053ecdb612cc9c9c6 | Python | OnewayYoun/studygroup-for-codingTest | /05주차/2번(박유나).py | UTF-8 | 768 | 3.28125 | 3 | [] | no_license | N,e,w,s,n = map(int, input().split()) #이동거리, 동, 서, 남, 북 분리하기
HR=100
ewsn = [e/HR,w/HR,s/HR,n/HR] #각각의 확률을 100으로 나누기
visited = [[0 for i in range(N*2)] for ii in range(N*2)]
dx, dy = [1,-1,0,0], [0,0,-1,1] #상,하,좌,우
def dfs(count, x, y):
if count==N: #N만큼 이동했다면, 그만
return 1
visited[x][y] = 1 #방문 표시하기
ret = 0
for i in range(4):
X, Y = x+dx[i], y+dy[i] #이동할 상,하,좌,우
if visited[X][Y]:continue #방문하였다면 무시하고 진행
ret += dfs(count+1,X,Y)*ewsn[i] #dfs는 카운트 세고, 각각의 확률 곱해주기
visited[x][y] = 0 #방문 표시 없애기
return ret #최종 확률 출력
print(dfs(0,0,0))
| true |
dde80113ff554941ab5442afb5083108ad2998a9 | Python | mooonpark/code | /python/system-program/thread/06-锁.py | UTF-8 | 473 | 2.859375 | 3 | [] | no_license | import threading
import time
g_num = 0
def work1():
global g_num
mutex.acquire()
for i in range(1000000):
g_num += 1
#mutex.release()
print("work1 g_num:%s" %g_num)
def work2():
global g_num
mutex.acquire()
for i in range(1000000):
g_num += 1
mutex.release()
print("work2 g_num:%s" %g_num)
mutex = threading.Lock()
t1 = threading.Thread(target=work1)
t2 = threading.Thread(target=work2)
t1.start()
#time.sleep(2)
t2.start()
print("g_num:%s" %g_num)
| true |
50954e6c404c6e33a27b99ccc379307fddd423e4 | Python | suizo12/hs17-bkuehnis | /source/game_data/dataexport/dataexport.py | UTF-8 | 3,751 | 2.65625 | 3 | [] | no_license | from sklearn.model_selection import train_test_split
import pandas as pd
from game_data.gamescore import basketballgame
from game_data.dataexport.dataframe_helper import remove_temporary_colums, get_results
import glob
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import numpy as np
from sklearn.model_selection import cross_val_score
from game_data.gamescore.wikihoops import Wikihoops
import pickle
from sklearn.model_selection import ShuffleSplit
'''
Get all data from 2014-2018 season
Transform the data to panda
dump the panda file to '../static/dumps/panda_dataset.p'
'''
keep_columns = ['away_+/-', 'away_TS%', 'away_TOV', 'home_+/-', 'home_TS%', 'home_TOV', 'rating', 'game']
game_data = []
w = Wikihoops()
#for game_file in glob.glob("../static/matches/united_states/nba/2003-2004//*.json"):
# game = basketballgame.BasketballGame(game_file, w.frame, True)
# game_data.append(basketballgame.BasketballGame(game_file).data)
#for game_file in glob.glob("../static/matches/united_states/nba/2014-2015//*.json"):
# game_data.append(basketballgame.BasketballGame(game_file, w.frame, True).data)
#for game_file in glob.glob("../static/matches/united_states/nba/2015-2016//*.json"):
# game_data.append(basketballgame.BasketballGame(game_file, w.frame, True).data)
for game_file in glob.glob("../static/matches/united_states/nba/2016-2017//*.json"):
#game = basketballgame.BasketballGame(game_file, w.frame, True)
game_data.append(basketballgame.BasketballGame(game_file, w.frame, False).data)
for game_file in glob.glob("../static/matches/united_states/nba/2017-2018//*.json"):
# game = basketballgame.BasketballGame(game_file, w.frame, True)
game_data.append(basketballgame.BasketballGame(game_file, w.frame, False).data)
df = pd.DataFrame.from_records(game_data)
#df = df[keep_columns]
#df.plot(kind='box', subplots=True, sharex=False, layout=(2, 3), figsize=(18, 8))
#df.plot(kind='kde', subplots=True, sharex=False, layout=(2, 3), figsize=(18, 8))
# print('Is there any null values:')
# print(df.isnull().any())
print(df.shape)
with open('../static/dumps/panda_dataset.p', 'wb') as pickle_file:
pickle.dump(df, pickle_file)
y = get_results(df)
remove_temporary_colums(df)
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.2, random_state=1)
print(df.shape, X_train.shape, X_test.shape)
lr = linear_model.Ridge(alpha=.5, normalize=True)
print(lr.fit(X_train, y_train)) # lr ist nun unser trainiertes Model (Linear Regression)
rf = RandomForestRegressor(n_estimators=500, n_jobs=-1, random_state=72)
print(rf.fit(X_train, y_train)) # rf ist nun unser trainiertes Model (Random Forests)
pred_lr = lr.predict(X_test) # Linear Regression
pred_rf = rf.predict(X_test) # Random Forests
# Linear Regression
rmse_lr = np.sqrt(mean_squared_error(y_test, pred_lr))
print(rmse_lr)
# Random Forests
rmse_rf = np.sqrt(mean_squared_error(y_test, pred_rf))
print(rmse_rf)
scores = cross_val_score(rf, pd.concat([X_train, X_test]),
pd.concat([y_train, y_test]),
cv=5, scoring='neg_mean_squared_error')
print(np.sqrt(-1*scores))
combined_error = rmse_lr + rmse_rf
weight_lr = 1-rmse_lr/combined_error
weight_rf = 1-rmse_rf/combined_error
print("Lineare Regression:\t {}".format(rmse_lr))
print("Random Forests:\t\t {}".format(rmse_rf))
print("Weighted Avgerage:\t {}".format(np.sqrt(mean_squared_error(y_test, weight_lr*pred_lr + weight_rf*pred_rf))))
predictions = rf.predict(X_test)
print(predictions)
with open('../static/dumps/random_forest.p', 'wb') as pickle_file:
pickle.dump(predictions, pickle_file)
| true |
b0d944e1b0da6fb19dfe9d79b7b548c19292ce90 | Python | CaptainJRoy/Partition-Space-Monitor | /monSpace.py | UTF-8 | 6,579 | 2.71875 | 3 | [] | no_license | import netsnmp, thread, time
import curses
SESSION = 0
hrPartitionLabel = 0
prev_pct = {}
DICT = {}
REFRESH_TIME = 5
EXIT = False
def init_session():
"""
This function initializes the session that will be used to execute snmp
commands and the list from which it start the iteration of every partition,
it has no arguments and no return values
"""
global SESSION, hrPartitionLabel, prev_pct
SESSION = netsnmp.Session(Version = 2)
hrPartitionLabel = get_values('hrPartitionLabel')
for i in hrPartitionLabel:
prev_pct[i] = 0
def get_values(table):
"""
Function that returns the values especified in a table
@arg table - the table from which we want the values
@return - a list containing every value
"""
global SESSION
result = netsnmp.Varbind(table)
resultlist = netsnmp.VarList(result)
return SESSION.walk(resultlist)
def get_next(value):
"""
Function that returns the values especified in a element
@arg value - the value from which we want the result
@return - a list containing this result
"""
global SESSION
result = netsnmp.Varbind(value)
resultlist = netsnmp.VarList(result)
return SESSION.get(resultlist)[0]
def normalize_rt():
global REFRESH_TIME
if REFRESH_TIME < 10:
REFRESH_TIME = 10
elif REFRESH_TIME > 1200:
REFRESH_TIME = 1200
def populate_dictionary():
"""
This function receives no arguments and returns no values, all it does
is modify the current dictionary with the mesurements of every partition
"""
global hrPartitionLabel, DICT, REFRESH_TIME, prev_pct
DICT.clear()
index = 1
for i in hrPartitionLabel:
hrPartitionFSIndex = get_values('hrPartitionFSIndex')
fs_index = int(hrPartitionFSIndex[index-1])
if fs_index > 0:
try:
fss_index = int(get_next('hrFSStorageIndex.' + str(fs_index)))
storageSize = int(get_next('hrStorageSize.' + str(fss_index)))
storageUsed = int(get_next('hrStorageUsed.' + str(fss_index)))
free_pct = ((storageSize - storageUsed) / float(storageSize)) * 100
pct_change = free_pct - prev_pct[i]
prev_pct[i] = free_pct
DICT[i] = {
'hrPartitionFSIndex': int(hrPartitionFSIndex[index-1]),
'hrFSMountPoint' : get_next('hrFSMountPoint.' + str(fs_index)),
'hrFSStorageIndex' : fss_index,
'hrStorageAllUnits' : int(get_next('hrStorageAllocationUnits.' + str(fss_index))),
'hrStorageSize' : storageSize,
'hrStorageUsed' : storageUsed,
'freeSpace' : storageSize - storageUsed,
'freeSpacePct' : free_pct,
'pct_change' : pct_change
}
except:
if i in DICT:
del DICT[i]
prev_pct[i] = 0
pass
index += 1
"""
The following functions make everthing pretier
"""
def center(x, string):
return (x - len(str(string)))/2
def adjust(value):
if value < 1024:
return str(float(value)) + ' Bytes'
elif value / 1024 < 1024:
return str(float(value/1024)) + ' KBytes'
elif value / 1024 / 1024 < 1024:
return str(float(value/1024/1024)) + ' MBytes'
elif value / 1024 / 1024 / 1024 < 1024:
return str(float(value/1024/1024/1024)) + ' GBytes'
else:
return str(float(value/1024/1024/1024/1024)) + ' TBytes'
def adjust_t(value):
if value < 60:
return str(round(value, 3)) + ' seconds'
elif value / 60 < 60:
return str(round(value/60, 3)) + ' minutes'
else:
return str(round(value/60/60, 3)) + ' hours'
def pbar(window):
"""
Creates a fancy window on which we display the values
"""
global EXIT, REFRESH_TIME, DICT, prev_pct
try:
while not EXIT:
unchanged = True
thread.start_new_thread(populate_dictionary, ( ))
y,x = window.getmaxyx()
window.clear()
window.border(0)
window.addstr(1, center(x, "partitions"), "Partitions", curses.A_STANDOUT)
printline = 3
for i in DICT:
window.addstr(printline, center(x,str(i)), str(i), curses.A_BOLD)
printline += 1
s = "Partition Mounting Point: " + str(DICT[i]['hrFSMountPoint'])
window.addstr(printline, center(x, s), s)
printline += 1
s = "Total Space: "
tot = DICT[i]['hrStorageSize'] * DICT[i]['hrStorageAllUnits']
s += adjust(tot)
window.addstr(printline, center(x, s), s)
printline += 1
s = "Used Space: "
used = DICT[i]['hrStorageUsed'] * DICT[i]['hrStorageAllUnits']
s += adjust(used)
window.addstr(printline, center(x, s), s)
printline += 1
s = "Free Space: "
free = DICT[i]['freeSpace'] * DICT[i]['hrStorageAllUnits']
s += adjust(free)
free_pct = DICT[i]['freeSpacePct']
s2 = ' ['+ str(round(free_pct,3)) + '%]'
window.addstr(printline, center(x, s+s2), s)
if free_pct < 15:
window.addstr(printline, center(x, s+s2)+len(s), s2, curses.A_BLINK)
else:
window.addstr(printline, center(x, s+s2)+len(s), s2)
printline += 1
s = "Percentage Change: "
pct_change = DICT[i]['pct_change']
s += ' ['+ str(round(pct_change, 5)) + '%]'
window.addstr(printline, center(x, s), s)
printline += 3
if free_pct < 15:
REFRESH_TIME = 60
if unchanged:
if pct_change < 0:
REFRESH_TIME /= 2
else:
REFRESH_TIME *= 2
unchanged = False
normalize_rt()
s = " Refreshing every " + adjust_t(REFRESH_TIME) + " "
window.addstr(y-1, center(x, s), s)
window.refresh()
time.sleep(REFRESH_TIME)
except KeyboardInterrupt:
EXIT = True
except:
print "Error occurred!"
init_session()
curses.wrapper(pbar)
| true |
7a155be63fe79240baa7e15ca768c6ba1afa9aad | Python | Luiz6ustav0/verlab | /learningResources/computerVision/OpenCvPlaylist/canny_edge_detection.py | UTF-8 | 578 | 3.078125 | 3 | [] | no_license | """
This algorithm can be broken down, basically, in 5 steps:
1. Noise reduction
2. Gradient calculation
3. Non-maximum suppression
4. Double threshold
5. Edge Tracking by Hysteresis
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread("jp.png", 0)
canny = cv2.Canny(img, 100, 200, ) # Maybe add a trackbar?
titles = ['image', 'canny']
images = [img, canny]
for i in range(len(titles)):
plt.subplot(1, 2, i+1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
| true |
65c38bf6e2819576949c5b79c2ac0ef06546881e | Python | ynonp/python-for-kids | /pong/pong.py | UTF-8 | 626 | 3.296875 | 3 | [] | no_license | from p5 import *
left_bar_y = 0
ball_x = 100
ball_y = 100
ball_dx = 4
ball_dy = 2
def setup():
size(640, 480)
def draw():
global ball_x, ball_y, ball_dx, ball_dy, left_bar_y
background(150, 150, 150)
if ball_x > 640:
ball_dx = -4
if ball_x < 0:
ball_dx = 4
if ball_y > 480:
ball_dy = -2
if ball_y < 0:
ball_dy = 2
rect(5, left_bar_y, 10, 80)
circle(ball_x, ball_y, 20)
ball_x += ball_dx
ball_y += ball_dy
if key_is_pressed and key == 'DOWN':
left_bar_y += 3
elif key_is_pressed and key == 'UP':
left_bar_y -= 3
run()
| true |
090962fc0275d14c9ee9889eb0bd5f445497c8cb | Python | Gangadharbhuvan/HackerRank-Python-Solutions | /12-Alphabet_Rangoli.py | UTF-8 | 652 | 3 | 3 | [] | no_license | def print_rangoli(size):
# your code goes here
l = "".join(list(map(chr, range(97, 123))) )
k=size-1
for i in range(2*size-1):
if(i<size):
s="-".join(l[k+i:k:-1]+l[k:k+i+1])
print(s.center(4*size-3,'-'))
k=k-1
if(i==size):
j=(2*size-2)%i
k=k+2
s="-".join(l[k+j:k:-1]+l[k:k+j+1])
print(s.center(4*size-3,'-'))
if(i>size):
j=(2*size-2)%i
k=k+1
s="-".join(l[k+j:k:-1]+l[k:k+j+1])
print(s.center(4*size-3,'-'))
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
| true |
f9ab5ff15b5a866665d3e8ac069c6384321535f3 | Python | iliasmezzine/QFML | /LSTM | UTF-8 | 6,148 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
#Rolling PCA + All preprocessing functions.
import os
import pandas as pd
import numpy as np
from numpy import concatenate
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
import pywt as wt
import matplotlib.pyplot as plt
os.chdir("C://Users//ilias//OneDrive//Bureau//ML")
df = pd.read_excel("sxxp_returns.xlsx")
idx = df.index
def denoise_wt(data,c): #Signal denoising with wavelet transform, c controls the denoising intensity.
(u, d) = wt.dwt(data, "haar")
up_thresh = wt.threshold(u, np.std(u)/c, mode="soft")
down_thresh = wt.threshold(d, np.std(d)/c, mode="soft")
return wt.idwt(up_thresh, down_thresh, "haar")
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): #convert pb to slp
n_vars = 1 if type(data) is list else data.shape[1]
df = data
cols, names = list(), list()
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
agg = pd.concat(cols, axis=1)
agg.columns = names
if dropnan:
agg.dropna(inplace=True)
return agg
def compute_lstm_results(c):
df = pd.DataFrame(denoise_wt(df,c)).set_index(idx)
df_comp = pd.read_excel("sxxp_comp_returns.xlsx")
pca = PCA(n_components = 13)
c_change = [np.sum((df_comp.iloc[i+1]-df_comp.iloc[i])**2) != 0 for i in range(len(df_comp) -1)]
c_change.insert(0,0)
df_comp['indic_change'] = np.cumsum(c_change) #indicator function for a composition change
grp_comp = df_comp.groupby(df_comp['indic_change'],axis=0).groups #time dfs with stable composition
time_index = [pd.to_datetime(grp_comp[i]) for i in range(len(grp_comp))] #list of time indices for each subset
pca_ready_dfs = [] #list holding the dfs for rolling PCA
for u in range(len(time_index)):
curr_df = df.loc[time_index[u]] #current dataframe
curr_comp = df_comp.loc[time_index[0]].iloc[0] #current index comp.
for name in curr_df.columns:
if curr_comp[name] == 0: #dropping the stocks that are not in the index for that time period
curr_df.drop(name, axis=1, inplace=True)
pca_ready_dfs.append(curr_df) #array storing the ready-to-use PCA inputs (last if we have to transform them)
all_inputs = [] #list holding the inputs
for x in pca_ready_dfs: #run the PCA for each df, store all the components (in value)
pca.fit(x)
#Here the .dot is used to retrieve the component in value (<returns_(t), pc_1>, ... <returns_(t), pc_max> )
curr_input = [[np.dot(x.iloc[i],pca.components_[j]) for i in range(len(x))] for j in range(len(pca.components_))]
all_inputs = all_inputs + [curr_input]
final_inputs = [] #list merging the inputs
for j in range(13):
cp_0 = []
for i in range(len(all_inputs)):
cp_0 += all_inputs[i][j]
final_inputs +=[cp_0]
final_inputs = pd.DataFrame(final_inputs).transpose()
sxxp = pd.read_excel("sxxp_returns_solo.xlsx")
final_inputs.set_index(sxxp.index,drop=True,inplace=True)
final_inputs.rename(index=str, columns={i:"pc_{}".format(i) for i in range(13)},inplace=True)
train_set = pd.concat([sxxp,final_inputs], axis=1, join='outer')
#Preprocessing the data
#Reframing the problem into a SLP.
#Splitting the ds into test, train
#Reshaping everything into LSTM dimension
#Rescaling the data using MinMaxScaler
values = train_set.values
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = pd.DataFrame(scaler.fit_transform(values))
reframed = series_to_supervised(scaled, 1, 1)
#Dropping unnecessary PCs[t]
ldrop = [i for i in range(15,28)]
reframed.drop(reframed.columns[ldrop], axis=1, inplace=True)
# Splitting the dataset into training and test sets. As a rule of thumb, taking 80% as training set and 20% as test set.
n_samples = int(len(reframed)*0.8)
values = reframed.values
train = values[:n_samples, :]
test = values[n_samples:, :]
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# Reshaping the dataset into LSTM-friendly dimension.
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
# Model Specification (LSTM)
# Model Training
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=0, shuffle=False)
#plt.plot(history.history['loss'], label='in-sample MSE')
#plt.plot(history.history['val_loss'], label='out-of-sample MSE')
#plt.legend()
#plt.show()
# Model Predictions
# Predict the test set
yhat = model.predict(test_X)
#Reshape test set in initial shape
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
#Concatenating the prediction with the test X, rescaling all back then keeping only the first column
#Rescaling the y_predicted
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
#Rescaling the y_actual.
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
#Compute Model RMSE, plot actual vs predicted
plt.plot(inv_y, dashes=[1,1])
plt.plot(inv_yhat)
rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
| true |
52d16c09e4dd5b154992d226c00577dd7e03c64d | Python | yezhizhen/Python | /Jump2_gr.py | UTF-8 | 581 | 3.34375 | 3 | [] | no_license | class Solution(object):
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# you want to jump now
step,near,far = 0,0,1
# far represents [far-1] elements you can reach
while far < len(nums):
#next_far = max(i+j for i,j in zip(range(near,far), nums[near:far])) + 1
# find next_far faster
next_far = 0
for i in range(near,far):
next_far = max(next_far, i+nums[i]+1)
near,far,step = far, next_far, step+1
return step | true |
3ab9fd25c2269b68e0d9f8e56112189e911ff697 | Python | tregtatyana/Homework-2 | /HW2.py | UTF-8 | 3,102 | 2.765625 | 3 | [] | no_license | import xml.dom.minidom
import sys
import csv
import time
from matrixops import algorithm_Fl_Warh
def resist(inp, outp):
dom = xml.dom.minidom.parse(inp)
dom.normalize()
# перейдем к schematics
node = dom.childNodes[0]
fl = 1
u = v = t = t_inv = n = -7
d = []
k_nonet = 0
for k in node.childNodes:
st = k.attributes or {}
name = k.nodeName
if name == "net":
for (x, y) in st.items():
if x == "id":
n = int(y)
# последний элемент с "net"
k_nonet = k
# заполним d
for i in range(n):
d.append([])
for j in range(n):
if i != j:
d[i].append(float('+inf'))
else:
d[i].append(0)
for k_nonet in node.childNodes:
st = k_nonet.attributes or {}
name = k_nonet.nodeName
if name == "capactor" or name == "resistor":
for (x, y) in st.items():
if x == "resistance":
t = float(y)
elif x == "net_from":
u = int(y) - 1
elif x == "net_to":
v = int(y) - 1
d[u][v] = d[v][u] = 1 / (1 / d[u][v] + 1 / t)
elif name == "diode":
for (x, y) in st.items():
if x == "resistance":
t = float(y)
elif x == "net_from":
u = int(y) - 1
elif x == "net_to":
v = int(y) - 1
elif x == "reverse_resistance":
t_inv = float(y)
d[u][v] = 1 / (1 / d[u][v] + 1 / t)
d[v][u] = 1 / (1 / d[v][u] + 1 / t_inv)
# Теперь d заполнен начальными значениями
start = time.time()
dot = algorithm_Fl_Warh(d)
finish = (time.time() - start)
start2 = time.time()
for m in range(n):
for i in range(n):
for j in range(n):
summ = d[i][m] + d[m][j]
if summ == 0 and d[i][j] == 0:
d[i][j] = 1 / (float("+inf"))
elif summ == 0:
d[i][j] = 1 / (1 / d[i][j] + float("+inf"))
elif d[i][j] == 0:
d[i][j] = 1 / (float("+inf") + 1 / summ)
elif (1 / d[i][j] + 1 / summ) == 0:
d[i][j] = float("+inf")
else:
d[i][j] = 1 / (1 / d[i][j] + 1 / summ)
finish2 = (time.time() - start2)
# Выведем отношение времени работы
if finish!=0:
print(finish2/finish)
else:
print("Time equals zero")
# Вывод результатов в таблицу
with open(outp, "w") as f:
for line_n in range(n):
for column_n in range(n-1):
f.write(str(round(d[line_n][column_n], 6)) + ", ")
f.write(str(round(d[line_n][n-1], 6)) + "\n")
if __name__ == "__main__":
resist(sys.argv[1], sys.argv[2])
| true |
74ecc4f31b6682111906ff76241612e73497437f | Python | prestonmlangford/sheets | /parse.py | UTF-8 | 2,535 | 2.828125 | 3 | [] | no_license | import lex
from error import CompileError, TokenError
import pysound
from pysound import add
import numpy as np
def compile(instrument,sheet):
# defaults
volume = 100
tempo = 120/60 # beats per second
beats_per_whole = 4 # beats in 4/4 time
beats_per_measure = 4 # beats in 4/4 time
octave = 3 # treble clef?
# loop variables
beat = 0
measure_number = -1
time = 0
track = np.zeros((1,))
for kind,token in lex.tokens(sheet):
if kind == "tempo":
tempo = token/60
elif kind == "volume":
volume = token
elif kind == "time":
if beat != 0:
raise CompileError("Time change only allowed at beginning of measure")
beats_per_measure, beats_per_whole = token
elif kind == "bar":
if (beat < beats_per_measure) and (beat != 0):
raise CompileError(
"Not enough beat in measure: {} < {}"
.format(beat,beats_per_measure)
)
elif beat > beats_per_measure:
raise CompileError(
"Too many beat in measure: {} > {}"
.format(beat,beats_per_measure)
)
else:
measure_number += 1
beat = 0
elif kind == "note":
tie,lower,upper,step,fraction,dots,stacato = token
duration = beats_per_whole/fraction
duration *= 2 - 0.5**dots
rest = duration*(1 - 0.5**stacato)
duration -= rest
beats = duration + rest
beat += beats
# equal temperament scale
# frequency = A0*2**(octave + upper - lower + step/12)
sound = instrument.play(volume,duration/tempo,octave + upper - lower, step)
track = add(track,time,sound)
#output += "i 1 {:.3f} {:.3f} {:.3f} {:.3f}\n".format(time,duration/tempo,frequency,volume/100)
time += beats/tempo
elif kind == "rest":
fraction,dots = token
rest = beats_per_whole/fraction
rest *= 2 - 0.5**dots
beat += rest
elif instrument.parse(kind,token):
print()
else:
raise CompileError("Unable to process token: " + kind)
return track
| true |
2698972de1aeac8f50f8ecd17cdde5d8e6bfdcea | Python | rohanmittal149/Python_Examples | /Rock_paper_scissors.py | UTF-8 | 1,177 | 3.90625 | 4 | [] | no_license | import random
my_computer = ['rock','paper','scissors']
print("***Rock Paper Scissors***")
i = int(input("Enter number of turns: "))
j = 0
k = 0
while i>0:
computer = random.choice(my_computer)
player = input("Player turn... ").lower()
print("computer selected " + computer)
if computer == player:
j = j + 0
k = k + 0
elif player == "rock":
if computer == "scissor":
k += 1
elif computer == "paper":
j += 1
elif player == "paper":
if computer == "rock":
k += 1
elif computer == "scissor":
j += 1
elif player == "scissors":
if computer == "paper":
k += 1
elif computer == "rock":
j += 1
else:
print("Something went wrong...\n")
i -= 1
print("Computer Score: " + str(j) + "\nPlayer Score: " + str(k))
if j<k:
print("Player Wins!!")
elif j>k:
print("Computer Wins!!")
else:
print("It's a tie")
| true |
df008a9f039def15549645c196cba865973cb87d | Python | MohammedSharaki/hackerrank-python-solutions | /08-List-Comprehensions.py | UTF-8 | 276 | 2.90625 | 3 | [] | no_license | if __name__ == '__main__':
X = int (input())
Y = int (input())
Z = int (input())
n = int (input())
X += 1
Y += 1
Z += 1
temp_list = [[x, y, z]for x in range(X) for y in range(Y) for z in range(Z) if z +x +y !=n]
print(temp_list)
| true |
2ea1e482cced47fc3285893e57a5df7359059a06 | Python | Zoli1212/python | /09-turtle.py | UTF-8 | 314 | 3.234375 | 3 | [] | no_license | import turtle
ablak = turtle.Screen()
ablak.title('Teknocok')
Sanyi = turtle.Turtle()
Sanyi.color('blue')
Sanyi.pensize(4)
Sanyi.forward(100)
Sanyi.left(70)
Sanyi.forward(150)
Mari = turtle.Turtle()
Mari.color('pink')
Mari.pensize(2)
Mari.speed(1)
Mari.right(315)
Mari.forward(150*2**0.5)
ablak.mainloop()
| true |
8946934b676a466d613c30ba64e4524930c2fad8 | Python | ayser259/delphi | /leave_one_out.py | UTF-8 | 1,824 | 2.921875 | 3 | [] | no_license | from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score,train_test_split, LeaveOneOut
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
from data_load import get_encoded_data, get_clean_data, get_one_hot_encoded_data
# df = get_one_hot_encoded_data('data.csv', drop_pref=True)
df = encoded_dict_list = get_encoded_data('data.csv')[0]
encoded_dict_list = get_encoded_data('data.csv')[1]
# print(get_encoded_data('data.csv'))
x_df = df.drop(axis=1,columns=["current_average"])
y_df = df["current_average"]
## USING SCLEARN TO TEST AND TRAIN
TEST_SIZE = 0.5
X_train, X_test, y_train, y_test = train_test_split(x_df, y_df, test_size=TEST_SIZE)
KNN = KNeighborsClassifier(n_neighbors=3).fit(X_train,y_train)
print("Accuracy: ", KNN.score(X_test, y_test))
print("test: ", KNN.predict(X_test))
## ===== LEAVE ONE OUT CROSS VALIDATION BEGINS HERE ==== ##
X = np.array(x_df) # convert dataframe into np array
y = np.array(y_df) # convert dataframe into np array
loo = LeaveOneOut()
loo.get_n_splits(X)
LeaveOneOut()
accuracy = []
for train_index, test_index in loo.split(X):
X_train, X_test = pd.DataFrame(X[train_index]), pd.DataFrame(X[test_index]) # use this for training the model
y_train, y_test = y[train_index].ravel(), y[test_index].ravel() # use this for testing the model
# TODO: this is where you change it to the specific algorithim: i.e. KNN, naives bayes, decision trees
KNN = KNeighborsClassifier(n_neighbors=3)
model = KNN.fit(X_train, y_train) # fit the model using training data
accuracy.append(KNN.score(X_test, y_test))
# Calculate accuracy
mean = np.array(accuracy).mean()
variance = np.array(accuracy).std() * 2
print("LOO CV Accuracy: %0.2f (+/- %0.2f)" % (mean, variance))
| true |
22114ac1efe7b71db8e9ac02169ebc8f19936209 | Python | PratikBali/python-learning | /asgn/a03/a3q1.py | UTF-8 | 315 | 3.625 | 4 | [] | no_license | arr = list()
def fun():
sum1 = 0
n = input('How many Number do you want to enter: ')
for i in range(0,int(n)):
no = input('Num: ')
sum1 = sum1 + no
arr.append(int(no))
return sum1
ret = fun()
print('Your Elements: ', arr)
print('Addition of all your elements are: ', ret) | true |
3dff77906da52fa1ca0065f034e29b23bdd1817f | Python | dereklarson/MontyHall | /montyhall.py | UTF-8 | 8,358 | 3.328125 | 3 | [
"MIT"
] | permissive | import numpy as np
import copy
import pprint
from collections import defaultdict
class Game:
def __init__(self, rng=None, n_doors=3, n_goats=2, max_doors=None, verbose=0):
"""Configure and initialize the game
rng: our random number generator, the numpy default is quite good (PCG64)
n_doors (int): sets the number of doors. set to None and use max_doors
n_goats (int): sets the number of goats, set to None for random in (0, n_doors)
max_doors (int): if n_doors is None, it is randomized between min_ and max_doors
verbose: set to 1 for some informational output, 2 for full output
"""
# Get settings
self.rng = rng or np.random.default_rng()
self.min_doors = 3
self.max_doors = max_doors or 3
self.verbose = verbose
# Initialize meta-state
self.rerolls = 0
# First game initialization
self.initialize_state(n_doors, n_goats)
def initialize_state(self, n_doors, n_goats):
"""Initializes an individual game, which might happen more than once (reroll)"""
self.choice = None # Current player selection
self.win = False # Whether the game is a win result for the player
self.args = (n_doors, n_goats) # Store for reinitialization
# Either directly set number of doors/goats or randomize them
door_spread = self.max_doors - self.min_doors + 1
self.n_doors = n_doors or self.rng.integers(door_spread) + self.min_doors
self.n_goats = n_goats or self.rng.integers(self.n_doors + 1)
# State of the prizes and doors: by default, all have prizes and are not visible
self.state = {
'prizes': np.ones(self.n_doors, dtype=bool),
'visible': np.zeros(self.n_doors, dtype=bool),
}
# Then, place goats randomly (N objects, choose k without replacement)
goatidxs = self.rng.choice(self.n_doors, self.n_goats, replace=False)
self.state['prizes'][goatidxs] = False
def pstate(self):
print(f"{self.n_goats} / {self.n_doors}")
pprint.pprint(self.state)
def choose(self, strategy='random'):
# If a prize is visible somehow, take it!
if any(prizeviz := np.multiply(self.state['visible'], self.state['prizes'])):
self.choice = list(prizeviz).index(True)
if self.verbose:
print("Taking a revealed prize")
if strategy == 'stay':
if self.choice is not None:
return
elif self.verbose:
print(f"Attempting stay with {self.choice}")
return
# Now, use passed strategy to choose option from the closed doors
options = [idx for idx, visible in enumerate(self.state['visible'])
if not visible]
if strategy == 'random':
self.choice = options[self.rng.integers(len(options))]
elif strategy == 'update':
if self.choice is not None:
try:
options.remove(self.choice)
except Exception:
if self.verbose:
print(f"Could not remove {self.choice} from {options}")
self.choice = options[self.rng.integers(len(options))]
def reveal(self, strategy='goat'):
"""Host reveals a door based on a strategy, default being a random unchosen goat
If the host can't reveal a door based on the strategy, we return a True value to
indicate the need to "reroll" the game (otherwise our stats are off)
"""
if strategy == 'goat':
options = [idx for idx, prize in enumerate(self.state['prizes'])
if not prize]
if self.choice in options:
options.remove(self.choice)
if not len(options):
if self.verbose:
print("No goats left to reveal, rerolling")
# Reroll so we get a valid series of game events
return True
elif strategy == 'random':
# Anything except the current player choice
options = [idx for idx in range(self.n_doors) if idx != self.choice]
else:
print(f"Game strategy not supported {strategy}")
# Reveal a random, allowable door
self.state['visible'][options[self.rng.integers(len(options))]] = True
def play(self, player="update", host="goat"):
"""A standard game is:
1) choose door randomly
2) A reveal or other update
3) optionally choose again
"""
self.choose(strategy='random')
# A true return for reveal means we reroll the game
if self.reveal(strategy=host):
self.rerolls += 1
if self.rerolls > 10:
print("Too many rerolls within game...bug alert")
self.initialize_state(*self.args)
return self.play(player, host)
# The player's second choice
self.choose(strategy=player)
self.win = self.state['prizes'][self.choice]
return self.win
class GameSeries:
    """Run a batch of Monty Hall games with fixed strategies and collect
    win/reroll statistics, broken down by the number of goats per game."""

    def __init__(self, config):
        self.rng = np.random.default_rng()
        # Deep-copy so a shared config dict is never mutated by this series.
        self.config = copy.deepcopy(config)
        self.config['rules']['max_doors'] = (self.config['rules']['max_doors']
                                             or self.config['rules']['n_doors'])
        # Data collection
        self.history = []              # every Game instance played
        self.stats = defaultdict(int)  # counters: wins, rerolls, per-goat tallies

    def header(self):
        """Print a banner describing the strategies and rules in play."""
        player = self.config['strategies']['player']
        host = self.config['strategies']['host']
        print(f"\n--- Simulating player strategy: {player} vs host strategy: {host} ---")
        goats = self.config['rules']['n_goats'] or "random"
        doors = (self.config['rules']['n_doors'] or
                 f"from 3 to {self.config['rules']['max_doors']}")
        print(f"--- Using {goats} goats and {doors} doors ---")

    def pstats(self):
        """Print the reroll count plus per-variant and aggregate win rates."""
        print(f"Rerolls: {self.stats['rerolls']}")
        fraction = self.stats['win'] / self.config['games']
        variants = 0
        for ct in range(self.config['rules']['max_doors'] + 1):
            basekey = f"{ct}_goats"
            wins = self.stats[f"{basekey}_wins"]
            total = self.stats[basekey]
            if total:
                variants += 1
                # BUG FIX: the second literal was missing its f-prefix (and a
                # separating space), so the percentage printed as a raw template.
                print(f"{basekey.replace('_', ' ')}: won {wins} / {total} "
                      f"for {100 * wins / total:.1f}%")
        if variants > 1:
            # BUG FIX: same missing f-prefix / missing space as above.
            print(f"Aggregate Outcome: won {self.stats['win']} / {self.config['games']} "
                  f"for {fraction:.3f}")

    def simulate(self, n=None):
        """Play n games (default: config['games']) and accumulate statistics."""
        n = n or self.config['games']
        for game_idx in range(n):
            if self.config.get('verbose', 0) > 1:
                print(f"---Game {game_idx + 1}")
            game = Game(rng=self.rng, **(self.config['rules']))
            game.play(**(self.config['strategies']))
            self.history.append(game)
            for stat in ['win', 'rerolls']:
                self.stats[stat] += getattr(game, stat)
            for ct in range(self.config['rules']['max_doors'] + 1):
                # Count type of game played
                self.stats[f"{ct}_goats"] += (ct == game.n_goats)
                self.stats[f"{ct}_goats_wins"] += (ct == game.n_goats) and game.win

    def test(self):
        """Smoke-test every (games, player, host) combination, counting failures."""
        exceptions = 0
        print("Testing -- ( games | player | host )")
        for games in [1, 10, 100]:
            for player in ['stay', 'random', 'update']:
                for host in ['goat', 'random']:
                    self.config['games'] = games
                    self.config['strategies']['player'] = player
                    self.config['strategies']['host'] = host
                    if self.config.get('verbose', 0):
                        # BUG FIX: second literal was missing its f-prefix.
                        print(f"{' ' * 13}{str(games).ljust(8)}"
                              f"{player.ljust(9)}{host.ljust(9)}")
                    if exceptions > 5:
                        break
                    try:
                        self.simulate()
                    except Exception as exc:
                        print(exc)
                        exceptions += 1
        print(f"Total exceptions {exceptions}")
| true |
a66f1b1ce2de93e288db05f43fb23cdd9623c38f | Python | facebookresearch/DomainBed | /domainbed/lib/misc.py | UTF-8 | 16,138 | 2.65625 | 3 | [
"MIT"
] | permissive | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Things that don't belong anywhere else
"""
import math
import hashlib
import sys
from collections import OrderedDict
from numbers import Number
import operator
import numpy as np
import torch
from collections import Counter
from itertools import cycle
def distance(h1, h2):
    """Distance between two networks' parameters (h1, h2 are classifiers).

    Sums the squared Frobenius norms of the per-parameter differences of the
    two state dicts, then returns the square root of the total.
    """
    total = 0.
    for name in h1.state_dict():
        diff = h1.state_dict()[name] - h2.state_dict()[name]
        total = total + torch.norm(diff) ** 2  # Frobenius norm for matrices
    return torch.sqrt(total)
def proj(delta, adv_h, h):
    r"""Euclidean projection of adv_h onto the ball B(h, delta).

    ``adv_h`` and ``h`` are two classifiers with aligned parameters. If
    distance(adv_h, h) <= delta, ``adv_h`` is returned unchanged; otherwise
    its parameters are moved *in place* onto the sphere of radius delta
    around ``h`` and the same (mutated) object is returned.

    Note: docstring made raw and merged -- the original contained an invalid
    ``\delta`` escape sequence and a stray second string literal.
    """
    dist = distance(adv_h, h)
    if dist <= delta:
        return adv_h
    else:
        ratio = delta / dist
        for param_h, param_adv_h in zip(h.parameters(), adv_h.parameters()):
            # Shrink the offset from h by `ratio`, mutating adv_h in place.
            param_adv_h.data = param_h + ratio * (param_adv_h - param_h)
        # print("distance: ", distance(adv_h, h))
        return adv_h
def l2_between_dicts(dict_1, dict_2):
    """Mean squared difference between two dicts of tensors.

    Values are flattened and concatenated in the sort order of dict_1's keys
    (both dicts are indexed with the same sorted key list).
    """
    assert len(dict_1) == len(dict_2)
    ordered_keys = sorted(dict_1.keys())
    flat_1 = torch.cat(tuple(dict_1[k].view(-1) for k in ordered_keys))
    flat_2 = torch.cat(tuple(dict_2[k].view(-1) for k in ordered_keys))
    return (flat_1 - flat_2).pow(2).mean()
class MovingAverage:
    """Exponential moving average over dicts of tensors.

    With ``oneminusema_correction`` enabled, the returned values are divided
    by (1 - ema) so that gradient magnitudes backpropagated through them do
    not depend on ema; the stored (detached) averages are not rescaled.
    """

    def __init__(self, ema, oneminusema_correction=True):
        self.ema = ema
        self.ema_data = {}
        self._updates = 0
        self._oneminusema_correction = oneminusema_correction

    def update(self, dict_data):
        """Fold one dict of tensors into the running averages; return the
        (optionally corrected) smoothed values, keyed like the input."""
        smoothed = {}
        for name, value in dict_data.items():
            flat = value.view(1, -1)
            # Before the first update the running average is all zeros.
            prev = self.ema_data[name] if self._updates else torch.zeros_like(flat)
            avg = self.ema * prev + (1 - self.ema) * flat
            if self._oneminusema_correction:
                # Correction by 1/(1 - ema) keeps gradient amplitude
                # independent of the chosen ema coefficient.
                smoothed[name] = avg / (1 - self.ema)
            else:
                smoothed[name] = avg
            self.ema_data[name] = avg.clone().detach()
        self._updates += 1
        return smoothed
def make_weights_for_balanced_classes(dataset):
    """Per-example sampling weights that balance class frequencies.

    Each example of class y gets weight 1 / (count(y) * n_classes), so every
    class contributes equal total mass. Returns a float tensor aligned with
    the dataset's iteration order.
    """
    labels = [int(y) for _, y in dataset]
    counts = Counter(labels)
    n_classes = len(counts)
    per_class = {label: 1 / (freq * n_classes) for label, freq in counts.items()}
    weights = torch.zeros(len(dataset))
    for idx, label in enumerate(labels):
        weights[idx] = per_class[label]
    return weights
def pdb():
    """Drop into the PDB debugger, restoring the real stdout first (in case
    it has been redirected, e.g. by Tee below)."""
    sys.stdout = sys.__stdout__
    import pdb
    print("Launching PDB, enter 'n' to step to parent function.")
    pdb.set_trace()
def seed_hash(*args):
    """
    Derive an integer hash from all args, for use as a random seed.

    Deterministic across processes (unlike built-in hash): the args tuple's
    repr is MD5-hashed and reduced modulo 2**31.
    """
    digest = hashlib.md5(str(args).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 31)
def print_separator():
    """Print an 80-character horizontal rule."""
    print("=" * 80)
def print_row(row, colwidth=10, latex=False):
    """Print one table row with fixed-width columns.

    Floats are rendered with 10 decimal places; every cell is left-justified
    and truncated to `colwidth`. In latex mode cells are joined with " & "
    and the row is terminated with a LaTeX line break.
    """
    if latex:
        sep, terminator = " & ", "\\\\"
    else:
        sep, terminator = "  ", ""

    def _cell(value):
        if np.issubdtype(type(value), np.floating):
            value = "{:.10f}".format(value)
        return str(value).ljust(colwidth)[:colwidth]

    # Note: the terminator is a second print() argument, so a single space
    # always precedes it (matches the historical output format).
    print(sep.join(_cell(v) for v in row), terminator)
class _SplitDataset(torch.utils.data.Dataset):
    """Used by split_dataset: a read-only view of `underlying_dataset`
    restricted to the given index list."""

    def __init__(self, underlying_dataset, keys):
        super().__init__()
        self.underlying_dataset = underlying_dataset
        self.keys = keys

    def __getitem__(self, key):
        # Position `key` in this view maps to index keys[key] underneath.
        return self.underlying_dataset[self.keys[key]]

    def __len__(self):
        return len(self.keys)


def split_dataset(dataset, n, seed=0):
    """
    Return a pair of datasets corresponding to a random split of the given
    dataset, with n datapoints in the first dataset and the rest in the last,
    using the given random seed (deterministic for a fixed seed).
    """
    assert n <= len(dataset)
    indices = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(indices)
    return _SplitDataset(dataset, indices[:n]), _SplitDataset(dataset, indices[n:])
def random_pairs_of_minibatches(minibatches):
    """Pair each minibatch (in random order) with its cyclic successor.

    Both members of a pair are truncated to the shorter batch length so the
    tensors align. Returns a list of ((xi, yi), (xj, yj)) tuples, one per
    input minibatch.
    """
    perm = torch.randperm(len(minibatches)).tolist()
    pairs = []
    for idx, pi in enumerate(perm):
        pj = perm[(idx + 1) % len(perm)]  # wrap around at the end
        xi, yi = minibatches[pi][0], minibatches[pi][1]
        xj, yj = minibatches[pj][0], minibatches[pj][1]
        min_n = min(len(xi), len(xj))
        pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n])))
    return pairs
def split_meta_train_test(minibatches, num_meta_test=1):
    """Randomly split the domain minibatches into meta-train and meta-test
    groups and pair each meta-train batch with a meta-test one.

    The meta-test batches are cycled so every meta-train batch gets a
    partner; each pair is truncated to the shorter batch length. Returns
    len(minibatches) - num_meta_test pairs of ((xi, yi), (xj, yj)).
    """
    n_domains = len(minibatches)
    perm = torch.randperm(n_domains).tolist()
    pairs = []
    # Disjoint halves of the permutation: first n-k for train, last k for test.
    meta_train = perm[:(n_domains-num_meta_test)]
    meta_test = perm[-num_meta_test:]
    for i,j in zip(meta_train, cycle(meta_test)):
        xi, yi = minibatches[i][0], minibatches[i][1]
        xj, yj = minibatches[j][0], minibatches[j][1]
        min_n = min(len(xi), len(xj))
        pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n])))
    return pairs
def accuracy(network, loader, weights, device):
    """(Weighted) classification accuracy of `network` over `loader`.

    Args:
        network: model exposing .predict(x) returning logits of shape (B, C),
            or (B, 1) for binary classification scored by logit sign.
        loader: iterable of (x, y) batches.
        weights: optional 1-D tensor of per-example weights aligned with the
            loader's iteration order; None weights every example equally.
        device: torch device inputs are moved to.
    Returns:
        Weighted fraction of correctly classified examples (float).
    """
    correct = 0
    total = 0
    weights_offset = 0

    network.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            p = network.predict(x)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                # Consume the slice of `weights` that matches this batch.
                batch_weights = weights[weights_offset : weights_offset + len(x)]
                weights_offset += len(x)
            batch_weights = batch_weights.to(device)
            if p.size(1) == 1:
                # Binary case: a positive logit means the positive class.
                correct += (p.gt(0).eq(y).float() * batch_weights.view(-1, 1)).sum().item()
            else:
                correct += (p.argmax(1).eq(y).float() * batch_weights).sum().item()
            total += batch_weights.sum().item()
    # Restore training mode before returning.
    network.train()

    return correct / total
class Tee:
    """Duplicate writes to both the current stdout and a log file, like the
    Unix `tee` utility. The stdout handle is captured at construction time."""

    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        for sink in (self.stdout, self.file):
            sink.write(message)
        self.flush()

    def flush(self):
        for sink in (self.stdout, self.file):
            sink.flush()
class ParamDict(OrderedDict):
    """Code adapted from https://github.com/Alok/rl_implementations/tree/master/reptile.
    A dictionary where the values are Tensors, meant to represent weights of
    a model. This subclass lets you perform arithmetic on weights directly.

    Element-wise operators accept either a scalar (applied to every value)
    or another dict with the same keys (applied key-by-key).
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: was `super().__init__(*args, *kwargs)`, which unpacked the
        # keyword-argument *names* as positional arguments and made keyword
        # construction (e.g. ParamDict(a=w)) raise.
        super().__init__(*args, **kwargs)

    def _prototype(self, other, op):
        """Apply binary `op` between our values and `other` (scalar or dict)."""
        if isinstance(other, Number):
            return ParamDict({k: op(v, other) for k, v in self.items()})
        elif isinstance(other, dict):
            return ParamDict({k: op(self[k], other[k]) for k in self})
        else:
            raise NotImplementedError

    def __add__(self, other):
        return self._prototype(other, operator.add)

    def __rmul__(self, other):
        return self._prototype(other, operator.mul)

    __mul__ = __rmul__

    def __neg__(self):
        return ParamDict({k: -v for k, v in self.items()})

    def __sub__(self, other):
        # a - b := a + (-b)
        return self.__add__(-other)

    def __rsub__(self, other):
        # BUG FIX: the reflected form must compute other - self; the original
        # aliased __sub__ to __rsub__ and returned self - other (wrong sign)
        # for `scalar - ParamDict`.
        return self.__neg__().__add__(other)

    def __truediv__(self, other):
        return self._prototype(other, operator.truediv)
############################################################
# A general PyTorch implementation of KDE. Builds on:
# https://github.com/EugenHotaj/pytorch-generative/blob/master/pytorch_generative/models/kde.py
############################################################
class Kernel(torch.nn.Module):
    """Base class which defines the interface for all kernels."""

    def __init__(self, bw=None):
        super().__init__()
        self.bw = 0.05 if bw is None else bw  # kernel bandwidth (width)

    def _diffs(self, test_Xs, train_Xs):
        """Computes difference between each x in test_Xs with all train_Xs."""
        # Broadcast to shape (n_test, n_train, *feature_dims).
        test_Xs = test_Xs.view(test_Xs.shape[0], 1, *test_Xs.shape[1:])
        train_Xs = train_Xs.view(1, train_Xs.shape[0], *train_Xs.shape[1:])
        return test_Xs - train_Xs

    def forward(self, test_Xs, train_Xs):
        """Computes p(x) for each x in test_Xs given train_Xs."""
        # Abstract: subclasses implement the density evaluation.

    def sample(self, train_Xs):
        """Generates samples from the kernel distribution."""
        # Abstract: subclasses implement sampling.
class GaussianKernel(Kernel):
    """Implementation of the Gaussian kernel."""

    def forward(self, test_Xs, train_Xs):
        """Average Gaussian density of each test point under kernels centred
        on the training points (bandwidth self.bw)."""
        diffs = self._diffs(test_Xs, train_Xs)
        # Feature dimensions, i.e. everything beyond the (test, train) axes.
        dims = tuple(range(len(diffs.shape))[2:])
        if dims == ():
            x_sq = diffs ** 2
        else:
            x_sq = torch.norm(diffs, p=2, dim=dims) ** 2
        var = self.bw ** 2
        exp = torch.exp(-x_sq / (2 * var))
        # NOTE(review): this is the 1-D normalisation constant; for d-dim
        # inputs the density would be unnormalised -- confirm intended usage.
        coef = 1. / torch.sqrt(2 * np.pi * var)
        return (coef * exp).mean(dim=1)

    def sample(self, train_Xs):
        # device = train_Xs.device
        # One sample per training point: centre plus Gaussian noise of width bw.
        noise = torch.randn(train_Xs.shape) * self.bw
        return train_Xs + noise

    def cdf(self, test_Xs, train_Xs):
        """Mixture CDF of the KDE evaluated at each test point (assumes 1-D data)."""
        mus = train_Xs # kernel centred on each observation
        sigmas = torch.ones(len(mus), device=test_Xs.device) * self.bw # bandwidth = stddev
        x_ = test_Xs.repeat(len(mus), 1).T # repeat to allow broadcasting below
        return torch.mean(torch.distributions.Normal(mus, sigmas).cdf(x_))
def estimate_bandwidth(x, method="silverman"):
    """Rule-of-thumb bandwidth for kernel density estimation over 1-D samples.

    Supports 'silverman' (exact, case-sensitive match, robust via IQR) and
    'gauss-optimal' (case-insensitive). Raises ValueError otherwise.
    """
    sorted_x, _ = torch.sort(x)
    n = len(sorted_x)
    sample_std = torch.std(sorted_x, unbiased=True)

    if method == 'silverman':
        # https://en.wikipedia.org/wiki/Kernel_density_estimation#A_rule-of-thumb_bandwidth_estimator
        iqr = torch.quantile(sorted_x, 0.75) - torch.quantile(sorted_x, 0.25)
        return 0.9 * torch.min(sample_std, iqr / 1.34) * n ** (-0.2)
    if method.lower() == 'gauss-optimal':
        return 1.06 * sample_std * (n ** -0.2)
    raise ValueError(f"Invalid method selected: {method}.")
class KernelDensityEstimator(torch.nn.Module):
    """The KernelDensityEstimator model."""
    def __init__(self, train_Xs, kernel='gaussian', bw_select='Gauss-optimal'):
        """Initializes a new KernelDensityEstimator.
        Args:
            train_Xs: The "training" data to use when estimating probabilities.
            kernel: The kernel to place on each of the train_Xs.
            bw_select: bandwidth-selection rule forwarded to
                estimate_bandwidth, or None to use the kernel's default.
        """
        super().__init__()
        self.train_Xs = train_Xs
        self._n_kernels = len(self.train_Xs)
        # Data-driven bandwidth unless selection was explicitly disabled.
        if bw_select is not None:
            self.bw = estimate_bandwidth(self.train_Xs, bw_select)
        else:
            self.bw = None
        if kernel.lower() == 'gaussian':
            self.kernel = GaussianKernel(self.bw)
        else:
            raise NotImplementedError(f"'{kernel}' kernel not implemented.")
    @property
    def device(self):
        return self.train_Xs.device
    # TODO(eugenhotaj): This method consumes O(train_Xs * x) memory. Implement an iterative version instead.
    def forward(self, x):
        # Density p(x) at each query point, averaged over all kernels.
        return self.kernel(x, self.train_Xs)
    def sample(self, n_samples):
        # Pick kernels uniformly with replacement, then sample from each.
        idxs = np.random.choice(range(self._n_kernels), size=n_samples)
        return self.kernel.sample(self.train_Xs[idxs])
    def cdf(self, x):
        return self.kernel.cdf(x, self.train_Xs)
############################################################
# PyTorch implementation of 1D distributions.
############################################################
EPS = 1e-16
class Distribution1D:
    """Minimal wrapper around a 1-D torch distribution with lazy instantiation."""
    def __init__(self, dist_function=None):
        """
        :param dist_function: function to instantiate the distribution (self.dist).
        :param parameters: list of parameters in the correct order for dist_function.
        """
        self.dist = None  # lazily-created distribution object (see sample)
        self.dist_function = dist_function
    @property
    def parameters(self):
        # Subclasses return the argument list for dist_function.
        raise NotImplementedError
    def create_dist(self):
        # Instantiate a fresh distribution from the current parameters.
        if self.dist_function is not None:
            return self.dist_function(*self.parameters)
        else:
            raise NotImplementedError("No distribution function was specified during intialization.")
    def estimate_parameters(self, x):
        # Fit the distribution's parameters to the sample x (subclass hook).
        raise NotImplementedError
    def log_prob(self, x):
        return self.create_dist().log_prob(x)
    def cdf(self, x):
        return self.create_dist().cdf(x)
    def icdf(self, q):
        return self.create_dist().icdf(q)
    def sample(self, n=1):
        # Cache the instantiated distribution on first use.
        if self.dist is None:
            self.dist = self.create_dist()
        n_ = torch.Size([]) if n == 1 else (n,)
        return self.dist.sample(n_)
    def sample_n(self, n=10):
        return self.sample(n)
def continuous_bisect_fun_left(f, v, lo, hi, n_steps=32):
    """Bisection search for k in [lo, hi] with f(k) ~= v, assuming f is
    monotone increasing. Stops early once the midpoint stops moving."""
    bounds = [lo, hi]
    mid = 0.5 * (bounds[0] + bounds[1])
    for _ in range(n_steps):
        # Tighten the upper bound when f overshoots, the lower bound otherwise.
        bounds[int(f(mid) > v)] = mid
        new_mid = 0.5 * (bounds[0] + bounds[1])
        if new_mid == mid:
            break
        mid = new_mid
    return mid
class Normal(Distribution1D):
    """Gaussian distribution with moment-matching parameter estimation."""
    def __init__(self, location=0, scale=1):
        self.location = location
        self.scale = scale
        super().__init__(torch.distributions.Normal)
    @property
    def parameters(self):
        return [self.location, self.scale]
    def estimate_parameters(self, x):
        # Sample mean and Bessel-corrected standard deviation (EPS for stability).
        mean = sum(x) / len(x)
        var = sum([(x_i - mean) ** 2 for x_i in x]) / (len(x) - 1)
        self.location = mean
        self.scale = torch.sqrt(var + EPS)
    def icdf(self, q):
        if q >= 0:
            return super().icdf(q)
        else:
            # To get q *very* close to 1 without numerical issues, we:
            # 1) Use q < 0 to represent log(y), where q = 1 - y.
            # 2) Use the inverse-normal-cdf approximation here:
            # https://math.stackexchange.com/questions/2964944/asymptotics-of-inverse-of-normal-cdf
            log_y = q
            return self.location + self.scale * math.sqrt(-2 * log_y)
class Nonparametric(Distribution1D):
    """Empirical (step-CDF) or KDE-based distribution over a 1-D sample."""
    def __init__(self, use_kde=True, bw_select='Gauss-optimal'):
        self.use_kde = use_kde
        self.bw_select = bw_select
        self.bw, self.data, self.kde = None, None, None
        super().__init__()
    @property
    def parameters(self):
        return []
    def estimate_parameters(self, x):
        # Keep the sorted sample; fit a KDE (and its bandwidth) if requested.
        self.data, _ = torch.sort(x)
        if self.use_kde:
            self.kde = KernelDensityEstimator(self.data, bw_select=self.bw_select)
            self.bw = torch.ones(1, device=self.data.device) * self.kde.bw
    def icdf(self, q):
        if not self.use_kde:
            # Empirical or step CDF. Differentiable as torch.quantile uses (linear) interpolation.
            return torch.quantile(self.data, float(q))
        if q >= 0:
            # Find quantile via binary search on the KDE CDF
            lo = torch.distributions.Normal(self.data[0], self.bw[0]).icdf(q)
            hi = torch.distributions.Normal(self.data[-1], self.bw[-1]).icdf(q)
            return continuous_bisect_fun_left(self.kde.cdf, q, lo, hi)
        else:
            # To get q *very* close to 1 without numerical issues, we:
            # 1) Use q < 0 to represent log(y), where q = 1 - y.
            # 2) Use the inverse-normal-cdf approximation here:
            # https://math.stackexchange.com/questions/2964944/asymptotics-of-inverse-of-normal-cdf
            log_y = q
            v = torch.mean(self.data + self.bw * math.sqrt(-2 * log_y))
            return v
| true |
8d9cfe2aa7c7fe73593322de90fd48d4d9aa86f0 | Python | Fiinall/UdemyPythonCourse | /Embaded Functions/booleanPrime.py | UTF-8 | 336 | 3.515625 | 4 | [] | no_license | from math import floor
def prime(a):
    """Return True iff `a` is a prime number.

    Raises:
        ValueError: if a == 0 (zero is neither prime nor composite).

    Fixes two defects in the original: prime(1) returned True (1 is not
    prime), and negative inputs crashed on a complex-valued square root.
    Even numbers are rejected up front, so only odd divisors are trialled.
    """
    if a == 0:
        raise ValueError("0 is neither prime, nor composite number")
    if a < 0 or a == 1:
        # Negatives and 1 are not prime.
        return False
    if a == 2 or a == 3:
        return True
    if a % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(a).
    i = 3
    while i * i <= a:
        if a % i == 0:
            return False
        i += 2
    return True
| true |
293eba3edaf5467da0758bf37b406a6483bf064a | Python | CogComp/nmn-drop | /utils/spacyutils.py | UTF-8 | 6,588 | 2.65625 | 3 | [] | no_license | import json
import spacy
from typing import List, Tuple
from spacy.tokens import Doc, Span, Token
from utils import util
class WhitespaceTokenizer(object):
    """Minimal spaCy tokenizer: splits text on single spaces only, so the
    token boundaries of pre-tokenized input are preserved exactly."""
    def __init__(self, vocab):
        self.vocab = vocab
    def __call__(self, text):
        words = text.split(" ")
        # All tokens 'own' a subsequent space character in this tokenizer
        spaces = [True] * len(words)
        return Doc(self.vocab, words=words, spaces=spaces)
def getWhiteTokenizerSpacyNLP(disable_list=None):
    """spaCy pipeline whose tokenizer splits on single spaces only.

    Args:
        disable_list: pipeline components to disable; defaults to
            ["textcat"]. (The mutable-list default was replaced with None to
            avoid the shared-mutable-default pitfall.)
    """
    if disable_list is None:
        disable_list = ["textcat"]
    nlp = getSpacyNLP(disable_list)
    nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
    return nlp
def getSpacyNLP(disable_list=None):
    """Load the large English spaCy pipeline with components disabled.

    Args:
        disable_list: component names to disable; defaults to ["textcat"].
            (The mutable-list default was replaced with None to avoid the
            shared-mutable-default pitfall.)
    """
    if disable_list is None:
        disable_list = ["textcat"]
    # nlp = spacy.load('en', disable=disable_list)
    nlp = spacy.load("en_core_web_lg", disable=disable_list)
    return nlp
def getSpacyDocs(sents: List[str], nlp):
    """Batch-process sentences into spaCy Docs via nlp.pipe (faster than per-sentence calls)."""
    return list(nlp.pipe(sents))
def getSpacyDoc(sent: str, nlp) -> Doc:
    """Process a single sentence string into a spaCy Doc."""
    return nlp(sent)
def getNER(spacydoc: Doc) -> List[Tuple[str, int, int, str]]:
    """Returns a list of (ner_text, ner_start, ner_end, ner_label). ner_end is exclusive. """
    assert spacydoc.is_tagged is True, "NER needs to run."
    ner_tags = []
    for ent in spacydoc.ents:
        # Span offsets are token indices (not character offsets).
        ner_tags.append((ent.text, ent.start, ent.end, ent.label_))
    return ner_tags
def getPropnSpans(spacydoc: Doc) -> List[Tuple[str, int, int, str]]:
    """Maximal runs of PROPN-tagged tokens as (text, start, end, "PROPN") spans; end exclusive."""
    pos_tags = getPOSTags(spacydoc)
    # util helper returns (start, end) token-index pairs per contiguous PROPN run.
    propn_span_srtend = util.getContiguousSpansOfElement(pos_tags, "PROPN")
    propn_spans = [
        (spacydoc[propnspan[0] : propnspan[1]].text, propnspan[0], propnspan[1], "PROPN")
        for propnspan in propn_span_srtend
    ]
    return propn_spans
def getNER_and_PROPN(spacydoc: Doc) -> List[Tuple[str, int, int, str]]:
    """Returns a list of (ner_text, ner_start, ner_end, ner_label). ner_end is exclusive.
    This also includes PROPN spans that are not part of a NER
    """
    ner_tags = getNER(spacydoc)
    ner_spans = [(x, y) for (_, x, y, _) in ner_tags]
    pos_tags = getPOSTags(spacydoc)
    propn_spans = util.getContiguousSpansOfElement(pos_tags, "PROPN")
    # Keep only proper-noun runs that do not overlap any NER span.
    propn_spans_tokeep = []
    for propnspan in propn_spans:
        add_propn = True
        for nerspan in ner_spans:
            if util.doSpansIntersect(propnspan, nerspan):
                add_propn = False
                break
        if add_propn:
            propn_spans_tokeep.append(propnspan)
    # Append the surviving PROPN runs, labelled "PROPN", after the NER spans.
    for propnspan in propn_spans_tokeep:
        ner_tags.append((spacydoc[propnspan[0] : propnspan[1]].text, propnspan[0], propnspan[1], "PROPN"))
    return ner_tags
def getPOSTags(spacydoc: Doc) -> List[str]:
    """Coarse POS tag (token.pos_) for every token in the doc, in order."""
    return [tok.pos_ for tok in spacydoc]
def getTokens(spacydoc: Doc) -> List[str]:
    """Surface text (token.text) of every token in the doc, in order."""
    return [tok.text for tok in spacydoc]
def getWhiteSpacedSent(spacydoc: Doc) -> str:
    """Return the doc's tokens joined by single spaces (round-trips with
    WhitespaceTokenizer)."""
    tokens = getTokens(spacydoc)
    return " ".join(tokens)
def getAll_SentIdAndTokenOffset(spacydoc: Doc) -> List[Tuple[int, int]]:
    """Get (sentence idx, withinSentOffset) for all tokens."""
    numTokens = len(spacydoc)
    tokenIdxs = []
    # End token index (exclusive) of each sentence, in document order.
    sentence_end_pos = [sent.end for sent in spacydoc.sents]
    sent_idx = 0
    withinsent_tokenidx = 0
    for i in range(0, numTokens):
        # Crossing a sentence boundary: advance and reset the in-sentence offset.
        if i == sentence_end_pos[sent_idx]:
            sent_idx += 1
            withinsent_tokenidx = 0
        tokenIdxs.append((sent_idx, withinsent_tokenidx))
        withinsent_tokenidx += 1
    return tokenIdxs
def getSpanHead(doc: Doc, span: Tuple[int, int]):
    """
    Returns token idx of the span root.
    :param doc: Spacy doc
    :param span_srt: Span start
    :param span_end: Span end (exclusive)
    :return: Token idx of the span head
    """
    assert doc.is_parsed, "Doc isn't dep parsed."
    doclength = len(doc)
    (span_srt, span_end) = span
    # Bounds checks: start inside the doc, end exclusive and positive.
    assert (span_srt >= 0) and (span_srt < doclength)
    assert (span_end > 0) and (span_end <= doclength)
    span: Span = doc[span_srt:span_end]
    # spaCy derives the span's syntactic root from the dependency parse.
    spanroot: Token = span.root
    return spanroot.i
def getNERInToken(doc: Doc, token_idx: int):
    """
    If the given token is a part of NE, return the NE span, otherwise the input token's span
    :param doc: Spacy doc
    :param token_idx: int idx of the token
    :return: (srt-inclusive, end-exclusive) of the NER (if matches) else (token_idx, token_idx + 1)
    """
    token: Token = doc[token_idx]
    ner_spans = [(ent.start, ent.end) for ent in doc.ents]
    if token.ent_iob_ == "O":
        # Input token is not a NER
        return (token_idx, token_idx + 1)
    else:
        # Token is an NER, find which span
        # NER spans (srt, end) are in increasing order
        for (srt, end) in ner_spans:
            if token_idx >= srt and token_idx < end:
                return (srt, end)
        # Unreachable if ent_iob_ is consistent with doc.ents.
        print("I SHOULDN'T BE HERE")
        return (token_idx, token_idx + 1)
if __name__ == "__main__":
    # Ad-hoc smoke test: parse one sentence and dump sentences, POS tags,
    # entities, and the combined NER+PROPN spans.
    nlp = getSpacyNLP()
    text = 'He died in the destruction of the Space Shuttle "Challenger", on which he was serving as Mission Specialist for mission STS-51-L.'
    # sent = "Amherst ( ) is a town in Hampshire County , Massachusetts , United States , in the Connecticut River valley ."
    doc: Doc = nlp(text)
    for sent in doc.sents:
        sent: Span = sent
        print(getWhiteSpacedSent(doc[sent.start : sent.end]))
        print(f"{sent.start} {sent.end}")
    for token in doc:
        print(f"{token.text}_{token.pos_}", end=" ")
    print(" ")
    for ent in doc.ents:
        ent: Span = ent
        print(f"{ent.text} {ent.start} {ent.end} {ent.label_} {ent.label}")
    for span in getNER_and_PROPN(doc):
        print(span)
    # Exploratory code retained for reference (reads a JSONL corpus and probes
    # span heads / NER lookups):
    # with open('/save/ngupta19/datasets/WDW/pruned_cloze/val_temp.jsonl', 'r') as inpf:
    #     for line in inpf:
    #         line = line.strip()
    #         if not line:
    #             continue
    #         jsonobj = json.loads(line)
    #         sentences: List[List[str]] = jsonobj['context']
    #         sentences: List[str] = [' '.join(sent) for sent in sentences]
    #
    #         sents = getSpacyDocs(sentences, nlp)
    #         for sent in sents:
    #             span_srt = 0
    #             span_end = min(5, len(sent))
    #             span = (span_srt, span_end)
    #             # print(sent[span[0]:span[1]])
    #             spanhead = getSpanHead(sent, span)
    #             nerspan = getNERInToken(sent, spanhead)
| true |
cfb8708973247985bb7f1bfe99a32ca6dea10096 | Python | leti-olabarri/euro-2020 | /front/pages/players.py | UTF-8 | 5,011 | 2.625 | 3 | [] | no_license | import streamlit as st
from api import find_players
def players():
st.title("Players")
st.markdown("Ancelotti, face it. Kylian Mbappé is not coming this year. Modric is the best, but he is 35 years old. Marcelo is not in shape. And Bale... please, don't make me talk about Bale")
st.markdown("Here is a tool to help you sign the best players in the last UEFA Euro 2020. We have enough extra-community ones, and Messi is already in PSG")
container = st.container()
position = st.selectbox('Position*', [
"Goalkeeper",
"Defender",
"Midfielder",
"Forward"
])
age = st.slider("Select a range of age:",
15, 50, (15, 50))
matches = st.slider("Matches played:",
0, 7, (0, 7))
passing_acc_perc = st.slider("Porcentage of passing accuracy:",
0, 100, (0, 100))
if position != "Goalkeeper":
goals = st.slider("Goals:",
0, 5, (0, 5))
speed_km_h = st.slider("Speed (km/h):",
5, 70, (5, 70))
if position == "Forward":
fouls_suff = st.slider("Fouls suffered:",
0, 30, (0, 30))
attempts_on_target = st.slider("Attempts on target:",
0, 7, (0, 7))
balls_recovered = (None, None)
distance_covered_km = (None, None)
fouls_comm = (None, None)
if position == "Forward" or position == "Midfielder":
attempts = st.slider("Attempts:",
0, 50, (0, 50))
assists = st.slider("Assists:",
0, 15, (0, 15))
clearances = (None, None)
saves = (None, None)
goals_conceded = (None, None)
clean_sheets = (None, None)
if position == "Defender" or position == "Midfielder":
balls_recovered = st.slider("Balls recovered:",
0, 60, (0, 60))
distance_covered_km = st.slider("Distance covered (in km):",
0, 100, (0, 100))
fouls_comm = st.slider("Fouls commited:",
0, 30, (0, 30))
fouls_suff = (None, None)
attempts_on_target = (None, None)
if position == "Defender":
clearances = st.slider("Clearances:",
0, 40, (0, 40))
saves = (None, None)
goals_conceded = (None, None)
clean_sheets = (None, None)
fouls_suff = (None, None)
attempts = (None, None)
attempts_on_target = (None, None)
assists = (None, None)
if position == "Goalkeeper":
saves = st.slider("Saves:",
0, 30, (0, 30))
goals_conceded = st.slider("Goals conceded:",
0, 15, (0, 15))
clean_sheets = st.slider("Clean sheets:",
0, 7, (0, 7))
goals = (None, None)
speed_km_h = (None, None)
fouls_suff = (None, None)
attempts = (None, None)
attempts_on_target = (None, None)
assists = (None, None)
balls_recovered = (None, None)
distance_covered_km = (None, None)
clearances = (None, None)
fouls_comm = (None, None)
stats = {
"age_min": age[0],
"age_max": age[1],
"position": position,
"matches_min": matches[0],
"matches_max": matches[1],
"passing_acc_perc_min": passing_acc_perc[0],
"passing_acc_perc_max": passing_acc_perc[1],
"goals_min": goals[0],
"goals_max": goals[1],
"fouls_comm_min": fouls_comm[0],
"fouls_comm_max": fouls_comm[1],
"fouls_suff_min": fouls_suff[0],
"fouls_suff_max": fouls_suff[1],
"attempts_min": attempts[0],
"attempts_max": attempts[1],
"attempts_on_target_min": attempts_on_target[0],
"attempts_on_target_max": attempts_on_target[1],
"assists_min": assists[0],
"assists_max": assists[1],
"speed_km_h_min": speed_km_h[0],
"speed_km_h_max": speed_km_h[1],
"balls_recovered_min": balls_recovered[0],
"balls_recovered_max": balls_recovered[1],
"distance_covered_km_min": distance_covered_km[0],
"distance_covered_km_max": distance_covered_km[1],
"clearances_min": clearances[0],
"clearances_max": clearances[1],
"saves_min": saves[0],
"saves_max": saves[1],
"goals_conceded_min": goals_conceded[0],
"goals_conceded_max": goals_conceded[1],
"clean_sheets_min": clean_sheets[0],
"clean_sheets_max": clean_sheets[1]
}
if st.button("Get your players!"):
table = find_players(stats)
if type(table) == "<class 'str'>":
container.markdown(
f"## :warning: :warning: :warning: {table} :warning: :warning: :warning:")
else:
container.write(table)
| true |
94b91be5908cf008e415214564b2327fc23c7e61 | Python | TheMagicalPlace/Procedural-Generation-and-Pathfinding | /AstarPathfinding.py | UTF-8 | 5,952 | 3.265625 | 3 | [] | no_license |
# from https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
import matplotlib.pyplot as plt
import numpy as np
import random,copy,time
from itertools import chain
from AnimatedPlotMods import liveplot
class Node():
    """A node class for A* Pathfinding.

    Identity is the grid cell: hashing and equality are based solely on
    `position`, so two nodes on the same cell compare equal regardless of
    their parents or scores.
    """

    def __init__(self, parent=None, position=None):
        self.parent = parent               # predecessor on the path (None for the root)
        self.position = tuple(position)    # (x, y) grid coordinates
        # A* bookkeeping: g = cost from start, h = heuristic, f = g + h.
        self.g = self.h = self.f = 0

    def __hash__(self):
        return hash(self.position)

    def __eq__(self, other):
        # Same-cell comparison via the position hash (mirrors __hash__).
        return hash(self) == hash(other)
class Pathfinder:
    """A* search over a 2-D grid `maze` from `start` to `end`.

    Only cardinal moves are allowed and the Manhattan distance is used as the
    heuristic. If `plotobj` is supplied, search progress is drawn live.
    """

    def __init__(self, maze, start, end, plotobj=None):
        self.plotobj = plotobj  # live plot handle, or None for headless runs
        self.maze = maze
        self.start = start
        self.end = end
        start_node = Node(None, start)
        start_node.g = start_node.h = start_node.f = 0
        end_node = Node(None, end)
        end_node.g = end_node.h = end_node.f = 0
        self.snode = start_node
        self.enode = end_node

    def __call__(self, *args, **kwargs):
        return self.pathfinder()

    def pathfinder(self):
        """Run A*; return the start-to-end path as a list of (x, y) tuples,
        or None when the frontier empties without reaching the goal."""
        open_list = []    # unexplored/possible next node(s) to traverse
        closed_list = []  # explored or non-viable nodes
        open_list.append(self.snode)  # initial node

        while len(open_list) > 0:
            # Pick the frontier node with the lowest f score.
            current_node = open_list[0]
            current_index = 0
            for index, item in enumerate(open_list):
                if item.f < current_node.f:
                    current_node = item
                    current_index = index

            # Live-plot bookkeeping.
            if self.plotobj is not None:
                if closed_list:
                    self.plotobj.show_path([current_node.position[0]], [current_node.position[1]])
                else:
                    # First iteration only: invert the grid's cell values.
                    # NOTE(review): presumably the generator encodes walls as 0
                    # while the plot expects the opposite -- confirm.
                    for x in range(0, len(self.maze)):
                        for y in range(0, len(self.maze[x])):
                            if self.maze[x][y]:
                                self.maze[x][y] = 0
                            else:
                                self.maze[x][y] = 1

            # Move the current node from the frontier to the explored set.
            open_list.pop(current_index)
            closed_list.append(current_node)

            # Goal test: reconstruct the path by walking parent links.
            if current_node == self.enode:
                print('FOUND END')
                path = []
                current = current_node
                while current is not None:
                    path.append(current.position)
                    current = current.parent
                if self.plotobj is not None:
                    # BUG FIX: the original dereferenced `current` here, which
                    # is always None after the walk above (AttributeError), and
                    # did so even when no plot object was supplied. Highlight
                    # the start cell (last position appended) instead.
                    self.plotobj.show_path([path[-1][0]], [path[-1][1]], color='yellow')
                return path[::-1]  # Return reversed path

            # only cardinal movements allowed here, i.e. no diagonals
            possible_moves = [(0, -1), (0, 1), (-1, 0), (1, 0)]
            children = []
            # checking possible child nodes
            for move in possible_moves:
                node_position = (current_node.position[0] + move[0], current_node.position[1] + move[1])
                # the position has to be in the grid, otherwise look at next possible move
                if node_position[0] > (len(self.maze) - 1) or node_position[0] < 0 or node_position[1] > (
                        len(self.maze[len(self.maze) - 1]) - 1) or node_position[1] < 0:
                    continue
                # can't move onto walls
                if self.maze[node_position[0]][node_position[1]] != 0:
                    continue
                children.append(Node(current_node, node_position))

            for child in children:
                # Skip children already present on either list.
                for seen_child in chain(closed_list, open_list):
                    if child == seen_child:
                        break
                else:
                    child.g = current_node.g + 1  # distance from origin in moves
                    # Heuristic weighed (factor 1) over path cost; helps avoid the
                    # search stalling when the parent looks like the best node.
                    child.h = self.value_heuristic(child, self.enode) * 1
                    child.f = child.g + child.h
                    # If the best next node is our own parent we are likely at a
                    # dead end near the exit; close the child so the search can
                    # back out instead of oscillating.
                    if current_node.parent is not None and child == current_node.parent:
                        closed_list.append(child)
                        continue
                    open_list.append(child)

    def value_heuristic(self, node1, node2):
        """Manhattan distance between two nodes' grid positions."""
        return abs(node1.position[0] - node2.position[0]) + abs(node1.position[1] - node2.position[1])
| true |
2e413e22d36118fdae3b2f538a1691e5e27a9250 | Python | YunyLee/BaekJoon | /그 외/2309_일곱난쟁이.py | UTF-8 | 469 | 3.21875 | 3 | [] | no_license | import sys
sys.stdin = open('input_2309.txt', 'r')
N = []
for i in range(9):
temp = int(input())
N.append(temp)
N = sorted(N) # 정렬하기
total_sum = sum(N)
sumV = 100
goal = total_sum - sumV # 여기서는 40
remove1 = 0
remove2 = 0
for i in range(len(N)):
for j in range(len(N)):
if N[i] + N[j] == goal:
remove1 = N[i]
remove2 = N[j]
break
N.remove(remove1)
N.remove(remove2)
for k in N:
print(k)
| true |
28cc62a69dbfa17610aa1e00b878b2e691b1fe4a | Python | funrollloops/halting | /tmoney1.py | UTF-8 | 1,405 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
import random
import sys
from player_common import State, PlayerResponse, run_player
rank_to_distance = {
0: 0,
2: 3,
3: 5,
4: 7,
5: 9,
6: 11,
7: 13,
8: 11,
9: 9,
10: 7,
11: 5,
12: 3,
}
INITIAL_GOAL = sum(rank_to_distance.values())
def goal_remaining(state):
    """Total remaining distance on the tracks that currently hold runners,
    for whichever player is to move.

    NOTE(review): this indexes the player arrays with the raw rank, whereas
    decide_stop uses `rank - 2`; one of the two is likely off -- confirm
    against player_common.State.
    """
    goal = 0
    for rank in state.uncommitted.keys():
        if state.first_player:
            goal += state.player1[rank]
        else:
            goal += state.player2[rank]
    return goal
def value_move(rank, state):
    """Score a candidate track: tracks that already hold a runner are
    strongly preferred (flat 100); otherwise use the track-length value."""
    if rank in state.uncommitted:
        return 100
    return rank_to_distance[rank]
def decide_stop(state):
    """Stop once all three temporary runners are placed and at least 4 total
    steps of progress have been made this turn."""
    if len(state.uncommitted) < 3:
        return False
    progress = 0
    for rank, value in state.uncommitted.items():
        # Progress on a track = saved position minus the runner's current value.
        # NOTE(review): uses rank - 2 indexing, unlike goal_remaining -- confirm.
        if state.first_player:
            progress += state.player1[rank - 2] - value
        else:
            progress += state.player2[rank - 2] - value
    # print('/%s' % progress, file=sys.stderr)
    return progress >= 4
def tmoney1_player(state: State) -> PlayerResponse:
    """Greedy Can't Stop player: pick the move pair with the highest combined
    track value, and stop once enough progress has accrued this turn.

    Cleanups: the original called state.valid_moves() twice (first result
    unused) and seeded the scan with a 4-tuple sentinel compared against
    3-tuple candidates.
    """
    moves = state.valid_moves()
    # (score, m1, m2) tuples compare lexicographically, so max() reproduces
    # the original "strictly greater candidate wins" scan, ties included.
    _, best_m1, best_m2 = max(
        ((value_move(m1, state) + value_move(m2, state), m1, m2) for m1, m2 in moves),
        default=(-100000, 0, 0),
    )
    return PlayerResponse(track1=best_m1, track2=best_m2, stop=decide_stop(state))
if __name__ == '__main__':
    # Hand control to the shared harness, which feeds game states to our player.
    run_player(tmoney1_player)
| true |
46853322ee080f6199f0ba1d703024b678e83565 | Python | sudhansom/python_sda | /python_fundamentals/11-oop/oop-exercise-01.py | UTF-8 | 918 | 4.03125 | 4 | [] | no_license | class Vehicles:
def __init__(self, name, price, types='ford', color='white'):
self.name = name
self.types = types
self.color = color
self.price = price
def describe(self):
return f"The name of the Vehicle is {self.name}, type is {self.types} of {self.color} and price {self.price}"
def __str__(self):
return f"Vehicles {self.name} and type {self.types}"
# Collect three vehicles from the user; type and colour are optional per entry.
list_of_vehicles = []
for _ in range(3):
    name = input("Enter name: ")
    price = int(input("Enter price"))
    yes_no = input("do you want to add type and color : y or n :")
    if yes_no == 'y':
        types = input("Enter type: ")
        color = input("Enter color: ")
        vehicle = Vehicles(name, price, types, color)
    else:
        vehicle = Vehicles(name, price)
    list_of_vehicles.append(vehicle)

# Describe every vehicle, then show the first one's short form.
for vehicle in list_of_vehicles:
    print(vehicle.describe())
print(list_of_vehicles[0])
| true |
7c6c3711c3b533615479d1973e6c6b5e3bf34b11 | Python | hmgoforth/824proj | /inpainting/dataset.py | UTF-8 | 3,021 | 2.515625 | 3 | [] | no_license | import torch
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
from skimage import io
import argparse
import matplotlib.pyplot as plt
import pickle
import time
import h5py
import utils
from pdb import set_trace as st
class DeepfashionInpaintingDataset(Dataset):
    """Inpainting dataset over DeepFashion texture maps stored in HDF5.

    filedict_path   -- pickle with 'filelist' (per-example metadata) and
                       'max_multiview' (default cap on views per example).
    pathtoind_path  -- pickle mapping an image path to its HDF5 row index.
    texture_maps_path -- HDF5 file holding a 'texture_maps' array of uint8
                       maps (assumed shape (N, 24, 3, H, W) — TODO confirm).
    """

    def __init__(self, filedict_path, pathtoind_path, texture_maps_path, max_multiview=None):
        with open(filedict_path, "rb") as fp:
            filedict = pickle.load(fp)
        with open(pathtoind_path, "rb") as fp:
            self.pathtoind = pickle.load(fp)

        # Keep the HDF5 file open: texture maps are read lazily per item.
        h5_file = h5py.File(texture_maps_path, 'r')
        self.texture_maps = h5_file['texture_maps']

        self.filelist = filedict['filelist']
        # Cap on sampled views per example; an example may provide fewer.
        self.max_multiview = filedict['max_multiview'] if max_multiview is None else max_multiview

    def __len__(self):
        return len(self.filelist)

    def __getitem__(self, idx):
        entry = self.filelist[idx]
        item_path = entry['path']  # kept for parity: validates the entry has a path

        # Anchor image's texture map, scaled from uint8 into [0, 1].
        im_texture = torch.from_numpy(self.texture_maps[idx, :, :, :, :]) / 255

        # Texture maps of the alternate views, zero-padded up to max_multiview.
        mv_texture = torch.zeros(self.max_multiview, 24, 3, im_texture.shape[2], im_texture.shape[3])
        for view_num, view_path in enumerate(entry['views']):
            view_idx = self.pathtoind[view_path]
            mv_texture[view_num, :, :, :, :] = torch.from_numpy(self.texture_maps[view_idx, :, :, :, :]) / 255

        return {'im_texture': im_texture,
                'mv_texture': mv_texture,
                'num_views': len(entry['views'])}
def parse_args():
    """Command-line options: input file-dict pickle and output pickle/HDF5 paths."""
    parser = argparse.ArgumentParser(description='preprocess IUV for deepfashion')
    parser.add_argument('--filedict',
                        help='location of deepfashion filedict',
                        default='deepfashion_filelist.txt',
                        type=str)
    parser.add_argument('--pathtoind-dict',
                        help='where to save pathtoind_dict',
                        default='deepfasion_pathtoind.txt',
                        type=str)
    parser.add_argument('--hdf5-file',
                        help='where to save hdf5 file',
                        default='deepfashion_textures.hdf5',
                        type=str)
    return parser.parse_args()
def main(args):
    """Walk the whole dataset, printing each index and plotting its texture map."""
    dataset = DeepfashionInpaintingDataset(args.filedict, args.pathtoind_dict, args.hdf5_file)
    for index in range(len(dataset)):
        print(index)
        sample = dataset[index]
        utils.plot_texture_map(sample['im_texture'])


if __name__ == '__main__':
    args = parse_args()
    main(args)
855202e21213aec3fa3062724b1111bb872445d4 | Python | liuiuge/LeetCodeSummary | /Findthedifference.py | UTF-8 | 274 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
class Solution:
    def findTheDifference(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: str
        """
        # XOR of all character codes: characters appearing in both strings
        # cancel pairwise, leaving only the code of the extra character in t.
        acc = 0
        for ch in s:
            acc ^= ord(ch)
        for ch in t:
            acc ^= ord(ch)
        return chr(acc)
| true |
b4736d4b4cc4924630500e838fd50134872c3610 | Python | cetoli/kuarup | /poo09/kuarup/tribos/xavante/rede2.py | UTF-8 | 1,740 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python
"""
:Author: Andre Abrantes
:Copyright: ©2009, `GPL <http://is.gd/3Udt>`
"""
from visual import *
from peixe_xavante import *
def init_window():
    """Configure the global VPython scene: title, window size, autoscaling,
    and a light-blue background (RGB components normalized to 0-1)."""
    scene.title = "Rede"
    scene.width = 300 + 9
    scene.height = 300 + 30
    scene.autocenter = 1
    scene.autoscale = 1
    norm = 255.0
    scene.background = (128 / norm, 128 / norm, 255 / norm)
# Counter of frames "captured" so far.
nfotos = 0


def grava_quadro():
    """Advance the frame counter. The actual ImageMagick capture
    (`import -window Rede ...`) is disabled."""
    global nfotos
    nfotos += 1


import time
time.sleep(1)
def f(x, y):
    """Paraboloid height z = (x/2)^2 + (y/2)^2 used as the net surface."""
    return (x / 2) ** 2 + (y / 2) ** 2
if __name__ == "__main__":
init_window()
inicio = -5.0
fim = 5.0
passo = 0.5
raio_fio = 0.02
cor_fio = (color.green[0] - 0.5, color.green[1] - 0.5, color.green[2] - 0.5)
for x in arange(inicio, fim, passo):
if x == inicio or x == fim-passo:
raio_fio *= 2
if x == inicio+passo:
raio_fio /= 2
c = curve(pos=(x,inicio,f(x,inicio)), color=cor_fio, radius=raio_fio)
for y in arange(inicio, fim, passo):
c.append(pos=(x,y,f(x,y)))
raio_fio /= 2
for y in arange(inicio, fim, passo):
if y == inicio or y == fim-passo:
raio_fio *= 2
if y == inicio+passo:
raio_fio /= 2
c = curve(pos=(inicio,y,f(inicio,y)), color=cor_fio, radius=raio_fio)
for x in arange(inicio, fim, passo):
c.append(pos=(x,y,f(x,y)))
peixe = PeixeXavante(tamanho=10)
peixe.rotate(axis=(1,0,0), angle=pi/2)
peixe.rotate(axis=(0,0,1), angle=pi/2)
peixe.move( (0,0,3.5) )
| true |
bfa447668e33d499712593a506d776c8abecb26a | Python | akshathamanju/Problems | /Trees/Binary tree/6. Two_Binary Trees are identical.py | UTF-8 | 1,570 | 4.34375 | 4 | [] | no_license | class Node:
def __init__(self, d):
self.data = d
self.left = None
self.right = None
def sortedArrayToBST(arr):
    """Build a height-balanced BST from a sorted array of integers.

    The middle element becomes the root; the values below it (all smaller)
    form the left subtree and the values above it form the right subtree,
    each built recursively. Returns the root Node, or None for an empty array.
    """
    if not arr:
        return None
    mid = len(arr) // 2
    root = Node(arr[mid])
    root.left = sortedArrayToBST(arr[:mid])
    root.right = sortedArrayToBST(arr[mid + 1:])
    return root
def are_identical(root1, root2):
    """True when the two trees have identical structure and node data."""
    if root1 is None and root2 is None:
        return True
    if root1 is None or root2 is None:
        return False
    return (root1.data == root2.data and
            are_identical(root1.left, root2.left) and
            are_identical(root1.right, root2.right))
# Demo: build two different trees and two identical trees, compare each pair.
arr1 = sorted([100, 50, 200, 25, 125, 350])
arr2 = sorted([1, 2, 10, 50, 180, 199])
root1 = sortedArrayToBST(arr1)
root2 = sortedArrayToBST(arr2)

arr3 = sorted([100, 50, 200, 25, 125, 350])
arr4 = sorted([100, 50, 200, 25, 125, 350])
root3 = sortedArrayToBST(arr3)
root4 = sortedArrayToBST(arr4)

if are_identical(root1, root2):
    print("The trees are identical")
else:
    print("The trees are not identical")

if are_identical(root3, root4):
    print("The trees are identical")
else:
    print("The trees are not identical")
2066ebd83cd0b580ca5230585418997c10f11f47 | Python | GoldenSimba97/Alignment_in_Chatbots | /Measure_tests/test_formality.py | UTF-8 | 8,560 | 2.5625 | 3 | [] | no_license | # Need to download nltk before pos tagger can be used
# nltk.download()
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('maxent_treebank_pos_tagger')
import nltk
import pandas as pd
from heapq import nlargest
from heapq import nsmallest
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.pairwise import cosine_similarity
from collections import Counter
import gensim
from gensim.models import word2vec
import csv
import time
# F-score formula
# F score = (noun frequency + adjective freq + preposition freq
# + article freq - pronoun freq - verb freq - adverb freq
# - interjection freq + 100)/2
# Needed pos tags and example words for calculation of F-score
# NN Noun table
# NNS
# NNP
# NNPS
# JJ adjective Green
# JJR
# JJS
# IN preposition in, of, LIKE
# DT article, determiner the, a, an
# PRP pronoun I, he, it
# PRP$
# WP
# WP$
# VB verb are, going, like
# VBD
# VBG
# VBN
# VBP
# VBZ
# RB adverb completely, however, usually
# RBR
# RBS
# WRB
# UH interjection uhhuhhuhh
start_time = time.time()
# Used to create formal and informal word lists
# formal_file = open("FormalityLists/formal_seeds_100.txt", "r")
# formal = []
# for line in formal_file:
# line = line.replace("\r\n", "")
# line = line.replace("\t", "")
# formal.append(line)
#
# informal_file = open("FormalityLists/informal_seeds_100.txt", "r")
# informal = []
# for line in informal_file:
# line = line.replace("\r\n", "")
# line = line.replace("\t", "")
# informal.append(line)
#
# text_file = open("FormalityLists/CTRWpairsfull.txt", "r")
# informal.append("\n")
# formal.append("\n")
# for line in text_file:
# lines = line.split("/")
# informal.append(lines[0] + "\n")
# formal.append(lines[1])
#
# with open("FormalityLists/formal_list.txt","w") as output:
# for line in formal:
# output.write(line)
#
# with open("FormalityLists/informal_list.txt","w") as output:
# for line in informal:
# output.write(line)
# Open formal and informal word lists.
# Each file holds one word per line; the trailing newline is stripped on load.
formal = []
formal_file = open("FormalityLists/formal_list", "r")
for line in formal_file:
    line = line.replace("\n", "")
    formal.append(line)
informal = []
informal_file = open("FormalityLists/informal_list", "r")
for line in informal_file:
    line = line.replace("\n", "")
    informal.append(line)
# Open test file of annotated formality scores.
# Columns used below: "Formality" (human scores, 1-7 scale) and "Sentence".
test = pd.read_csv("fii_annotations/mturk_experiment_2.csv", sep=',', encoding = "ISO-8859-1")
test_formality = test["Formality"]
test_sentences = test["Sentence"]
# Report time spent on setup (word lists + CSV load).
mid_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))
def determine_formality(sentence):
    """Score the formality of `sentence`; higher means more formal.

    The base score is the F-score over POS-tag frequencies (nouns,
    adjectives, prepositions and articles raise it; pronouns, verbs,
    adverbs and interjections lower it). The result is then scaled up
    10% for every word found in the module-level `formal` list and
    down 10% for every word found in the `informal` list.
    """
    sentence = sentence.lower()
    # POS tag the input sentence
    text = nltk.word_tokenize(sentence)
    s_len = float(len(text))
    tagged = nltk.pos_tag(text)

    # Penn Treebank tag groups used by the F-score.
    # FIX: the noun branch previously tested "NNS" twice, so plural proper
    # nouns ("NNPS") were never counted although the tag table lists them.
    noun_tags = {"NN", "NNS", "NNP", "NNPS"}
    adj_tags = {"JJ", "JJR", "JJS"}
    pron_tags = {"PRP", "PRP$", "WP", "WP$"}
    verb_tags = {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ"}
    adv_tags = {"RB", "RBR", "RBS", "WRB"}
    punct_tags = {".", ":", ",", "(", ")"}

    NN_count = JJ_count = IN_count = DT_count = PRP_count = VB_count = RB_count = UH_count = 0
    formality = 1
    # Count the categories needed for the F-score frequencies.
    # Punctuation tokens shorten the effective sentence length instead.
    for word, tag in tagged:
        if tag in noun_tags:
            NN_count += 1
        elif tag in adj_tags:
            JJ_count += 1
        elif tag == "IN":
            IN_count += 1
        elif tag == "DT":
            DT_count += 1
        elif tag in pron_tags:
            PRP_count += 1
        elif tag in verb_tags:
            VB_count += 1
        elif tag in adv_tags:
            RB_count += 1
        elif tag == "UH":
            UH_count += 1
        elif tag in punct_tags:
            s_len -= 1
    # Boost for each formal word, penalty for each informal word.
    for word, _tag in tagged:
        if word in formal:
            formality *= 1.1
        elif word in informal:
            formality *= 0.9
    return formality * f_score(NN_count / s_len * 100, JJ_count / s_len * 100,
                               IN_count / s_len * 100, DT_count / s_len * 100,
                               PRP_count / s_len * 100, VB_count / s_len * 100,
                               RB_count / s_len * 100, UH_count / s_len * 100)
# Calculation of the F score
def f_score(NN_freq, JJ_freq, IN_freq, DT_freq, PRP_freq, VB_freq, RB_freq, UH_freq):
    """F-score from POS category frequencies: noun-like categories add,
    deictic categories subtract, shifted by +100 and halved."""
    formal_part = NN_freq + JJ_freq + IN_freq + DT_freq
    informal_part = PRP_freq + VB_freq + RB_freq + UH_freq
    return (formal_part - informal_part + 100) / 2
# Calculate MSE and MAE to compare the true formality scores from mturk_experiment_2 and the calculated scores.
# Scale the calculated scores to the range from 1-7 used in mturk_experiment_2.
def test_formality_score():
    """Score every annotated sentence and compare against the human annotations.

    Each raw score (0-100 F-score range) is rescaled onto the 1-7 scale used
    by mturk_experiment_2 and rounded to one decimal. Side effect: stores the
    predictions in the module-level DataFrame as column "Formality_score".
    Returns (MSE, MAE) versus the human scores.
    """
    formality_score = []
    for test_sentence in test_sentences:
        score = determine_formality(test_sentence)
        # Linear map from [0, 100] onto [1, 7].
        new_score = ((score * 6) / 100) + 1
        formality_score.append(round(new_score,1))
    test["Formality_score"] = formality_score
    return mean_squared_error(test_formality, formality_score), mean_absolute_error(test_formality, formality_score)
# Report (MSE, MAE) of the predicted formality scores vs. the human annotations.
print(test_formality_score())
# Wall-clock time spent scoring the whole annotated corpus.
print("--- %s seconds ---" % (time.time() - mid_time))
# Ad-hoc smoke test on a paragraph mixing formal and informal register.
print(determine_formality("Hi! My name is Kim and I am 21 years old. I work as a receptionist at a physical therapy firm. I hate doing dishes, because they are so very dirty. I love playing volleyball, reading, watching tv series and shopping. you cunt, why won't you tell me something. why would I do that."))
# Test sentences with comment formality score from mechanical turk if used from mturk_experiment_2.
# print(determine_formality("Just wipe the Mac OS X partition when u install the dapper.")) # 1.2
# print(determine_formality("Water sports and golf are abundant- and we have some of the greatest cycling in the world, we will be hosting the Ironman competition while you are here.")) # 3.2
# print(determine_formality("At the Shuttle Landing Facility at NASA's Kennedy Space Center in Florida, hardware that will be used in the launch of the Ares I-X rocket is offloaded from a C-5 aircraft.")) # 5.8
# #
# print(determine_formality("A few companies have decided to buck the trend by not offering any employment contracts.")) # 1.2
# print(determine_formality("A few of President Obama's top advisers, as well as one or two rare guests, sit down on the network sofas this Sunday.")) # 3.6
# print(determine_formality("Although the Chinese government has not taken action against Yuan or the publisher, a nongovernmental organization, the Chinese Assn. for the Promotion of Olympic Culture said last week it would file a civil lawsuit against Yuan's publisher, Beijing Fonghong Media Co., to prevent publication of any copies beyond the 200,000 in print in China.")) # 6.2
# print(determine_formality("And part of the subtext of the Afghanistan debate is that as a matter of bureaucratic warfare, it makes enormous sense for the currently ascendant COIN faction to try to press its advantages - to exaggerate the extent of what was achieved in Iraq in 2007, and to overstate the strategic significance of achieving some kind of comprehensive success in Afghanistan.")) # 6.4
# print(determine_formality("China will impose five-year anti-dumping tariffs, ranging from 5 percent to 35.4 percent, on imports of adipic acid from the United States, the European Union and the Republic of Korea, the Ministry of Commerce (MOC) said on Sunday.")) # 6.6
# print(determine_formality("CIT spokesman Curt Ritter declined to comment yesterday.")) # 6.8
# print(determine_formality("Thanks...Michael"))
| true |
722429bb18bd66ff9fa6a9d8931875500a5a19ac | Python | stepansergeevitch/legit_elections | /client.py | UTF-8 | 3,310 | 3.046875 | 3 | [] | no_license | import socket
from cryptosystem.encryption import Encryptor
class Client:
    """TCP client for the election server.

    Protocol (one request per connection except the KEY->DATA pair):
      NAMES -> candidate names;  KEY -> encryption keys, after which the
      SAME connection must be reused to send the DATA matrix.
    `pending_data_send` tracks that half-open KEY/DATA exchange.
    """

    # Wire-format request/response markers (newline-terminated bytes).
    KEY_REQUEST = b"KEY\n"
    DATA_REQUEST = b"DATA\n"
    NAMES_REQUEST = b"NAMES\n"
    SUCCESS = b"SUCCESS\n"
    ERROR = b"ERROR\n"

    def __init__(self, server_ip="127.0.0.1", server_port=9999):
        self.server_ip = server_ip
        self.server_port = server_port
        self.client = None
        # True between request_keys() and send_matrix() on one connection.
        self.pending_data_send = False

    def connect(self):
        """Open a fresh TCP connection to the server."""
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect((self.server_ip, self.server_port))

    def close(self):
        """Close and forget the current connection."""
        self.client.close()
        self.client = None

    def request_names(self):
        """Fetch the candidate names; returns them as a list of strings."""
        self.connect()
        assert self.pending_data_send == False
        self.client.send(Client.NAMES_REQUEST)
        response = self.client.recv(4096)
        assert response.startswith(Client.NAMES_REQUEST)
        # Drain the server's closing message before disconnecting.
        self.client.recv(4096)
        self.close()
        return response[len(Client.NAMES_REQUEST):].decode("utf-8").split("\n")

    def request_keys(self):
        """Fetch the encryption keys as a list of ints.

        Leaves the connection OPEN: send_matrix() must be called next.
        """
        self.connect()
        assert self.pending_data_send == False
        self.client.send(Client.KEY_REQUEST)
        response = self.client.recv(4096)
        assert response.startswith(Client.KEY_REQUEST)
        self.pending_data_send = True
        return [int(k) for k in response[len(Client.KEY_REQUEST):].decode("utf-8").split("\n")]

    def send_matrix(self, matrix):
        """Send the encrypted vote matrix over the connection opened by
        request_keys(); returns True when the server replies SUCCESS.

        Rows are serialized as comma-separated values joined by newlines.
        """
        # Should have pending connection here
        assert self.pending_data_send == True
        data = '\n'.join(
            ','.join(map(lambda x: str(x), row))
            for row in matrix
        ).encode("utf-8")
        self.client.send(Client.DATA_REQUEST + data)
        response = self.client.recv(4096)
        self.pending_data_send = False
        # Drain the server's closing message before disconnecting.
        self.client.recv(4096)
        self.close()
        return response.startswith(self.SUCCESS)
def prompt_voting(names):
    """Prompt the user to grade every candidate in `names` from 1 (best)
    to len(names) (worst), each grade used exactly once.

    Re-prompts on a duplicate or out-of-range grade. Returns the list of
    grades in the same order as `names`.
    """
    print(f"For each candidate name please type you grade from 1 (best) to {len(names)} (worst)")
    print("Each vote should be unique")
    votes = []
    valid = lambda v: v > 0 and v <= len(names)
    for name in names:
        vote = int(input(f"{name}: "))
        while vote in votes or not valid(vote):
            if vote in votes:
                print(f"You've already given this grade to {names[votes.index(vote)]}, please try again")
            else:
                # FIX: this printed len(vote) — len() of an int — so any
                # out-of-range grade crashed with a TypeError.
                print(f"Vote must be in between 1 and {len(names)} inclusively")
            vote = int(input(f"{name}: "))
        votes.append(vote)
    return votes
def votes_to_matrix(votes):
    """One-hot square matrix: row r has a 1 in column votes[r] - 1 (1-indexed)."""
    size = len(votes)
    return [[int(col == vote - 1) for col in range(size)] for vote in votes]
def encrypt_matrix(matrix, encryptor):
    """Encrypt every entry of `matrix` element-wise via encryptor.encrypt."""
    return [list(map(encryptor.encrypt, row)) for row in matrix]
def run_votes():
    """Full voting flow: fetch candidates, collect grades, encrypt, submit."""
    client = Client()
    candidate_names = client.request_names()
    ranks = prompt_voting(candidate_names)
    # request_keys leaves the connection open for the matrix upload.
    keys = client.request_keys()
    plain_matrix = votes_to_matrix(ranks)
    cipher_matrix = encrypt_matrix(plain_matrix, Encryptor(keys))
    if client.send_matrix(cipher_matrix):
        print("Your vote was sent successfully")
    else:
        print("There was error sending your vote")


if __name__ == "__main__":
    run_votes()
| true |
c549bb66a30d2f859e594d39715a3103d959d15f | Python | ronknighton/DoctorApiOrm | /validation_helpers.py | UTF-8 | 3,622 | 2.578125 | 3 | [] | no_license | import re
from validate_email import validate_email
import uuid
import hashlib
def is_npi_good(code):
    """Validate an NPI: non-None string of exactly 10 decimal digits."""
    return code is not None and len(code) == 10 and code.isdigit()
def is_postal_code_good(code):
    """Validate a US ZIP code: non-None string of exactly 5 decimal digits."""
    return code is not None and len(code) == 5 and code.isdigit()
def is_radius_good(radius):
    """Validate a search radius: a digit string whose value is at most 50."""
    return radius is not None and radius.isdigit() and int(radius) <= 50
def is_taxonomy_good(tax):
    """Validate a 10-character taxonomy code.

    The first three characters and characters 7-9 must be decimal digits.
    NOTE(review): the second slice (tax[-4:-1]) deliberately excludes the
    final character — presumably the trailing letter of NUCC-style codes
    like '207Q00000X'; confirm against the upstream code format.

    FIX: removed a dead `if tax != ''` branch (the length-10 guard already
    rules out the empty string) whose fallthrough returned an implicit None.
    """
    if tax is None or len(tax) != 10:
        return False
    return tax[:3].isdigit() and tax[-4:-1].isdigit()
def is_string_good(word, length=35):
    """Validate a single token: non-empty word characters only (\\w+),
    at most `length` characters long."""
    if word is None or len(word) > length:
        return False
    return re.match(r'^\w+$', word) is not None
def is_phrase_good(phrase, length=35):
    """Validate a space-separated phrase: total length at most `length`
    and every token a valid word (each token checked against the default
    35-character per-word limit of is_string_good)."""
    if phrase is None or len(phrase) > length:
        return False
    return all(is_string_good(token) for token in phrase.split(' '))
def is_comment_good(comment):
    """Validate a free-text comment.

    None is rejected; the empty string is accepted as-is. Otherwise the
    comment must be at most 455 characters and every space-separated
    token — after stripping commas, periods and exclamation marks —
    must consist of word characters only.
    """
    if comment is None:
        return False
    if comment == '':
        return True
    if len(comment) > 455:
        return False
    strip_punct = str.maketrans('', '', ',.!')
    for token in comment.split(' '):
        if not re.match(r'^\w+$', token.translate(strip_punct)):
            return False
    return True
def filter_message(message, length=50):
    """Truncate `message` to `length` characters and collapse every run of
    non-alphanumeric characters into a single space, line by line.
    Each (possibly cleaned) line is emitted with a trailing newline."""
    truncated = message[:length]
    cleaned = (re.sub(r"[^a-zA-Z0-9]+", ' ', line) for line in truncated.split('\n'))
    return ''.join(line + '\n' for line in cleaned)
def check_email(email):
    """Reject None outright; otherwise delegate to validate_email."""
    return email is not None and validate_email(email)
def validate_password(password, length=15):
    """Validate a password: at most `length` characters and no spaces."""
    if len(password) > length:
        return False
    return ' ' not in password
def hash_password(password):
    """Return 'sha256hex:salt' where the digest covers salt + password.

    The salt is a fresh uuid4 hex string, so two hashes of the same
    password differ; verify with check_password.
    """
    salt = uuid.uuid4().hex
    digest = hashlib.sha256((salt + password).encode()).hexdigest()
    return digest + ':' + salt
def check_password(hashed_password, user_password):
    """Verify `user_password` against a 'digest:salt' value from hash_password."""
    digest, salt = hashed_password.split(':')
    return hashlib.sha256(salt.encode() + user_password.encode()).hexdigest() == digest
def is_uuid_good(code):
    """Loose UUID check: a non-empty string with exactly five '-'-separated parts."""
    if code is None or code == '':
        return False
    return len(code.split('-')) == 5
def is_user_allowed_post_comment(user, now, time_span):
    """True when `user` may still post/edit/delete comments at time `now`.

    The user must be verified and logged in, and the time since
    user['LoginTime'] must not exceed `time_span` hours.
    """
    if not user['Verified'] or not user['LoggedIn']:
        return False
    elapsed_time = now - user['LoginTime']
    # FIX: timedelta.seconds holds only the sub-day remainder (wraps every
    # 24 hours), so a login several days old previously looked fresh.
    # total_seconds() measures the full elapsed span.
    hours = elapsed_time.total_seconds() / 3600
    return hours <= time_span
| true |
3ae6ca6a5ebdd9285d2c66787c74bcfe1c74e35f | Python | hitochan777/kata | /atcoder/abc200/D.py | UTF-8 | 510 | 2.75 | 3 | [] | no_license | from collections import defaultdict
N = int(input())
A = [int(x) for x in input().split()]

# Only the first min(N, 8) elements matter: 2^8 = 256 non-empty subsets map
# onto 200 residues mod 200, so a collision is guaranteed by pigeonhole.
n = min(N, 8)

# residue mod 200 -> previously seen index subsets (1-based indices).
seen = defaultdict(list)
for mask in range(1 << n):
    indices = [j + 1 for j in range(n) if (mask >> j) & 1]
    residue = sum(A[j - 1] for j in indices) % 200
    if seen[residue]:
        # Two distinct non-empty subsets with equal residue: report both.
        print("Yes")
        first = seen[residue][0]
        print(len(first), *first)
        print(len(indices), *indices)
        exit()
    if indices:
        seen[residue].append(indices)
print("No")
| true |
0dde86eddf7483de75595faf107151a0088e68e7 | Python | ThaisGuerini/Python | /Exercícios_aula_18.py | UTF-8 | 3,107 | 3.84375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 12:17:05 2018
@author: thais
"""
class MeuTempo0:
    """A clock time normalized to a 24-hour display, built from h/m/s.

    The raw total in seconds is kept unnormalized (for arithmetic); only
    the displayed hour component is reduced modulo 24.
    """

    def __init__(self, hrs=0, mins=0, segs=0):
        """Create a MeuTempo0 for hrs:mins:segs; mins and segs outside 0-59
        are folded into the larger units."""
        self.totalsegs = hrs * 3600 + mins * 60 + segs
        self.horas, resto = divmod(self.totalsegs, 3600)
        self.minutos, self.segundos = divmod(resto, 60)
        if self.horas >= 24:
            self.horas %= 24

    def to_seconds(self):
        """Total number of seconds represented by this instance."""
        return self.totalsegs

    def __sub__(self, other):
        """Time difference as a new MeuTempo0 (supports the - operator)."""
        return MeuTempo0(0, 0, self.to_seconds() - other.to_seconds())

    def __str__(self):
        """Human-readable HH:MM:SS with zero-padded fields."""
        return '%.2d:%.2d:%.2d' % (self.horas, self.minutos, self.segundos)
# Demo: subtracting 50 seconds from 01:02:03 prints 01:01:13.
t1 = MeuTempo0(1, 2, 3)
t2 = MeuTempo0(0, 0, 50)
print(t1 - t2)
#Exercício 1
class MeuTempo:
    """A clock time (display normalized to 24h) with chronological predicates.

    Comparisons use the raw unnormalized second total, so e.g. 25:00:00
    is still strictly later than 01:00:00.
    """

    def __init__(self, hrs=0, mins=0, segs=0):
        """Create a MeuTempo for hrs:mins:segs; mins and segs outside 0-59
        are folded into the larger units."""
        self.totalsegs = hrs * 3600 + mins * 60 + segs
        self.horas, resto = divmod(self.totalsegs, 3600)
        self.minutos, self.segundos = divmod(resto, 60)
        if self.horas >= 24:
            self.horas %= 24

    def depois(self, other):
        """True when self is strictly later than other."""
        return self.totalsegs > other.totalsegs

    def antes(self, other):
        """True when self is strictly earlier than other."""
        return self.totalsegs < other.totalsegs

    def igual(self, other):
        """True when both represent exactly the same instant."""
        return self.totalsegs == other.totalsegs

    def entre(self, t1, t2):
        """True when self lies in the half-open interval [t1, t2)."""
        return (self.depois(t1) or self.igual(t1)) and self.antes(t2)
# Exercise 1 demo: 10:30:40 lies between 10:20:11 and 10:55:15 -> prints True.
ref = MeuTempo(10, 30, 40)
dentro = ref.entre(MeuTempo(10, 20, 11), MeuTempo(10, 55, 15))
print(dentro)
7d65924010aaff4843383279b80f4d93f2bf26fd | Python | lht960/segmentation | /submit.py | UTF-8 | 3,863 | 2.59375 | 3 | [] | no_license | import csv
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
# % matplotlib inline
from scipy import ndimage
from skimage import morphology
from skimage.measure import regionprops, label
from inputs import _banish_darkness
def localization(x, y):
    """Simple post-processing and get IVDs positons.

    x -- the intensity image; y -- the boolean prediction mask.

    Return:
        positons: calculated by `ndimage.measurements.center_of_mass`
        y: after fill holes and remove small objects.
    """
    labels, nums = label(y, return_num=True)
    areas = np.array([prop.filled_area for prop in regionprops(labels)])
    assert nums >= 7, 'Fail in this test, should detect at least seven regions.'

    # Segment a joint region which should be separate (if any).
    # Repeated 3x3x3 binary opening erodes thin bridges until no component
    # exceeds 10000 voxels (presumably two merged discs — TODO confirm threshold).
    while np.max(areas) > 10000:
        y = ndimage.binary_opening(y, structure=np.ones((3, 3, 3)))
        areas = np.array([prop.filled_area for prop in regionprops(label(y))])

    # Remove small objects: keep only the 7 largest components.
    # NOTE(review): indexing the 8th-largest area raises IndexError when
    # exactly 7 regions remain after opening — confirm that cannot happen here.
    threshold = sorted(areas, reverse=True)[7]
    y = morphology.remove_small_objects(y, threshold + 1)

    # Fill holes.
    y = ndimage.binary_closing(y, structure=np.ones((3, 3, 3)))
    y = morphology.remove_small_holes(y, min_size=512, connectivity=3)

    # Intensity-weighted centroid of each of the 7 labeled regions.
    positions = ndimage.measurements.center_of_mass(x, label(y), range(1, 8))
    return np.array(positions), y
def save_as_img(x, y, positions, savename):
    """Save one overlay image per slice: gray input under a jet-colored mask.

    The IVD center markers ('c+') are drawn only on the middle slice
    (index depth // 2). Output files are `<savename><i>.png`, 1-based.
    """
    n_slices = x.shape[0]
    for slice_idx in range(n_slices):
        fig, ax = plt.subplots()
        ax.imshow(x[slice_idx], 'gray')
        ax.imshow(y[slice_idx], 'jet', alpha=0.5)
        if slice_idx == n_slices // 2:
            ax.plot(positions[:, 2], positions[:, 1], 'c+', ms=7)
        plt.xticks([])
        plt.yticks([])
        plt.savefig(savename + str(slice_idx + 1) + '.png', bbox_inches='tight', dpi=x.shape[1])
        plt.close(fig)
def save_as_nii(y, savename):
    """Store the prediction as a uint8 NIfTI volume with an identity affine."""
    volume = nib.Nifti1Image(y.astype(np.uint8), np.eye(4))
    nib.save(volume, savename + '.nii')
def write_csv(positions, savename):
    """The 7 IVD centers in mm unit are stored from T11-T12 (the first one)
    to L5-S1 (the last one) in CSV format.

    `positions` holds (z, y, x) voxel coordinates; rows are sorted by the
    third coordinate, descending, then scaled by the resampled voxel size
    (2 mm x 1.25 mm x 1.25 mm) and written to `<savename>.csv`.
    """
    # Sort localizations.
    positions = np.array(sorted(positions, key=lambda p: p[2], reverse=True))
    # Convert to mm unit.
    # The resolution of all images were resampled to 2 mm x 1.25 mm x 1.25 mm.
    positions *= np.array([2, 1.25, 1.25])
    # FIX: the file handle was opened/closed manually and leaked if
    # writerows raised; `with` guarantees it is closed.
    with open(savename + '.csv', 'w', newline='') as csv_file:
        csv.writer(csv_file).writerows(positions)
def submit():
    """Assume:
        predicted file is stored at `./pred/`
        Inputs file is stored at `./Test/`, say, 'Test_Subject01.nii'

    For each of the 10 test subjects: load the input volume and the clipped
    prediction, restore the prediction to full size, localize the IVDs,
    and save CSV / NIfTI / per-slice visualizations.
    """
    test_file_base = './data/Test/Test_Subject'
    pred_file_base = './pred/vnet_'
    for idx in range(10):
        # Test files are 1-based and zero-padded; predictions are 0-based.
        test_filename = test_file_base + str(idx + 1).zfill(2) + '.nii'
        pred_filename = pred_file_base + str(idx) + '.npy'
        x = nib.load(test_filename).get_data()
        # Predictions are saved with a leading batch axis; take element 0.
        y_clipped = np.load(pred_filename)[0]

        # Restore clipped results back to inputs size.
        # _banish_darkness reports the [top, bottom) band that was kept;
        # everything outside stays False.
        y = np.zeros_like(x, dtype=np.bool)
        *_, top, bottom = _banish_darkness(x, y)
        y[:, top: bottom] = y_clipped

        # Localize predicted results and includs post-processing this step.
        positions, y = localization(x, y)

        # Save results.
        savename = str(idx + 1).zfill(2)
        write_csv(positions, './final_results/' + savename)
        save_as_nii(y, './final_results/' + savename)
        save_as_img(x, y, positions, './visualize/' + savename + '_')
        print('Test Subject {} has done.'.format(idx))


if __name__ == '__main__':
    submit()
88c8da318dd3cb048d23dfa7ad6c9d9e18ff22bf | Python | marcosdotps/dagda | /dagda/cli/command/monitor_cli_parser.py | UTF-8 | 2,269 | 2.796875 | 3 | [] | no_license | import argparse
import sys
from log.dagda_logger import DagdaLogger
class MonitorCLIParser:
    """Parses and validates the `dagda.py monitor` sub-command arguments."""

    # -- Public methods

    def __init__(self):
        super(MonitorCLIParser, self).__init__()
        self.parser = DagdaMonitorParser(prog='dagda.py monitor', usage=monitor_parser_text)
        self.parser.add_argument('container_id', metavar='CONTAINER_ID', type=str)
        self.parser.add_argument('--start', action='store_true')
        self.parser.add_argument('--stop', action='store_true')
        self.args, self.unknown = self.parser.parse_known_args(sys.argv[2:])
        # Exit immediately when the flag combination is invalid.
        exit_code = MonitorCLIParser.verify_args(self.args)
        if exit_code != 0:
            exit(exit_code)

    # -- Getters

    def get_container_id(self):
        """Docker container id given on the command line."""
        return self.args.container_id

    def is_start(self):
        """Whether --start was requested."""
        return self.args.start

    def is_stop(self):
        """Whether --stop was requested."""
        return self.args.stop

    # -- Static methods

    @staticmethod
    def verify_args(args):
        """Return 0 when exactly one of --start/--stop is set; 1 when neither,
        2 when both (logging the error in either failure case)."""
        if not (args.start or args.stop):
            DagdaLogger.get_logger().error('Missing arguments.')
            return 1
        if args.start and args.stop:
            DagdaLogger.get_logger().error('Arguments --start & --stop: Both arguments can not be together.')
            return 2
        return 0
# Custom argparse subclass with quiet error handling and a fixed help text.
class DagdaMonitorParser(argparse.ArgumentParser):
    """ArgumentParser that prints usage (not a traceback) and exits on error."""

    def error(self, message):
        # Override: show the usage line and exit with status 2.
        self.print_usage()
        exit(2)

    def format_help(self):
        # Override: return the hand-written help text defined at module level.
        return monitor_parser_text
# Custom text
monitor_parser_text = '''usage: dagda.py monitor [-h] CONTAINER_ID [--start] [--stop]
Your personal docker security monitor.
Positional Arguments:
CONTAINER_ID the input docker container id
Optional Arguments:
-h, --help show this help message and exit
--start start the monitoring over the container with
the input id
--stop stop the monitoring over the container with the
input id
''' | true |
80316490b15e60cd36efa363e50949e5fee1f0f8 | Python | ellinx/LC-python | /MinimumAreaRectangle.py | UTF-8 | 1,077 | 3.609375 | 4 | [] | no_license | """
Given a set of points in the xy-plane, determine the minimum area of a rectangle formed from these points,
with sides parallel to the x and y axes.
If there isn't any rectangle, return 0.
Example 1:
Input: [[1,1],[1,3],[3,1],[3,3],[2,2]]
Output: 4
Example 2:
Input: [[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]
Output: 2
Note:
1. 1 <= points.length <= 500
2. 0 <= points[i][0] <= 40000
3. 0 <= points[i][1] <= 40000
4. All points are distinct.
"""
class Solution:
    def minAreaRect(self, points):
        """
        :type points: List[List[int]]
        :rtype: int

        For every pair of points forming a true diagonal, the other two
        corners are looked up in an x -> {y} index; the smallest such
        rectangle area is returned (0 when none exists).
        """
        # FIX: `collections` was used without being imported anywhere in
        # this module; import it locally so the file runs standalone.
        import collections
        n = len(points)
        cols = collections.defaultdict(set)  # x coordinate -> set of y values present
        for x, y in points:
            cols[x].add(y)
        best = 0
        for i in range(n - 1):
            x1, y1 = points[i]
            for j in range(i + 1, n):
                x2, y2 = points[j]
                if x1 == x2 or y1 == y2:
                    continue  # same row/column: cannot be a diagonal
                if y1 in cols[x2] and y2 in cols[x1]:
                    area = abs(x1 - x2) * abs(y1 - y2)
                    best = area if best == 0 else min(best, area)
        return best
| true |
d66ac2a437033a46ef630a06c4a779d285f33a48 | Python | MilesDavid/OKAS | /fib.py | UTF-8 | 934 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import decimal
import numpy as np
def fib(N):
    """Return the N-th Fibonacci number (F(1) = F(2) = 1); None for N <= 0."""
    if N <= 0:
        return None
    if N <= 2:
        return 1
    prev, cur = 1, 1
    for _ in range(N - 2):
        prev, cur = cur, prev + cur
    return cur
def fib_matrix(N):
    """Return the N-th Fibonacci number via matrix exponentiation;
    None for N <= 0.

    Uses the identity [[1,1],[1,0]] ** n == [[F(n+1), F(n)], [F(n), F(n-1)]]:
    the exponent N-1 is split into powers of two and the corresponding
    matrix powers are multiplied together.
    """
    if N <= 0:
        return None
    if N <= 2:
        return 1
    exponent = N - 1
    # Collect the set bits of the exponent (binary decomposition).
    powers = set()
    bit = 0
    while exponent > 1:
        if exponent % 2:
            powers.add(bit)
        exponent //= 2
        bit += 1
    powers.add(bit)
    base = np.matrix([[1, 1], [1, 0]], np.dtype(decimal.Decimal))
    result = 1
    for b in powers:
        result = result * (base ** (2 ** b))
    return result[0, 0]
| true |
aa09b20d5ee9fe655d45cb278691c31e53e58e9d | Python | rmassoth/pokedex-plus | /tests/test_pokedex.py | UTF-8 | 972 | 3.140625 | 3 | [
"MIT"
] | permissive | from pokedex import pokedex
# Module-level fixtures: each pokemon is fetched once and shared by all tests.
char = pokedex.get_pokemon('charmander')
bulb = pokedex.get_pokemon('bulbasaur')
squirt = pokedex.get_pokemon('squirtle')
pikachu = pokedex.get_pokemon('pikachu')
snover = pokedex.get_pokemon('snover')
my_pokemon = [char, bulb, squirt, pikachu]

def test_get_best_pokemon_mine_is_stronger():
    # Fire (charmander) has the type advantage over grass (bulbasaur).
    assert pokedex.get_best_pokemon(char, bulb) == char

def test_get_best_pokemon_mine_is_weaker():
    # Water (squirtle) has the type advantage over fire (charmander).
    assert pokedex.get_best_pokemon(char, squirt) == squirt

def test_get_effectiveness_mine_is_stronger():
    # Super effective: multiplier 2.
    assert pokedex.get_effectiveness(char, bulb) == 2

def test_get_effectiveness_theirs_is_stronger():
    # Not very effective: multiplier 0.5.
    assert pokedex.get_effectiveness(bulb, char) == 0.5

def test_get_effectiveness_equal():
    # Neutral matchup: multiplier 1.
    assert pokedex.get_effectiveness(char, pikachu) == 1

def test_get_pokemon():
    my_pokemon = pokedex.get_pokemon('charmander')
    assert my_pokemon.name_ == 'charmander'

def test_effectiveness_dual_type():
    # Dual-type target (snover: grass/ice) stacks both weaknesses: 2 * 2 = 4.
    assert pokedex.get_effectiveness(char, snover) == 4
8caa395aceb0426dc305c8764936d9d270328232 | Python | john-mpelkas/Simple-Perceptron | /Perceptron.py | UTF-8 | 709 | 3.1875 | 3 | [] | no_license | import numpy as np
import math
#Activation function
def sign(n):
if n >= 0:
return 1
else:
return -1
class Perceptron():
def __init__(self):
seed = [-1, 1]
self.weights = [np.random.choice(seed), np.random.choice(seed)]
self.lr = 0.25
# Perceptron Guess
def guess(self, inputs):
sum = 0
for i in range(len(self.weights)):
sum += inputs[i] * self.weights[i]
output = sign(sum)
return (output)
# Adjusting weights
def train(self, inputs, label):
guess = self.guess(inputs)
error = label - guess
for i in range(len(self.weights)):
self.weights[i] += error * inputs[i]
| true |
d8224e3a54f9c9bbb4599a04471211e999e17a5e | Python | s-good/AutoQC | /qctests/EN_increasing_depth_check.py | UTF-8 | 3,842 | 2.78125 | 3 | [
"MIT"
] | permissive | """
Implements the EN increasing depth check.
"""
from . import EN_spike_and_step_check
import numpy as np
from collections import Counter
import util.main as main
def test(p, parameters):
"""
Runs the quality control check on profile p and returns a numpy array
of quality control decisions with False where the data value has
passed the check and True where it failed.
"""
# Check if the QC of this profile was already done and if not
# run the QC.
query = 'SELECT en_increasing_depth_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(p.uid()) + ';'
qc_log = main.dbinteract(query, targetdb=parameters["db"])
qc_log = main.unpack_row(qc_log[0])
if qc_log[0] is not None:
return qc_log[0]
return run_qc(p, parameters)
def mask_index(mat, index):
"""
update comparison matrix by setting (index,j) and (i,index) to 0 for all i,j
corresponds to recomputing the matrix after qc[index] is set True.
"""
n = len(mat)
for i in range(n):
mat[index, i] = 0
mat[i, index] = 0
def run_qc(p, parameters):
# Get z values from the profile.
d = p.z()
mask = d.mask
n = p.n_levels()
# Initialize qc array.
qc = np.zeros(n, dtype=bool)
# Basic check on each level.
qc[d < 0] = True
qc[d > 11000] = True
# don't perform more sophisticated tests for single-level profiles
if n == 1:
return qc
# if all the depths are the same, flag all levels and finish immediately
most_common_depth = Counter(d.data).most_common(1)
if most_common_depth[0][1] == len(d.data):
qc = np.ones(n, dtype=bool)
uid = p.uid()
return qc
# initialize matrix
# Comp gets set to 1 if there is not an increase in depth.
rows = []
for i in range(n):
# generate ith row
row = d[i] < d
# invert logic for columns gt row
row = np.concatenate([row[0:i], ~row[i:]])
rows.append(row)
comp = np.vstack(rows)
# enforce initial qc, masks:
qcs = [i for i,q in enumerate(qc) if q]
masks = [i for i,m in enumerate(mask) if m]
for m in list(set(qcs+masks)):
mask_index(comp, m)
# enforce diagonal
for i in range(n):
comp[i,i] = 0
comp.astype(int)
# Now check for inconsistencies in the depth levels.
currentMax = 1
while currentMax > 0:
# Check if comp was set to 1 anywhere and which level was
# most inconsistent with the others.
currentMax = 0
currentLev = -1
otherLev = -1
for i in range(n):
lineSum = np.sum(comp[:, i])
if lineSum >= currentMax:
currentMax = lineSum
currentLev = i
# Reject immediately if more than one inconsistency or
# investigate further if one inconsistency.
if currentMax > 1:
qc[currentLev] = True
elif currentMax == 1:
# Find out which level it is inconsistent with.
for i in range(n):
if comp[i, currentLev] == 1: otherLev = i
# Check if one was rejected by the spike and step
# check, otherwise reject both.
try:
spikeqc
except:
spikeqc = EN_spike_and_step_check.test(p, parameters)
if spikeqc[currentLev]: qc[currentLev] = True
if spikeqc[otherLev]: qc[otherLev] = True
if spikeqc[currentLev] == False and spikeqc[otherLev] == False:
qc[currentLev] = True
qc[otherLev] = True
# update comp matrix:
if currentLev > -1 and qc[currentLev]:
mask_index(comp, currentLev)
if otherLev > -1 and qc[otherLev]:
mask_index(comp, otherLev)
return qc
| true |
377908bbf9d8fd4685895dc5ad110e04423f707c | Python | jacobaek/whoisjacobaek | /hw1_1.py | UTF-8 | 245 | 3.53125 | 4 | [] | no_license | def if_function(a,b,c):
if(a==True):
return b
else:
return c
print(if_function(True, 2, 3))
print(if_function(False, 2, 3))
print(if_function(3==2, 3+2, 3-2))
print(if_function(3>2, 3+2, 3-2) ) | true |
872e6bf895f539baa15ad850a6c495543498c38f | Python | endy-imam/advent-of-code-2020 | /day01/day01.py | UTF-8 | 898 | 3.203125 | 3 | [] | no_license | import os
from utils import get_data, run, map_list
# INPUT SECTION
DIR_ROOT = os.path.dirname(__file__)
puzzle_input = map_list(int, get_data(DIR_ROOT).split())
# GLOBAL VALUES
SUM_TO_FIND = 2020
# MAIN FUNCTIONS
def part_one():
memo = set()
for num in puzzle_input:
num_to_find = SUM_TO_FIND - num
if num_to_find in memo:
return num_to_find * num
memo.add(num)
def part_two():
nums = sorted(puzzle_input)
for i, num in enumerate(nums[:-2]):
j, k = i+1, len(nums)-1
while j < k:
total_sum = num + nums[j] + nums[k]
if total_sum == SUM_TO_FIND:
return num * nums[j] * nums[k]
elif total_sum > SUM_TO_FIND:
k -= 1
elif total_sum < SUM_TO_FIND:
j += 1
# RUNNING FUNCTION
if __name__ == "__main__":
run(part_one, part_two)
| true |
e8ebd7ae414f6fbf9325bc48bc5a6e1859c188a1 | Python | jonpemby/jobbr | /src/searcher.py | UTF-8 | 2,583 | 2.609375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | import requests
from urllib.parse import quote_plus
from threading import Thread
from src.exceptions import MissingApiKeyError, MissingCxError, NoResultsError, ResponseError
from src.post import Post
class Searcher(Thread):
def __init__(self, query, params={}):
super().__init__()
self.current_query = ''
self.set_query(query)
self.index = 1
self.params = params
self.results = []
def get_query(self):
return self.current_query
def set_query(self, query):
self.current_query = quote_plus(query)
def get_params(self):
return self.params
def get_param(self, param):
return self.params[param]
def get_cx(self):
try:
return self.get_param('cx')
except ValueError:
raise MissingCxError("You need a custom search engine ID for this to work")
def get_api_key(self):
try:
return self.get_param('api-key')
except ValueError:
raise MissingApiKeyError("You need a Google API key for this to work")
def get_queries(self):
try:
return self.get_param('queries')
except ValueError:
return 1
def run(self):
for i in range(self.get_queries()):
response = self.send_query()
results = response.json()
if 'error' in results:
raise ResponseError(results['error'])
self.push_results(results)
if i < self.get_queries() and 'nextPage' in results['queries']:
self.set_page(results['queries']['nextPage'][0]['startIndex'])
return self.get_results()
def get_results(self):
return self.results
def send_query(self):
return requests.get("https://www.googleapis.com/customsearch/v1?q={}&key={}&cx={}&startIndex={}&dateRestrict={}"
.format(self.get_query(),
self.get_api_key(),
self.get_cx(),
self.get_page(),
self.get_date_range()))
def push_results(self, results):
if 'items' not in results:
raise NoResultsError("No results found for {}".format(self.get_query()))
for item in results['items']:
self.results.append(Post(item))
def set_page(self, index):
self.index = index
def get_page(self):
return self.index
def get_date_range(self):
return self.get_param('date-range')
| true |
acd0859cada3969d18c9c1f1dc20d83bc4fc9c61 | Python | mdmshf/codechef | /python/malvika.py | UTF-8 | 116 | 2.71875 | 3 | [] | no_license | for _ in range(int(input())):
n,m=input().split()
n,m=(int(n),int(m))
s=(n-1)+(m-1)*2
print(s)
| true |
704b730e30a0115ab2e79d74b37a5efe92fc7d7b | Python | johnbrussell/gtfs-traversal | /gtfs_traversal/data_munger.py | UTF-8 | 18,038 | 2.671875 | 3 | [] | no_license | from datetime import datetime, timedelta
class DataMunger:
def __init__(self, end_date, route_types_to_solve, stops_to_solve, data, stop_join_string):
self.data = data
self.stop_join_string = stop_join_string
self._buffered_analysis_end_time = None
self._end_date = end_date
self._location_routes = None
self._minimum_stop_times = None
self._route_list = None
self._route_types_to_solve = route_types_to_solve
self._stops_by_route_in_solution_set = None
self._transfer_stops = None
self._trip_time_cache = {}
self._unique_routes_to_solve = None
self._unique_stops_to_solve = stops_to_solve
def first_trip_after(self, earliest_departure_time, route_number, origin_stop_id):
# hmmm, what is earliest_departure_time, and what if it's after midnight toward the end of the service day?
# handle case where the origin stop is the last stop on the route
if self.is_last_stop_on_route(origin_stop_id, route_number):
return None, None
# GTFS uses days longer than 24 hours, so need to add a buffer to the end date to allow 25+ hour trips
latest_departure_time = self.get_buffered_analysis_end_time()
origin_stop_number = self.get_stop_number_from_stop_id(origin_stop_id, route_number)
solution_trip_id = None
for trip_id in self.get_trips_for_route(route_number):
raw_departure_time = self.get_stops_for_trip(trip_id)[origin_stop_number].departureTime
if (earliest_departure_time, raw_departure_time) in self._trip_time_cache:
time = self._trip_time_cache[(earliest_departure_time, raw_departure_time)]
else:
# Currently, this function does not work on routes that visit one stop multiple times in a trip.
# To fix, can pass the origin_stop_number to the function, instead of origin_stop_id
date_at_midnight = datetime(year=earliest_departure_time.year, month=earliest_departure_time.month,
day=earliest_departure_time.day)
time = self.get_datetime_from_raw_string_time(date_at_midnight, raw_departure_time)
self._trip_time_cache[(earliest_departure_time, raw_departure_time)] = time
if earliest_departure_time <= time < latest_departure_time:
latest_departure_time = time
solution_trip_id = trip_id
if solution_trip_id is None:
return None, None
return latest_departure_time, solution_trip_id
def get_all_stop_coordinates(self):
return self.data.stopLocations
def get_buffered_analysis_end_time(self):
if self._buffered_analysis_end_time is None:
self._buffered_analysis_end_time = datetime.strptime(self._end_date, '%Y-%m-%d') + timedelta(days=1)
return self._buffered_analysis_end_time
def get_datetime_from_raw_string_time(self, date_at_midnight, time_string):
return date_at_midnight + timedelta(seconds=self.convert_to_seconds_since_midnight(time_string))
def get_minimum_stop_times(self, start_time):
if self._minimum_stop_times is not None:
return self._minimum_stop_times
minimum_stop_times = {}
# minimum_stop_times is a dictionary where keys are stops and values are half of the minimum amount of time
# required to travel either to or from that stop from another solution stop
for stop in self.get_unique_stops_to_solve():
routes_at_stop = self.get_routes_at_stop(stop)
for route in routes_at_stop:
if route not in self.get_unique_routes_to_solve():
continue
# Currently, this function does not support the situation where one trip visits the same stop
# multiple times.
# Currently, this function assumes that the first trip of the day along each route is the fastest.
best_departure_time, best_trip_id = self.first_trip_after(start_time, route, stop)
if best_trip_id is None:
continue
stop_number = self.get_stop_number_from_stop_id(stop, route)
next_stop_number = str(int(stop_number) + 1)
if next_stop_number not in self.get_stops_for_route(route):
continue
stops_on_route = self.get_stops_for_route(route)
next_stop = stops_on_route[next_stop_number].stopId
travel_time_to_next_stop = self.get_travel_time_between_stops_in_seconds(
best_trip_id, stop_number, next_stop_number)
if next_stop not in minimum_stop_times:
minimum_stop_times[next_stop] = 24 * 60 * 60
if stop not in minimum_stop_times:
minimum_stop_times[stop] = 24 * 60 * 60
minimum_stop_times[next_stop] = min(minimum_stop_times[next_stop], travel_time_to_next_stop / 2)
minimum_stop_times[stop] = min(minimum_stop_times[stop], travel_time_to_next_stop / 2)
self._minimum_stop_times = minimum_stop_times
return self._minimum_stop_times
def get_minimum_remaining_time(self, unvisited_stops, start_time):
total_minimum_remaining_time = 0
for stop in unvisited_stops:
routes_at_stop = self.get_routes_at_stop(stop)
best_time_at_stop = 24 * 60 * 60
for route in routes_at_stop:
if route not in self.get_unique_routes_to_solve():
continue
if self.is_last_stop_on_route(stop, route):
stop_number = self.get_stop_number_from_stop_id(stop, route)
previous_stop_number = str(int(stop_number) - 1)
stops_on_route = self.get_stops_for_route(route)
previous_stop = stops_on_route[previous_stop_number].stopId
best_departure_time, best_trip_id = self.first_trip_after(start_time, route, previous_stop)
else:
best_departure_time, best_trip_id = self.first_trip_after(start_time, route, stop)
if best_trip_id is None:
continue
stop_number = self.get_stop_number_from_stop_id(stop, route)
next_stop_number = str(int(stop_number) + 1)
previous_stop_number = str(int(stop_number) - 1)
stops_on_route = self.get_stops_for_route(route)
if next_stop_number in self.get_stops_for_route(route):
travel_time_to_next_stop = self.get_travel_time_between_stops_in_seconds(
best_trip_id, stop_number, next_stop_number)
if stops_on_route[next_stop_number].stopId in unvisited_stops:
best_time_at_stop = min(best_time_at_stop, travel_time_to_next_stop / 2)
else:
best_time_at_stop = min(best_time_at_stop, travel_time_to_next_stop)
if previous_stop_number in self.get_stops_for_route(route):
travel_time_from_previous_stop = self.get_travel_time_between_stops_in_seconds(
best_trip_id, previous_stop_number, stop_number)
if stops_on_route[previous_stop_number].stopId in unvisited_stops:
best_time_at_stop = min(best_time_at_stop, travel_time_from_previous_stop / 2)
else:
best_time_at_stop = min(best_time_at_stop, travel_time_from_previous_stop)
total_minimum_remaining_time += best_time_at_stop
return total_minimum_remaining_time
def get_minimum_remaining_transfers(self, current_route, unvisited_stops):
minimum_remaining_transfers = 0
routes_accounted_for = set()
for stop in unvisited_stops:
routes_at_stop = self.get_routes_at_stop(stop)
solution_routes_at_stop = [s for s in routes_at_stop if s in self.get_unique_routes_to_solve()]
if len(solution_routes_at_stop) > 1:
continue
route = solution_routes_at_stop[0]
if route in routes_accounted_for:
continue
minimum_remaining_transfers += 1
routes_accounted_for.add(route)
if current_route in routes_accounted_for:
minimum_remaining_transfers -= 1
return max(0, minimum_remaining_transfers)
def get_next_stop_id(self, stop_id, route):
if self.is_last_stop_on_route(stop_id, route):
return None
stop_number = self.get_stop_number_from_stop_id(stop_id, route)
next_stop_number = str(int(stop_number) + 1)
stops_on_route = self.get_stops_for_route(route)
return stops_on_route[next_stop_number].stopId
def get_off_course_stop_locations(self):
return {s: l for s, l in self.get_all_stop_coordinates().items() if s not in self.get_unique_stops_to_solve()}
def get_route_trips(self):
return self.data.uniqueRouteTrips
def get_route_types_to_solve(self):
return [str(r) for r in self._route_types_to_solve]
def get_route_list(self):
if self._route_list is None:
self._route_list = [route_id for route_id, route in self.data.uniqueRouteTrips.items()]
return self._route_list
def get_routes_at_stop(self, stop_id):
return self.get_routes_by_stop()[stop_id]
def get_routes_by_stop(self):
if self._location_routes is not None:
return self._location_routes
location_routes = {}
for route_id, info in self.get_route_trips().items():
trip_id = info.tripIds[0]
stops = self.get_trip_schedules()[trip_id].tripStops
for stop, stop_info in stops.items():
if stop_info.stopId not in location_routes:
location_routes[stop_info.stopId] = set()
location_routes[stop_info.stopId].add(route_id)
self._location_routes = location_routes
return location_routes
def get_solution_routes_at_stop(self, stop_id):
routes_at_stop = self.get_routes_at_stop(stop_id)
return {route for route in routes_at_stop if route in self.get_unique_routes_to_solve()}
def get_stop_locations_to_solve(self):
return {s: l for s, l in self.get_all_stop_coordinates().items() if s in self.get_unique_stops_to_solve()}
def get_stop_number_from_stop_id(self, stop_id, route_id):
stops_on_route = self.get_stops_for_route(route_id)
for stop_number, stop_departure_namedtuple in stops_on_route.items():
if stop_departure_namedtuple.stopId == stop_id:
return stop_number
raise ValueError("route_id and origin_stop_id mismatch")
def get_stops_at_ends_of_solution_routes(self):
stops_at_ends_of_solution_routes = set()
for r in self.get_unique_routes_to_solve():
trip_stops = self.get_stops_for_route(r)
stops_at_ends_of_solution_routes.add(trip_stops['1'].stopId)
stops_at_ends_of_solution_routes.add(trip_stops[str(len(trip_stops))].stopId)
return stops_at_ends_of_solution_routes
def get_stops_by_route_in_solution_set(self):
if self._stops_by_route_in_solution_set is not None:
return self._stops_by_route_in_solution_set
route_stops = {}
for stop in self.get_unique_stops_to_solve():
for route in self.get_routes_at_stop(stop):
if route not in self.get_unique_routes_to_solve():
continue
if route not in route_stops:
route_stops[route] = set()
route_stops[route].add(stop)
self._stops_by_route_in_solution_set = route_stops
return self._stops_by_route_in_solution_set
def get_stops_for_route(self, route_id):
return self.get_stops_for_trip(self.get_trips_for_route(route_id)[0])
def get_stops_for_trip(self, trip_id):
return self.get_trip_schedules()[trip_id].tripStops
def get_total_minimum_time(self, start_time):
total_minimum_time = 0
for v in self.get_minimum_stop_times(start_time).values():
total_minimum_time += v
return total_minimum_time
def get_transfer_stops(self, start_time):
if self._transfer_stops is not None:
return self._transfer_stops
transfer_stops = set()
adjacent_stops = {}
arrival_adjacent_stops = {}
endpoint_stops = set()
for stop in self.get_unique_stops_to_solve():
routes_at_stop = self.get_solution_routes_at_stop(stop)
for route in routes_at_stop:
stop_number = self.get_stop_number_from_stop_id(stop, route)
if stop_number == '1':
endpoint_stops.add(stop)
best_departure_time, best_trip_id = self.first_trip_after(start_time, route, stop)
if best_trip_id is None:
endpoint_stops.add(stop)
continue
next_stop_number = str(int(stop_number) + 1)
stops_on_route = self.get_stops_for_route(route)
next_stop = stops_on_route[next_stop_number].stopId
if stop not in adjacent_stops:
adjacent_stops[stop] = set()
if next_stop not in arrival_adjacent_stops:
arrival_adjacent_stops[next_stop] = set()
adjacent_stops[stop].add(next_stop)
arrival_adjacent_stops[next_stop].add(stop)
for stop in self.get_unique_stops_to_solve():
if stop in adjacent_stops and len(adjacent_stops[stop]) >= 3:
transfer_stops.add(stop)
if stop in arrival_adjacent_stops and len(arrival_adjacent_stops[stop]) >= 3:
transfer_stops.add(stop)
if stop in adjacent_stops and len(adjacent_stops[stop]) >= 2 and stop in endpoint_stops:
transfer_stops.add(stop)
if stop in arrival_adjacent_stops and len(arrival_adjacent_stops[stop]) >= 2 and stop in endpoint_stops:
transfer_stops.add(stop)
if stop not in adjacent_stops:
pass
elif any(adjacent_stop not in arrival_adjacent_stops
for adjacent_stop in adjacent_stops[stop]) and len(self.get_routes_at_stop(stop)) >= 2:
transfer_stops.add(stop)
elif stop not in arrival_adjacent_stops:
pass
elif any(adjacent_stop not in arrival_adjacent_stops[stop]
for adjacent_stop in adjacent_stops[stop]) and len(self.get_routes_at_stop(stop)) >= 2:
transfer_stops.add(stop)
if stop not in arrival_adjacent_stops:
pass
elif any(arrival_adjacent_stop not in adjacent_stops
for arrival_adjacent_stop in arrival_adjacent_stops[stop]) and \
len(self.get_routes_at_stop(stop)) >= 2:
transfer_stops.add(stop)
elif stop not in adjacent_stops:
pass
elif any(arrival_adjacent_stop not in adjacent_stops[stop]
for arrival_adjacent_stop in arrival_adjacent_stops[stop]) and \
len(self.get_routes_at_stop(stop)) >= 2:
transfer_stops.add(stop)
self._transfer_stops = transfer_stops
return self._transfer_stops
def get_travel_time_between_stops_in_seconds(self, trip, on_stop_number, off_stop_number):
assert float(off_stop_number) >= float(on_stop_number), 'cannot travel backwards along trip'
trip_stops = self.get_stops_for_trip(trip)
on_time_raw = trip_stops[on_stop_number].departureTime
on_time_seconds_since_midnight = self.convert_to_seconds_since_midnight(on_time_raw)
off_time_raw = trip_stops[off_stop_number].departureTime
off_time_seconds_since_midnight = self.convert_to_seconds_since_midnight(off_time_raw)
return off_time_seconds_since_midnight - on_time_seconds_since_midnight
@staticmethod
def convert_to_seconds_since_midnight(raw_time_string):
hours, minutes, seconds = raw_time_string.split(':')
return 3600 * float(hours) + 60 * float(minutes) + float(seconds)
def get_trip_schedules(self):
return self.data.tripSchedules
def get_trips_for_route(self, route_id):
return self.get_route_trips()[route_id].tripIds
def get_unique_routes_to_solve(self):
if self._unique_routes_to_solve is not None:
return self._unique_routes_to_solve
self._unique_routes_to_solve = {route_id for route_id, route in self.data.uniqueRouteTrips.items() if
str(route.routeInfo.routeType) in self.get_route_types_to_solve()}
return self._unique_routes_to_solve
def get_unique_stops_to_solve(self):
if self._unique_stops_to_solve is not None:
return self._unique_stops_to_solve
unique_stops_to_solve = set()
for r in self.get_unique_routes_to_solve():
trip_id = self.get_route_trips()[r].tripIds[0]
trip_stops = self.get_trip_schedules()[trip_id].tripStops
for stop in trip_stops.values():
unique_stops_to_solve.add(stop.stopId)
self._unique_stops_to_solve = unique_stops_to_solve
return unique_stops_to_solve
def is_last_stop_on_route(self, stop_id, route):
stop_number = self.get_stop_number_from_stop_id(stop_id, route)
return str(int(stop_number) + 1) not in self.get_stops_for_route(route)
def is_solution_route(self, route_id):
return route_id in self.get_unique_routes_to_solve()
| true |
8ba1af90b1c4a9f30b947a5ae32948052aec406a | Python | Chaeguevara/21_1-SNU | /ManufactureAI/Hw2/xorEtoE.py | UTF-8 | 1,357 | 3.296875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
return 1/(1+np.exp(-x))
def back_prop_w1(g, y, x1):
return (-2)*(g-y)*y*(1-y)*x1
def back_prop_w2(g, y, x2):
return (-2)*(g-y)*y*(1-y)*x2
def back_prop_theta(g, y):
return 2*(g-y)*y*(1-y)
def feedforward(x1, x2, w1, w2, theta):
return sigmoid(x1*w1 + x2*w2 - theta)
w1 = 0.1
w2 = 0.1
theta = 0.5
lr = 0.1
epoch = 200
wb1 = list()
wb2 = list()
thetab = list()
for i in range(epoch):
if i % 4 == 0:
x1 = 0
x2 = 0
g = 0
elif i % 4 == 1:
x1 = 1
x2 = 0
g = 0
elif i % 4 == 2:
x1 = 0
x2 = 1
g = 0
elif i % 4 == 3:
x1 = 1
x2 = 1
g = 1
y = feedforward(x1, x2, w1, w2, theta)
w1 = w1 - lr*back_prop_w1(g, y, x1)
w2 = w2 - lr*back_prop_w2(g, y, x2)
theta = theta - lr*back_prop_theta(g, y)
print("y\t:", y, "g\t:", g, "g-y\t:", g-y, "x1\t:", x1, "x2\t:", x2)
print("w1\t:", w1, "w2\t:", w2, "theta\t:", theta)
print("-------------------------------------------------")
wb1.append(w1)
wb2.append(w2)
thetab.append(theta)
x = np.arange(1, epoch, 1)
print(x)
plt.figure(1)
plt.plot(x, wb1[1:epoch], 'k')
plt.figure(2)
plt.plot(x, wb2[1:epoch], 'k')
# plt.figure(3)
# plt.plot(x, thetab[1:epoch], 'k')
plt.show() | true |
3e1c626fa6e158c5cd0c6272a8126b7e3caa46fd | Python | eartheekapat/UNSW_ALGO_2 | /asst/2/q_4.py | UTF-8 | 117 | 3 | 3 | [] | no_license | A = [14, 1, 1, 1, 1, 1]
n = len(A)
left = n*(n-1)/2
right = sum(A)
print(left <= right)
print(left)
print(right)
| true |
8672894bd19dcc176d26377c2996aa508ee10254 | Python | biolab/baylor_dicty_paper | /PC1vsTime_plots.py | UTF-8 | 30,611 | 2.53125 | 3 | [] | no_license | print('Preparing PC1 vs time plots.')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing as pp
from sklearn.decomposition import PCA
from sklearn.model_selection import LeaveOneOut
import itertools
import matplotlib
from collections import defaultdict
import random
import matplotlib.patches as mpatches
from matplotlib import rcParams
import os
import pygam
from helper import save_pickle, GROUPS, STAGES, PATH_RESULTS, PATH_DATA
# ***********************
# **** Helper functions
class CustomScaler:
def __init__(self, reference: np.ndarray):
"""
:param reference: np.ndarray or pandas DF on which to fit scaler
"""
if isinstance(reference, pd.DataFrame):
reference = reference.values
self.reference = reference
self.scalers = {}
def transform(self, data: np.ndarray, log, scale: str):
"""
:param data: Data to be scaled. np.ndarray od pandas DF.
:param log: log2(data+1)
:param scale: 'minmax','m0s1','divide_mean'
:return: Scaled data
"""
if not (log, scale) in self.scalers.keys():
scaler = None
ref = self.reference.copy()
if log:
ref = np.log2(ref + 1)
if scale == 'minmax':
scaler = pp.MinMaxScaler()
scaler.fit(ref)
elif scale == 'm0s1':
scaler = pp.StandardScaler()
scaler.fit(ref)
elif scale == 'divide_mean':
scaler = ref.mean(axis=0)
self.scalers[(log, scale)] = scaler
scaler = self.scalers[(log, scale)]
scaled = None
if isinstance(data, pd.DataFrame):
data = data.values
if log:
data = np.log2(data + 1)
if scale in ['minmax', 'm0s1']:
scaled = scaler.transform(data)
elif scale == 'divide_mean':
scaled = data / scaler
return scaled
def get_dimredplot_param(data, col, default, to_mode=False):
"""
Based on data DF (information for single/multiple points) find series or mode of the parameter or use default
if there is no column for the parameter.
:param data: DF with data
:param col: Column for which to extract the value
:param default: Default to use if column is absent for the data.
:param to_mode: Convert result to mode insetad of returning extracted series.
:return: Extracted data as column or mode.
"""
if isinstance(data, pd.DataFrame):
if col in data.columns:
result = data[col]
if to_mode:
result = result.mode()[0]
return result
else:
return default
elif isinstance(data, pd.Series):
if col in data.index:
return data[col]
else:
return default
else:
return default
# Jitter function
def rand_jitter(n, min, max, strength=0.005):
"""
Number is jittered based on: n + randomN * (max-min) * stength, where -1 <= randomN <= 1
:param n: Number to jitter
:param min: Used to determine the size of the jittering
:param max: Used to determine the size of the jittering
:param strength: Larger strength leads to stronger jitter. Makes sense to be below 1 (adds random number scaled by
max-min and strength.
:return: New number.
"""
dev = (max - min) * strength
return n + random.uniform(-1, 1) * dev
# Colours for plotting
COLOURS_GROUP = {'agg-': '#d40808', 'lag_dis': '#e68209', 'tag_dis': '#ffb13d', 'tag': '#d1b30a', 'cud': '#4eb314',
'WT': '#0fa3ab', 'sFB': '#525252', 'prec': '#7010b0'}
COLOURS_STAGE = {'NA': '#d9d9d9', 'no_agg': '#ed1c24', 'stream': '#985006',
'lag': '#f97402', 'tag': '#d9d800', 'tip': '#66cf00', 'slug': '#008629', 'mhat': '#00c58f',
'cul': '#0ff2ff', 'FB': '#00b2ff', 'yem': '#666666'}
def dim_reduction_plot(plot_data: pd.DataFrame, plot_by: str, fig_ax: tuple, order_column, colour_by_phenotype=False,
                       add_name=True, colours: dict = COLOURS_GROUP, colours_stage: dict = COLOURS_STAGE,
                       legend_groups='lower left', legend_phenotypes='upper right', fontsize=6,
                       plot_order: list = None, plot_points: bool = True, add_avg: bool = False, add_sem: bool = False,
                       sem_alpha: float = 0.1, alternative_lines: dict = None, sep_text: tuple = (30, 30),
                       phenotypes_list: list = STAGES, plot_lines: bool = True, jitter_all: bool = False,
                       point_alpha=0.5, point_size=5, jitter_strength: tuple = (0.005, 0.005)):
    """
    Plots PC1 vs time of strains, phenotype groups, and developmental stages.
    For plotting parameters that are not for individual points (e.g. line width, alpha)
    uses mode when plotting lines and legend.
    :param plot_data: Data of individual 'points'. Must have columns: 'x','y', 'Group' (for colouring),
        order_column (how to order points in line),
        and a column matching the plot_by parameter (for line plotting and names).
        Can have 'size' (point size - default param point_size), 'width' (line width),
        'alpha' (for plotting - default for points is point_alpha), 'linestyle', 'shape' (point shape),
        and phenotypes columns (matching phenotypes_list, valued 0 (absent) or 1 (present)).
    :param plot_by: Plot lines and text annotation based on this column.
    :param fig_ax: Tuple (fig,ax) with plt elements used for plotting
    :param order_column: Order of plotting of groups from plot_by, first is plotted first.
    :param colour_by_phenotype: Whether to colours samples by phenotype. If false colour by 'Group' colour.
    :param add_name: Add text with name from plot_by groups.
    :param colours: Colours for 'Group'. Key: 'Group' value, value: colour.
    :param colours_stage: Colours for plotting stages, used if colour_by_phenotype=True. Dict with keys: from
        phenotypes_list and value: colour.
    :param legend_groups: Position for Group legend, if None do not plot
    :param legend_phenotypes: Position for stages legend, if None do not plot
    :param fontsize: Fontsize for annotation.
    :param plot_order: Plot lines and SEMs in this order. Matching groups from plot_by.
    :param plot_points: Whether to plot points.
    :param add_avg: Average points with same x value (used for plotting lines and text positioning).
    :param add_sem: Plot SEM zones.
    :param sem_alpha: Alpha for SEM zone.
    :param phenotypes_list: List of phenotypes used to find stage columns in plot_data
    :param alternative_lines: Plot different lines than based on data points from plot_data.
        Dict with keys being groups obtained by plot_by and values tuple of lists: ([xs],[ys]). Use this also for
        text annotation.
    :param plot_lines: Whether to plot lines. Lines are plotted between points (rows) in plot_data, ordered by
        order_column
    :param jitter_all: Jitter all points. Else jitter only when multiple stages are annotated to same point, not
        jittering the first stage.
    :param point_alpha: Default alpha for points used if alpha column is absent
    :param point_size: Default size for points used if size column is absent
    :param jitter_strength: Tuple (strength_x, strength_y) used to jitter points. Use floats << 1 - based on data range.
        Higher uses more jittering.
    :param sep_text: Separate text annotations so that they do not overlap. Smaller number increases the separation
        of text annotations. Tuple with (x,y), where x,y denote values for x and y axis.
    """
    # Sort data in order to be plotted
    if plot_order is not None:
        plot_data = plot_data.loc[
            plot_data[plot_by].map(dict(zip(plot_order, range(len(plot_order))))).sort_values().index]
    else:
        plot_order = plot_data[plot_by].unique()
    fig, ax = fig_ax
    # Plot data points
    if plot_points:
        # Either add one point per measurment (coloured by group) or multiple jitter points coloured by phenotypes
        if not colour_by_phenotype:
            for row_name, point in plot_data.iterrows():
                ax.scatter(point['x'], point['y'], s=get_dimredplot_param(point, 'size', point_size),
                           c=colours[point['Group']], alpha=get_dimredplot_param(point, 'alpha', point_alpha, True),
                           marker=get_dimredplot_param(point, 'shape', 'o', True))
        # By phenotypes
        else:
            min_x = plot_data['x'].min()
            min_y = plot_data['y'].min()
            max_x = plot_data['x'].max()
            # Fixed: was plot_data['x'].max() (copy-paste), which scaled the
            # y-jitter by the x-axis range instead of the y-axis range.
            max_y = plot_data['y'].max()
            for _, point in plot_data.iterrows():
                phenotypes = point[phenotypes_list]
                x = point['x']
                y = point['y']
                if jitter_all:
                    x = rand_jitter(n=x, min=min_x, max=max_x, strength=jitter_strength[0])
                    y = rand_jitter(n=y, min=min_y, max=max_y, strength=jitter_strength[1])
                # jitter when needed - e.g. multiple stages are annotated to a sample
                if phenotypes.sum() < 1:
                    ax.scatter(x, y, s=get_dimredplot_param(point, 'size', point_size),
                               c=colours_stage['NA'],
                               alpha=get_dimredplot_param(point, 'alpha', point_alpha, True),
                               marker=get_dimredplot_param(point, 'shape', 'o', True))
                elif phenotypes.sum() == 1:
                    phenotype = phenotypes[phenotypes > 0].index[0]
                    ax.scatter(x, y, s=get_dimredplot_param(point, 'size', point_size),
                               c=colours_stage[phenotype],
                               alpha=get_dimredplot_param(point, 'alpha', point_alpha, True),
                               marker=get_dimredplot_param(point, 'shape', 'o', True))
                else:
                    # Do not jitter the stage (point) of a sample that will be plotted first
                    first = True
                    for phenotype in phenotypes_list:
                        if phenotypes[phenotype] == 1:
                            if not first:
                                x = rand_jitter(n=x, min=min_x, max=max_x, strength=jitter_strength[0])
                                y = rand_jitter(n=y, min=min_y, max=max_y, strength=jitter_strength[1])
                            ax.scatter(x, y, s=get_dimredplot_param(point, 'size', point_size),
                                       c=colours_stage[phenotype],
                                       alpha=get_dimredplot_param(point, 'alpha', point_alpha, True),
                                       marker=get_dimredplot_param(point, 'shape', 'o', True))
                            first = False
    # Group for lines/names
    grouped = plot_data.groupby(plot_by)
    # Add SEM zones
    if add_sem:
        for name in plot_order:
            data_rep = grouped.get_group(name).sort_values(order_column)
            group = data_rep['Group'].values[0]
            grouped_x = data_rep.groupby(['x'])
            x_line = grouped_x.mean().index
            y_line = grouped_x.mean()['y']
            sem = grouped_x.sem()['y']
            ax.fill_between(x_line, y_line - sem, y_line + sem, alpha=sem_alpha, color=colours[group])
    # Add line between replicates' measurments - either lines connecting points, averages between points,
    # or predefined lines
    if plot_lines:
        for name in plot_order:
            data_rep = grouped.get_group(name).sort_values(order_column)
            group = data_rep['Group'].values[0]
            if alternative_lines is None:
                if not add_avg:
                    x_line = data_rep['x']
                    y_line = data_rep['y']
                else:
                    grouped_x = data_rep.groupby(['x'])
                    x_line = grouped_x.mean().index
                    y_line = grouped_x.mean()['y']
            else:
                x_line, y_line = alternative_lines[data_rep[plot_by].values[0]]
            ax.plot(x_line, y_line, color=colours[group],
                    alpha=get_dimredplot_param(data_rep, 'alpha', 0.5, True),
                    linewidth=get_dimredplot_param(data_rep, 'width', 0.5, True),
                    linestyle=get_dimredplot_param(data_rep, 'linestyle', 'solid', True))
    # Add replicate name
    if add_name:
        used_text_positions = pd.DataFrame(columns=['x', 'y'])
        x_span = plot_data['x'].max() - plot_data['x'].min()
        y_span = plot_data['y'].max() - plot_data['y'].min()
        for name in plot_order:
            data_rep = grouped.get_group(name).sort_values(order_column)
            group = data_rep['Group'].values[0]
            idx = -1
            # Add name near the line
            if alternative_lines is None:
                if not add_avg:
                    x_values = data_rep['x'].values
                    y_values = data_rep['y'].values
                else:
                    grouped_x = data_rep.groupby(['x'])
                    x_values = grouped_x.mean().index.values
                    y_values = grouped_x.mean()['y'].values
            else:
                x_values, y_values = alternative_lines[data_rep[plot_by].values[0]]
            # Make sure that names are separated enough; walk backwards along the
            # line until a free slot is found.
            x = float(x_values[idx]) + x_span / 500
            y = float(y_values[idx]) + y_span / 500
            while ((abs(used_text_positions['x'] - x) < (x_span / sep_text[0])).values &
                   (abs(used_text_positions['y'] - y) < (y_span / sep_text[1])).values).any():
                idx -= 1
                x = float(x_values[idx]) + x_span / 500
                y = float(y_values[idx]) + y_span / 500
            # DataFrame.append was removed in pandas 2.0; concat a one-row frame instead.
            used_text_positions = pd.concat(
                [used_text_positions, pd.DataFrame([{'x': x, 'y': y}])], ignore_index=True)
            ax.text(x, y, data_rep[plot_by].values[0], fontsize=fontsize, color=colours[group])
    # Legends for groups and phenotypes
    alpha_legend = get_dimredplot_param(plot_data, 'alpha', 0.5)
    if isinstance(alpha_legend, pd.Series):
        alpha_legend = alpha_legend.median()
    if legend_groups is not None:
        patchList = []
        for name, colour in colours.items():
            data_key = mpatches.Patch(color=colour, label=name, alpha=alpha_legend)
            patchList.append(data_key)
        title = 'Phenotype'
        if colour_by_phenotype:
            title = title + ' (line)'
        # Re-bind the parameter to the legend artist so it can be re-added below.
        legend_groups = ax.legend(handles=patchList, title=title, loc=legend_groups)
    if colour_by_phenotype and legend_phenotypes is not None:
        patchList = []
        for name, colour in colours_stage.items():
            data_key = mpatches.Patch(color=colour, label=name, alpha=alpha_legend)
            patchList.append(data_key)
        legend_stages = ax.legend(handles=patchList, title="Stage (point)", loc=legend_phenotypes)
        if legend_groups is not None:
            ax.add_artist(legend_groups)
# Use this to make sure that enforcing MIN/MAX X/Y will not cut off parts of plots (e.g. due to jittering)
def adjust_axes_lim(ax: plt.Axes, min_x_thresh: float, max_x_thresh: float, min_y_thresh: float, max_y_thresh: float):
    """
    Widen the axes limits so each limit reaches at least its threshold.

    A limit is only ever moved outward: if the current view already extends
    beyond a threshold it is kept (and a message is printed). Strip any plot
    padding beforehand, since the existing limits are taken at face value.

    :param ax: Axes object whose ranges are adjusted.
    :param min_x_thresh: ax x_min must be at least that small.
    :param max_x_thresh: ax x_max must be at least that big.
    :param min_y_thresh: ax y_min must be at least that small.
    :param max_y_thresh: ax y_max must be at least that big.
    """

    def _widen(current, threshold, is_lower, label):
        # Compare on 3 decimals to sidestep float noise from matplotlib.
        if is_lower:
            within = round(current, 3) >= round(threshold, 3)
        else:
            within = round(current, 3) <= round(threshold, 3)
        if within:
            return threshold
        print(label, 'was set to', current, 'instead of', threshold)
        return current

    y_min, y_max = ax.get_ylim()
    x_min, x_max = ax.get_xlim()
    y_min = _widen(y_min, min_y_thresh, True, 'min y')
    y_max = _widen(y_max, max_y_thresh, False, 'max y')
    x_min = _widen(x_min, min_x_thresh, True, 'min x')
    x_max = _widen(x_max, max_x_thresh, False, 'max x')
    ax.set_ylim(y_min, y_max)
    ax.set_xlim(x_min, x_max)
# *****************
# *** Load data
# Output directory for all PC1-vs-time results; created if missing.
path_save = PATH_RESULTS + 'PC1vsTime/'
if not os.path.exists(path_save):
    os.makedirs(path_save)
# Expression (RPKUM) table: genes x samples; sample metadata in 'conditions'.
# NOTE(review): 'Measurment' is the column spelling used throughout the dataset.
genes = pd.read_csv(PATH_DATA + 'mergedGenes_RPKUM.tsv', sep='\t', index_col=0)
conditions = pd.read_csv(PATH_DATA + 'conditions_mergedGenes.tsv', sep='\t', index_col=None)
# Use the same font in every figure produced below.
font = 'Arial'
matplotlib.rcParams.update({'font.family': font})
# In each strain group use a different linestyle for each strain, so strains
# sharing a group colour stay distinguishable in line plots.
linestyles = ['solid', 'dashed', 'dotted', 'dashdot', (0, (5, 5))]
strain_linestyles = dict()
used_linestyles = defaultdict(set)
for strain in conditions['Strain'].unique():
    group = GROUPS[strain]
    # Assign the first linestyle not yet used within this strain's group.
    for style in linestyles:
        if style not in used_linestyles[group]:
            used_linestyles[group].add(style)
            strain_linestyles[strain] = style
            break
# Default plot parameters; the reference strain AX4 is drawn thicker and opaque.
linewidth_mutant = 2
alpha_mutant = 0.7
linewidth_AX4 = 5
alpha_AX4 = 1
size_mutant = 30
scale_size_AX4 = linewidth_AX4 / linewidth_mutant
# *** PCA fit
# Data pre-processing parameters: log2(x+1) transform and mean-0/std-1 scaling.
LOG = True
SCALE = 'm0s1'
# Reference data (AX4 RPKUM data with non-zero expressed genes); samples as rows.
genes_data = genes[conditions.query('Strain =="AX4"')['Measurment']].copy()
genes_data = genes_data[(genes_data != 0).any(axis=1)].T
# The scaler is fit on the AX4 reference so all strains are scaled consistently.
DATA_REFERENCE, SCALER = (genes_data, CustomScaler(genes_data))
# Scale reference
DATA_REFERENCE = pd.DataFrame(SCALER.transform(DATA_REFERENCE, log=LOG, scale=SCALE),
                              index=DATA_REFERENCE.index, columns=DATA_REFERENCE.columns)
# PCA on reference data; only PC1 is used downstream.
pca = PCA(n_components=1, random_state=0)
pca = pca.fit(DATA_REFERENCE)
save_pickle(path_save + 'PCA_AX4NonNullGenes_scale' + SCALE + 'log' + str(LOG) + '.pkl', pca)
# Use AX4-trained PCA to transform data of other strains (same gene set/order).
data_strains = genes[conditions['Measurment']].T[DATA_REFERENCE.columns]
data_strains = pd.DataFrame(SCALER.transform(data_strains, log=LOG, scale=SCALE), index=data_strains.index,
                            columns=data_strains.columns)
PCA_TRANSFORMED = pca.transform(data_strains).ravel()
# All strains PCA data for plotting; per-sample line style/width/alpha chosen by strain.
DATA_TRANSFORMED = pd.DataFrame({'y': PCA_TRANSFORMED,
                                 'linestyle': [strain_linestyles[strain] for strain in conditions['Strain']],
                                 'width': [linewidth_mutant if strain != 'AX4' else linewidth_AX4 for strain in
                                           conditions['Strain']],
                                 'alpha': [alpha_mutant if strain != 'AX4' else alpha_AX4 for strain in
                                           conditions['Strain']]
                                 })
# Attach time ('x'), grouping metadata, and the stage annotation columns.
DATA_TRANSFORMED[['x', 'Group', 'Strain', 'Replicate'] + STAGES] = conditions[
    ['Time', 'Group', 'Strain', 'Replicate'] + STAGES]
DATA_TRANSFORMED = DATA_TRANSFORMED.sort_values('x')
# *** Explained variance by PC1 for each strain
# Ratio of variance of the PC1 projection to total variance of the scaled data,
# computed per strain on that strain's samples only.
pca_named = pd.DataFrame(PCA_TRANSFORMED, index=data_strains.index)
print('Explained variance by PC1 for each strain')
for strain in conditions['Strain'].unique():
    strain_samples = conditions.query('Strain ==@strain')['Measurment']
    transformed_strain = pca_named.loc[strain_samples]
    raw_strain = data_strains.loc[strain_samples, :]
    print('%-12s%-12.3f' % (strain, transformed_strain.var()[0] / raw_strain.var().sum()))
# *** GAM fitting to PC1 (Y) vs time (X) data
# CV parameter combinations: regularisation strength (lam) x number of splines.
param_combinations = list(itertools.product(*[list(np.logspace(-6, 0, 11, base=2)), [10, 15, 20]]))
param_combinations = [{'lam': lam, 'n_splines': n_splines} for lam, n_splines in param_combinations]
# Select best GAM for each strain and get data for plotting
strain_GAMs = dict()
for strain in conditions['Strain'].unique():
    data_transformed = DATA_TRANSFORMED.query('Strain =="' + strain + '"')
    data_transformed = data_transformed.sort_values('x')
    # CV to select GAM parameters (regularisation, n splines)
    # CV with leave-one-out over x (time); e.g. all points at that x are used for testing
    splitter = LeaveOneOut()
    squared_errors = defaultdict(list)
    x_list = data_transformed['x'].unique()
    for train_index, test_index in splitter.split(x_list):
        x_train, x_test = x_list[train_index], x_list[test_index]
        data_train = data_transformed[data_transformed['x'].isin(x_train)]
        data_test = data_transformed[data_transformed['x'].isin(x_test)]
        for param_idx, params in enumerate(param_combinations):
            gam = pygam.LinearGAM(pygam.s(0, **params))
            gam.fit(data_train['x'].values.reshape(-1, 1), data_train['y'].values.reshape(-1, 1))
            prediction = gam.predict(data_test['x'].values.reshape(-1, 1))
            # Squared errors of all points at the held-out time point.
            squared_error = (data_test['y'] - prediction) ** 2
            squared_errors[param_idx].extend(list(squared_error.values))
    # Select params with smallest mean squared error. (Previously built a
    # DataFrame via DataFrame.append, which was removed in pandas 2.0; a plain
    # dict + min() is equivalent and simpler. Ties resolve to the first
    # parameter combination, matching the former stable-sort behaviour.)
    mean_errors = {param_idx: np.nanmean(sqes) for param_idx, sqes in squared_errors.items()}
    best_param_idx = min(mean_errors, key=mean_errors.get)
    params_best = param_combinations[best_param_idx]
    print(strain, 'GAM parameters: ', params_best)
    # Make the model on whole dataset for plotting
    gam = pygam.LinearGAM(pygam.s(0, **params_best))
    gam.fit(data_transformed['x'].values.reshape(-1, 1), data_transformed['y'].values.reshape(-1, 1))
    xs = np.linspace(min(x_list), max(x_list), 100)
    ys = gam.predict(xs)
    strain_GAMs[strain] = (xs, ys)
save_pickle(path_save + 'strainGAMs.pkl', strain_GAMs)
# ***********
# ** Plots
# Min/max y and x axis value for plotting - takes in account GAM and PC1 transformed data; synchronised across plots
# so that every figure below shares the same axis ranges.
y_values = []
for gam_xy in strain_GAMs.values():
    y_values.extend(gam_xy[1])
y_values.extend(list(DATA_TRANSFORMED['y']))
MAX_Y = max(y_values)
MIN_Y = min(y_values)
range_y = MAX_Y - MIN_Y
# Add some extra space (for line thickness/points size)
MAX_Y = MAX_Y + 0.02 * range_y
MIN_Y = MIN_Y - 0.02 * range_y
MIN_X = DATA_TRANSFORMED['x'].min()
MAX_X = DATA_TRANSFORMED['x'].max()
range_x = MAX_X - MIN_X
# Add more padding to account for jittering which is not included here
MAX_X = MAX_X + 0.05 * range_x
MIN_X = MIN_X - 0.05 * range_x
# Set little ax margins so that necessary (tight) ax range of each plot can be calculated
plt.rcParams['axes.xmargin'] = 0.01
plt.rcParams['axes.ymargin'] = 0.01
# *** Plots of fitted GAMs to individual strains with shown sample points
replicates_cmap_name = 'tab10'
for strain in conditions['Strain'].unique():
    data_transformed = DATA_TRANSFORMED.query('Strain =="' + strain + '"')
    data_transformed = data_transformed.sort_values('x')
    # Pre-computed GAM curve for this strain (xs grid, predicted ys).
    xs, ys = strain_GAMs[strain]
    fig, ax = plt.subplots()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_xlabel('Time')
    ax.set_ylabel('PC1')
    ax.plot(xs, ys, 'k', lw=2, alpha=0.7)
    # Colour each replicate separately
    replicates = data_transformed['Replicate']
    replicates_unique = list(replicates.unique())
    cmap = plt.get_cmap(replicates_cmap_name).colors[:len(replicates_unique)]
    rep_colours = dict(zip(replicates_unique, cmap))
    ax.scatter(data_transformed['x'], data_transformed['y'], c=[rep_colours[rep] for rep in replicates], alpha=0.7)
    ax.set_title(strain, fontdict={'fontsize': 13, 'fontfamily': font})
    adjust_axes_lim(ax=ax, min_x_thresh=MIN_X, max_x_thresh=MAX_X, min_y_thresh=MIN_Y, max_y_thresh=MAX_Y)
    # Put x axis tickmarks at sample times (every 4h; gtaC was sampled on a
    # denser grid, so all of its time points are labelled).
    sampling_times = [time for time in data_transformed['x'].unique() if time % 4 == 0]
    if strain == 'gtaC':
        sampling_times = data_transformed['x'].unique()
    ax.set_xticks(sampling_times)
    ax.set_xticks(data_transformed['x'].unique(), minor=True)
    ax.tick_params(axis='x', which='minor', length=5)
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig(path_save + 'GAM_' + strain + '.pdf')
    plt.close()
# Plot replicate legend for all images - needed for editing images later.
# Dummy points at (0, 0) are drawn only to obtain labelled legend handles;
# the axes themselves are hidden before saving.
fig, ax = plt.subplots()
for strain in conditions['Strain'].unique():
    data_transformed = DATA_TRANSFORMED.query('Strain =="' + strain + '"')
    replicates = data_transformed['Replicate']
    replicates_unique = list(replicates.unique())
    cmap = plt.get_cmap(replicates_cmap_name).colors[:len(replicates_unique)]
    rep_colours = dict(zip(replicates_unique, cmap))
    print(rep_colours)
    for rep, color in rep_colours.items():
        plt.scatter([0], [0], c=[color], label=rep,
                    alpha=1, edgecolors='none')
ax.legend(ncol=3)
ax.axis('off')
plt.savefig(path_save + 'GAM_replicates_legend.pdf')
plt.close()
# *** Strain plots with developmental stages annotation (UNUSED)
# See combined stages plot for legend
matplotlib.rcParams.update({'font.size': 15})
for strain in conditions['Strain'].unique():
    fig, ax = plt.subplots(figsize=(10, 4))
    # Drop per-sample 'alpha' so the plot falls back to the default point alpha.
    data_plot = DATA_TRANSFORMED.query('Strain =="' + strain + '"').drop('alpha', axis=1)
    data_plot['linestyle'] = ['solid'] * data_plot.shape[0]
    data_plot['width'] = [linewidth_mutant] * data_plot.shape[0]
    # Plot each replicate with different symbol shape
    replicates = list(data_plot['Replicate'].unique())
    replicates_map = dict(zip(replicates, ['o', '^', 'd', 's', 'X', '*', 'v']))
    data_plot['shape'] = [replicates_map[rep] for rep in data_plot['Replicate']]
    # Lines come from the pre-fitted GAMs; points are stage-coloured and jittered.
    dim_reduction_plot(data_plot,
                       plot_by='Strain', fig_ax=(fig, ax), order_column='x',
                       colour_by_phenotype=True, legend_groups=None, legend_phenotypes=None,
                       add_name=False, fontsize=11, colours={GROUPS[strain]: 'black'},
                       add_avg=True, alternative_lines=strain_GAMs, sep_text=(15, 30), jitter_all=True,
                       point_alpha=0.5, point_size=40, plot_lines=True, jitter_strength=(0.03, 0.05))
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_xlabel('Time')
    ax.set_ylabel('PC1')
    adjust_axes_lim(ax=ax, min_x_thresh=MIN_X, max_x_thresh=MAX_X, min_y_thresh=MIN_Y, max_y_thresh=MAX_Y)
    fig.suptitle(strain, fontsize=15, fontfamily=font)
    # Put x axis tickmarks at sample times
    sampling_times = [time for time in data_plot['x'].unique() if time % 4 == 0]
    if strain == 'gtaC':
        sampling_times = data_plot['x'].unique()
    ax.set_xticks(sampling_times)
    ax.set_xticks(data_plot['x'].unique(), minor=True)
    ax.tick_params(axis='x', which='minor', length=5)
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig(path_save + 'stagesGAM_' + strain + '.pdf')
    plt.close()
# *** Plot with GAM fits of all strains
# Order in which to plot strains for best visibility: grouped by phenotype,
# with the reference AX4 drawn last so it stays on top.
strains = list(conditions['Strain'].unique())  # NOTE(review): currently unused below
plot_order = conditions[['Strain', 'Group']].copy()
plot_order['Group'] = pd.Categorical(plot_order['Group'],
                                     categories=['agg-', 'prec', 'WT', 'sFB', 'tag', 'cud', 'lag_dis', 'tag_dis'],
                                     ordered=True)
plot_order = list(plot_order.sort_values('Group')['Strain'].unique())
plot_order.remove('AX4')
plot_order = plot_order + ['AX4']
matplotlib.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(figsize=(10, 10))
# Only the GAM lines and strain-name annotations are drawn, no sample points.
dim_reduction_plot(DATA_TRANSFORMED, plot_by='Strain', fig_ax=(fig, ax), order_column='x',
                   colour_by_phenotype=False, legend_groups='upper left',
                   add_name=True,
                   fontsize=13, plot_order=plot_order, plot_points=False,
                   alternative_lines=strain_GAMs, sep_text=(15, 30))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlabel('Time')
ax.set_ylabel('PC1')
adjust_axes_lim(ax=ax, min_x_thresh=MIN_X, max_x_thresh=MAX_X, min_y_thresh=MIN_Y, max_y_thresh=MAX_Y)
fig.suptitle("GAM fits to PC1 vs time of all strains",
             fontdict={'fontsize': 13, 'fontfamily': font})
# Put x axis tickmarks at sample times
sampling_times = [time for time in DATA_TRANSFORMED['x'].unique() if time % 4 == 0]
ax.set_xticks(sampling_times)
ax.set_xticks(DATA_TRANSFORMED['x'].unique(), minor=True)
ax.tick_params(axis='x', which='minor', length=5)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(path_save + 'GAM_combined.pdf')
plt.close()
# *** Plot PC1 vs time of all strains with stage annotations (UNUSED)
matplotlib.rcParams.update({'font.size': 15})
fig, ax = plt.subplots(figsize=(10, 10))
# Stage-coloured jittered points only (plot_lines=False); reuses plot_order
# from the combined GAM plot above.
dim_reduction_plot(DATA_TRANSFORMED.drop('alpha', axis=1), plot_by='Strain', fig_ax=(fig, ax), order_column='x',
                   colour_by_phenotype=True, legend_groups=None, legend_phenotypes='upper left',
                   add_name=False, fontsize=11, plot_order=plot_order,
                   add_avg=True, alternative_lines=strain_GAMs, sep_text=(15, 30), jitter_all=True,
                   point_alpha=0.5, point_size=30, plot_lines=False, jitter_strength=(0.01, 0.01))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlabel('Time')
ax.set_ylabel('PC1')
adjust_axes_lim(ax=ax, min_x_thresh=MIN_X, max_x_thresh=MAX_X, min_y_thresh=MIN_Y, max_y_thresh=MAX_Y)
a = fig.suptitle('PC1 vs time of all strains with annotated stages',
                 fontdict={'fontsize': 13, 'fontfamily': font})
sampling_times = [time for time in DATA_TRANSFORMED['x'].unique() if time % 4 == 0]
ax.set_xticks(sampling_times)
ax.set_xticks(DATA_TRANSFORMED['x'].unique(), minor=True)
ax.tick_params(axis='x', which='minor', length=5)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(path_save + 'stages_combined.pdf')
plt.close()
| true |
9ebac17bc5d1bf00bc15c84dea3f07b9b9f37379 | Python | spatankar276/FacebookLogIn | /facebook_auto_login.py | UTF-8 | 363 | 2.671875 | 3 | [] | no_license |
from selenium import webdriver
import time
browser = webdriver.Chrome()
browser.get('https://www.facebook.com/')
emailElem = browser.find_element_by_id('email')
emailElem.send_keys('Enter your email:')
passwordElem = browser.find_element_by_id('pass')
passwordElem.send_keys('Enter your password:')
login = browser.find_element_by_id('loginbutton')
login.click()
| true |
14281f5f8d1425ad347217d16182c804ffbcf275 | Python | vincentlal/DIMY | /dbf.py | UTF-8 | 5,919 | 2.859375 | 3 | [] | no_license | # Task 6 and 7
from CustomBloomFilter import CustomBloomFilter
from datetime import datetime, timedelta
import time
import threading
from QBF import QBF
from CBF import CBF
import requests
class DBF():
    """A Daily Bloom Filter (DBF) covering one time window.

    Encounter IDs (EncIDs) observed between ``startTime`` and ``endTime`` are
    inserted into an underlying ``CustomBloomFilter`` (800000 bits, 3 hashes).
    """

    def __init__(self, startTime, endTime):
        self._startTime = startTime
        self._endTime = endTime
        self._dbf = CustomBloomFilter(filter_size=800000, num_hashes=3)

    def __contains__(self, encID):
        """Probabilistic membership test against the underlying filter."""
        return encID in self._dbf

    def __repr__(self):
        # Fixed: the literal was missing its closing parenthesis.
        return f'DBF(startTime:{self._startTime}, endTime:{self._endTime})'

    def state(self):
        """Return the indexes of the currently set bits."""
        return self._dbf.getIndexes()

    @property
    def startTime(self):
        return self._startTime

    @property
    def endTime(self):
        return self._endTime

    def add(self, encID):
        """Insert an EncID and log the resulting filter state."""
        indexes = self._dbf.add(encID)
        # print(f'Inserting EncID into DBF at positions: {str(indexes)[1:-1]}')
        print(f'DBF state after insertion: {self._dbf.getIndexes()}')

    @property
    def filter(self):
        """Raw bit array of the underlying Bloom filter."""
        return self._dbf.filter
class DBFManager():
    """Maintains a rolling window of up to six DBFs, cycling to a fresh DBF
    every ``_cycleRate`` seconds on a daemon thread, and periodically combines
    them into a QBF (query) or CBF (contact upload) sent to the backend server.
    """

    # Constructor
    def __init__(self):
        self._dbfList = []                     # rolling window, oldest first, max 6 entries
        self._qbf = None                       # most recently combined QBF (lazy)
        self._processStarted = time.time()     # anchor for drift-free cycling
        self._cycles = 0                       # cycles since last QBF; 7 disables QBF generation
        self._cycleRate = 600 # how many seconds 1 DBF is to be used for
        # Create initial DBF object
        start = datetime.now()
        end = datetime.now() + timedelta(seconds=self._cycleRate)
        dbfObj = DBF(start, end)
        self._dbfList.append(dbfObj)
        print("#############################################################")
        print("Create DBF(" + start.strftime("%Y-%m-%d %H:%M:%S") + ", " + end.strftime("%Y-%m-%d %H:%M:%S") + ")")
        # Start daemon thread for cycling DBFs
        self._dbfThread = threading.Thread(target=self.initialiseDBFCycling, name='DBF-Cycler', daemon=True)
        self._dbfThread.start()

    # Cycle DBFs every 10 minutes with no drift: sleep until the next multiple
    # of _cycleRate measured from process start, rather than a fixed interval.
    def initialiseDBFCycling(self):
        while True:
            time.sleep(self._cycleRate - ((time.time() - self._processStarted) % float(self._cycleRate)))
            self.cycleDBFs()

    def __repr__(self):
        return str(self._dbfList)

    def cycleDBFs(self):
        """Retire the oldest DBF when 6 exist, open a new one, and every sixth
        cycle combine the window into a QBF and query the backend."""
        start = self._dbfList[-1].endTime
        end = start + timedelta(seconds=self._cycleRate)
        if (len(self._dbfList) == 6):
            self._dbfList.pop(0)
        print("#############################################################")
        print("Create new DBF(" + start.strftime("%Y-%m-%d %H:%M:%S") + ", " + end.strftime("%Y-%m-%d %H:%M:%S") + ")")
        self._dbfList.append(DBF(start,end))
        self._cycles += 1
        if (self._cycles == 6):
            self._cycles = 0
            self.setQBF()
            self.sendQBFToEC2Backend()

    # Add EncID to DBF
    def addToDBF(self, encID):
        # NOTE(review): uploadCBF() sets _cycles to 7, but the next cycleDBFs()
        # increments it past 7, so this guard only blocks temporarily —
        # confirm whether '>= 7' was intended.
        if (self._cycles == 7):
            return
        dbfObj = self._dbfList[-1]
        start = dbfObj.startTime
        end = dbfObj.endTime
        print("#############################################################")
        print("Inserting " + encID.hex() + " into the DBF(" + start.strftime("%Y-%m-%d %H:%M:%S") + ", " + end.strftime("%Y-%m-%d %H:%M:%S") + ")")
        dbfObj.add(encID) # add encID to current DBF

    def combineIntoQBF(self):
        """Combine the current DBF window into a query Bloom filter."""
        return QBF(self._dbfList)

    def setQBF(self):
        """Build and cache the QBF from the current DBF window."""
        print("#############################################################")
        print(f'Combining DBFs into a single QBF: {datetime.now()}')
        self._qbf = self.combineIntoQBF()
        print(f'Next Query Time: {datetime.now() + timedelta(hours=1)}')

    def combineIntoCBF(self):
        """Combine the current DBF window into a contact Bloom filter."""
        return CBF(self._dbfList)

    def sendQBFToEC2Backend(self):
        """POST the cached QBF to the backend and report the match result."""
        if (self._qbf == None):
            # print('here')
            self.setQBF()
        print("#############################################################")
        print('Uploading QBF to backend server...')
        # url = "http://ec2-3-26-37-172.ap-southeast-2.compute.amazonaws.com:9000/comp4337/qbf/query"
        url = "http://localhost:55000/comp4337/qbf/query"
        payload = self._qbf.rawJSON()
        headers = {"Content-Type": "application/json"}
        res = requests.request("POST", url, json=payload, headers=headers)
        resJSON = res.json()
        print("#############################################################")
        if (resJSON['result'] == "No Match."):
            print("QBF Uploaded to EC2 Server - Result: No Match - You are safe.")
        else:
            print("QBF Uploaded to EC2 Server - Result: Match - You are potentially at risk.")
            print("Please consult a health official, self-isolate and do a COVID-19 test at your earliest convenience.")

    def uploadCBF(self):
        """POST the CBF (positive-diagnosis upload); on success QBF generation
        is disabled by pushing _cycles past 6."""
        print("#############################################################")
        # url = "http://ec2-3-26-37-172.ap-southeast-2.compute.amazonaws.com:9000/comp4337/cbf/upload"
        url = "http://localhost:55000/comp4337/cbf/upload"
        payload = self.combineIntoCBF().rawJSON()
        headers = {"Content-Type": "application/json"}
        print('Uploading CBF to backend server...')
        res = requests.request("POST", url, json=payload, headers=headers)
        resJSON = res.json()
        print("#############################################################")
        if (resJSON['result'] == "Success"):
            print("CBF successfully uploaded to EC2 Server")
            self._cycles = 7 # Setting this to > 6 will disable QBF generation
            print('QBF generation has been disabled')
        else:
            print("Failed to upload CBF - rejected by EC2 Server")

    def __contains__(self, encID):
        # Membership in any DBF of the current window.
        for dbf in self._dbfList:
            if (encID in dbf):
                return True
        return False
bcd2503741b32e25a532fffbe657fc4c3298c043 | Python | anlar/prismriver-lyrics | /prismriver/plugin/alivelyrics.py | UTF-8 | 1,421 | 2.703125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from prismriver.plugin.common import Plugin
from prismriver.struct import Song
class AliveLyricsPlugin(Plugin):
    """Lyrics provider backed by alivelyrics.com."""

    ID = 'alivelyrics'

    def __init__(self, config):
        super(AliveLyricsPlugin, self).__init__('AliveLyrics', config)

    def search_song(self, artist, title):
        """Fetch the song page and parse artist, title and lyrics from it."""
        # Characters the site drops when building artist/title URL slugs.
        strip_chars = [' ', '.', ',', '&', '?', '!', "'", '"', '/', '(', ')']
        letter_part = self.prepare_url_parameter(artist[0])
        artist_part = self.prepare_url_parameter(artist, to_delete=strip_chars)
        title_part = self.prepare_url_parameter(title, to_delete=strip_chars)
        page_url = 'http://www.alivelyrics.com/{}/{}/{}.html'.format(letter_part, artist_part, title_part)

        html = self.download_webpage_text(page_url)
        if not html:
            return None

        soup = self.prepare_soup(html)
        # The breadcrumb bar holds the canonical artist name and song title.
        breadcrumbs = soup.find('div', {'class', 'navbar'})
        crumb_links = breadcrumbs.findAll('a')
        song_artist = crumb_links[2].text
        song_title = crumb_links[3].next_sibling[2:].strip()

        lyrics_block = soup.find('pre', {'class': 'lyrics'})
        verse = self.parse_verse_block(lyrics_block)
        return Song(song_artist, song_title, self.sanitize_lyrics([verse]))

    def prepare_url_parameter(self, value, to_delete=None, to_replace=None, delimiter='-', quote_uri=True,
                              safe_chars=None):
        """Delegate to the base implementation, then lower-case the result."""
        encoded = super().prepare_url_parameter(value, to_delete, to_replace, delimiter, quote_uri, safe_chars)
        return encoded.lower()
| true |
f93c4d06f42b13e95145d0bfd2ba81ae6f6d6417 | Python | EricMontague/MailChimp-Newsletter-Project | /server/tests/unit/flask_app/models/test_user.py | UTF-8 | 1,506 | 2.984375 | 3 | [] | no_license | """This module contains unit tests for the user model."""
from pytest import raises
from app.models import User
from unittest.mock import patch
@patch("app.models.user.generate_password_hash")
def test_setting_password(generate_password_mock):
"""Test that when the password is set,
a password hash is generated.
"""
generate_password_mock.return_value = "wgion9402trgjw"
user = User(
username="firstuser",
password="password",
email="firstuser@gmail.com"
)
assert user.password_hash is not None
assert user.password_hash == "wgion9402trgjw"
@patch("app.models.user.check_password_hash")
def test_password_verification_successful(check_password_mock, user):
"""Test user password verification when the correct
password is given.
"""
check_password_mock.return_value = True
assert user.verify_password("password") is True
@patch("app.models.user.check_password_hash")
def test_password_verification_successful(check_password_mock, user):
"""Test user password verification when the incorrect
password is given.
"""
check_password_mock.return_value = False
assert user.verify_password("password") is False
def test_no_password_getter(user):
    """Reading the write-only password attribute must raise."""
    with raises(AttributeError):
        _ = user.password
def test_repr(user):
    """The developer-facing representation embeds the username."""
    expected = f"<User: '{user.username}'>"
    assert repr(user) == expected
| true |
21622897fc03b0a41d3d99c1fdcd7c936c93ac6b | Python | leylagcampos/Python | /Review_III.py | UTF-8 | 1,342 | 3.65625 | 4 | [] | no_license | #Review III
class Persona:
    """A person with the basic attributes needed for metabolic calculations.

    Attributes: peso (weight, kg), altura (height, cm), edad (age, years),
    sexo ("F"/"M"). The name is kept private (name-mangled).
    """

    def __init__(self, nombre, pesokg, alturacm, edad, sexo):
        self.__nombre = nombre
        self.peso = pesokg
        self.altura = alturacm
        self.edad = edad
        self.sexo = sexo

    def Reporte(self):
        """Print a short greeting summarising the person's data."""
        print("hola ", self.__nombre, " actualmente tienes ", self.edad, " años,mides ", self.altura, " y pesas ", self.peso)
class TasaMetabólicaBasal(Persona):
    """Basal metabolic rate (kcal/day) computed by three formulas.

    Class-level defaults are shadowed by instance attributes once the
    corresponding method has been called. Formulas assume weight in kg and
    height in cm — TODO confirm callers pass cm, not metres.
    """
    calorias1=0
    calorias2=0
    calorias3=0
    def HarrisBenedict(self):
        """Harris-Benedict BMR, stored in calorias1."""
        if self.sexo=="F":
            self.calorias1=665+(9.6 * self.peso)+(1.85* self.altura)-(4.7* self.edad)
        elif self.sexo=="M":
            self.calorias1=66.5+(13.8*self.peso)+(5*self.altura)-(6.8 * self.edad)
    def MifflinStJeor(self):
        """Mifflin-St Jeor BMR, stored in calorias2."""
        if self.sexo=="F":
            self.calorias2=(10* self.peso)+(6.25* self.altura)-(5* self.edad)-161
        elif self.sexo=="M":
            self.calorias2=(10*self.peso)+(6.25*self.altura)-(5* self.edad)+ 5
    def OMS(self):
        """WHO (OMS) BMR — female 18-30 equation only; stored in calorias3."""
        self.calorias3=(14.7*self.peso)+496 # TODO: complete here (male branch) and the activity-level formulas
    def Reporte1(self):
        """Print all three estimates. NOTE(review): only the Mifflin value is
        multiplied by the 1.2 activity factor — confirm that is intended."""
        print("Según HarrisBenedict deberías consumir: ",self.calorias1,", segun Mifflin :",
              self.calorias2*1.2," , según la OMS :",self.calorias3, " calorias")
# BUG FIX: the constructor parameter is named "alturacm" (centimetres) and
# the Harris-Benedict / Mifflin-St Jeor terms use centimetre-based
# coefficients, but 1.55 (metres) was being passed, badly skewing both
# estimates. 1.55 m -> 155 cm.
Persona1 = TasaMetabólicaBasal("Leyla", 55, 155, 21, "F")
Persona1.HarrisBenedict()
Persona1.MifflinStJeor()
Persona1.OMS()
Persona1.Reporte()
Persona1.Reporte1()
6c4e3209622e554f4be2ca3db7ba660daf7bc1ee | Python | billkabb/learn_python | /MIT/w2l4.py | UTF-8 | 123 | 3.125 | 3 | [] | no_license | x=5
# Repeated multiplication: after the loop, result == x ** p.
# NOTE(review): relies on x being defined earlier in this file (x = 5).
p = 4
result = 1
for turn in range(p):
    # Printed before the update, so each line shows the previous product.
    print('iteration:' + str(turn) + 'current result' + str(result))
    result *= x
| true |
05baae3e27c03383e053891bd283b9998b94f4f7 | Python | fidler3/Puzzle-Problems | /CH2 deletemiddle/deletemiddle.py | UTF-8 | 357 | 2.734375 | 3 | [] | no_license |
def deletemiddle(node):
    """Unlink the middle node of the singly linked list headed at *node*.

    For a list of length n the node at 0-based index n // 2 is removed
    (for even lengths, the second of the two middle nodes), which is the
    behaviour the original two-pointer walk produced for even-length lists.

    Bugs fixed: the original executed a trailing
    ``by1.next = by1.next.next`` after the loop even when the middle node
    had already been unlinked inside the loop, so odd-length lists lost
    two nodes; it also raised AttributeError on a single-node list.

    Lists with fewer than two nodes are left unchanged — there is no
    predecessor that can be rewired around a middle node.
    """
    if node is None or node.next is None:
        return
    # Classic slow/fast walk: fast starts two ahead and moves two steps per
    # iteration, so slow halts on the predecessor of the middle node.
    slow = node
    fast = node.next.next
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    slow.next = slow.next.next
| true |
922332ca9df6c20f713d016e3708192087983022 | Python | raczandras/szkriptnyelvek | /OM/4/listcomp.py | UTF-8 | 881 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python3
def main():
    """Solutions to list-comprehension exercises; prints each result."""
    # 1: upper-case each word and append '!'
    inp = ['auto', 'villamos', 'metro']
    print([s.upper() + '!' for s in inp])
    # 2: capitalize each name
    inp = ['aladar', 'bela', 'cecil']
    print([s.capitalize() for s in inp])
    # 3: ten zeros — sequence repetition is the idiom for a constant fill
    print([0] * 10)
    # 4: doubles of 1..10
    print([n * 2 for n in range(1, 10 + 1)])
    # 5: 1..10 unchanged (the original's int(s) over ints was a no-op)
    print(list(range(1, 10 + 1)))
    # 6: each digit of the string as an int
    print([int(ch) for ch in "1234567"])
    # 7: word lengths. FIX: the original used the unidiomatic unbound-method
    # call list(str.split('...')); '...'.split() already returns a list.
    inp = 'The quick brown fox jumps over the lazy dog'.split()
    print([len(s) for s in inp])
    # 8 (left unsolved in the original)
#############################################################################
# Run the exercises only when executed directly (not on import).
if __name__ == "__main__":
    main()
| true |
962a429939bb83352f2e3553a413199749434c35 | Python | yilmazmuhammed/ITU-CE-Courses | /BIL103E - Intr. to Inf. Syst.&Comp. Eng./Course Files/OtherExamples/Bottle Forms/time6.py | UTF-8 | 924 | 2.65625 | 3 | [] | no_license | from bottle import route, run, request, static_file
from datetime import datetime
from pytz import timezone
def htmlify(text, title):
    """Wrap *text* in a minimal HTML5 document with the given *title*."""
    context = {'text': text, 'title': title}
    document = """
<!doctype html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <title>%(title)s</title>
  </head>
  <body>
%(text)s
  </body>
</html>
""" % context
    return document
def css(fname):
    """Serve stylesheet *fname* from the css directory."""
    return static_file(fname, root='/home/docean/tmp/css')
route('/css/<fname>', 'GET', css)
def home_page():
    """Serve the static home page document."""
    return static_file('homepage6.html',root='/home/docean/tmp/static')
# Register the handler for GET requests on the site root.
route('/','GET',home_page)
def time_page():
    """Handle the posted form: render the current time in the chosen zone."""
    tz_name = request.POST["zone"]
    now = datetime.now(timezone(tz_name))
    return htmlify(str(now), "Time in zone")
route('/time', 'POST', time_page)

# Start the development server.
run(debug=True)
| true |
260709005251c8a38b1b634aeb9cf7a6529c7de8 | Python | btchope/electrumq | /use_conf.py | UTF-8 | 654 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
__author__ = 'zhouqi'

# NOTE: "ConfigParser" is the Python 2 module name (renamed "configparser"
# in Python 3); this script targets Python 2. One import per line.
import ConfigParser
import os

# Demonstration of building an INI-style config and writing it to disk.
config = ConfigParser.ConfigParser()
# config.readfp(open('defaults.cfg'))
# config.read(['site.cfg', os.path.expanduser('~/.myapp.cfg')])

config.add_section('Section1')
config.set('Section1', 'an_int', '15')
config.set('Section1', 'a_bool', 'true')
config.set('Section1', 'a_float', '3.1415')
config.set('Section1', 'baz', 'fun')
config.set('Section1', 'bar', 'Python')
# %(bar)s / %(baz)s are expanded by ConfigParser interpolation on get().
config.set('Section1', 'foo', '%(bar)s is %(baz)s!')
# NOTE(review): a dict is not a string; ConfigParser expects string values
# and write() will just str() this — confirm intent.
config.set('Section1', 'foo2', {'a': 2})

# BUG FIX: the option is named 'an_int' (set above); 'an_int1' was never
# defined and made this line raise ConfigParser.NoOptionError.
# (Parenthesized print works identically under Python 2 and 3.)
print(config.get('Section1', 'an_int'))

with open('example.cfg', 'wb') as configfile:
    config.write(configfile)
909ec4a55f5b4feb56be6bfacc6cb7828409a63c | Python | Silentsoul04/PythonCode | /Import/module/package.py | UTF-8 | 198 | 2.8125 | 3 | [] | no_license | from __future__ import print_function
class MyClass(object):
    """Small demo class: stores one value and shows it via __repr__."""
    # Class-level attribute. NOTE(review): __init__ stores the constructor
    # argument on self._p, so this class attribute always stays None and
    # appears unused — possibly a leftover, or it was meant to be updated;
    # confirm intent before relying on MyClass.p.
    p = None
    def __init__(self, p):
        # Keep the value on a "private" instance attribute.
        self._p = p
    def __repr__(self):
        # Renders as: MyClass.p = <value>
        return "MyClass.p = {}".format(self._p)
| true |