blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4fa2c2e79cee07cb0a16668b515674d2360a4389
|
Python
|
juliawarner/TEA-SeniorDesign
|
/Code/Testing/ControllerSocket_test.py
|
UTF-8
| 1,126
| 3.40625
| 3
|
[] |
no_license
|
# TEA@UCF Senior Design Mechatronics Project
# Julia Warner
# Tests connection between Raspberry Pi and controller computer using sockets.

# python library used to create sockets
import socket

# constants for IP addresses and port number
RASPI_IP = '192.168.2.7'
PORT_NUM = 14357

# create socket object (TCP stream socket)
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # connect to raspberry pi
    # bug fix: the original passed a duplicated magic number 14357 here,
    # so changing PORT_NUM had no effect; use the constant instead
    connection.connect((RASPI_IP, PORT_NUM))
    # print success message
    print(f"Connection to Raspberry Pi on {RASPI_IP} established!")
    # start waiting for user input; each line is sent to the Pi and the
    # reply is echoed until the Pi announces it is closing the connection
    running = True
    while running:
        # get user input
        userInput = input()
        # send message to the raspberry pi
        connection.send(bytes(userInput, "utf-8"))
        # receive message from raspberry pi (up to 1024 bytes)
        rawResponse = connection.recv(1024)
        response = rawResponse.decode("utf-8")
        # print response
        print(f'The Raspberry Pi said: {response}')
        # check if raspberry pi closed the connection
        if response == 'I heard stop. Closing connection':
            print('Closing controller test program')
            running = False
finally:
    # end connection even if an error occurred mid-session
    # (the original leaked the socket on any exception)
    connection.close()
| true
|
d0fb631207723870089eb8990e0bc4e178d78773
|
Python
|
Harshxz62/CourseMaterials
|
/SEM 5/Computer Networks/LAB/week5/PES1201800125_Tanishq_Vyas_WEEK5_Submission/TCPClient.py
|
UTF-8
| 391
| 3.328125
| 3
|
[] |
no_license
|
#TCP Client
# Minimal TCP client: connects to the host/port given on the command
# line, sends one line of user input, prints the server's reply.
# Usage: python TCPClient.py <server-host> <server-port>
from socket import *
import sys

serverName = sys.argv[1]        # server hostname or IP from argv
serverPort = int(sys.argv[2])   # server port from argv
# TCP (stream) socket
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName,serverPort))
sentence = input("Input lowercase sentence: ")
clientSocket.send(str.encode(sentence))
# block until up to 1024 bytes arrive back from the server
modifiedSentence = clientSocket.recv(1024)
print("From Server: ", modifiedSentence.decode())
clientSocket.close()
| true
|
87c86ae9689c7e19e1516c7a7793cf5ed3d490ab
|
Python
|
VachaArraniry/python_portfolio
|
/listbasic.py
|
UTF-8
| 501
| 4.46875
| 4
|
[] |
no_license
|
# Basic list operations demo: length, append, indexing (positive and
# negative), insert at an index, and iteration.
fruits = ["apple", "durian", "strawberry", "orange"]  # list literal
print(len(fruits))
fruits.append("kiwi")  # add an item to the end of the list
print(len(fruits))
print(fruits[1])  # second item
# get orange from the list
print(fruits[3])
# get the last fruit from the list, two equivalent ways
print(fruits[-1])                # simple version: negative index
print(fruits[len(fruits) - 1])   # mathematical version
# add an item at a specific index
fruits.insert(2, 'rambutan')
print(fruits)
# loop over every fruit, shouting each one
for item in fruits:
    print(item.upper())
| true
|
e3e7bde92a428499e972dc3dbf95dcf70b6285b6
|
Python
|
vipulmjadhav/python_programming
|
/python_programs/file_handling/writeFile.py
|
UTF-8
| 113
| 3.125
| 3
|
[] |
no_license
|
file = open('demo.txt','r+')
file.write('program for writinng in file')
for i in file:
print(i)
file.close()
| true
|
adc8e46204d1c54c5d00622c20dd1ea307c5f069
|
Python
|
DaHuO/Supergraph
|
/codes/CodeJamCrawler/CJ/16_0_1_abhigupta4_Counting_Sheep.py
|
UTF-8
| 432
| 3.46875
| 3
|
[] |
no_license
|
def counting_sheep(n):
    """Return how many multiples of n must be listed before every digit
    0-9 has appeared; "INSOMNIA" when n == 0 (the digits never change).
    """
    if n == 0:
        return "INSOMNIA"
    # digits seen so far, starting with n itself (multiplier 1)
    seen_digits = set(str(n))
    multiplier = 1
    while len(seen_digits) != 10:
        multiplier += 1
        seen_digits.update(str(n * multiplier))
    return multiplier
# Python 2 driver: first stdin line is the number of cases, each later
# line one value N. (Py2 input() eval()s the line, so N is an int.)
for i in range(input()):
    N = input()
    if N == 0:
        print "Case #" + str(i + 1) + ": INSOMNIA"
    else:
        # counting_sheep returns the multiplier; the answer is the last
        # number actually listed, N * count
        print "Case #" + str(i + 1) + ": " + str(counting_sheep(N) * N)
| true
|
5eb93e10a99bfa513e9fcbb6cc1d5adb4858e0a2
|
Python
|
yuhanlyu/coding-challenge
|
/lintcode/route_between_two_nodes_in_graph.py
|
UTF-8
| 772
| 3.578125
| 4
|
[] |
no_license
|
# Definition for a Directed graph node
# class DirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
from collections import deque
class Solution:
    """
    @param graph: A list of Directed graph node
    @param s: the starting Directed graph node
    @param t: the terminal Directed graph node
    @return: a boolean value
    """
    def hasRoute(self, graph, s, t):
        # Standard BFS from s. `graph` itself is not consulted because
        # each node carries its own adjacency list in `.neighbors`.
        seen = {s}
        frontier = deque([s])
        while frontier:
            current = frontier.popleft()
            if current == t:
                return True
            for nxt in current.neighbors:
                if nxt not in seen:
                    seen.add(nxt)
                    frontier.append(nxt)
        return False
| true
|
20e4ad56d3e6273cbd33a2cae0d47b9bc489f561
|
Python
|
ZAKERR/-16-python-
|
/软件第六次作业/软件162/2016021103李洋/图形练习.py
|
UTF-8
| 733
| 3.515625
| 4
|
[] |
no_license
|
# Earlier turtle experiments kept for reference (disabled):
#import turtle
#t=turtle.Pen()
'''
#t.circle(50,steps=4)
t.speed(5)
turtle.goto(0,0)
for i in range(4):
turtle.forward(100)
turtle.right(90)
turtle.home()
turtle.circle(50,270)
'''
'''
import turtle
t=turtle.Pen()
t.speed(1000)
for x in range(1000):
t.circle(x)
t.left(91)
'''
'''
for x in range(1,19):
t.forward(100)
if x%2==0:
t.left(175)
else:
t.left(225)
'''
# Active drawing: five colored circles on a purple background.
import turtle
p = turtle  # alias the module; drawing uses the anonymous default turtle
turtle.bgcolor("purple")
# ring colors, and the (x, y) position to jump to after each ring
color=["red","yellow","green","blue","black"]
a=[60,120,90,30,0]
b=[0,0,-30,-30,0]
for i in range(5):
    p.pensize(3)
    p.color(color[i])
    p.circle(30,360)   # full circle of radius 30
    p.pu()             # pen up: reposition without drawing
    p.goto(a[i],b[i])
    p.pd()             # pen down for the next ring
turtle.done()
| true
|
d9ee09db1451c800249c4257d1d883019ed2bbec
|
Python
|
bc0403/EC2019
|
/Alexander-6th/pp_2p14.py
|
UTF-8
| 173
| 2.984375
| 3
|
[] |
no_license
|
# Delta -> wye resistor network conversion for three given resistances.
R1 = 10
R2 = 20
R3 = 40
# Sum of the pairwise products, shared by all three formulas.
Rsum = R1*R2 + R2*R3 + R3*R1
Ra, Rb, Rc = Rsum / R1, Rsum / R2, Rsum / R3
print(f"Ra, Rb, and Rc is {Ra:.4f}, {Rb:.4f}, and {Rc:.4f} ohm, respectively.")
| true
|
6f14849a3e67e169f00fd581da04e2db71533c19
|
Python
|
dcohashi/IntroPython2015
|
/students/ericrosko/session07/test_html_render.py
|
UTF-8
| 3,385
| 3.015625
| 3
|
[] |
no_license
|
import html_render as hr
from io import StringIO
import re # import regular expressions so I can strip spaces and \n's from text
def test_instantiate():
    # Element() must construct with no arguments
    e = hr.Element()

def test_create_content():
    # content passed to the constructor is stored
    e = hr.Element("stuff")
    assert e.content is not None

def test_content_None():
    # a no-arg Element must not record None as content
    e = hr.Element()
    print (e.content)
    assert None not in e.content

def test_content_This():
    # constructor content is retrievable
    e = hr.Element("this")
    print (e.content)
    assert "this" in e.content

def test_tag():
    # the base element's class-level tag is 'html'
    # NOTE(review): local `e` is unused here
    e = hr.Element("this")
    assert hr.Element.tag == 'html'

def test_append():
    # append() adds content while keeping what was already there
    e = hr.Element('this')
    e.append('that')
    assert 'that' in e.content
    assert 'this' in e.content
def test_render():
    # rendering the base element wraps its content in <html>...</html>
    e = hr.Element("this")
    e.append('that')
    f = StringIO()
    e.render(f)
    f.seek(0)
    text = f.read().strip()
    assert text.startswith("<html>")
    assert text.endswith("</html>")
    assert 'this' in text
    assert 'that' in text

def test_body():
    # Body renders with <body> tags
    e = hr.Body('this')
    f = StringIO()
    e.render(f)
    f.seek(0)
    text = f.read().strip()
    assert text.startswith("<body>")
    assert text.endswith("</body>")
    assert 'this' in text
def test_p():
    # P children render as <p> tags inside the parent's <html> wrapper
    e = hr.Element("this")
    e.append(hr.P('paragraph of text'))
    e.append(hr.P('another paragraph of text'))
    f = StringIO()
    e.render(f)
    f.seek(0)
    text = f.read().strip()
    print(text)
    assert '<p>' in text
    assert '</p>' in text
    assert '<html>' in text
    assert '</html>' in text
    assert 'paragraph of text' in text

def test_nest():
    # nested elements render in document order: html > body > p
    e = hr.Element()
    body = hr.Body()
    e.append(body)
    body.append(hr.P('a paragraph of text'))
    f = StringIO()
    e.render(f)
    f.seek(0)
    text = f.read().strip()
    #use regex to remove pretty-printing spaces and newlines
    text = re.sub('[\n ]','',text)
    print(text)
    assert text.startswith('<html><body>')
    assert text.endswith('</body></html>')

def test_title():
    # Title renders with <title> tags
    e = hr.Title('this')
    f = StringIO()
    e.render(f)
    f.seek(0)
    text = f.read().strip()
    assert text.startswith("<title>")
    assert text.endswith("</title>")
    assert 'this' in text
def test_paragraph_with_style_string():
    """A P element created with a style keyword renders the style
    attribute inside its opening tag."""
    e = hr.Element()
    body = hr.Body()
    body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text", style="text-align: center; font-style: oblique;"))
    # Bug fix: the original never attached `body` to `e`, so the
    # paragraph was not rendered at all.
    e.append(body)
    f = StringIO()
    e.render(f)
    f.seek(0)
    text = f.read().strip()
    # Bug fix: str.find returns -1 (truthy) when absent, so the original
    # bare `assert text.find(...)` could never fail; compare explicitly.
    assert text.find("<p style=\"text-align: center; font-style: oblique;\">") != -1
def test_hr():
    # a horizontal rule renders as a self-closing <hr /> inside body
    html = hr.Html()
    head = hr.Head()
    head.append( hr.Title("PythonClass = Revision 1087:") )
    html.append(head)
    body = hr.Body()
    horizontal_rule = hr.Hr()
    body.append(horizontal_rule)
    html.append(body)
    f = StringIO()
    html.render(f)
    f.seek(0)
    text = f.read().strip()
    print(text)
    assert text.find("<hr />")!= -1

def test_a():
    # an anchor element renders its href attribute and link text
    html = hr.Html()
    head = hr.Head()
    head.append( hr.Title("PythonClass = Revision 1087:") )
    html.append(head)
    body = hr.Body()
    horizontal_rule = hr.Hr()
    body.append(horizontal_rule)
    body.append(hr.A("http://google.com", "link"))
    html.append(body)
    f = StringIO()
    html.render(f)
    f.seek(0)
    text = f.read().strip()
    print(text)
    assert text.find("<a href=\"http://google.com\">link</a>")!= -1
    #assert True

def test_h2():
    # H(2, ...) renders an <h2> heading
    html = hr.Html()
    body = hr.Body()
    body.append(hr.H(2, "sample"))
    html.append(body)
    f = StringIO()
    html.render(f)
    f.seek(0)
    text = f.read().strip()
    print(text)
    assert text.find("<h2>sample</h2>") != -1
| true
|
52d586eddcc1772e92c3c4e7d0ce140336db702c
|
Python
|
Jayant1234/Marsh_Ann
|
/marsh_plant_dataset.py
|
UTF-8
| 2,755
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import cv2
import csv
import torch
from torch.utils.data import Dataset
from PIL import Image
class MarshPlant_Dataset_pa(Dataset):
    """Presence/absence dataset: each TSV row is an image path followed
    by 7 integer (0/1) labels in columns 1-7."""

    def __init__(self, infile, transform=None):
        """Read the TSV index file `infile`.

        transform: optional callable applied to the PIL image in
        __getitem__; when None the raw image is returned.
        """
        self.imgfiles = []
        self.anns = []
        self.transform = transform
        with open(infile, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                self.imgfiles.append(row[0])
                # columns 1..7 hold the presence/absence annotations
                ann = list(map(int, row[1:8]))
                self.anns.append(ann)

    def __len__(self):
        # number of indexed images
        return len(self.imgfiles)

    def __getitem__(self, idx):
        """Return {'X': image (transformed), 'Y': float label tensor}."""
        y = torch.tensor(self.anns[idx]).float()  # annotations
        im = Image.open(self.imgfiles[idx])
        # Bug fix: the original called self.transform unconditionally,
        # crashing with TypeError when the default transform=None was
        # used; fall back to the untransformed image instead.
        x = self.transform(im) if self.transform is not None else im
        return {'X': x, 'Y': y}
class MarshPlant_Dataset_pc(Dataset):
    """Percent-cover dataset: each TSV row is an image path followed by
    9 integer labels in columns 1-9 (vs. 7 for the presence/absence
    variant)."""

    def __init__(self, infile, transform=None):
        """Read the TSV index file `infile`.

        transform: optional callable applied to the PIL image in
        __getitem__; when None the raw image is returned.
        """
        self.imgfiles = []
        self.anns = []
        self.transform = transform
        with open(infile, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                self.imgfiles.append(row[0])
                # change to 1:8 for pa and 1:10 for percent cover
                ann = list(map(int, row[1:10]))
                self.anns.append(ann)

    def __len__(self):
        # number of indexed images
        return len(self.imgfiles)

    def __getitem__(self, idx):
        """Return {'X': image (transformed), 'Y': float label tensor}."""
        y = torch.tensor(self.anns[idx]).float()  # annotations
        im = Image.open(self.imgfiles[idx])
        # Bug fix: the original called self.transform unconditionally,
        # crashing with TypeError when the default transform=None was
        # used; fall back to the untransformed image instead.
        x = self.transform(im) if self.transform is not None else im
        return {'X': x, 'Y': y}
| true
|
c5501f6d2d9984127876f628dbae91e0a39129d1
|
Python
|
pkaff/Simulation_Tools_Project_2
|
/task3.py
|
UTF-8
| 1,521
| 2.515625
| 3
|
[] |
no_license
|
from numpy import cos, sin, array, zeros, arange
from numpy.linalg import norm
from scipy.optimize import fsolve
from squeezer import *
def gf(q):
    """Loop-closure residuals g(q) of the squeezer mechanism with the
    crank angle theta fixed at 0.

    q holds the six dependent angles (beta, gamma, phi, delta, omega,
    epsilon). Returns a length-6 array that is zero for a kinematically
    consistent configuration — suitable as the target of fsolve.
    """
    beta, gamma, phi, delta, omega, epsilon = q
    #beta = q[0]
    theta = 0  # driving angle held at zero for this task
    #theta = q[1]
    #gamma = q[1]
    #phi = q[2]
    #delta = q[3]
    #omega = q[4]
    #epsilon = q[5]
    # pre-computed trigonometric terms reused across the equations
    cobe = cos(beta)
    cobeth = cos(beta + theta)
    siga = sin(gamma)
    sibe = sin(beta)
    sibeth = sin(beta + theta)
    coga = cos(gamma)
    siphde = sin(phi + delta)
    cophde = cos(phi + delta)
    coomep = cos(omega + epsilon)
    siomep = sin(omega + epsilon)
    code = cos(delta)
    side = sin(delta)
    siep = sin(epsilon)
    coep = cos(epsilon)
    # link-length / geometry constants — presumably meters, from the
    # squeezer benchmark; confirm against the squeezer module
    rr = 7.e-3
    d = 28.e-3
    ss = 35.e-3
    e = 2.e-2
    zf = 2.e-2
    zt = 4.e-2
    u = 4.e-2
    # fixed pivot coordinates
    xb,yb=-0.03635,.03273
    xa,ya=-.06934,-.00227
    g=zeros((6,))
    # two scalar closure equations per kinematic loop
    g[0] = rr*cobe - d*cobeth - ss*siga - xb
    g[1] = rr*sibe - d*sibeth + ss*coga - yb
    g[2] = rr*cobe - d*cobeth - e*siphde - zt*code - xa
    g[3] = rr*sibe - d*sibeth + e*cophde - zt*side - ya
    g[4] = rr*cobe - d*cobeth - zf*coomep - u*siep - xa
    g[5] = rr*sibe - d*sibeth - zf*siomep + u*coep - ya
    return g
# Initial guess for the six angles (beta, gamma, phi, delta, omega,
# epsilon).
q0 = array([-0.36, # beta
0.1, # gamma
0.12, # phi
0.680, # delta
-0.5, # Omega
1.53])
# NOTE(review): the two assignments below overwrite the hand-picked
# guess above — fsolve actually starts from the zero vector.
q0 = arange(6)
q0 = zeros([6, 1])
q = fsolve(gf, q0)
# Reference solution: take the squeezer module's initial state, keep
# the first seven coordinates and drop theta (index 1), leaving the
# same six angles gf solves for.
y, yp = init_squeezer()
y = y.tolist()
y = y[:7]
del y[1]
#y = np.array(y[0] + y[2:7])
print("y: ", y)
print("q: ", q)
print("y - q: ", y - q)   # list - ndarray broadcasts elementwise
print(norm(y - q))
| true
|
9d8586dc78a8818fcd44087c69c2da63267ea420
|
Python
|
diegoami/datacamp-courses-PY
|
/pandas_13/pandas13_2.py
|
UTF-8
| 204
| 2.53125
| 3
|
[] |
no_license
|
import pandas as pd

# Inner-join the revenue and manager tables on their differently-named
# key columns ('city' in revenue vs 'branch' in managers).
revenue = pd.read_csv('../data/revenue.csv')
managers = pd.read_csv('../data/managers_2.csv')
combined = pd.merge(revenue, managers, left_on='city', right_on='branch')
print(combined)
| true
|
f14e337fd43e126d0e1ae1496d0d67c79bd21a30
|
Python
|
Andyeexx/-algorithm015
|
/Week_06/621-task-scheduler.py
|
UTF-8
| 238
| 2.578125
| 3
|
[] |
no_license
|
def leastInterval(self, tasks: List[str], n: int) -> int:
    """Minimum number of CPU intervals to run `tasks` with a cooldown of
    n intervals between identical tasks (LeetCode 621)."""
    counts = sorted(collections.Counter(tasks).values())
    peak = counts[-1]
    # frame-based lower bound: (peak-1) full frames of n+1 slots plus
    # one slot per task tied at the peak frequency
    frames = (peak - 1) * (n + 1) + counts.count(peak)
    # if tasks are diverse enough, they pack back-to-back instead
    return max(len(tasks), frames)
| true
|
8e83c3085dfd0fe041e51613cca55e3254e48266
|
Python
|
VitamintK/AlgorithmProblems
|
/leetcode/b38/c.py
|
UTF-8
| 850
| 3.03125
| 3
|
[] |
no_license
|
from collections import defaultdict
class Solution:
    def countSubstrings(self, s: str, t: str) -> int:
        """Count pairs (substring of s, substring of t) that differ in
        exactly one character (LeetCode 1638).

        Strategy: tabulate every substring of t with its multiplicity,
        then for each substring of s try all single-character mutations
        and add up the matching t-substring counts.
        """
        t_counts = defaultdict(int)
        for start in range(len(t)):
            for stop in range(start + 1, len(t) + 1):
                t_counts[t[start:stop]] += 1
        total = 0
        for start in range(len(s)):
            for stop in range(start + 1, len(s) + 1):
                piece = s[start:stop]
                # mutate each position to every other lowercase letter
                for pos, ch in enumerate(piece):
                    for letter in 'abcdefghijklmnopqrstuvwxyz':
                        if letter == ch:
                            continue
                        mutated = piece[:pos] + letter + piece[pos + 1:]
                        total += t_counts.get(mutated, 0)
        return total
| true
|
f638f245473224874feb74cba595299153f9a54e
|
Python
|
samudero/PyNdu001
|
/RootFinding.py
|
UTF-8
| 1,828
| 4.03125
| 4
|
[] |
no_license
|
# Root Finding [202]
"""
Created on Fri 23 Mar 2018
@author: pandus
"""
# Demonstrates four scipy.optimize root finders on the same cubic.
# NOTE(review): f is redefined identically before each section, and the
# "error is less than 1e-6" message is only nominal for newton/fsolve,
# which take no xtol argument here.
# ------------------------ Using bisection algorithm from scipy
from scipy.optimize import bisect

def f(x):
    """returns f(x) = x^3 - 2x^2. Has roots at x = 0 (double root) and x = 2"""
    return x ** 3 - 2 * x ** 2

# Main program starts here
# bracketing interval [1.5, 3] contains only the simple root x = 2
x = bisect(f, a = 1.5, b = 3, xtol = 1e-6)
print("Root x is approx. x = {:14.12g}." .format(x))
print("The error is less than 1e-6.")
print("The exact error is {}." .format(2 - x))
# ------------------------ The Newton method
from scipy.optimize import newton

def f(x):
    """returns f(x) = x^3 - 2x^2. Has roots at x = 0 (double root) and x = 2"""
    return x ** 3 - 2 * x ** 2

# main program starts here
x = newton(f, x0 = 1.6)
print("Root x is approx. x = {:14.12g}." .format(x))
print("The error is less than 1e-6.")
print("The exact error is {}." .format(2 - x))
# ------------------------ Using BrentQ algorithm from scipy
from scipy.optimize import brentq

def f(x):
    """returns f(x) = x^3 - 2x^2. Has roots at x = 0 (double root) and x = 2"""
    return x ** 3 - 2 * x ** 2

# main program starts here
x = brentq(f, a = 1.5, b = 3, xtol = 1e-6)
print("Root x is approx. x = {:14.12g}." .format(x))
print("The error is less than 1e-6.")
print("The exact error is {}." .format(2 - x))
# ------------------------ Using fsolve algorithm from scipy
from scipy.optimize import fsolve # multidimensional solver

def f(x):
    """returns f(x) = x^3 - 2x^2. Has roots at x = 0 (double root) and x = 2"""
    return x ** 3 - 2 * x ** 2

# main program starts here
# fsolve returns an array even for a scalar problem, hence x[0] below
x = fsolve(f, x0 = [1.6])
print("Root x is approx. x = {}." .format(x))
print("The error is less than 1e-6.")
print("The exact error is {}." .format(2 - x[0]))
| true
|
5f7153e6f48499a0f2b5be730e9490f333e541eb
|
Python
|
cleuton/pythondrops
|
/curso/licao5/semexcept.1.py
|
UTF-8
| 91
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Read and print arquivo.txt, reporting a friendly message when the
# file cannot be read.
try:
    # `with` guarantees the handle is closed (the original leaked it),
    # and OSError is the narrowest class covering open/read failures,
    # replacing the bare `except:` that swallowed everything including
    # KeyboardInterrupt.
    with open('arquivo.txt') as a:
        print(a.read())
except OSError:
    print('Arquivo inexistente')
| true
|
87d66b4a6ab3057f9b758222aedb7add9e10c6eb
|
Python
|
uhh-lt/taxi
|
/graph_pruning/methods/m_hierarchy.py
|
UTF-8
| 7,296
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
import networkx as nx
import methods.util.write_graph as write_graph
import methods.util.util as util
from .zhenv5.remove_cycle_edges_by_hierarchy_greedy import scc_based_to_remove_cycle_edges_iterately
from .zhenv5.remove_cycle_edges_by_hierarchy_BF import remove_cycle_edges_BF_iterately
from .zhenv5.remove_cycle_edges_by_hierarchy_voting import remove_cycle_edges_heuristic
SUPPORTED_SCORE_METHODS = ["pagerank", "socialagony", "trueskill"]
SUPPORTED_RANKING_METHODS = ["greedy", "forward", "backward", "voting"]
g = nx.DiGraph()
def prepare(line):
    """Add one edge to the module-level graph `g`.

    `line` is a parsed input row; columns 1 and 2 hold the edge's
    source and target node ids (column 0 is presumably a row id or
    weight — confirm against the caller).
    """
    g.add_edge(line[1], line[2])
def do(filename_out, delimiter, mode, gephi_out, filename_in=None):
    """Remove cycle edges from the module graph `g` and write the
    pruned graph to `filename_out`.

    `mode` is an underscore-separated spec, e.g.
    'hierarchy_pagerank_voting': part 1 is the score method
    ('ensemble' or one of SUPPORTED_SCORE_METHODS), part 2 the ranking
    strategy. Returns the removed cycle edges.
    """
    inputs = mode.split("_")
    edges_to_remove = None
    if len(inputs) < 2:
        raise Exception(
            "No score method provided (e.g. 'hierarchy_pagerank_voting'). Supported: ensemble, pagerank, socialagony, trueskill")
    # NOTE(review): this rejects every two-part mode unconditionally;
    # judging by the message it was probably meant to test
    # `inputs[1] not in SUPPORTED_SCORE_METHODS` — confirm intent.
    if len(inputs) == 2:
        raise Exception(
            "Score method '%s' not supported." % inputs[1])
    if len(inputs) != 2 and (len(inputs) < 3 or inputs[2] not in SUPPORTED_RANKING_METHODS):
        raise Exception("Ranking method '%s' not supported. Supported: %s" % (inputs[2], SUPPORTED_RANKING_METHODS))
    score_name = inputs[1]
    ranking = inputs[2]
    print("Score method: %s" % score_name)
    print("Ranking method: %s" % ranking)
    # 'ensemble' runs every supported score method and lets them vote
    score_names = SUPPORTED_SCORE_METHODS if score_name == "ensemble" else [score_name]
    votings = []
    for mode in score_names:
        print("--------------")
        print("Starting mode: %s" % mode)
        players_score_dict = computing_hierarchy(filename_in, mode, filename_in)
        edges_to_remove, e1, e2, e3, e4 = compute_ranking(ranking, mode, players_score_dict)
        # collect each strategy's proposal for the final cross-method vote
        if e1 is not None:
            votings.append(set(e1))
        if e2 is not None:
            votings.append(set(e2))
        if e3 is not None:
            votings.append(set(e3))
        print("Mode '%s' recommends to remove %s edges." % (mode, len(edges_to_remove)))
    if score_name == "ensemble" and ranking == "voting":
        edges_to_remove = remove_cycle_edges_by_voting(votings)
    print("Remove edges...")
    cycles_removed = util.remove_edges_from_network_graph(g, edges_to_remove)
    write_graph.network_graph(filename_out, g, gephi_out=gephi_out, delimiter=delimiter)
    return cycles_removed
def dir_tail_name(file_name):
    """Split *file_name* into (directory, basename), logging the result."""
    import os.path
    head, tail = os.path.split(file_name)
    dir_name = os.path.dirname(file_name)
    print("dir name: %s, file_name: %s" % (dir_name, tail))
    return dir_name, tail
def get_edges_voting_scores(set_edges_list):
    """Return {edge: number of strategies that proposed removing it}.

    `set_edges_list` is a list of edge sets (one per removal strategy);
    an edge's score is how many of those sets contain it.
    """
    total_edges = set()
    for edges in set_edges_list:
        total_edges = total_edges | edges
    edges_score = {}
    for e in total_edges:
        # Bug fix: in Python 3 `filter()` returns an iterator, so the
        # original `len(filter(...))` raised TypeError; count directly.
        edges_score[e] = sum(1 for edge_set in set_edges_list if e in edge_set)
    return edges_score
def compute_ranking(ranking, score_name, players_score_dict):
    """Compute cycle edges to delete for the requested `ranking`
    strategy ('greedy', 'forward', 'backward' or 'voting').

    Returns (edges_to_remove, greedy, forward, backward, voting); the
    per-strategy lists are None when not computed. 'voting' runs all
    three strategies and keeps edges proposed by a majority.
    """
    edges_to_remove, remove_greedy, remove_forward, remove_backward, remove_voting = None, None, None, None, None
    if ranking == "voting" or ranking == "greedy":
        print("Compute edges to remove with ranking 'greedy'.")
        remove_greedy = scc_based_to_remove_cycle_edges_iterately(g.copy(), players_score_dict)
        edges_to_remove = remove_greedy
    if ranking == "voting" or ranking == "forward":
        print("Compute edges to remove with ranking 'forward'.")
        remove_forward = remove_cycle_edges_BF_iterately(g.copy(), players_score_dict, is_Forward=True,
                                                         score_name=score_name)
        edges_to_remove = remove_forward
    if ranking == "voting" or ranking == "backward":
        print("Compute edges to remove with ranking 'backward'.")
        remove_backward = remove_cycle_edges_BF_iterately(g.copy(), players_score_dict, is_Forward=False,
                                                          score_name=score_name)
        edges_to_remove = remove_backward
    if ranking == "voting":
        print("Compute edges to remove with ranking 'voting'.")
        remove_voting = remove_cycle_edges_by_voting([set(remove_greedy), set(remove_forward), set(remove_backward)])
        edges_to_remove = remove_voting
    return edges_to_remove, remove_greedy, remove_forward, remove_backward, remove_voting
def remove_cycle_edges_strategies(graph_file, nodes_score_dict, score_name="socialagony", nodetype=int):
    """Run the greedy, forward and backward edge-removal strategies on
    copies of the module graph `g`; returns their three edge lists.

    NOTE(review): `graph_file` and `nodetype` are unused here.
    """
    # greedy
    cg = g.copy()
    e1 = scc_based_to_remove_cycle_edges_iterately(cg, nodes_score_dict)
    # forward
    cg = g.copy()
    e2 = remove_cycle_edges_BF_iterately(cg, nodes_score_dict, is_Forward=True, score_name=score_name)
    # backward
    cg = g.copy()
    e3 = remove_cycle_edges_BF_iterately(cg, nodes_score_dict, is_Forward=False, score_name=score_name)
    return e1, e2, e3
def remove_cycle_edges_by_voting(set_edges_list, nodetype=int):
    """Score every candidate edge by how many strategies proposed it,
    then remove cycle edges from a copy of `g` using those votes."""
    edges_score = get_edges_voting_scores(set_edges_list)
    e = remove_cycle_edges_heuristic(g.copy(), edges_score, nodetype=nodetype)
    return e
def remove_cycle_edges_by_hierarchy(graph_file, nodes_score_dict, score_name="socialagony"):
    """Run all three removal strategies plus the voting combination;
    returns (greedy, forward, backward, voting) edge lists."""
    e1, e2, e3 = remove_cycle_edges_strategies(graph_file, nodes_score_dict, score_name=score_name)
    e4 = remove_cycle_edges_by_voting([set(e1), set(e2), set(e3)])
    return e1, e2, e3, e4
def computing_hierarchy(graph_file, players_score_func_name, filename_in=None):
    """Compute a per-node score dict with the requested method
    ('socialagony', 'pagerank' or 'trueskill').

    NOTE(review): returns None implicitly for any other method name,
    and `filename_in` is unused.
    """
    import os.path
    if players_score_func_name == "socialagony":
        # agony_file = graph_file[:len(graph_file)-6] + "_socialagony.txt"
        # from compute_social_agony import compute_social_agony
        # players = compute_social_agony(graph_file,agony_path = "agony/agony ")
        # NOTE(review): dead branch — `agony_file` and
        # `read_dict_from_file` are undefined if this is ever re-enabled.
        if False:
            # if os.path.isfile(agony_file):
            print("load pre-computed socialagony from: %s" % agony_file)
            players = read_dict_from_file(agony_file)
        else:
            print("start computing socialagony...")
            from zhenv5.compute_social_agony import compute_social_agony
            players = compute_social_agony(graph_file)
        return players
    if players_score_func_name == "pagerank":
        # print("computing pagerank...")
        # standard damping factor 0.85
        players = nx.pagerank(g.copy(), alpha=0.85)
        return players
    elif players_score_func_name == "trueskill":
        output_file = graph_file[:len(graph_file) - 6] + "_trueskill.txt"
        output_file_2 = graph_file[:len(graph_file) - 6] + "_trueskill.pkl"
        # from true_skill import graphbased_trueskill
        # players = graphbased_trueskill(g)
        # from file_io import write_dict_to_file
        # write_dict_to_file(players,output_file)
        # NOTE(review): caching of trueskill results is disabled; the
        # string below documents the intended (unreachable) logic.
        '''
        if os.path.isfile(output_file):
        print("load pre-computed trueskill from: %s" % output_file)
        players = read_dict_from_file(output_file,key_type = int, value_type = float)
        elif os.path.isfile(output_file_2):
        print("load pre-computed trueskill from: %s" % output_file_2)
        players = read_from_pickle(output_file_2)
        '''
        if True:
            print("start computing trueskill...")
            from zhenv5.true_skill import graphbased_trueskill
            players = graphbased_trueskill(g.copy())
        return players
| true
|
9171964e9f9946b2078d7435ff1148b91dd64153
|
Python
|
degerli/qcc
|
/src/lib/tensor_test.py
|
UTF-8
| 1,121
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# python3
from absl.testing import absltest
from src.lib import tensor
class TensorTest(absltest.TestCase):
    """Unit tests for src.lib.tensor.Tensor Kronecker powers and
    matrix-property predicates."""

    def test_pow(self):
        # vector case: kpow(0) collapses to scalar 1.0; kpow(k)
        # multiplies the dimension to 2**k
        t = tensor.Tensor([1.0, 1.0])
        self.assertLen(t.shape, 1)
        self.assertEqual(t.shape[0], 2)
        t0 = t.kpow(0.0)
        self.assertEqual(t0, 1.0)
        t1 = t.kpow(1)
        self.assertLen(t1.shape, 1)
        self.assertEqual(t1.shape[0], 2)
        t2 = t.kpow(2)
        self.assertLen(t2.shape, 1)
        self.assertEqual(t2.shape[0], 4)
        # matrix case: each dimension scales to 2**k
        m = tensor.Tensor([[1.0, 1.0], [1.0, 1.0]])
        self.assertLen(m.shape, 2)
        self.assertEqual(m.shape[0], 2)
        self.assertEqual(m.shape[1], 2)
        m0 = m.kpow(0.0)
        self.assertEqual(m0, 1.0)
        m1 = m.kpow(1)
        self.assertLen(m1.shape, 2)
        self.assertEqual(m1.shape[0], 2)
        self.assertEqual(m1.shape[1], 2)
        m2 = m.kpow(2)
        self.assertLen(m2.shape, 2)
        self.assertEqual(m2.shape[0], 4)
        self.assertEqual(m2.shape[1], 4)

    def test_hermitian(self):
        # 2*I is hermitian but not unitary (2*I @ 2*I^H = 4*I != I)
        t = tensor.Tensor([[2.0, 0.0], [0.0, 2.0]])
        self.assertTrue(t.is_hermitian())
        self.assertFalse(t.is_unitary())
# Standard absl test entry point.
if __name__ == '__main__':
    absltest.main()
| true
|
be7524f0580fb95cb7e2ef363032afdacea6ca7a
|
Python
|
dodoyuan/leetcode_python
|
/剑指offer/51_构建乘积数组.py
|
UTF-8
| 524
| 3.234375
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
class Solution:
    def multiply(self, A):
        """Return B where B[i] is the product of every A[j] with j != i,
        computed without division via prefix/suffix product passes."""
        # fewer than 2 elements: nothing to exclude, return A unchanged
        if len(A) < 2:
            return A
        length = len(A)
        B = [1 for _ in range(length)]
        tempB = [1 for _ in range(length)]
        # Bug fix: the loops used Python-2-only xrange(); range() is
        # identical here and works on both Python 2 and 3.
        # prefix products: B[i] = A[0] * ... * A[i-1]
        for i in range(1, length):
            B[i] = B[i-1] * A[i-1]
        # suffix products: tempB[j] = A[j+1] * ... * A[-1]
        for j in range(length-2, -1, -1):
            tempB[j] = tempB[j+1] * A[j+1]
        # combine: product of everything except A[i]
        for i in range(length):
            B[i] *= tempB[i]
        return B
# quick manual check (Python 2 print statement)
s = Solution()
print s.multiply([1,2,3,4,5])
| true
|
551a9b9faee8dc5a81bff74fd55caa31ad70e748
|
Python
|
yashpatel3645/Python-Apps
|
/Youtube Video Downloder/Single Video Downloader.py
|
UTF-8
| 426
| 2.609375
| 3
|
[] |
no_license
|
from pytube import YouTube

# Download the highest-resolution stream of a single YouTube video.
Save_Path = "D:/Youtube Downloaded Video"
link = input("Enter the Link : ")
try:
    yt = YouTube(link)
except Exception:
    # Bug fix: the original swallowed the error with a bare except and
    # fell through, crashing with NameError on `yt`; report and exit.
    print("Connection Problem....!!!")
    raise SystemExit(1)
# print(yt.streams.all())
mp4file = yt.streams.get_highest_resolution()
print("Please Wait Your video is being downloading.......")
try:
    mp4file.download(Save_Path)
except Exception:
    print('There will be some error.')
else:
    # Bug fix: success message was printed even when the download
    # failed; only print it when no exception occurred.
    print('Task Completed!')
| true
|
85876c896946f1f1eb43c5b5cc5241a637a20108
|
Python
|
wankhede04/python-coding-algorithm
|
/arrays/fill-blanks.py
|
UTF-8
| 298
| 3.15625
| 3
|
[] |
no_license
|
array1 = [1, None, 2, 3, None, None, 5, None]

def solution(nums):
    """Return a copy of `nums` with each None replaced by the most
    recent non-None value (0 before any value has been seen)."""
    filled = []
    last_seen = 0
    for value in nums:
        if value is None:
            filled.append(last_seen)
        else:
            filled.append(value)
            last_seen = value
    return filled

print(solution(array1))
| true
|
f55c1e363c4d64728bbcc8b7544aef56a268519a
|
Python
|
oikkoikk/ALOHA
|
/important_problems/1541_lost_bracket.py
|
UTF-8
| 539
| 3.6875
| 4
|
[] |
no_license
|
# BOJ 1541 "lost brackets": split the expression on '-'.
# ex. 60-30+29-90-10+80 -> ['60', '30+29', '90', '10+80']
# Add up each '+'-group; the first group is added to the result and
# every later group subtracted in full, which minimizes the value.
string = input().split('-')
num = []
result = 0
for token in string:
    tempSum = 0
    # Bug fix: the original tested `token.find('+')`, but find() returns
    # -1 (truthy) when '+' is absent and 0 (falsy) when it is first, so
    # the condition was backwards; use a membership test instead.
    if '+' in token:
        # evaluate the '+' chain directly
        for temp in token.split('+'):
            tempSum += int(temp)
    else:
        tempSum = int(token)
    num.append(tempSum)
result += num[0]
for i in range(1, len(num)):
    result -= num[i]
print(result)
| true
|
b99e6948b3ae4e59344c6942524e8d9071311d76
|
Python
|
abiraja2004/IntSys-Sentiment-Summary
|
/bert_finetune/BERTEval.py
|
UTF-8
| 3,570
| 2.578125
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm, trange
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig
from .finetune_utils import InputExample, InputFeatures, convert_examples_to_features
class BERTpredictor():
    """Scores candidate reviews (lists of sentence indices) with a
    fine-tuned BERT regression model, applying a length penalty."""

    def __init__(self, config, sentences):
        # fine-tuned BertForSequenceClassification plus inference settings
        self.model = config['BERT_finetune_model']
        self.batch_size = config['BERT_batchsize']
        self.device = config['device']
        # normalize sentences: capitalize and reattach terminal periods
        self.sentences = [s.capitalize().replace(' .', '.') for s in sentences]
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        # (weight range, length range, order) parameterize the penalty
        self.length_penalty_range = config['opt_dict']['length_penalty_range']
        self.length_range = config['opt_dict']['length_range']
        self.length_penalty_order = config['length_penalty_order']

    def preprocess(self, candidate_ixs):
        """Join each candidate's sentences into one review string and
        convert them to BERT input features (label 0 is a placeholder)."""
        cand_reviews = []
        for cand_ix, cand in enumerate(candidate_ixs):
            cand_rev = []
            for ix in cand:
                cand_rev.append(self.sentences[ix])
            full_review = ' '.join(cand_rev)
            rev_example = InputExample(cand_ix, full_review, label=0)
            cand_reviews.append(rev_example)
        # max sequence length 512 (BERT's limit), regression output mode
        features = convert_examples_to_features(cand_reviews, [None], 512, self.tokenizer, 'regression')
        return features

    def evaluate(self, candidate_ixs):
        """Return a length-penalized model score for each candidate,
        as a numpy array aligned with `candidate_ixs`."""
        predict_features = self.preprocess(candidate_ixs)
        all_input_ids = torch.tensor([f.input_ids for f in predict_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in predict_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in predict_features], dtype=torch.long)
        predict_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
        # sequential sampler keeps outputs aligned with candidate order
        sampler = SequentialSampler(predict_data)
        dataloader = DataLoader(predict_data, sampler=sampler, batch_size=self.batch_size)
        self.model.to(self.device)
        self.model.eval()
        predictions = []
        for batch in dataloader:
            batch = tuple(t.to(self.device) for t in batch)
            input_ids, input_mask, segment_ids = batch
            with torch.no_grad():
                preds = self.model(input_ids, segment_ids, input_mask, labels=None)
            # accumulate batch outputs into predictions[0]
            if len(predictions) == 0:
                predictions.append(preds.detach().cpu().numpy())
            else:
                predictions[0] = np.append(
                    predictions[0], preds.detach().cpu().numpy(), axis=0)
        predictions = predictions[0].flatten()
        # scale each raw score by a factor that shrinks with length
        for i in range(len(candidate_ixs)):
            predictions[i] *= self.__get_length_penalty_factor(len(candidate_ixs[i]), self.length_penalty_order)
        return predictions

    def __get_length_penalty_factor(self, length, order):
        """Interpolate a penalty weight for `length`: max_weight at
        min_len down to min_weight at max_len, shaped by `order`."""
        # earlier linear version kept for reference:
        '''
        len_frac = (length - self.length_range[0])/(self.length_range[1] - self.length_range[0])
        return self.length_penalty_range[0] + (1-len_frac)*(self.length_penalty_range[1] - self.length_penalty_range[0])
        '''
        min_len = self.length_range[0]
        max_len = self.length_range[1]
        min_weight = self.length_penalty_range[0]
        max_weight = self.length_penalty_range[1]
        return min_weight + (1 - ((length - min_len)/(max_len - min_len))**order)*(max_weight - min_weight)
| true
|
174179c73c6887743f8c70a9f0d80376b04079c7
|
Python
|
LaurenM2918/362-Sprint_Final
|
/Collab_Filter.py
|
UTF-8
| 7,884
| 3.265625
| 3
|
[] |
no_license
|
# =============================DATA PREPARATION ==============================
# ============================================================================
# Imports dataset
import numpy as np
import pandas as pd

# Load dataset (TMDB credits: id, title, cast, crew)
data = pd.read_csv('tmdb_5000_credits.csv')
data.head()
# Load second dataset (TMDB movie metadata)
data2 = pd.read_csv('tmdb_5000_movies.csv')
data2.head()
# Merges datasets on id column
data.columns = ['id', 'title', 'cast', 'crew']
data2 = data2.merge(data, on = 'id')
data2.head()
# Using IMDB weighted rating
# Mean rating for listed movies
avg = data2['vote_average'].mean()
# Minimum threshold for votes listed (90th percentile of vote counts:
# only the top 10% most-voted movies qualify)
min_listed_votes = data2['vote_count'].quantile(0.9)
# Getting the size of the listed movies based on requirement
movies_list = data2.copy().loc[data2['vote_count'] >= min_listed_votes]
# Define the metric function for each qualified movie
def weighted_rating(x, min_listed_votes=min_listed_votes, avg=avg):
    """IMDB weighted rating for one movie row `x` (needs 'vote_count'
    and 'vote_average' fields): blends the movie's own average toward
    the global mean when its vote count is low."""
    votes = x['vote_count']
    rating = x['vote_average']
    # Calculation based on IMDB formula
    return (votes / (votes + min_listed_votes)) * rating + (min_listed_votes / (min_listed_votes + votes)) * avg
#Defines new feature score and calculate its value with the weighted rating
# Score is computed with a minimal tolerance for vote counts
movies_list['score'] = movies_list.apply(weighted_rating, axis=1)
# Sort the movie list
movies_list = movies_list.sort_values('score', ascending = False)
# =============================COLLABORATIVE FILTER ==========================
# ============================================================================
import matplotlib.pyplot as plt

# Visualize trending movies (plotting code disabled)
pop = data2.sort_values('popularity', ascending = False)
# plt.figure(figsize=(12,4))
# plt.barh(pop['title_x'].head(6), pop['popularity'].head(6), align='center', color='r')
# plt.gca().invert_yaxis()
# plt.xlabel('Popularity')
# plt.title('Popular Trending Movies')
# High Budget Movies
# budget = data2.sort_values('budget', ascending = False)
# plt.figure(figsize=(12,4))
# plt.barh(budget['title_x'].head(6), budget['budget'].head(6), align='center', color='b')
# plt.gca().invert_yaxis()
# plt.xlabel('Popularity')
# plt.title('High Budget Movies')
# Machine Learning stuff
# drop the duplicate title column introduced by the credits merge
movies_list.drop(['title_y'], axis=1, inplace=True)
# movies_list.shape
# print(data2['overview'].head(10))
from sklearn.feature_extraction.text import TfidfVectorizer
# Define TF-IDF object and remove stop words
tfidf = TfidfVectorizer(stop_words='english')
# Replaces NaN with empty string
data2['overview'] = data2['overview'].fillna('')
# Construct TF-IDF matrix by fitting and transforming data
tfidf_matrix = tfidf.fit_transform(data2['overview'])
# import Linear kernel
from sklearn.metrics.pairwise import linear_kernel
# Compute cosine similarity on the tfidf matrix (the linear kernel
# equals cosine similarity here because tf-idf rows are L2-normalized)
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
# Construct reverse map of indices and movie titles
indices = pd.Series(data2.index, index = data2['title_x']).drop_duplicates()
# =============================USER FUNCTIONS ================================
# ============================================================================
# Function takes in movie title as input and outputs similar movies
def GetRecommendations(title, cosine_sim = cosine_sim):
    """Return the titles of the 10 movies whose overviews are most similar
    to `title`, using the precomputed cosine-similarity matrix."""
    # Row of the query movie in the similarity matrix.
    row = indices[title]
    # Rank every movie by similarity to the query (stable sort, best first).
    ranked = sorted(enumerate(cosine_sim[row]), key=lambda pair: pair[1], reverse=True)
    # Skip position 0 (the movie itself) and keep the next ten indices.
    top_ten = [movie_idx for movie_idx, _ in ranked[1:11]]
    return data2['title_x'].iloc[top_ten]
#===========================================
# Add Error Handling
# Do not let user add duplicates
#===========================================
# # Main Page
# import sys
# main = ''
# title = ''
# # userList = ['Inception']
# userList = []
# List = movies_list[['title_x', 'vote_average', 'score']]
# # Work on exit condition
# while True:
# # Precondition: User's List has to be empty for new user
# if bool(userList) == False:
# # First time user is prompted add a movie to their list
# temp = 0
# while True:
# print(f'Select a Movie to begin or type New for more options\n=============================')
# print(f'Popular Movies:\n\n {List[temp:temp + 9].to_string(index=False)}')
# title = input('\nType in Movie title to add to list: ')
# # Updates displayed movies
# if title.lower() == 'new':
# temp += 10
# elif title.lower() != 'new' and (data2['title_x'] == title).any():
# userList.append(title)
# break
# elif title.lower() == 'exit':
# raise SystemExit
#
# while True:
# # Precondition: User List has to be filled
# import random
#
# print('========== Main Menu ==========\n==============================\n')
# if len(userList) != 0:
# # Randomly recommends based on list size
# idx = random.randrange(0, len(userList))
# print(f'Movies based on your list:\n\n {GetRecommendations(userList[idx]).to_string(index=False)}')
# # print(f'Movies based on your list:\n\n {GetRecommendations(userList[0]).to_string(index=False)}')
# else:
# print(f'Popular Movies: \n {print(List[0:9].to_string(index=False))}')
# print(f'\nYour Movie List: {userList}\n')
# print('Type the number of the option')
# print('1:Search 2:Delete 3:Insert 4:Exit')
# main = int(input('$\: '))
#
# # Search algorithm with embedded loop
# if main == 1:
# while True:
# title = input('\nType in Movie title: ')
# user = GetRecommendations(title)
# print(f'Recommended based on your Search:\n {user.to_string(index=False)}\n')
# print('Type the number of the option')
# print('1. Add to list 2. Search 3. Main Menu\n')
# option = int(input('$\: '))
# # User adds preferred title to their list
# if option == 1:
# title = input('\nType in Movie title to add to list: ')
# # Goes back to main menu if user adds title
# if (data2['title_x'] == title).any():
# userList.append(title)
# break
# else:
# print('Invalid Title')
# # Continue searching
# elif option == 2:
# print('...')
# # Return to Main Menu
# elif option == 3:
# print('Returning...')
# break
# # Delete title from list
# elif main == 2:
# print(f'\nYour Movie List: {userList}\n')
# title = input('Type movie name to delete from list: ')
# idx = userList.index(title)
# del userList[idx]
# # Add title from recommended list
# elif main == 3:
# # If list is empty, show popular movies
# if len(userList) != 0:
# title = input('\nType in Movie title to add to list: ')
# if (data2['title_x'] == title).any():
# userList.append(title)
# else:
# print(f'Empty List... ')
# # Logout from main menu
# elif main == 4:
# print('Logging out...')
# raise SystemExit
# else:
# raise SystemError
| true
|
c6f5c5c93f30ea2c08ee59a85f9f8b2a34f31c93
|
Python
|
Gitlittlerubbish/SNS
|
/exe9_1.py
|
UTF-8
| 98
| 2.671875
| 3
|
[] |
no_license
|
#! usr/bin/python3
import numpy as np
# Even numbers in [0, 12): 0, 2, 4, 6, 8, 10.
even_num_array = np.arange(0, 12, 2)
print(even_num_array)
|
6ede7cd44626d1270c8bd7dfc64591f8fbc9fca4
|
Python
|
treejames/mt-assignments
|
/hw2/decoder/decode_lagrange
|
UTF-8
| 4,961
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import optparse
import sys
import models
from collections import namedtuple
# Helper adapted from grade.py
def bitmap(sequence):
    """Generate a coverage bitmap (an int with bit i set for every index i)
    for a sequence of indexes.

    The original built each power of two by parsing the string
    '1' + '0'*i in base 2 into the Python-2-only `long` type and folded
    with `reduce`.  `1 << i` is the same value, works on Python 2 and 3,
    and needs no reduce/long.
    """
    result = 0
    for i in sequence:
        result |= 1 << i
    return result
optparser = optparse.OptionParser()
optparser.add_option("-i", "--input", dest="input", default="data/input", help="File containing sentences to translate (default=data/input)")
optparser.add_option("-t", "--translation-model", dest="tm", default="data/tm", help="File containing translation model (default=data/tm)")
optparser.add_option("-l", "--language-model", dest="lm", default="data/lm", help="File containing ARPA-format language model (default=data/lm)")
# FIX: opts.k is read below when loading the TM, but no -k option was ever
# declared, so `opts.k` raised AttributeError at startup.  Declare it with the
# conventional default of 1 candidate translation per phrase.
optparser.add_option("-k", "--translations-per-phrase", dest="k", default=1, type="int", help="Number of candidate translations considered per phrase (default=1)")
optparser.add_option("-n", "--num_sentences", dest="num_sents", default=sys.maxint, type="int", help="Number of sentences to decode (default=no limit)")
optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode (default=off)")
opts = optparser.parse_args()[0]
# TM contains tuples of words
tm = models.TM(opts.tm, opts.k)
lm = models.LM(opts.lm)
# One tuple of source tokens per input sentence.
french = [tuple(line.strip().split()) for line in open(opts.input).readlines()[:opts.num_sents]]
def extract_english(h):
    """Walk the hypothesis chain back to the root and return the English
    sides of its phrases in source order, each followed by a space."""
    words = []
    node = h
    # The root hypothesis has predecessor None and carries no phrase.
    while node.predecessor is not None:
        words.append("%s " % node.phrase[2].english)
        node = node.predecessor
    words.reverse()
    return "".join(words)
# tm should translate unknown words as-is with probability 1 (logprob 0.0).
# NOTE(review): sum(french, ()) concatenates the sentence tuples quadratically;
# itertools.chain would be linear, but the input is small enough not to matter.
for word in set(sum(french,())):
    if (word,) not in tm:
        tm[(word,)] = [models.phrase(word, 0.0)]
sys.stderr.write("Decoding %s...\n" % (opts.input,))
# Some helpers
#
def delta(t, s):
    """Distortion penalty: distance between the position after the end of the
    previous phrase (t + 1) and the start of the next phrase (s).

    FIX: `math.abs` does not exist (and `math` was never imported in this
    file), so every call raised NameError/AttributeError; the builtin abs()
    is the correct spelling.
    """
    return abs(t + 1 - s)
def yi(y, length):
    """Coverage vector of a hypothesis chain: for every source position i in
    [0, length), count how many times the chain translates it."""
    counts = [0] * length
    node = y
    # Root hypothesis has predecessor None and no phrase span.
    while node.predecessor is not None:
        start, end, _ = node.phrase
        for pos in range(start, end + 1):
            counts[pos] += 1
        node = node.predecessor
    return counts
def L(u, y):
    """Lagrangian dual value for multipliers u and hypothesis y.

    FIX: the original line ended in a dangling `+` (a syntax error).  The
    standard dual objective in Lagrangian-relaxation decoding is
        L(u, y) = f(y) + sum_i u[i] * (y_i - 1)
    where y_i counts how often source position i is translated by y.
    TODO(review): confirm this matches the intended assignment objective.
    """
    counts = [0] * len(u)
    node = y
    while node.predecessor is not None:
        s, t, _ = node.phrase
        for i in range(s, t + 1):
            counts[i] += 1
        node = node.predecessor
    return y.logprob + sum(ui * (c - 1) for ui, c in zip(u, counts))
# Translated => N, number of words translated in the source
# last_span => (l, m) last span of words translated in the last part. l is the index of first word, m is index of last
# end_prev => End index of previous phrase
# phrase => (s, t, phrase), current phrase
# predecessor => previous y
# Constraints Met => Bitmap |C| of constraints met
# FIX: namedtuple takes exactly (typename, field_names); passing each field as
# its own positional argument is a TypeError.
y = namedtuple("y", ["logprob", "lm_state", "translated", "last_span", "end_prev", "phrase", "predecessor", "bc"])
def argmax(u, french):
    """Greedy Viterbi-style search for the best hypothesis given the
    Lagrange multipliers u.

    Fixes over the original:
      * `french_sentence` (a module-level global) was indexed instead of the
        `french` parameter when looking up phrases.
      * phrase membership was tested against the language model (`in lm`);
        phrases live in the translation model, so test `in tm`.
      * the multipliers u were never applied; each covered source position
        now contributes u[pos] to the score (TODO(review): confirm sign
        convention against the assignment).
      * the function returned the untouched initial hypothesis `ih`; it now
        returns the hypothesis actually built in the loop.
    """
    current = y(0.0, lm.begin(), 0, (0, 0), 0, None, None, 0)
    while current.translated < len(french):
        for i in range(0, len(french)):
            for j in range(i + 1, len(french) + 1):
                if french[i:j] in tm:
                    translated = current.translated + (j - i)
                    for phrase in tm[french[i:j]]:
                        logprob = current.logprob + phrase.logprob
                        # Lagrangian term for covering source words i..j-1.
                        logprob += sum(u[pos] for pos in range(i, j))
                        current_lm_state = current.lm_state
                        for word in phrase.english.split():
                            (current_lm_state, word_logprob) = lm.score(current_lm_state, word)
                            logprob += word_logprob
                        logprob += lm.end(current_lm_state) if j == len(french) else 0.0
                        current = y(logprob, current_lm_state, translated, (i, j), 0, (i, j, phrase), current, 0)
    return current
for french_sentence in french:
N = len(french_sentence)
K = 10
G = 3
alpha = 0.1 # TODO: calculate using provided step formula
epsilon = 0.002
u = [0] * N
C = set() # Constraints
yOptimal = None
exitRound = False
while not exitRound:
converged = False
optimizeC = False
t = 0
bestL, secondBestL, bestT, secondBestT = 0, 0, 0, 0
while not converged:
# Greedily Find best y
y_star = argmax(u, french_sentence)
yi_star = yi(y_star, N)
if all(el==1 for el in yi_star):
exitRound = True
yOptimal = y_star
else:
for i in range(0, N):
u[i] = u[i] - alpha * (yi_star[i] - 1)
# Check convergence
converged = (float(bestL) - secondBestL) / (bestT - secondBestT) < epsilon
# Increment round id
t += 1
if exitRound:
break;
# Were' in the constraints case
sys.stderr.write("Entering constraint optimization!\n"
icounts = [0] * N
G = set()
for r in range(0, K):
sys.stderr.write("Constraint optimizing round %i\n" % r)
# Find best y
y_star = argmax(u, french_sentence)
yi_star = yi(y_star, N)
if all(el==1 for el in yi_star):
exitRound = True
yOptimal = y_star
break;
for yi_value in yi_star:
if yi_value != 1:
passed = False
if passed:
optimizeC = False
break;
if exitRound:
break;
# Optimize C
C = C.union(G)
# Output the sentence
print extract_english(yOptimal)
| true
|
e9f5bb1f47e00fe1d70f7314c3ac623fb2ccc941
|
Python
|
Pro4tech/Machine-Learning
|
/Basics/import.py
|
UTF-8
| 351
| 2.84375
| 3
|
[] |
no_license
|
import tensorflow as tf
import cv2
# NOTE(review): `plt` conventionally aliases matplotlib.pyplot; here it is the
# top-level matplotlib package (which is what actually carries __version__).
import matplotlib as plt
from time import sleep #Sleep function is selectively imported from the time library
for i in range(5):
    print(i)
    sleep(5) #delay of 5 seconds per iteration
print(tf.__version__) #Version Check
print(cv2.__version__) #Version Check
print(plt.__version__) #Version Check
| true
|
daf9eb0deb94ea7d813c2a3f33a1896c52d18488
|
Python
|
wangzi2000/factor
|
/UMD/coding/王俊杰-coding-1.py
|
UTF-8
| 7,718
| 2.65625
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# Load the CRSP-style input file.
file = pd.read_csv('e658fe9354eaef36.csv', sep=',')
df = pd.DataFrame(file)
#df1 = df.head(30000)
df1 = df
# Sorted list of distinct trading dates.
dict_date = list(set((df1['date'].values)))
dict_date.sort()
# df2 is the working dataset -- missing values not yet handled.
df2 = df1[['date','PRC','PERMNO',"RET","SHROUT"]].set_index('date')
# Prices must be positive (CRSP stores bid/ask midpoints as negative PRC).
df2['PRC_real'] = abs(df2['PRC'])
# NOTE(review): abs() of the date index looks like a no-op for YYYYMMDD ints;
# confirm this column is actually needed.
df2['date_real'] = abs(df2.index)
# Market capitalization = price * shares outstanding.
df2["market_cap"] = df2['PRC_real']*df2["SHROUT"]
# Drop the raw PRC column.
df3 = df2.drop('PRC',axis = 1)
# Drop rows whose RET contains letters (CRSP codes); df0 is the prepared data.
df0 = df3[~df3['RET'].str.contains("[a-zA-Z]").fillna(False)]
# Main program: one momentum factor value per month, equal- and value-weighted.
mom_eq = {}
mom_w = {}
for k in range(22,60):
    # Month k: form portfolios on months k-13..k-2 and measure month k-1 returns.
    target_date = dict_date[k]
    df_target_date = df0.loc[target_date]
    # NOTE(review): assigning into a .loc slice can trigger pandas'
    # SettingWithCopy warning; consider .copy() first.
    df_target_date['avg_return'] = np.nan
    df_target_date = df_target_date[~(df_target_date['RET'].isnull())]
    df_target_date = df_target_date[~(df_target_date['PRC_real'].isnull())]
    for stk in df_target_date["PERMNO"].values:
        # Cumulative gross return over the 12-month formation window.
        return_avg_12 = 1
        for i in range(2,14):
            flag = 0
            if (k - i >= 0):
                # Guard: some stocks have no row for a given month
                # (e.g. PERMNO 13653 has no 19591130 entry).
                if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - i]):
                    # FIX: the original tested `RET != np.nan`, which is always
                    # True (NaN never compares equal); use pd.isnull like the
                    # rest of the script so NaN returns are skipped.
                    if pd.isnull(df0[df0.PERMNO == stk].RET[dict_date[k - i]]) == False:
                        return_avg_12 = return_avg_12*(float(df0[df0.PERMNO == stk].RET[dict_date[k - i]])+ 1)
                else:
                    flag = 1
        # NOTE(review): flag is reset every iteration, so only the last month
        # of the window decides whether avg_return is recorded.
        if flag == 0:
            df_target_date.loc[df_target_date.PERMNO == stk, 'avg_return' ] = return_avg_12 - 1
    # Rank by formation-period return: bottom/top 30% are losers (L) / winners (H).
    p_30 = int(len(df_target_date)*0.3)
    df_L = df_target_date.sort_values(by = 'avg_return').head(p_30)
    df_H = df_target_date.sort_values(by = 'avg_return').tail(p_30)
    # Rank by market cap; split at the median into small (S) and big (B).
    p_50 = int(len(df_target_date)/2)
    df_S = df_target_date.sort_values(by = 'market_cap').head(p_50)
    n_left = len(df_target_date) - p_50
    df_B = df_target_date.sort_values(by = 'market_cap').tail(n_left)
    # Build the four intersection portfolios: SL, SH, BL, BH.
    df_SL = pd.merge(df_S, df_L)
    df_SH = pd.merge(df_S, df_H, how = 'inner').drop_duplicates()
    df_BL = pd.merge(df_B, df_L, how = 'inner').drop_duplicates()
    df_BH = pd.merge(df_B, df_H, how = 'inner').drop_duplicates()
    # If a portfolio is empty, its return defaults to 0.
    # Drop rows whose RET is NaN.
    df_BH =df_BH[~(df_BH['RET'].isnull())]
    df_BL =df_BL[~(df_BL['RET'].isnull())]
    df_SH =df_SH[~(df_SH['RET'].isnull())]
    df_SL =df_SL[~(df_SL['RET'].isnull())]
    # ----- Equal-weight returns over month k-1 -----
    return_BH_eq = 0
    if len(df_BH) == 0:
        return_BH_eq = 0
    else:
        for stk in df_BH['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_BH_eq = return_BH_eq + float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]])
        return_BH_eq = return_BH_eq/len(df_BH)
    return_BL_eq = 0
    if len(df_BL) == 0:
        return_BL_eq = 0
    else:
        for stk in df_BL['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_BL_eq = return_BL_eq + float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]])
        return_BL_eq = return_BL_eq/len(df_BL)
    return_SL_eq = 0
    if len(df_SL) == 0:
        return_SL_eq = 0
    else:
        for stk in df_SL['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_SL_eq = return_SL_eq + float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]])
        return_SL_eq = return_SL_eq/len(df_SL)
    return_SH_eq = 0
    if len(df_SH) == 0:
        return_SH_eq = 0
    else:
        for stk in df_SH['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_SH_eq = return_SH_eq + float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]])
        return_SH_eq = return_SH_eq/len(df_SH)
    # ----- Value-weight returns over month k-1 -----
    # FIX: in each empty-portfolio branch below the original reset the
    # *_eq variable (a copy-paste slip); the value-weighted accumulator is
    # the one belonging to this section.  Behavior is unchanged because the
    # _w accumulators are already 0 at that point.
    return_BH_w = 0
    tot_mark_cap = 0
    for stk in df_BH['PERMNO'].values:
        tot_mark_cap = tot_mark_cap + float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])
    if len(df_BH) == 0:
        return_BH_w = 0
    else:
        for stk in df_BH['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_BH_w = return_BH_w + (float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))*(float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])/tot_mark_cap)
    return_SH_w = 0
    tot_mark_cap = 0
    for stk in df_SH['PERMNO'].values:
        tot_mark_cap = tot_mark_cap + float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])
    if len(df_SH) == 0:
        return_SH_w = 0
    else:
        for stk in df_SH['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_SH_w = return_SH_w + (float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))*(float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])/tot_mark_cap)
    return_SL_w = 0
    tot_mark_cap = 0
    for stk in df_SL['PERMNO'].values:
        tot_mark_cap = tot_mark_cap + float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])
    if len(df_SL) == 0:
        return_SL_w = 0
    else:
        for stk in df_SL['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_SL_w = return_SL_w + (float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))*(float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])/tot_mark_cap)
    return_BL_w = 0
    tot_mark_cap = 0
    for stk in df_BL['PERMNO'].values:
        tot_mark_cap = tot_mark_cap + float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])
    if len(df_BL) == 0:
        return_BL_w = 0
    else:
        for stk in df_BL['PERMNO'].values:
            if np.any(np.array(df0[df0.PERMNO == stk].index) == dict_date[k - 1]):
                if pd.isnull((float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))) == False :
                    return_BL_w = return_BL_w + (float(df0[df0.PERMNO == stk].RET[dict_date[k - 1]]))*(float(df0[df0.PERMNO == stk].market_cap[dict_date[k]])/tot_mark_cap)
    # NOTE(review): this combination 0.5*((SH-BH) - 0.5*(SL-BL)) differs from
    # the usual UMD construction 0.5*(SH+BH) - 0.5*(SL+BL); confirm intent.
    mom_eq[dict_date[k]] = 0.5*((return_SH_eq - return_BH_eq) - 0.5*(return_SL_eq - return_BL_eq))
    mom_w[dict_date[k]] = 0.5*((return_SH_w - return_BH_w) - 0.5*(return_SL_w - return_BL_w))
print(mom_eq)
print(mom_w)
| true
|
88abfd42ca93ecca3d50b27e51e005160228d0c3
|
Python
|
AlexeyZavalin/algorithm_python_learn
|
/lesson-1/task-5.py
|
UTF-8
| 170
| 3.515625
| 4
|
[] |
no_license
|
# Map an alphabet position (1-26) to its lowercase letter:
# ord('a') is 97, so position + 96 is the letter's code point.
number = int(input("Введите номер буквы в английском алфавите: ")) + 96
symbol = chr(number)
print(f"Ваша буква - {symbol}")
| true
|
4943bd6c69773653b787add67dc40e9e357bdeee
|
Python
|
codecandiescom/TechFramework-1.2
|
/scripts/ftb/Carrier.py
|
UTF-8
| 2,151
| 2.625
| 3
|
[] |
no_license
|
# Carrier
# March 27, 2002
#
# by Evan Light aka sleight42
#
# All rights reserved
# Permission to redistribute this code as part of any other packaging requires
# the explicit permission of the author in advance.
##############################################################################
from Registry import Registry
import ftb.LaunchShip
import ftb.Ship
# TODO: Createa a default launch group for people who don't care
class Carrier( ftb.Ship.Ship):
    "A Ship subclass that carries a series of launchers carrying other ships/objects"
    def __init__( self, pShip):
        ftb.Ship.Ship.__init__( self, pShip)
        # Registry of launchers keyed by launcher name.
        self.launchers = Registry()
    def AddLauncher( self, launcherName, launcher):
        # Register a launcher under the given name; ignore incomplete input.
        if( launcherName != None and launcher != None):
            self.launchers.Register( launcher, launcherName)
    def GetLauncher( self, launcherName):
        # Returns the named launcher, or None (implicitly) when unknown.
        if( launcherName != None and self.launchers.has_key( launcherName)):
            return self.launchers.GetName( launcherName)
    def GetLaunchers( self):
        return self.launchers
    def GetNumLaunches( self, launchName):
        "Iterates over all of a Carriers launchers and tallies up the number of a particular Launch aboard"
        retval = 0
        if( launchName != None):
            for launcherName in self.launchers._keyList:
                launcher = self.launchers[launcherName]
                retval = retval + launcher.GetNumLaunches( launchName)
        return retval
    def HasMoreLaunches( self, shuttle):
        # Truthy when at least one launch of this type remains aboard.
        return self.GetNumLaunches( shuttle)
    def GetLaunchType( self, launcherName):
        return self.launchers.GetName( launcherName).GetLaunchType()
    def NextLaunchType( self, launcherName):
        return self.launchers.GetName( launcherName).NextLaunchType()
    def LaunchShip( self, shuttle, launcherIndex):
        """Launch `shuttle` from a launcher identified by name or index.

        FIX: Python has no method overloading -- the original defined
        LaunchShip twice, so the name-based variant was silently shadowed
        by the index-based one.  Both variants also referenced
        `self.Launchers`, which does not exist (the attribute is
        `self.launchers`) and would have raised AttributeError.
        """
        if isinstance(launcherIndex, str):
            return self.launchers.GetName( launcherIndex).LaunchShip( shuttle)
        return self.launchers[launcherIndex].LaunchShip( shuttle)
| true
|
f1c5b8942613795a8d72eec5ecf328173e5b8747
|
Python
|
anaypaul/LeetCode
|
/UniqueMorseCodeWords.py
|
UTF-8
| 735
| 3.359375
| 3
|
[] |
no_license
|
class Solution:
    # Morse code for 'a'..'z', indexed by alphabet position.
    _MORSE = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]

    def uniqueMorseRepresentations(self, words):
        """
        :type words: List[str]
        :rtype: int

        Each word maps to the concatenation of its letters' Morse codes;
        return how many distinct concatenations occur.  Indexing the class
        table by ord(c) - ord('a') replaces the per-call hand-built letter
        list and dictionary of the original.
        """
        transformations = {
            "".join(self._MORSE[ord(c) - ord('a')] for c in word)
            for word in words
        }
        return len(transformations)
| true
|
3b72bcf8d60e174c6c798ac835bb5a49a96b8d09
|
Python
|
DaniilHryshko/QA_SELENIUM
|
/Основные методы/exmple.py
|
UTF-8
| 628
| 2.875
| 3
|
[] |
no_license
|
from selenium import webdriver
from time import sleep
import math
browser = webdriver.Chrome()
link = "http://suninjuly.github.io/redirect_accept.html"
def calc(x):
    # Task formula: natural log of |12 * sin(x)|.
    return str(math.log(abs(12 * math.sin(int(x)))))
browser.get(link)
# FIX: "button.trollface" is a CSS selector, not a tag name, so
# find_element_by_tag_name could never match it; use the CSS locator.
button = browser.find_element_by_css_selector("button.trollface")
button.click()
# The click opens a new tab; switch to it before touching the new page.
second = browser.window_handles[1]
browser.switch_to.window(second)
x = browser.find_element_by_css_selector('span[id="input_value"]').text
answer = browser.find_element_by_id('answer')
answer.send_keys(calc(x))
button = browser.find_element_by_css_selector("button.btn")
button.click()
# Leave the result page visible briefly before closing the browser.
sleep(5)
browser.quit()
| true
|
8fe64673805e64179e85fe21e23ec7fc56881442
|
Python
|
CMEI-BD/ml
|
/python-ml/hello.py
|
UTF-8
| 976
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 11:14:36 2018
@author: meicanhua
"""
import tensorflow as tf
import input_data
sess = tf.Session()
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#sess = tf.InteractiveSession()
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
for i in range(1000):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x:batch[0], y_:batch[1]})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.global_variables_initializer())
print accuracy.eval(feed_dict={x:mnist.test.images, y_: mnist.test.lables})
| true
|
e3623b8a82b610a57bbb3c2de62d420327591ec1
|
Python
|
MDBarbier/Python
|
/machine_learning/ML-AZ/Part 1 - Data Preprocessing/DataPreProcessingTemplate.py
|
UTF-8
| 950
| 3.328125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Data Preprocessing Template
- Created on Friday Dec 6 2019 @author: matth
"""
#Import required libraries
import pandas as pd
from sklearn.model_selection import train_test_split
#### Parameters
# NOTE(review): despite the name, columnsToRemove is used as a slice bound
# (keep all columns except the last), not a count of columns to drop.
columnsToRemove = -1
# Column index of the dependent variable.
dependantVariableVector = 3
datafilePath = 'Data.csv'
testSize = 0.2
randomState = 0
#### Importing and preparing data
dataset = pd.read_csv(datafilePath)
X = dataset.iloc[:, :columnsToRemove].values
Y = dataset.iloc[:, dependantVariableVector].values
##### Splitting training data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = testSize, random_state = randomState)
#### Feature scaling
"""sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
#### Tests
# NOTE(review): asserts are stripped under `python -O`; fine for a template's
# sanity checks, but raise an exception for real input validation.
assert X_train[0][0] == "Germany" and X_train[0][1] == 40.0, "Unexpected value in first element"
assert len(X_train) == 8, "Unexpected number of items in the training data"
| true
|
4b1e7efb58938740aa5c3b732e9cb4dbe420cc2a
|
Python
|
hufi1/PassGen-with-Tkinter
|
/PassGen.py
|
UTF-8
| 8,778
| 2.78125
| 3
|
[] |
no_license
|
#####################################################
### hier entsteht mein eigener Passwort-Generator ###
### Hufi ###
### 21/06/2020 ###
#####################################################
######################################################################
### IMPORTE ###
import tkinter
from tkinter import *
from PIL import Image, ImageTk
import random
import pyperclip
######################################################################
### GUI ###
# Main window setup.
root = tkinter.Tk()
root.title("Password Generator")
root.geometry("600x150")
root.iconbitmap("images/PassGen Icon.ico")
root.configure(bg="grey")
######################################################################
### BUTTON IMAGES ###
generateImg = Image.open("images/generateButton.png")
resGenImg = generateImg.resize((90, 30),Image.ANTIALIAS)
resizedGenerate = ImageTk.PhotoImage(resGenImg)
zwischenImg = Image.open("images/zwischenablageButton.png")
resZwiImg = zwischenImg.resize((90, 30), Image.ANTIALIAS)
resizedZwi = ImageTk.PhotoImage(resZwiImg)
beendenImg = Image.open("images/beendenButton.png")
resBeeImg = beendenImg.resize ((90, 30), Image.ANTIALIAS)
resizedBee = ImageTk.PhotoImage(resBeeImg)
######################################################################
### CHARACTER POOLS AND WIDGET STATE ###
uppercase_list = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
lowercase_list = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
zahlen_list = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
sonderzeichen_list = ["#", "+", "*", "~", "?", "&", "%", "$", "§", "!", "@", ">", "<", ",", ".", ";", ":", "_", "-"]
### these variables back the radio buttons and checkboxes
uppercasecheckvar = BooleanVar()
lowercasecheckvar = BooleanVar() # Boolean (True/False) values
SZvar = BooleanVar()
zahlencheckvar = BooleanVar()
stellenvariable = IntVar() # holds 8, 10 or 12: the requested password length
password = "-" ## accumulator string; the dash is sliced off by the length cut later
######################################################################
### FUNKTIONEN FÜR BUTTONS ###
def generate():
    # Button handler: clear the output field, build a new password
    # (laengebestimmen sets the module-global password1), display it, and
    # show the reminder label.  The order matters: the field must be
    # cleared before inserting the new value.
    pw_ausgabe.delete(0, 40)
    laengebestimmen()
    pw_ausgabe.insert(0, password1)
    labelgen()
def zwischenablage():
    """Copy the currently displayed password to the clipboard and show a
    confirmation label."""
    current_password = pw_ausgabe.get()
    pyperclip.copy(current_password)
    # Read the clipboard back; the original kept this round-trip and its
    # result is unused.
    spam = pyperclip.paste()
    labelzwi()
######################################################################
### BUTTONS, LABELS & FELDER ###
# Widget layout.  NOTE(review): chaining .grid() after the constructor means
# every `... = Label(...).grid(...)` variable holds None (grid returns None);
# only pw_ausgabe keeps a real widget reference, which is the one that is
# actually used later.
passlabel = Label(root, text="Ihr Passwort:", bg="grey").grid(row=0, column=0)
pw_ausgabe = Entry(root, width=20, font=("Times New Roman", 18))
pw_ausgabe.grid(row=0, column=1, columnspan=2)
generateButton = Button(root, bg="grey", image=resizedGenerate, command=generate).grid(row=0, column=4)
zwischenablageButton = Button(root, bg="grey", image=resizedZwi, command=zwischenablage).grid(row=1, column=4)
beendenButton = Button(root, image=resizedBee, bg="grey", command=quit).grid(row=2, column=4)
achterradio = Radiobutton(root, text="8 Stellen", bg="grey", variable=stellenvariable, value=8).grid(row=1, column=0)
zehnerradio = Radiobutton(root, text="10 Stellen", bg="grey", variable=stellenvariable, value=10).grid(row=1, column=1)
zwoelferradio = Radiobutton(root, text="12 Stellen", bg="grey", variable=stellenvariable, value=12).grid(row=1, column=2)
lowercasecheck = Checkbutton(root,text = "inkl. Kleinbuchstaben", bg="grey", variable=lowercasecheckvar).grid(row=2, column=0)
uppercasecheck = Checkbutton(root,text = "inkl. Großbuchstaben", bg="grey", variable=uppercasecheckvar).grid(row=2, column=1)
sonderzeichencheck = Checkbutton(root, text = "inkl. Sonderzeichen", bg="grey", variable=SZvar).grid(row=2, column=2)
zahlencheck = Checkbutton(root,text = "inkl. Zahlen", bg="grey", variable=zahlencheckvar).grid(row=2, column=3)
######################################################################
### FUNKTIONEN ###
### dieser Block ermöglicht, die Listen je nach Auswahl zusammenzuführen.
def dazutun():
    """Build the candidate character pool from the selected checkboxes.

    Replaces the original 15-branch if/elif enumerating every checkbox
    combination, which also had no branch for "nothing selected" and so
    silently returned None; an empty selection now yields an empty list
    instead.  The concatenation order (upper, lower, special characters,
    digits) matches the order every original branch produced.
    """
    passlist = []
    if uppercasecheckvar.get():
        passlist += uppercase_list
    if lowercasecheckvar.get():
        passlist += lowercase_list
    if SZvar.get():
        passlist += sonderzeichen_list
    if zahlencheckvar.get():
        passlist += zahlen_list
    return passlist
### dieser Block ermöglicht, das Passwort in der gewünschten Länge zu erstellen. eigentlich HAUPTFUNKTION
def laengebestimmen():
    """Draw the requested number of random characters from the selected
    pool; expose the freshly generated password as the global password1.

    The module-global `password` keeps accumulating every character ever
    drawn; the last stellenvariable characters of it form the new password.
    """
    global password, password1
    pool = dazutun()
    length = stellenvariable.get()
    password += "".join(random.choice(pool) for _ in range(length))
    password1 = password[-length:]
### mit den nächsten 2 Funktionen werden jeweils die Labels generiert, die nach Druck des jeweiligen Buttons ausgegeben werden.
def labelgen():
    """Show the 'store your password safely' reminder below the controls."""
    reminder = Label(
        root,
        text="Bitte vergiss nicht, dein Passwort sicher aufzubewahren!",
        font=("Helvetica 11 bold"),
        fg="firebrick",
        bg="grey",
    )
    reminder.grid(row=3, column=0, columnspan=4)
def labelzwi():
    """Confirm that the password has been placed on the clipboard."""
    confirmation = Label(
        root,
        text="Dein Passwort ist in der Zwischenablage.",
        font=("Helvetica 9"),
        fg="white",
        bg="grey",
    )
    confirmation.grid(row=4, column=0, columnspan=4)
######################################################################
### ACTUAL RUN: hand control to Tk's event loop ###
root.mainloop()
| true
|
3aba9607d72b0c6d5d01d1dd45f825f8ef7313cb
|
Python
|
JRai-de/ProtectoJuegoPython1
|
/DuckHuntV1.py
|
UTF-8
| 5,001
| 2.859375
| 3
|
[] |
no_license
|
# Required libraries.
import random
import os
import background
import pygame
# Player state and game constants.
VidaJugador = 4
Puntuacion = 00
Animales = ['patito', 'pato1', 'bomb', 'b', 'd']
width = 1000
height = 620
Fps = 15
pygame.init()
pygame.display.set_caption('MataPato')
gameDisplay = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
# Colors (RGB)
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
# NOTE(review): this shadows the `background` module imported above; from
# here on the name refers to the Surface.
background = pygame.image.load('back.jpg')
# game background
font = pygame.font.Font(os.path.join(os.getcwd(), 'comic.ttf'), 42)
score_text = font.render('P : ' + str(Puntuacion), True, (255, 255, 255)) #score display
lives_icon = pygame.image.load('images/white_lives.png')
def productor_fruta(animal):
    """(Re)spawn one throwable item in the shared `data` dict with randomized
    launch parameters.

    The original set 'throw': False in the dict literal and then immediately
    overwrote it with a random choice; the dead assignment is folded into a
    single expression.  The random-call order (three randints, then
    random.random) is unchanged, so the spawn distribution is identical.
    """
    animal_path = "images/" + animal + ".png"
    data[animal] = {
        'img': pygame.image.load(animal_path),
        'x': random.randint(100, 500),
        'y': 800,
        'speed_x': random.randint(-10, 10),
        'speed_y': random.randint(-80, -60),
        # 25% of respawns are launched immediately.
        'throw': random.random() >= 0.75,
        't': 0,
        'hit': False,
    }
# Per-animal sprite/state records, filled by productor_fruta.
data = {}
for animal in Animales:
    productor_fruta(animal)
def hide_cross_lives(x, y):
    # Draw the red (lost) life icon at (x, y).
    # NOTE(review): an identical definition appears again further down in
    # this file; the later one shadows this one -- delete one of them.
    gameDisplay.blit(pygame.image.load("images/red_lives.png"), (x, y))
font_name = pygame.font.match_font('comic.ttf')
def draw_text(display, text, size, x, y):
    """Render `text` centered at (x, y) onto `display`.

    Bug fix: the original ignored the `display` parameter and always blitted
    to the module-level `gameDisplay`. All current callers pass gameDisplay,
    so behaviour for them is unchanged, but the parameter now works.
    """
    font = pygame.font.Font(font_name, size)
    text_surface = font.render(text, True, WHITE)
    text_rect = text_surface.get_rect()
    text_rect.midtop = (x, y)
    display.blit(text_surface, text_rect)
def draw_lives(display, x, y, VidaJugador, image) :
    """Draw `VidaJugador` life icons in a horizontal row starting at (x, y).

    Performance fix: the icon image was reloaded from disk on every loop
    iteration; it is now loaded once before the loop.
    """
    img = pygame.image.load(image)
    for i in range(VidaJugador) :
        img_rect = img.get_rect()
        img_rect.x = int(x + 35 * i)   # 35 px spacing between icons
        img_rect.y = y
        display.blit(img, img_rect)
def hide_cross_lives(x, y):
    """Blit the red 'lost life' icon at screen position (x, y).

    NOTE(review): this is a byte-identical duplicate of the earlier
    hide_cross_lives definition; this later one wins at import time.
    Consider deleting one of the two.
    """
    gameDisplay.blit(pygame.image.load("images/red_lives.png"), (x, y))
def show_gameover_screen():
    """Draw the title/game-over screen and block until a key is released."""
    gameDisplay.blit(background, (0,0))
    draw_text(gameDisplay, "Duck Hunt with Others", 64, width / 2, height / 4)
    # `game_over` and `score` are module-level globals. On the very first
    # call game_over is True, so the score line is skipped (at that point
    # `score` does not exist yet).
    if not game_over :
        draw_text(gameDisplay, "P : " + str(score), 50, width / 2, height / 2)
    draw_text(gameDisplay, "Press :/", 24, width / 2, height * 3 / 4)
    pygame.display.flip()
    # Busy-wait (throttled to Fps) until any key-up event arrives.
    waiting = True
    while waiting:
        clock.tick(Fps)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): pygame.quit() is called but the loop keeps
                # running; the next pygame call will fail — confirm intended.
                pygame.quit()
            if event.type == pygame.KEYUP:
                waiting = False
# Main game loop: show the splash screen once, then run rounds until the
# window is closed. Hit detection is mouse-hover based (no click needed).
first_round = True
game_over = True
game_running = True
while game_running:
    if game_over:
        # Start of a new round: reset lives and the (unused) round score.
        if first_round:
            show_gameover_screen()
            first_round = False
        game_over = False
        VidaJugador = 4
        draw_lives(gameDisplay, 690, 5, VidaJugador, 'images/red_lives.png')
        # NOTE(review): `score` is reset here but points are tallied in the
        # separate global `Puntuacion` below — confirm which one is intended.
        score = 0
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_running = False
    gameDisplay.blit(background, (0, 0))
    gameDisplay.blit(score_text, (0, 0))
    draw_lives(gameDisplay, 690, 5, VidaJugador, 'images/red_lives.png')
    # Advance and draw every thrown sprite; respawn the rest.
    for key, value in data.items():
        if value['throw']:
            # Simple projectile motion: speed_y grows each tick via t.
            value['x'] += value['speed_x']
            value['y'] += value['speed_y']
            value['speed_y'] += (1 * value['t'])
            value['t'] += 1
            if value['y'] <= 800:
                gameDisplay.blit(value['img'], (value['x'], value['y']))
            else:
                productor_fruta(key)
            # Hover-based hit test against a 60x60 box around the sprite.
            current_position = pygame.mouse.get_pos()
            if not value['hit'] and current_position[0] > value['x'] and current_position[0] < value['x'] + 60 \
                and current_position[1] > value['y'] and current_position[1] < value['y'] + 60:
                if key == 'bomb':
                    # Touching the bomb costs a life and marks one icon red.
                    VidaJugador -= 1
                    if VidaJugador == 0:
                        hide_cross_lives(690, 15)
                    elif VidaJugador == 1:
                        hide_cross_lives(725, 15)
                    elif VidaJugador == 2:
                        hide_cross_lives(760, 15)
                    # NOTE(review): game over only when lives go negative,
                    # so the player survives one more bomb than icons shown.
                    if VidaJugador < 0:
                        show_gameover_screen()
                        game_over = True
                    half_fruit_path = "images/explosion.png"
                else:
                    half_fruit_path = "images/" + "half_" + key + ".png"
                value['img'] = pygame.image.load(half_fruit_path)
                value['speed_x'] += 10
                if key != 'bomb':
                    Puntuacion += 1
                    score_text = font.render('Puntos : ' + str(Puntuacion), True, (255, 255, 255))
                value['hit'] = True
        else:
            productor_fruta(key)
    pygame.display.update()
    clock.tick(Fps)
pygame.quit()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| true
|
4f7d299be425ddb8126d461abe4c4bc815df16bf
|
Python
|
goudarzi8/boston_housing
|
/exploredata.py
|
UTF-8
| 1,041
| 3.546875
| 4
|
[] |
no_license
|
import pandas as pd
# Importing pandas to use analytical tools
from sklearn.datasets import *
from sklearn.linear_model import LinearRegression
# Applying simple (ordinary least squares) regression analysis

# Load the housing data file (expects an 'MEDV' target column).
data = pd.read_csv('data/data.csv')

# Print summary statistics for every attribute.
print("MIN Value for each Attribute is")
print(data.min())
# Bug fix: the label claimed first, second and third quantiles but only the
# 25% quantile is printed here (the 75% quantile follows the median below).
print("First Quantile (25%) Value for each Attribute is")
print(data.quantile([0.25]))
print("Median Value for each Attribute is")
print(data.median())
print("Third Quantile Value for each Attribute is")
print(data.quantile([0.75]))
print("Max Value for each Attribute is")
print(data.max())
print("Average Value for each Attribute is")
print(data.mean())
print("Standard Deviation Value for each Attribute is")
print(data.std())

# MEDV is the target (median home value); everything else is a feature.
prices = data['MEDV']
# loading prices for easier calculations
attributes = data.drop('MEDV', axis = 1)
# Removing MEDV from the data to control them separately

# Fit and report the linear model.
model = LinearRegression()
model.fit(attributes, prices)
# Bug fix: the two lines below were Python 2 print statements, which are a
# SyntaxError on Python 3 even though the rest of the file uses print().
print(model.__dict__)
print(model.score(attributes, prices))
| true
|
62381c409a602d23a9ca7bf594bbb20ec45b7d48
|
Python
|
mkgharbi/ST5-EI-Simulation
|
/mainPerformanceSimulation.py
|
UTF-8
| 8,933
| 3.078125
| 3
|
[] |
no_license
|
from Machine import Machine
from SharedFunctions import *
from System import System
from Buffer import *
from indicateurs_de_performance import *
MAXSIMULATIONBUFFERINCREMENTED = 50
def creationSystemCommonBuffer(numberMachine, machineTable, bufferTable):
    """Build a serial production line of `numberMachine` machines.

    Appends numberMachine-1 middle buffers (initial capacity 1) and one
    Machine per stage to the caller-supplied lists, wiring the first machine
    to INPUT_CNT_BUF and the last to OUTPUT_CNT_BUF. Probabilities are read
    interactively via probabilityBreakdownInput/probabilityRepairInput
    (imported helpers). Returns the assembled System.
    """
    for i in range(numberMachine-1) :
        bufferTable.append(Buffer(Buffer.Type.MIDDLE,1,'Buffer'+str(i+1)))
    for j in range(numberMachine):
        breakdown_prob = probabilityBreakdownInput(j)
        repair_prob = probabilityRepairInput(j)
        if (j == 0):
            # First machine reads from the global input counter buffer.
            machineTable.append(Machine(breakdown_prob,repair_prob,INPUT_CNT_BUF,bufferTable[j],"Machine1"))
        elif (j == numberMachine-1):
            # Last machine writes to the global output counter buffer.
            machineTable.append(Machine(breakdown_prob,repair_prob,bufferTable[j-1],OUTPUT_CNT_BUF,"Machine"+str(j+1)))
        else:
            machineTable.append(Machine(breakdown_prob,repair_prob,bufferTable[j-1],bufferTable[j],"Machine"+str(j+1)))
    return System(numberMachine, machineTable, bufferTable)
def incrementBufferSize(system):
    """Grow every buffer in `system` by one unit of capacity."""
    for buf in system.getBuffers():
        buf.incrementCapacity()
def incrementMachineProbability(system,indexInMachineTable ,variableChoice, probabilityValue):
if (variableChoice % 2 == 1):
system.getMachines()[indexInMachineTable].setBreakdownProbability(probabilityValue)
else:
system.getMachines()[indexInMachineTable].setRepairProbability(probabilityValue)
def generateVariableOptions(machineNumber):
possibleOptions = machineNumber*2
machineIndex = 1
for i in range(1,possibleOptions+1):
if (i % 2 == 1):
print("Click " + str(i) + " : Variate only the breakdown probability of the machine " + str(machineIndex))
else :
print("Click " + str(i) + " : Variate only the repair probability of the machine " + str(machineIndex))
machineIndex += 1
def ChoosingPrabilities(indexInMachineTable, choiceVariable, numberMachine, machineTable, bufferTable):
    """Populate `machineTable` with one Machine per stage, fixing one probability.

    The machine at `indexInMachineTable` gets the probability selected by
    `choiceVariable` (odd = breakdown, even = repair) pinned to 0.01 as the
    starting point of the sweep in main(); its other probability, and both
    probabilities of every other machine, are read interactively.
    NOTE(review): the name has a typo ("Prabilities"); kept because callers
    use it.
    """
    for j in range(numberMachine):
        breakdown_prob = 0
        repair_prob = 0
        if (j == indexInMachineTable):
            if (choiceVariable % 2 == 1):
                # Sweep the breakdown probability; ask the user for repair.
                breakdown_prob = 0.01
                repair_prob = probabilityRepairInput(j)
            else:
                # Sweep the repair probability; ask the user for breakdown.
                repair_prob = 0.01
                breakdown_prob = probabilityBreakdownInput(j)
        else:
            breakdown_prob = probabilityBreakdownInput(j)
            repair_prob = probabilityRepairInput(j)
        if (j == 0):
            # First machine reads from the global input counter buffer.
            machineTable.append(Machine(breakdown_prob,repair_prob,INPUT_CNT_BUF,bufferTable[j],"Machine1"))
        elif (j == numberMachine-1):
            # Last machine writes to the global output counter buffer.
            machineTable.append(Machine(breakdown_prob,repair_prob,bufferTable[j-1],OUTPUT_CNT_BUF,"Machine"+str(j+1)))
        else:
            machineTable.append(Machine(breakdown_prob,repair_prob,bufferTable[j-1],bufferTable[j],"Machine"+str(j+1)))
    return machineTable
def main():
print("------Simulation------")
print("---Creating the System")
numberMachine = int(input("Enter the number of machines: "))
machineTable = []
bufferTable = []
historicSimulations = []
while(True):
print("---Choose type of calculation")
print("--Click A: Increment buffer size by one unit")
print("--Click B: Choose which probability to variate")
print("--Click Q: Exit")
choice = input("Make your choice: ").upper()
if (choice == "A"):
bufferTable.clear()
machineTable.clear()
historicSimulations.clear()
system = creationSystemCommonBuffer(numberMachine, machineTable, bufferTable)
timeSlot = int(input("Enter time slot of the simulation: "))
simulationCounter = 1
while(simulationCounter <= MAXSIMULATIONBUFFERINCREMENTED):
system.setCommonCapacity(simulationCounter)
simulation = 0
while(simulation < 50):
print("Simulation: ")
print("T = 0")
print(generateStringState(system))
for buf in system.getBuffers():
buf.reset()
for machine in system.getMachines():
machine.reset()
system.resetHistoric()
instantT = 0
while(instantT < timeSlot):
copyOutputValue = OUTPUT_CNT_BUF.getCurrent()
copyInputValue = abs(INPUT_CNT_BUF.getCurrent())
for machine in system.getMachines():
machine.phase_1_rand()
for machine in system.getMachines():
machine.phase_2()
print("T = " + str(instantT+1))
print(generateStringState(system))
differenceOutput = OUTPUT_CNT_BUF.getCurrent() - copyOutputValue
differenceInput = abs(INPUT_CNT_BUF.getCurrent()) - copyInputValue
summarizedState = generateSummarizedState(system,differenceOutput,differenceInput)
summarizedStateCopy = summarizedState[:]
system.getHistoricState().append(summarizedStateCopy)
instantT +=1
historicStateCopy = system.getHistoricState()[:]
historicSimulations.append(historicStateCopy)
simulation += 1
simulationCounter +=1
elif(choice == "B"):
bufferTable.clear()
machineTable.clear()
historicSimulations = []
for i in range(numberMachine-1) :
sizei = bufferInput(i)
bufferTable.append(Buffer(Buffer.Type.MIDDLE,sizei,'Buffer'+str(i+1)))
print("Choose which variable to change")
generateVariableOptions(numberMachine)
while(True):
choiceVariable = int(input("Choose which variable: "))
if choiceVariable in range(1, numberMachine*2 + 1):
indexMachine = choiceVariable
if (choiceVariable % 2 == 1): # Breakdown proba
indexMachine = choiceVariable + 1
indexInMachineTable = int(indexMachine / 2) - 1
print(indexInMachineTable)
machineTable = ChoosingPrabilities(indexInMachineTable,choiceVariable,numberMachine,machineTable,bufferTable)
system = System(numberMachine,machineTable,bufferTable)
timeSlot = int(input("Enter time slot of the simulation: "))
probabilityValue = 0.02
while(probabilityValue <= 1):
print("----------")
print("Simulation: " + str(probabilityValue))
incrementMachineProbability(system,indexInMachineTable ,choiceVariable, probabilityValue)
nbSimulation = 0
while(nbSimulation < 1000):
for buf in system.getBuffers():
buf.reset()
for machine in system.getMachines():
machine.reset()
system.resetHistoric()
instantT = 0
print("T = 0")
print(generateStringState(system))
while(instantT < timeSlot):
copyOutputValue = OUTPUT_CNT_BUF.getCurrent()
copyInputValue = abs(INPUT_CNT_BUF.getCurrent())
simulatingAStep(system)
print("T = " + str(instantT+1))
print(generateStringState(system))
differenceOutput = OUTPUT_CNT_BUF.getCurrent() - copyOutputValue
differenceInput = abs(INPUT_CNT_BUF.getCurrent()) - copyInputValue
summarizedState = generateSummarizedState(system,differenceOutput,differenceInput)
summarizedStateCopy = summarizedState[:]
system.getHistoricState().append(summarizedStateCopy)
instantT +=1
historicStateCopy = system.getHistoricState()[:]
historicSimulations.append(historicStateCopy)
nbSimulation += 1
probabilityValue += 0.01
break
else:
print("Choose a number from those proposed ")
elif(choice == "Q"):
plt.show()
break
if __name__ == "__main__":
main()
| true
|
02bd92de642ab13124c137fff14587464b44a4e2
|
Python
|
denisov93/AA-20-21
|
/assignment1/TP1.py
|
UTF-8
| 9,120
| 3.3125
| 3
|
[] |
no_license
|
'''
Assignment 1 by
Alexander Denisov (44592)
Samuel Robalo (41936)
AA 20/21
TP4 Instructor: Joaquim Francisco Ferreira da Silva
Regency: Ludwig Krippahl
'''
'''
TP1 Test & Train File contents
Features(4) + ClassLabels(1):
1) Variance
2) Skewness
3) Curtosis
4) Entropy
5) Class Label [0=RealBankNotes & 1=FakeBankNotes]
Classifiers(3) needed for the project:
> Logistic Regression
> Naïve Bayes Classifier using Kernel Density Estimation
> Gaussian Naïve Bayes classifier
Comparing classifiers:
> Normal test (95% confidence)
> McNemar's test (95% confidence)
Observations:
> "Não esqueçam que, no caso do Naive Bayes, kde.fit deve ser feito para cada "par" (Classe, Atributo)."
'''
##Region Imports
#
import math
import numpy as np
import matplotlib.pyplot as plt
#
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KernelDensity #reminder: Needs to find the optimum value for the bandwitdh parameter of the kernel density estimators
from sklearn.naive_bayes import GaussianNB #reminder: no parameter adjustment
#
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, StratifiedKFold
def sep(text):
print("~~~~~~~ "+text+" ~~~~~~~~~~~~~~~~~")
def sepn(text):
    """Print a tilde separator for *text* followed by two blank lines."""
    sep(text)
    print("\n")
def testMc(estim1, estim2, test):
    """Run McNemar's test on two classifiers' predictions against `test`.

    Counts e01 (estim1 wrong, estim2 right) and e10 (estim1 right,
    estim2 wrong) and returns the McNemar statistic.

    Bug fix: the loop bound used `len(pred_bayes)` — a module-level global —
    instead of the length of the arguments, so calling this with predictions
    of a different length mis-counted or crashed.
    """
    e01 = 0
    e10 = 0
    for i in range(len(test)):
        if estim1[i] != test[i] and estim2[i] == test[i]:
            e01 += 1
        if estim1[i] == test[i] and estim2[i] != test[i]:
            e10 += 1
    return McNemarTest(e01, e10)
def aproxNormalTest(N:int, X:float) -> float:
    """Expected number of errors under the normal approximation.

    N is the size of the test set and X the measured accuracy;
    returns N * (1 - X).
    """
    error_rate = 1 - X
    return N * error_rate
def calcDeviation(X:float,N:int) -> float:
    """95% half-width of the normal approximation: 1.96 * sqrt(N*X*(1-X)).

    X is the measured error rate and N the size of the test set.
    """
    return 1.96 * math.sqrt(N * X * (1 - X))
def McNemarTest(e01, e10) -> float:
    """McNemar statistic for two classifiers' discordant error counts.

    e01 / e10 are the counts of examples one classifier got wrong and the
    other right. Values >= 3.84 indicate a difference at 95% confidence.
    """
    stat = (abs(e01 - e10) - 1) ** 2 / (e01 + e10)
    print("[McNemar's Test] Classifiers are likely different if " + str(stat) + " >= 3.84")
    return stat
#Logistic Regression Calc Folds
def calc_fold_logistic(X,Y, train_ix,valid_ix,C):
    """Fit logistic regression (inverse regularisation C) on one CV fold.

    Returns (training error, validation error), each as 1 - accuracy.
    """
    reg = LogisticRegression(C=C, tol=1e-10)
    reg.fit(X[train_ix],Y[train_ix])
    erroVal = 1 - reg.score(X[valid_ix],Y[valid_ix])
    erroTreino = 1 - reg.score(X[train_ix],Y[train_ix])
    return (erroTreino,erroVal)
#File Loading
def load_file(file):
    """Load a tab-separated numeric file (path or file-like) as a NumPy array."""
    return np.loadtxt(file, delimiter='\t')
tests = load_file("TP1_test.tsv")
train = load_file("TP1_train.tsv")
#Shuffle
tests = shuffle(tests)
train = shuffle(train)
#Standardizing
sep("Standardizing")
#Train
Ys = train[:,4].astype(int)
Xs = train[:,0:4]
means = np.mean(Xs,axis=0)
stdevs = np.std(Xs,axis=0)
Xs = (Xs-means)/stdevs
#Tests
Y_finaltest = tests[:,4].astype(int)
X_finaltest = tests[:,0:4]
X_finaltest = (X_finaltest-means)/stdevs
print("Preparing training set test/validation")
X_train,X_test,Y_train,Y_test = train_test_split(Xs, Ys, test_size=0.33, stratify = Ys)
folds = 5
stratKf = StratifiedKFold( n_splits = folds)
sep("Logistic Regression")
'''Logistic Regression Area Code '''
#Create array of C
c_par = []
c_n = 1e-3
for x in range(16):
c_par.append(c_n)
c_n *=10
errorTrain_l = []
errorValidation_l = []
counter = 0
ind = 0
smaller = 1
cs = []
for c in c_par:
tr_err = va_err = 0
for tr_ix, val_ix in stratKf.split(Y_train, Y_train):
r, v = calc_fold_logistic(X_train, Y_train, tr_ix, val_ix,c)
tr_err += r
va_err += v
cs.append(c)
if(smaller>va_err/folds):
smaller = va_err/folds
ind = counter
counter+=1
errorTrain_l.append(tr_err/folds)
errorValidation_l.append(va_err/folds)
print("Best of C :", cs[ind])
plt.figure(figsize=(8,4), frameon=True)
plt.title("Logistic Regression with best C: "+str(cs[ind]))
line1, = plt.plot(errorTrain_l, label="Train Err", linestyle='-', color='blue')
line2, = plt.plot(errorValidation_l, label="Validation Err", linestyle='-', color='green')
legend = plt.legend(handles=[line1,line2], loc='upper right')
ax = plt.gca().add_artist(legend)
#plt.show()
plt.savefig('LR.png', dpi=300)
plt.close()
reg = LogisticRegression(C=cs[ind], tol=1e-10)
reg.fit(Xs, Ys)
erroVal = 1 - reg.score(X_finaltest,Y_finaltest)
print("resultado do teste erro de avaliação:",erroVal)
sep("Gaussian")
gaus = GaussianNB()
gaus.fit(Xs, Ys)
erroVal = 1 - gaus.score(X_finaltest,Y_finaltest)
print("resultado do teste erro de avaliação:",erroVal)
'''
All Code For Bayes
'''
sep("Naive Bayes")
def calc_folds_bayes(X, Y, train_ix, val_ix, bandwidth):
    """Run the KDE Naive Bayes classifier on one CV fold.

    Returns (training predictions, validation predictions) from bayes().
    """
    return bayes(X[train_ix], Y[train_ix], X[val_ix], Y[val_ix], bandwidth)
def bayes(X_r,Y_r, X_v, Y_v, bandwidth):
    """Naive Bayes with per-(class, feature) Gaussian KDE likelihoods.

    Fits one KDE per class per feature on the training rows and accumulates
    log-likelihoods plus a log-prior; predicts class 1 where its total log
    score exceeds class 0's. Returns (train predictions, val predictions)
    as 0/1 float arrays.
    """
    kde = KernelDensity(bandwidth=bandwidth,kernel='gaussian')
    # Split training and validation rows per class (0 = real, 1 = fake).
    t_0 = X_r[Y_r == 0,:] #real
    t_1 = X_r[Y_r == 1,:] #fakes
    v_0 = X_v[Y_v == 0, :]
    v_1 = X_v[Y_v == 1, :]
    # Log-priors from the class frequencies.
    # NOTE(review): the validation priors pv_0/pv_1 are computed from the
    # validation labels themselves — confirm whether training priors were
    # intended here instead.
    p_0 = np.log( t_0.shape[0] / X_r.shape[0] )
    p_1 = np.log( t_1.shape[0] / X_r.shape[0] )
    pv_0 = np.log( v_0.shape[0] / X_v.shape[0] )
    pv_1 = np.log( v_1.shape[0] / X_v.shape[0] )
    # Initialise each accumulator with the log-prior for its class.
    sum_logs_t_0 = np.ones(X_r.shape[0]) * p_0
    sum_logs_t_1 = np.ones(X_r.shape[0]) * p_1
    sum_logs_v_0 = np.ones(X_v.shape[0]) * pv_0
    sum_logs_v_1 = np.ones(X_v.shape[0]) * pv_1
    classes = np.zeros(X_r.shape[0])
    classes_n = np.zeros(X_v.shape[0])
    # One KDE fit per (class, feature) pair, scoring both splits.
    for i in range(X_r.shape[1]):
        kde.fit(t_0[:,[i]])
        sum_logs_t_0 += kde.score_samples(X_r[:,[i]])
        sum_logs_v_0 += kde.score_samples(X_v[:,[i]])
        kde.fit(t_1[:,[i]])
        sum_logs_t_1 += kde.score_samples(X_r[:,[i]])
        sum_logs_v_1 += kde.score_samples(X_v[:,[i]])
    # MAP decision: 1 where class-1 log score strictly dominates.
    classes[(sum_logs_t_1 > sum_logs_t_0)] = 1
    classes_n[(sum_logs_v_1 > sum_logs_v_0 )] = 1
    return classes,classes_n
errorTrain_b = []
errorValidation_b = []
best_err = 1e12
best_bw = 1
bws = [round(b,3) for b in np.arange(0.02,0.6,0.02) ]
for bandwidth in bws:
tr_err = va_err = 0
for tr_ix, val_ix in stratKf.split(Y_train, Y_train):
r,v = calc_folds_bayes(X_train,Y_train, tr_ix,val_ix, bandwidth)
tr_err += 1 - accuracy_score(r , Y_train[tr_ix])
va_err += 1 - accuracy_score(v , Y_train[val_ix])
tr_err = tr_err/folds
va_err = va_err/folds
errorTrain_b.append(tr_err)
errorValidation_b.append(va_err)
if va_err < best_err:
best_err = va_err
best_bw = bandwidth
plt.figure(figsize=(8,4), frameon=True)
plt.title("Naive Bayes with best Bandwidth: "+str(best_bw))
line1, = plt.plot(bws,errorTrain_b, label="Train Err", linestyle='-', color='blue')
line2, = plt.plot(bws,errorValidation_b, label="Validation Err", linestyle='-', color='green')
legend = plt.legend(handles=[line1,line2], loc='lower right')
ax = plt.gca().add_artist(legend)
#plt.show()
plt.savefig('NB.png', dpi=300)
plt.close()
r,pred_bayes = bayes(Xs, Ys, X_finaltest, Y_finaltest, best_bw)
error = 1 - accuracy_score(pred_bayes, Y_finaltest)
print("Best Bandwidth Found "+str(best_bw)+" with Error of",error)
pred_logistic = reg.predict(X_finaltest)
pred_gaussian = gaus.predict(X_finaltest)
print("McNemar Test")
t_p_l = testMc(pred_bayes,pred_logistic,Y_finaltest)
print("NB vs LR:",round(t_p_l,2))
t_l_g = testMc(pred_logistic,pred_gaussian,Y_finaltest)
print("LR vs GS:",round(t_l_g,2))
t_g_p = testMc(pred_gaussian,pred_bayes,Y_finaltest)
print("GS vs NB:",round(t_g_p,2))
print("True Error")
t_err_lg = np.mean(pred_logistic - Y_finaltest)**2
print("LR:",round(t_err_lg,5))
t_err_gs = np.mean(pred_gaussian - Y_finaltest)**2
print("GS:",round(t_err_gs,5))
t_err_nb = np.mean(pred_bayes - Y_finaltest)**2
print("NB:",'%f' % round(t_err_nb,9))
size = len(Y_finaltest)
aprox_NT_l = aproxNormalTest(size , reg.score(X_finaltest,Y_finaltest))
dev_l = calcDeviation(t_err_lg,size)
aprox_NT_g = aproxNormalTest(size, gaus.score(X_finaltest,Y_finaltest))
dev_g = calcDeviation(t_err_gs,size)
aprox_NT_b = aproxNormalTest(size, accuracy_score(pred_bayes, Y_finaltest))
dev_b = calcDeviation(t_err_nb,size)
print("Aprox Normal Test")
print("LR: "+str(round(aprox_NT_l,2))+" ± "+str(round(dev_l,3)))
print("GS: "+str(round(aprox_NT_g,2))+" ± "+str(round(dev_g,3)))
print("NB: "+str(round(aprox_NT_b,2))+" ± "+str(round(dev_b,3)))
| true
|
4c1045e7200900fc5a26de967882b89c4679bee6
|
Python
|
hannbusann/fw_xbot_zdzn
|
/src/xbot_s/script/input_keypoint.py
|
UTF-8
| 3,061
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#coding=utf-8
import rospy, sys, termios, tty
import yaml
from geometry_msgs.msg import Pose, PoseStamped
from visualization_msgs.msg import Marker, MarkerArray
from move_base_msgs.msg import MoveBaseActionResult
class office_lady():
    """Interactive ROS node that records named keypoints clicked in rviz.

    Asks for the number of keypoints, then alternates between prompting the
    user for a keypoint name on the console and reading its pose from the
    '/mark_coll_position' topic (published by an rviz 2D-goal click). Each
    accepted pose is published as a marker and, once all keypoints are
    collected, the whole dict is dumped to keypoint.yaml.
    NOTE: this file is Python 2 (print statements, raw input semantics).
    """
    def __init__(self):
        # Number of keypoints to collect; prompt strings are user-facing
        # Chinese text and are kept verbatim.
        self.total_coll = input('请输入所有关键点的个数:\n')
        # print type(self.total_coll)
        # print self.total_coll
        self.num_coll = 0
        self.coll_position_dic = {}
        # Red sphere-list marker used to visualise recorded keypoints.
        self.marker=Marker()
        self.marker.color.r=1.0
        self.marker.color.g=0.0
        self.marker.color.b=0.0
        self.marker.color.a=1.0
        self.marker.ns='office_lady'
        self.marker.scale.x=1
        self.marker.scale.y=0.1
        self.marker.scale.z=0.1
        self.marker.header.frame_id='map'
        self.marker.type=Marker.SPHERE_LIST
        self.marker.action=Marker.ADD
        self.arraymarker = MarkerArray()
        self.markers_pub = rospy.Publisher('/coll_position',MarkerArray,queue_size=1)
        self.goal_sub = rospy.Subscriber('/mark_coll_position',PoseStamped, self.mark_coll_positionCB)
        # self.goal_result_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult, self.goal_resultCB)
        # f = open('param/col_pos.yaml')
        # self.col_poses = yaml.load(f)
        # if col_poses is not NULL:
        # #push col_poses.pos to markerarray
        # else:
        # print 'please input the pose of every colleague with 2DNavi goal...'
        # print 'the first colleague is '+ col_poses.pose[0]
        # Ask for the first keypoint's name, then wait for rviz clicks.
        tip = '请输入第 '+ str(self.num_coll) + '个关键点的名称(带引号):\n'
        self.name = input(tip)
        tip = '请在rviz当中使用鼠标点击第 ' + str(self.num_coll) +' 个目标点的位置。'
        print tip
        rospy.spin()
    def mark_coll_positionCB(self, pos):
        # Callback for each rviz click: store pose under the pending name.
        if self.num_coll < self.total_coll:
            # Record as [[x, y, z], [qx, qy, qz, qw]].
            self.coll_position_dic[self.name] = [[pos.pose.position.x,pos.pose.position.y,pos.pose.position.z],[pos.pose.orientation.x,pos.pose.orientation.y,pos.pose.orientation.z,pos.pose.orientation.w]]
            self.num_coll+=1
            print 'added '+str(self.num_coll)+' colleagues'
            print self.coll_position_dic
            # Publish the updated marker array so rviz shows the new point.
            self.marker.header.stamp =rospy.Time.now()
            self.marker.pose = pos.pose
            self.marker.id = self.num_coll - 1
            self.arraymarker.markers.append(self.marker)
            self.markers_pub.publish(self.arraymarker)
            if self.num_coll < self.total_coll:
                # More keypoints pending: prompt for the next name.
                tip = '请输入第 '+ str(self.num_coll) + '个关键点的名称(带引号):\n'
                self.name = input(tip)
                tip = '请在rviz当中使用鼠标点击第' + str(self.num_coll) +' 个目标点的位置。'
                print tip
            else:
                # All keypoints collected: persist them to YAML.
                f=open('keypoint.yaml', 'w')
                yaml.dump(self.coll_position_dic, f)
                f.close()
                print '您已完成所有关键点的录入,关键点文件已成功存入运行目录下的keypoint.yaml文件,请使用ctrl+c退出程序即可。'
    def goal_resultCB(self, result):
        # Placeholder for the (commented-out) move_base result subscriber.
        pass
# Script entry point: start the ROS node and run the interactive collector.
if __name__ == '__main__':
    rospy.init_node('office_lady_serve')
    try:
        rospy.loginfo('office lady initialized...')
        office_lady()
    except rospy.ROSInterruptException:
        rospy.loginfo('office lady initialize failed, please retry...')
| true
|
27d2d269a43c902e4bfce748b308e5a102d5ce5c
|
Python
|
murakami10/atc_python
|
/not_solved/03/abc188_do______________________________________イモス法.py
|
UTF-8
| 517
| 2.8125
| 3
|
[] |
no_license
|
from typing import List
from typing import Tuple
# Sweep line over payment events: each card contributes +c at its start day
# and -c just past its end day; between event boundaries the daily charge is
# min(C, running total), since the prepaid plan caps any day at C yen.
N, C = map(int, input().split())
events: List[Tuple[int, int]] = []
for _ in range(N):
    a, b, c = map(int, input().split())
    a -= 1  # half-open interval [a-1, b): charge applies on days a..b
    events.append((a, c))
    events.append((b, -c))
events.sort(key=lambda x: x[0])
top: int = 0   # left edge of the current segment
ans: int = 0   # accumulated total cost
tmp: int = 0   # running sum of active daily charges
for event in events:
    if event[0] != top:
        # Close the segment [top, event[0]) at the capped daily rate.
        ans += min(C, tmp) * (event[0] - top)
        top = event[0]
    tmp += event[1]
print(ans)
# https://atcoder.jp/contests/abc188/tasks/abc188_d
| true
|
c70030a22976f3db3ee39070003b158aa16c5b45
|
Python
|
Skar0/proba
|
/codes/Histogrammes.py
|
UTF-8
| 1,362
| 2.9375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import matplotlib.pyplot as plt
from numpy.random import normal
from numpy.random import randn
# Read the data file line by line into a table: one row per measurement,
# one column per RAM technology / access count.
currentFirstColVal = 10  # NOTE(review): appears unused — confirm before removing
data = [[0 for x in range(4)] for x in range(1000)]  # assumes exactly 1000 data lines
i = 0
with open("../donnees/Q12_donnee.dat") as f:
    for line in f:
        split_line = line.split()
        data[i][0] = split_line[0]
        data[i][1] = split_line[1]
        data[i][2] = split_line[2]
        data[i][3] = split_line[3]
        i+=1
techno = 0
taille = 0
dataToPlot = []
i = 1 # i runs 1..10 for the 10 memory sizes
while(i <= 10):
    j = 1 # three technologies (1..3); j is also the column index giving the technology
    while (j <= 3):
        iterationNbr = 0 # 100 samples per technology
        while (iterationNbr < 100):
            dataToPlot.append(float(data[iterationNbr+100*(i-1)][j]))
            iterationNbr+=1
        # Render and save one histogram per (technology, size) pair.
        plt.clf()
        plt.hist(dataToPlot, bins=10)
        plt.title("Histogramme : technologie n°"+str(j)+", taille "+str(i*10))
        plt.xlabel("Temps d'attente (ms)")
        plt.ylabel("Nombre d'occurences")
        plt.savefig("../img/"+str(j)+"-"+str(i*10)+".png", bbox_inches='tight')
        dataToPlot = []
        j+=1
    i+=1
| true
|
d6239065f962ffbb0cc2683a5de5e98aa741fa12
|
Python
|
samgeen/hackandslash
|
/src/CmdVars.py
|
UTF-8
| 589
| 2.640625
| 3
|
[] |
no_license
|
'''
Created on 2 Mar 2014
@author: samgeen
'''
import Animals
class Player(object):
    """A player with a mutable display name; str() is always 'player'."""

    def __init__(self):
        # Default name until the player renames themselves.
        self._name = "punk"

    def Name(self, newname=None):
        """Rename the player if `newname` is given; return the current name."""
        if newname is not None:
            self._name = newname
        return self._name

    def __str__(self):
        return "player"
# Default animal companion name, exposed to the command interpreter.
animalname = "llama"
# Variables available to game commands: the player object, the animal's
# name, the current level (unset until loaded) and the starting inventory.
cmdvars = {"punk": Player(),
           "animalname": animalname,
           "level": None,
           "inventory": []}
| true
|
d3608c993b1c44d3bddc8c3a6e14e8f14c6ad5a9
|
Python
|
noobwei/moeCTF_2021
|
/Reverse/Realezpy/EZpython.py
|
UTF-8
| 931
| 2.5625
| 3
|
[] |
no_license
|
import time
#flag = 'moectf{Pyth0n_1s_so0o0o0o0_easy}'
c = [119, 121, 111, 109, 100, 112, 123, 74, 105, 100, 114, 48, 120, 95, 49, 99, 95, 99, 121, 48, 121, 48, 121, 48, 121, 48, 95, 111, 107, 99, 105, 125]
def encrypt(a):
    """Caesar-style cipher used by the CTF checker.

    Lowercase letters are shifted by 114 and uppercase by 514 (both mod 26,
    staying within their case); every other character passes through
    unchanged. Returns the resulting code points as a list of ints.
    """
    result = []
    for ch in a:
        code = ord(ch)
        if ord('a') <= code <= ord('z'):
            code = (code + 114 - ord('a')) % 26 + ord('a')
        elif ord('A') <= code <= ord('Z'):
            code = (code + 514 - ord('A')) % 26 + ord('A')
        result.append(code)
    return result
# Interactive flag checker: encrypt the user's input and compare it to the
# stored ciphertext `c`, element by element.
ipt = input("Plz give me your flag:")
out = encrypt(ipt)
if len(ipt) !=len(c):
    print("Wrong lenth~")
    exit()
else:
    for i in range(len(c)):
        if out[i] != c[i]:
            print("Plz try again?")
            exit()
    # All code points matched: the flag is correct.
    print('Congratulations!!!')
    time.sleep(1)
    print("enjoy the beauty of python ~~~ ")
    import this
| true
|
e829dd55ec71e0e17246c1929f5f959ea1529491
|
Python
|
openstax/pdf-distribution
|
/app/src/config.py
|
UTF-8
| 2,618
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
import boto3
class Config(object):
    """Latest 'current' app configuration loaded from a DynamoDB table.

    On construction, scans the table for all items flagged is_current and
    keeps the one with the highest numeric suffix in its config_id
    (e.g. 'ver_123'). The raw DynamoDB item (attribute-typed dict) is kept
    in self.config_data.
    """
    def __init__(self, region_name, table_name):
        self.region_name = region_name
        self.table_name = table_name
        configs = Config.get_configs_from_dynamodb(
            region_name=self.region_name,
            table_name=self.table_name,
        )

        ## The highest version numbered current config
        ## is the one we want.
        def get_version_number(config):
            version_string = config['config_id']['S'] ## e.g., 'ver_123'
            # Assumes a fixed 'ver_' prefix; the rest must parse as int.
            version_number = int(version_string[4:])
            return version_number

        config = sorted(configs, key=get_version_number)[-1]

        self.config_data = config

    def get_version(self):
        """Return the selected config's version string (e.g. 'ver_123')."""
        return self.config_data['config_id']['S']

    def get_ugly_uri(self, pretty_uri):
        """Resolve a pretty URI through the uri_map chain.

        Follows mappings repeatedly (a -> b -> c ...) until the chain ends
        (no further mapping) or maps to itself; returns the last resolved
        URI, or None when the input has no mapping at all.
        """
        ugly_uri = None

        new_uri = pretty_uri
        while True:
            new_uri = self.config_data['uri_map']['M'] \
                          .get(new_uri,{}) \
                          .get('S', None)
            if new_uri is None:
                break
            elif new_uri == ugly_uri:
                # Self-mapping: stop to avoid looping forever.
                break
            else:
                ugly_uri = new_uri

        return ugly_uri

    def access_is_allowed(self, user, ugly_uri):
        """True if `user` appears in the whitespace-separated allow list for the URI."""
        allowed = self.config_data['access_map']['M'] \
                      .get(ugly_uri,{}) \
                      .get('S', '-') \
                      .strip().split()

        return user in allowed

    @staticmethod
    def get_configs_from_dynamodb(region_name, table_name):
        """Scan the table and return all items with is_current == true.

        Raises RuntimeError when the scan fails or yields no current items.
        """
        session = boto3.session.Session(region_name=region_name)
        ddb_client = session.client('dynamodb')

        try:
            ## Fetch all configs with is_current set to true.
            ## NOTE: This assumes that DynamoDb does not
            ##       process more than 1MB of data to find
            ##       the desired items.
            ddb_response = ddb_client.scan(
                TableName=table_name,
                ExpressionAttributeValues={
                    ':c': {
                        'BOOL': True,
                    }
                },
                FilterExpression='is_current = :c',
            )
            configs = ddb_response['Items']
        except Exception as error:
            raise RuntimeError('could not get configs (region_name={}, table_name={}): {}'.format(
                region_name,
                table_name,
                str(error),
            ))

        if len(configs) == 0:
            raise RuntimeError('unable to find any table items with is_current == true')

        return configs
| true
|
3df6186f8b9ce51265925bc60ff3615170c5d008
|
Python
|
carlacarov/d-wave-projects
|
/quantum-svm-toy.py
|
UTF-8
| 4,226
| 2.890625
| 3
|
[] |
no_license
|
import dimod
import neal
import numpy as np
import itertools as it
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import SVC
x = np.array([[-6,2],[-2,2],[2,2],[5,2]])
y = np.array([1,1,-1,-1])
plt.scatter(x[:, 0], x[:, 1], marker='o', c=y, s=25, edgecolor='k')
model = SVC(kernel="linear", C=2)
model.fit(x,y)
#plot taken from scikit documentation:
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = model.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])
# plot support vectors
sv = model.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k')
plt.show()
linear = {}
linear["a0(q0)"] = -1
linear["a0(q1)"] = -2
linear["a1(q0)"] = -1
linear["a1(q1)"] = -2
linear["a2(q0)"] = -1
linear["a2(q1)"] = -2
linear["a3(q0)"] = -1
linear["a3(q1)"] = -2
def dot_kernel(x_n, x_m):
    """Linear SVM kernel: dot product of two equal-length vectors.

    Generalized from the original hard-coded 2-component sum so it works
    for feature vectors of any dimension; identical results for the 2-D
    inputs used in this script.
    """
    return sum(a * b for a, b in zip(x_n, x_m))
quadratic = {}
quadratic[("a0(q0)","a1(q0)")] = y[0]*y[1]*dot_kernel(x[0],x[1])
quadratic[("a0(q0)","a1(q1)")] = 2*y[0]*y[1]*dot_kernel(x[0],x[1])
quadratic[("a0(q1)","a1(q0)")] = 2*y[0]*y[1]*dot_kernel(x[0],x[1])
quadratic[("a0(q1)","a1(q1)")] = 2*2*y[0]*y[1]*dot_kernel(x[0],x[1])
quadratic[("a0(q0)","a2(q0)")] = y[0]*y[2]*dot_kernel(x[0],x[2])
quadratic[("a0(q0)","a2(q1)")] = 2*y[0]*y[2]*dot_kernel(x[0],x[2])
quadratic[("a0(q1)","a2(q0)")] = 2*y[0]*y[2]*dot_kernel(x[0],x[2])
quadratic[("a0(q1)","a2(q1)")] = 2*2*y[0]*y[2]*dot_kernel(x[0],x[2])
quadratic[("a0(q0)","a3(q0)")] = y[0]*y[3]*dot_kernel(x[0],x[3])
quadratic[("a0(q0)","a3(q1)")] = 2*y[0]*y[3]*dot_kernel(x[0],x[3])
quadratic[("a0(q1)","a3(q0)")] = 2*y[0]*y[3]*dot_kernel(x[0],x[3])
quadratic[("a0(q1)","a3(q1)")] = 2*2*y[0]*y[3]*dot_kernel(x[0],x[3])
quadratic[("a1(q0)","a2(q0)")] = y[1]*y[2]*dot_kernel(x[1],x[2])
quadratic[("a1(q0)","a2(q1)")] = 2*y[1]*y[2]*dot_kernel(x[1],x[2])
quadratic[("a1(q1)","a2(q0)")] = 2*y[1]*y[2]*dot_kernel(x[1],x[2])
quadratic[("a1(q1)","a2(q1)")] = 2*2*y[1]*y[2]*dot_kernel(x[1],x[2])
quadratic[("a1(q0)","a3(q0)")] = y[1]*y[3]*dot_kernel(x[1],x[3])
quadratic[("a1(q0)","a3(q1)")] = 2*y[1]*y[3]*dot_kernel(x[1],x[3])
quadratic[("a1(q1)","a3(q0)")] = 2*y[1]*y[3]*dot_kernel(x[1],x[3])
quadratic[("a1(q1)","a3(q1)")] = 2*2*y[1]*y[3]*dot_kernel(x[1],x[3])
quadratic[("a2(q0)","a3(q0)")] = y[2]*y[3]*dot_kernel(x[2],x[3])
quadratic[("a2(q0)","a3(q1)")] = 2*y[2]*y[3]*dot_kernel(x[2],x[3])
quadratic[("a2(q1)","a3(q0)")] = 2*y[2]*y[3]*dot_kernel(x[2],x[3])
quadratic[("a2(q1)","a3(q1)")] = 2*2*y[2]*y[3]*dot_kernel(x[2],x[3])
# Option with an epsilon regularisation term that you set:
epsilon = 0
# Bug fix: these eight updates were written with '=+' (i.e. plain assignment
# of the unary-plus value), which silently OVERWROTE the linear QUBO
# coefficients built above instead of adding the regularisation term.
# '+=' accumulates it as intended (a no-op while epsilon == 0).
linear["a0(q0)"] += epsilon*(y[0]**2)
linear["a0(q1)"] += epsilon*(y[0]**2)*4
linear["a1(q0)"] += epsilon*(y[1]**2)
linear["a1(q1)"] += epsilon*(y[1]**2)*4
linear["a2(q0)"] += epsilon*(y[2]**2)
linear["a2(q1)"] += epsilon*(y[2]**2)*4
linear["a3(q0)"] += epsilon*(y[3]**2)
linear["a3(q1)"] += epsilon*(y[3]**2)*4
# Cross terms between the two binary digits of each multiplier; these keys
# are first assigned here, so plain '=' is correct.
quadratic[("a0(q0)","a0(q1)")] = epsilon*2*2*(y[0]**2)
quadratic[("a1(q0)","a1(q1)")] = epsilon*2*2*(y[1]**2)
quadratic[("a2(q0)","a2(q1)")] = epsilon*2*2*(y[2]**2)
quadratic[("a3(q0)","a3(q1)")] = epsilon*2*2*(y[3]**2)
bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0, 'BINARY')
sampler = neal.SimulatedAnnealingSampler()
num_iter = int(100)
sampleset = sampler.sample(bqm, num_reads=num_iter)
sampleset_iterator = sampleset.samples(num_iter)
print(sampleset)
classic_lagrange_multipliers = np.abs(model.dual_coef_)
print(classic_lagrange_multipliers)
lagrange_multipliers = {}
lagrange_multipliers["a0"] = sampleset_iterator[0]["a0(q0)"] + sampleset_iterator[0]["a0(q1)"]*2
lagrange_multipliers["a1"] = sampleset_iterator[0]["a1(q0)"] + sampleset_iterator[0]["a1(q1)"]*2
lagrange_multipliers["a2"] = sampleset_iterator[0]["a2(q0)"] + sampleset_iterator[0]["a2(q1)"]*2
lagrange_multipliers["a3"] = sampleset_iterator[0]["a3(q0)"] + sampleset_iterator[0]["a3(q1)"]*2
print(lagrange_multipliers)
| true
|
3877f8a3a04281f6476dfc3e09d244cf95204ec5
|
Python
|
linuxhenhao/python-scripts
|
/dpkgPackage.py
|
UTF-8
| 10,193
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
###################################################
#Using dpkg.log to get what packages are installed
#Because apt.log will miss packages dealed by dpkg
#command
###################################################
import os
import time
#packages that will be triggered every time
def dict_item_add(dic, package, datetime, version, state=None):
    """Append one package record to the parallel lists in *dic* (plus 'state' when given)."""
    for key, value in (('packages', package), ('datetimes', datetime), ('versions', version)):
        dic[key].append(value)
    if state is not None:
        dic['state'].append(state)
def dict_item_remove(dic, index, state=None):
    """Delete the record at *index* from the parallel lists in *dic* ('state' too when requested)."""
    keys = ['packages', 'datetimes', 'versions']
    if state is not None:
        keys.append('state')
    for key in keys:
        dic[key].pop(index)
def datetime2sec(string):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to epoch seconds (local time)."""
    return time.mktime(time.strptime(string, "%Y-%m-%d %H:%M:%S"))
def sec2datetime(sec):
    """Convert epoch seconds to a 'YYYY-MM-DD HH:MM:SS' string in local time.

    BUGFIX: the original called bare ``localtime`` although the module only
    does ``import time``, so every call raised NameError; qualify it through
    the ``time`` module.
    """
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(sec))
class Filter:
    """Time-windowed de-duplication filter for dpkg log events.

    Stores (package, timestamp, version) records under a named category.
    For the 'upgrade' category the version slot holds an (old, new) pair
    stored under 'versionChange'; every other category stores one version
    string under 'versions'. All lists are parallel: index i across them
    describes one record.
    """

    def __init__(self, name):
        self.name = name
        if self.name == 'upgrade':
            self.items = {'packages': list(), 'datetimes': list(), 'versionChange': list()}
        else:
            self.items = {'packages': list(), 'datetimes': list(), 'versions': list()}

    def add_item(self, package, datetime, version):
        """Insert a record, refreshing any matching record already present."""
        index = self.is_in_filter(package, datetime, version)
        if index is not None:  # already in this filter: overwrite in place
            self.items['datetimes'][index] = datetime
            if self.name == 'upgrade':
                self.items['versionChange'][index] = version
            else:
                self.items['versions'][index] = version
        else:  # not in filter: append a new record
            self.items['packages'].append(package)
            self.items['datetimes'].append(datetime)
            if self.name == 'upgrade':
                self.items['versionChange'].append(version)
            else:
                self.items['versions'].append(version)

    def delete_item(self, index):
        """Remove the record at *index* from every parallel list."""
        self.items['packages'].pop(index)
        self.items['datetimes'].pop(index)
        if self.name == 'upgrade':
            self.items['versionChange'].pop(index)
        else:
            self.items['versions'].pop(index)

    def index_of_package(self, package_name):
        """Return the record index of *package_name*, or None when absent."""
        # BUGFIX: the original bare ``except:`` also swallowed KeyboardInterrupt
        # and unrelated errors; list.index only raises ValueError when missing.
        try:
            return self.items['packages'].index(package_name)
        except ValueError:
            return None

    def is_in_filter(self, package_name, datetime, version):
        """Return the index of a version-matching record newer than 6000s, else None."""
        index = self.index_of_package(package_name)
        if index is not None and datetime - self.items['datetimes'][index] < 6000:
            if self.name == 'upgrade':
                # Upgrades match on the recorded *new* version.
                if self.items['versionChange'][index][1] == version:
                    return index
            else:
                if self.items['versions'][index] == version:
                    return index
        return None
def get_removed_packages(lines):
    """Scan dpkg.log removal-related lines.

    Returns a dict of parallel lists {'packages','datetimes','versions'}
    keeping, per package, the most recent removal event seen.
    """
    result_dict = {"packages": list(), "datetimes": list(), "versions": list()}

    def _record(package, datetime, version):
        # Keep only the latest event per package; first sighting is appended.
        try:
            index = result_dict['packages'].index(package)
            if result_dict['datetimes'][index] <= datetime:
                result_dict['datetimes'][index] = datetime
                result_dict['versions'][index] = version
        except ValueError:  # package not recorded yet
            dict_item_add(result_dict, package, datetime, version=version)

    len_of_lines = len(lines)
    for gcount, line in enumerate(lines):
        items = line.split(" ")
        datetime = datetime2sec(items[0] + " " + items[1])
        if items[2] == 'status':
            # 'status ... not-installed <pkg> <version>' line.
            _record(items[4], datetime, items[5].strip())
        elif items[2] == 'remove':
            # In some conditions only the 'remove' line appears with no
            # 'not-installed' status following it, so count it as removed too.
            if gcount + 1 < len_of_lines:
                next_items = lines[gcount + 1].split()
                if next_items[3] != 'not-installed':
                    _record(items[3], datetime, items[4])
            else:
                _record(items[3], datetime, items[4])
    return result_dict
def get_installed_packages(lines):
    """Parse pre-filtered dpkg.log lines and collect install events.

    Returns parallel lists {'state','packages','datetimes','versions'} where
    state is "new" or "upgrade". Three time-windowed Filter instances absorb
    'status installed' lines that merely echo an upgrade, a trigger run, or a
    removal, so those are not misreported as fresh installs.
    """
    filter_trigproc=Filter('trigproc')
    filter_upgrade=Filter('upgrade')
    filter_remove=Filter('remove')
    #filter_trigproc={'packages':list(),'datetimes':list(),'versions':list()}
    #filter_upgrade={'packages':list(),'datetimes':list(),'versionChange':list()}
    result_dict={"state":list(),"packages":list(),"datetimes":list(),"versions":list()}
    def add_to_result_dict(result_dict,package,datetime,version):
        # Update in place when the package is already recorded, else append as "new".
        try:
            index=result_dict['packages'].index(package)
            result_dict['versions'][index]=version
            result_dict['datetimes'][index]=datetime
        except:
            #really new package
            dict_item_add(result_dict,package,datetime,version,state="new")
    len_of_lines=len(lines)
    gcount=-1
    for line in lines:
        gcount+=1
        items=line.split(" ")
        # First two fields are date and time, e.g. "2020-01-01 12:00:00".
        datetime=datetime2sec(items[0]+" "+items[1])
        if(items[2]=="status"): #installed packages,should filt before put in quene
            package=items[4]
            version=items[5].strip()
            # Check the filters in priority order: remove > upgrade > trigproc.
            index=filter_remove.is_in_filter(package,datetime,version)
            if(index!=None):#in filter remove
                filter_remove.delete_item(index)
                continue
            index=filter_upgrade.is_in_filter(package,datetime,version)
            if(index!=None):#in filter upgrade
                filter_upgrade.delete_item(index)
                dict_item_add(result_dict,package,datetime,version,state="upgrade")
                continue
            index=filter_trigproc.is_in_filter(package,datetime,version)
            if(index!=None):#in filter trigproc
                filter_trigproc.delete_item(index)
                continue
            add_to_result_dict(result_dict,package,datetime,version)
        elif(items[2]=='install'):
            # Count the 'install' line only when the next line is not the
            # matching 'status installed' echo (which will be counted there).
            if(gcount+1<len_of_lines):
                next_items=lines[gcount+1].split()
                if(next_items[3]!='installed'):
                    package=items[3]
                    version=items[4]
                    add_to_result_dict(result_dict,package,datetime,version)
            else:
                package=items[3]
                version=items[4]
                add_to_result_dict(result_dict,package,datetime,version)
        elif(items[2]=="upgrade"):
            package=items[3]
            version_old=items[4].strip()
            version_new=items[5].strip()
            #add to filter
            filter_upgrade.add_item(package,datetime,(version_old,version_new))
        elif(items[2]=="trigproc"):
            package=items[3].strip()
            version=items[4].strip()
            filter_trigproc.add_item(package,datetime,version)
        elif(items[2]=="remove"):
            package=items[3].strip()
            version=items[4].strip()
            filter_remove.add_item(package,datetime,version)
    return result_dict
# ---- Script entry: mine dpkg.log for the packages currently installed ----
log_file="/var/log/dpkg.log"
# Pre-filter the log with grep: install-related lines and remove-related lines.
installed_results=os.popen("grep -Ea '(\ installed|trigproc|upgrade|remove|install)' "+log_file)
removed_results=os.popen("grep -Ea '(\ not-installed|remove)' "+log_file)
installed_lines=installed_results.readlines()
removed_lines=removed_results.readlines()
installed_packages_dict=get_installed_packages(installed_lines)
removed_packages_dict=get_removed_packages(removed_lines)
#debug
dict_count=len(installed_packages_dict['packages'])
for i in range(dict_count):
    if(installed_packages_dict['packages'][i].find("linux-modules-4.2-rc7")!=-1):
        print(("DEBUG"+installed_packages_dict['packages'][i],installed_packages_dict['versions'][i],installed_packages_dict['datetimes'][i]))
#end debug
# Drop every package whose latest removal is newer than its latest install.
removed_index=-1
for removed_packages_name in removed_packages_dict['packages']:
    removed_index+=1
    #debug
    if(removed_packages_name.find("linux-modules-4.2")!=-1):
        print(("DEBUG remove",removed_packages_dict['packages'][removed_index],removed_packages_dict['versions'][removed_index],removed_packages_dict['datetimes'][removed_index]))
    #end debug
    try:
        index=installed_packages_dict['packages'].index(removed_packages_name)
        if(removed_packages_dict['datetimes'][removed_index]>=installed_packages_dict['datetimes'][index]):
            #removed datetime > datetime
            dict_item_remove(installed_packages_dict,index,state=True)
    except:
        print(("removed package "+removed_packages_name+" not found in installed_packages_list"))
#dict to list
count=len(installed_packages_dict['packages'])
installed_packages_list=list()
for i in range(count):
    # Each row: [state, package, datetime, version].
    installed_packages_list.append([installed_packages_dict['state'][i],installed_packages_dict['packages'][i],installed_packages_dict['datetimes'][i],installed_packages_dict['versions'][i]])
#now installed packages became the real installed packages in the system now
# NOTE(review): this comparator is unused — the sort below uses a key lambda.
def compare_package_line_by_date(line1,line2):
    if(line1[2]==line2[2]):
        return 0
    elif(line1[2]>line2[2]):
        return 1
    else:
        return -1
installed_packages_list.sort(key=lambda li: li[2])
count=len(installed_packages_list)
print(installed_packages_list[0][0],installed_packages_list[0][1])
# Print packages grouped by install session: a gap of more than 600 seconds
# between consecutive events starts a new (blank-line separated) group.
for i in range(count-1):
    t1=installed_packages_list[i][2]
    t2=installed_packages_list[i+1][2]
    if(t2-t1>600):
        print('\n')
    print("{} {}".format(installed_packages_list[i+1][0],installed_packages_list[i+1][1]))
| true
|
70c53241bed1fe676f5068bef2db226f237e6723
|
Python
|
militska/coursera-soup
|
/index.py
|
UTF-8
| 948
| 3.40625
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
def exec():
    """Crawl Coursera's course directory pages and print basic info per course.

    NOTE: shadows the built-in ``exec``; the name is kept for backward
    compatibility with the module-level call.
    """
    for page_number in range(1, 92):
        req = requests.get('https://www.coursera.org/directory/courses?page=' + str(page_number))
        html = req.text
        soup = BeautifulSoup(html, 'lxml')
        list_courses = soup.find('div', {"class": "rc-LinksContainer"})
        # BUGFIX: the original passed the container Tag as the *attrs* argument
        # of soup.find_all('li', ...); search for <li> items *inside* the
        # container instead.
        items = list_courses.find_all('li')
        for item in items:
            print(item)
            print(item.find('a')['href'])
            print("Название курса ")
            print(item.find('a').contents)
            print_base_info(item)
def print_base_info(course):
    """Fetch one course's page and print its description block."""
    course_url = 'https://www.coursera.org' + course.find('a')['href']
    page = requests.get(course_url)
    course_soup = BeautifulSoup(page.text, 'lxml')
    description = course_soup.find('div', {"class": "content-inner"})
    print("Описание курса ")
    print(description)
# Entry point: starts crawling immediately on import/execution (calls the
# local exec() defined above, not the builtin).
exec()
| true
|
00fed5caf9eafd6d6a582c3e47b83b4beccbe12d
|
Python
|
axelkennedal/kexjobbet
|
/playground/machine_learning/SVM/breast_cancer_custom.py
|
UTF-8
| 536
| 2.875
| 3
|
[] |
no_license
|
import pandas as pd
import SVM
import numpy as np
# Load the Wisconsin breast-cancer dataset; '?' marks missing values.
df = pd.read_csv('../test_data/breast-cancer-wisconsin.data')
# Replace missing values with a large-magnitude outlier instead of dropping rows.
df.replace('?', -9999, inplace=True)
df.drop(['id'], 1, inplace=True)
# NOTE(review): fullData is built but never used below — presumably meant for
# training on the real dataset later; confirm before removing.
fullData = df.astype(float).values.tolist()
# Toy, linearly separable training set: class -1 vs class +1 points.
dataDict = { -1: np.array([[1, 7],
             [2, 8],
             [3, 8],]),
             1: np.array([[5, 1],
             [6, -1],
             [7, 3],])}
svm = SVM.SVM()
svm.fit(dataDict)
# Classify two unseen points, then plot the decision boundary.
svm.predict([2, 5])
svm.predict([8, 5])
svm.visualize()
| true
|
b809ff8e39cae08874b69ae0b7dcd9a66ca15cfb
|
Python
|
skaurl/baekjoon-online-judge
|
/1105.py
|
UTF-8
| 233
| 2.828125
| 3
|
[] |
no_license
|
# BOJ 1105: compare n and m digit by digit from the left, counting shared '8'
# digits until the first mismatch; different lengths yield 0.
n, m = input().split()
ret = 0
if len(n) == len(m):
    for a, b in zip(n, m):
        if a != b:
            break
        if a == '8':
            ret += 1
# BUGFIX: the original printed twice when the lengths differed (a print(0)
# branch followed by an unconditional print(ret)); emit the answer exactly once.
print(ret)
| true
|
a189183c02c65cbd431c0a0955827b1bbb2576dd
|
Python
|
prathamesh-mutkure/python-learn
|
/GUI/turtle_events/main.py
|
UTF-8
| 609
| 4.15625
| 4
|
[] |
no_license
|
from turtle import Turtle, Screen
def move_forwards():
    """Advance the turtle 10 units along its current heading (bound to 'w')."""
    turtle.forward(10)
def move_backwards():
    """Move the turtle 10 units backwards (bound to 's')."""
    turtle.backward(10)
def move_clockwise():
    """Rotate the turtle 5 degrees clockwise (bound to 'd')."""
    current_heading = turtle.heading()
    turtle.setheading(current_heading - 5)
def move_counter_clockwise():
    """Rotate the turtle 5 degrees counter-clockwise (bound to 'a')."""
    current_heading = turtle.heading()
    turtle.setheading(current_heading + 5)
def clear():
    """Erase all drawing and return the turtle to the origin (bound to 'c')."""
    turtle.clear()
    # Lift the pen so the trip home leaves no trace, then lower it again.
    turtle.penup()
    turtle.home()
    turtle.pendown()
turtle = Turtle()
screen = Screen()
# Route keyboard events to this window.
screen.listen()
# WASD movement bindings plus 'c' to clear the canvas.
screen.onkey(move_forwards, "w")
screen.onkey(move_backwards, "s")
screen.onkey(move_counter_clockwise, "a")
screen.onkey(move_clockwise, "d")
screen.onkey(clear, "c")
# Keep the window open until it is clicked.
screen.exitonclick()
| true
|
a9ac6e9cf6a4c446a53f02a72a1976b1962eae84
|
Python
|
lopezpdvn/pydsalg
|
/pydsalg/datastruct/heap.py
|
UTF-8
| 742
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
def heapsort0(a):
    """Sort list *a* in place, ascending, using a binary max-heap."""

    def sift_down(heap, root, size):
        # Push heap[root] down until the max-heap property holds beneath it.
        while True:
            child = 2 * root + 1
            if child >= size:
                return
            # Pick the larger of the two children.
            if child + 1 < size and heap[child] < heap[child + 1]:
                child += 1
            if heap[root] < heap[child]:
                heap[root], heap[child] = heap[child], heap[root]
                root = child
            else:
                return

    # Build the max-heap bottom-up, starting from the last parent node.
    size = len(a)
    for start in range((size - 2) // 2, -1, -1):
        sift_down(a, start, size)

    # Repeatedly move the current maximum to the end and shrink the heap.
    for end in range(size - 1, 0, -1):
        a[0], a[end] = a[end], a[0]
        sift_down(a, 0, end)
| true
|
bf4f9445d1c66fcd633abdc346b98da7c03a2a6c
|
Python
|
MuskanValmiki/Dictionary
|
/w3_Q23.py
|
UTF-8
| 338
| 3.4375
| 3
|
[] |
no_license
|
# Total the 'amount' per item name across a list of {'item', 'amount'} records.
item_list=[{'item': 'item1','amount': 400}, {'item': 'item2', 'amount': 300}, {'item': 'item1', 'amount': 750},{'item':'item2','amount':100}]
d1 = {}
for entry in item_list:
    # Accumulate per exact item name. This avoids the original's shadowing of
    # the built-in `sum`, its fragile substring test (`"item1" in ...`), and
    # generalizes beyond the hard-coded item1/item2 pair while producing the
    # same result for this data.
    d1[entry['item']] = d1.get(entry['item'], 0) + entry['amount']
print(d1)
| true
|
12dba68f7c98a23ffd533221f9d7efb039c62fa7
|
Python
|
chloeward00/Python-Flask-Web-Apps
|
/Book-Reviewing-Web-App/final-flaskVersion/app.py
|
UTF-8
| 9,252
| 2.609375
| 3
|
[] |
no_license
|
import json
from flask import Flask, render_template, url_for, request, redirect, flash
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_login import login_user, current_user, logout_user, LoginManager, UserMixin
# Forms Page take classes from file formsPage
from formsPage import LoginForm, RegistrationForm
app = Flask(__name__)
# this secret key is needed for regestering and loggin in users
# SECURITY NOTE(review): the secret key is hard-coded in source; load it from
# the environment or a config file kept out of version control.
app.config["SECRET_KEY"] = "33ab45e64b227041277b2c71483cc154"
login_manager = LoginManager(app)
# JSON dataset for books, reviews, and group members
# Books from: https://github.com/bvaughn/infinite-list-reflow-examples/blob/master/books.json
books = "amazonBooks.json"
reviews = "reviews.json"
members = "members.json"
# storing bookset in data variable (loaded once at import time)
with open(books, "r") as jsonf:
    bookDataset = json.load(jsonf)
# storing reviews in data variable
with open(reviews, "r") as reviewf:
    reviewSet = json.load(reviewf)
# storing member data in data variable
with open(members, "r") as membersf:
    memberSet = json.load(membersf)
# route to home page
@app.route("/")
def index():
    """Home page: render the landing template with the full book dataset."""
    home_page = render_template("index.html", books=bookDataset)
    return home_page
# route to search results page
@app.route('/searchResults', methods=['GET', "POST"])
def results():
    """Search books by tag: a book matches when any query word is among its tags."""
    if request.method != 'POST':
        # handling "contact us" in footer: plain GETs bounce back home
        return redirect("/")
    query = request.form.get('query')
    # Lower-cased query words to compare against each book's tag set.
    query_words = set(query.lower().split())
    matching_books = [b for b in bookDataset if query_words.intersection(set(b["tags"]))]
    return render_template('results.html', query=query, matches=matching_books, books=bookDataset)
# route for displaying all books
@app.route("/books")
def viewBooks():
    """Catalogue page: list every book in the dataset."""
    catalogue_page = render_template("books.html", books=bookDataset)
    return catalogue_page
# route for displaying a book with reviews
@app.route('/books/book/<int:bid>', methods=['GET', 'POST'])
def book(bid):
    """Render a single book page (looked up by bookID) with all reviews.

    NOTE(review): if no book matches *bid* the loop falls through and the
    *last* book in the dataset is rendered (NameError on an empty dataset) —
    confirm whether a 404 was intended here.
    """
    for b in bookDataset:
        if bid == b["bookID"]:
            break
    return render_template("book.html", book=b, reviews=reviewSet, bID=bid)
# route for displaying "About Us" info
@app.route('/about')
def about_page():
    """'About Us' page built from the member dataset."""
    page = render_template("about.html", memberData=memberSet)
    return page
# route for displaying "Contact Us" info
@app.route('/contact')
def contact_us():
    """'Contact Us' page built from the member dataset."""
    page = render_template("contact.html", memberData=memberSet)
    return page
@app.route('/contact_submit')
def contact_submit():
    """Confirmation page shown after the contact form is submitted."""
    page = render_template("contact_submit.html", memberData=memberSet)
    return page
# creating file called "forums.db", storing forum posts in database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///forums.db'
# Single shared SQLAlchemy handle; the models below register against it.
db = SQLAlchemy(app)
# creation of database
class ForumPost(db.Model):
    """One forum post row; listings are ordered by date_posted."""
    # making forum posts unique via id, and setting other attributes
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    content = db.Column(db.Text, nullable=False)
    # if author not specified, set default as N/A
    author = db.Column(db.String(20), nullable=False, default='N/A')
    # sets date at which post was made
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # print out whenever we create new forum post so we recognise it
    def __repr__(self):
        return 'Forum post ' + str(self.id)
# route for displaying forum posts (Jedi and Sith Archive)
@app.route('/forums', methods=['GET', 'POST'])
def forums():
    """List every forum post; on POST, create a new post from the form."""
    if request.method == 'POST':
        # Persist the submitted post, then redirect (POST/redirect/GET).
        submitted = ForumPost(
            title=request.form['title'],
            content=request.form['content'],
            author=request.form['author'],
        )
        db.session.add(submitted)
        # Commit so the post survives process restarts.
        db.session.commit()
        return redirect('/forums')
    else:
        # GET: show all posts ordered by creation time.
        all_posts = ForumPost.query.order_by(ForumPost.date_posted).all()
        return render_template('forums.html', posts=all_posts)
# route for creating a new post
@app.route('/forums/new', methods=['GET', 'POST'])
def newPost():
    """Show the new-post form (GET) or create the post (POST).

    BUGFIX: the original assigned to attributes of an undefined ``post``
    object and then read undefined ``post_title``/``post_content``/
    ``post_author`` names, so every POST raised NameError. Read the form
    fields into locals and build the ForumPost from them.
    """
    if request.method == 'POST':
        post_title = request.form['title']
        post_author = request.form['author']
        post_content = request.form['content']
        new_post = ForumPost(title=post_title, content=post_content, author=post_author)
        db.session.add(new_post)
        db.session.commit()
        return redirect('/forums')
    else:
        return render_template('newPost.html')
# route for editing a forum post (editing means we POST)
@app.route('/forums/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Edit an existing forum post: show the form on GET, save changes on POST."""
    # 404s when no post has this id, so we never operate on a missing row.
    post = ForumPost.query.get_or_404(id)
    if request.method != 'POST':
        # GET: pre-populate the edit form with the current post.
        return render_template('editPost.html', post=post)
    # POST: overwrite the stored fields with the submitted values.
    post.title = request.form['title']
    post.author = request.form['author']
    post.content = request.form['content']
    db.session.commit()
    return redirect('/forums')
# deletes forum post from database based on id
@app.route('/forums/delete/<int:id>')
def delete(id):
    """Delete the forum post with the given id, then return to the post list."""
    # 404 when the id is unknown (don't want it to break on bad ids).
    doomed = ForumPost.query.get_or_404(id)
    db.session.delete(doomed)
    db.session.commit()
    return redirect('/forums')
# creation of account database
class User(db.Model, UserMixin):
    """Registered account; UserMixin supplies the Flask-Login session hooks."""
    # making forum posts unique via id, and setting other attributes
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.Text, unique=True, nullable=False)
    # SECURITY NOTE(review): passwords are stored in plain text; hash them
    # (e.g. werkzeug.security.generate_password_hash) before persisting.
    password = db.Column(db.String(60), nullable=False)
    # print out whenever we create new forum post so we recognise it
    def __repr__(self):
        return f"User('{self.username}', {self.email})"
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: map the session's stored id back to a User row."""
    return User.query.get(int(user_id))
# redirects user to a login form
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log a user in by matching username, email and password exactly.

    SECURITY NOTE(review): passwords are stored and compared in plain text;
    they should be hashed (flagged here, not changed).
    """
    if current_user.is_authenticated:
        return redirect(url_for("index"))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        # FIX: use short-circuiting boolean `and` instead of the original
        # bitwise `&` chain (which only worked by accident on booleans).
        if (user is not None
                and user.username == form.username.data
                and user.email == form.email.data
                and user.password == form.password.data):
            login_user(user, remember=False)
            flash(f"Welcome Back {form.username.data}!", "success")
            return redirect(url_for("index"))
        else:
            flash(f"We cannot find your information in the database please try again", "danger")
    return render_template("login.html", form=form)
# redirects user to a registeration form
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account after checking username/email uniqueness.

    BUGFIX: the original compared ``User.query.filter_by(...)`` (a Query
    object, which is never None) with ``!= None``, so the duplicate check was
    always true and registration could never succeed. Execute the query with
    .first() and test the returned row instead.
    SECURITY NOTE(review): passwords are stored in plain text — hash them.
    """
    if current_user.is_authenticated:
        return redirect(url_for("index"))
    form = RegistrationForm()
    if form.validate_on_submit():
        username_taken = User.query.filter_by(username=form.username.data).first() is not None
        email_taken = User.query.filter_by(email=form.email.data).first() is not None
        if username_taken or email_taken:
            flash(f"Sorry the username/email you have chosen have already been used please try again with a different username/email", "danger")
        else:
            user = User(username=form.username.data, email=form.email.data, password=form.password.data)
            db.session.add(user)
            db.session.commit()
            login_user(user, remember=False)
            flash(f"You have sucessfully created an account {form.username.data}!", "success")
            return redirect(url_for("index"))
    return render_template("register.html", form=form)
@app.route("/logout", methods=["GET", "POST"])
def logOut():
    """End the current session and bounce back to the home page."""
    logout_user()
    flash(f"You have sucessfully logged out of your account", "success")
    return redirect(url_for("index"))
# Ensure the database tables exist, then start the development server.
if __name__ == '__main__':
    # BUGFIX: the original tested os.path.exists('db.sqlite') while the
    # configured database file is 'forums.db', so the guard never matched the
    # real database. create_all() is idempotent (it only creates missing
    # tables), so call it unconditionally.
    db.create_all()
    app.run(debug=True)
| true
|
c737e0fd6f36383dc04349246f7e0accec6bdff4
|
Python
|
navjotk/error_propagation
|
/plot_gradient_multi_nt.py
|
UTF-8
| 2,665
| 2.890625
| 3
|
[] |
no_license
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import tikzplotlib
import click
@click.command()
@click.option('--gradient-results-file', help='File containing gradient results')
@click.option('--direct-results-file', default="direct_compression_results.csv", help='File containing direct compression results')
@click.option('--plot-tol/--no-plot-tol', default=True)
@click.option('--plot-cf/--no-plot-cf', default=True)
def draw_plots(gradient_results_file, direct_results_file="direct_compression_results.csv", plot_tol=True, plot_cf=True):
    """Plot gradient angular error vs. compression tolerance and/or factor.

    Reads per-checkpoint-count (tn = 1000/2000/4000) angle series from the
    gradient results CSV and writes TikZ figures via tikzplotlib.
    """
    gradient_results = pd.read_csv(gradient_results_file)
    # One (tolerance, angle) series per checkpoint count, sorted by tolerance.
    results_1000 = gradient_results[gradient_results["tn"]==1000][["tolerance", "angle"]].sort_values("tolerance")
    results_2000 = gradient_results[gradient_results["tn"]==2000][["tolerance", "angle"]].sort_values("tolerance")
    results_4000 = gradient_results[gradient_results["tn"]==4000][["tolerance", "angle"]].sort_values("tolerance")
    if plot_tol:
        plt.clf()
        plt.plot(results_1000["tolerance"], results_1000["angle"], "r", label="NT=1000")
        plt.plot(results_2000["tolerance"], results_2000["angle"], "g--", label="NT=2000")
        plt.plot(results_4000["tolerance"], results_4000["angle"], "b:", label="NT=4000")
        plt.xscale('log')
        plt.xlabel("atol")
        plt.ylabel("Angle with perfect gradient (radians)")
        plt.title("Angular deviation of gradient with increasing checkpoint compression")
        plt.legend()
        tikzplotlib.save("gradient_angle_atol.tex")
    if plot_cf:
        direct_results = pd.read_csv(direct_results_file)
        def get_cf_for_tolerance(atol):
            # Map a tolerance to its compression-factor row.
            # NOTE(review): values outside 1..16 are treated as 10**-k
            # tolerances and converted to the integer exponent k before the
            # positional iloc lookup — confirm the CSV row layout matches.
            if atol not in range(1, 17):
                atol = - int(np.log10(atol))
            return direct_results.iloc[atol]['cf']
        results_1000["cf"] = [get_cf_for_tolerance(x) for x in results_1000["tolerance"]]
        results_2000["cf"] = [get_cf_for_tolerance(x) for x in results_2000["tolerance"]]
        results_4000["cf"] = [get_cf_for_tolerance(x) for x in results_4000["tolerance"]]
        plt.clf()
        plt.plot(results_1000["cf"], results_1000["angle"], "r", label="NT=1000")
        plt.plot(results_2000["cf"], results_2000["angle"], "g--", label="NT=2000")
        plt.plot(results_4000["cf"], results_4000["angle"], "b:", label="NT=4000")
        plt.xscale('log')
        plt.xlabel("cf")
        plt.ylabel("Angle with perfect gradient (radians)")
        plt.title("Angular deviation of gradient with increasing checkpoint compression")
        plt.legend()
        tikzplotlib.save("gradient_angle_cf.tex")
# Click parses the CLI options and invokes the command when run as a script.
if __name__ == '__main__':
    draw_plots()
| true
|
1ec1dec68afb675a743eaefbfa7535addb0b2ed5
|
Python
|
YOOY/leetcode_notes
|
/problem/permutations.py
|
UTF-8
| 848
| 3.375
| 3
|
[] |
no_license
|
from copy import deepcopy
def permute(nums):
    """Return every permutation of *nums* via the backtracking helper."""
    collected = []
    backtracking(nums, [], len(nums), collected)
    return collected
def backtracking(nums, container, limit, result):
    """Grow *container* one unused element at a time; record full permutations in *result*."""
    if len(container) == limit:
        # Snapshot the current permutation before it is mutated on unwind.
        result.append(deepcopy(container))
        return container
    for candidate in nums:
        if candidate in container:
            continue
        container.append(candidate)
        backtracking(nums, container, limit, result)
        container.remove(candidate)
def permute_dfs(nums):
    """Return every permutation of *nums* using the slicing DFS helper."""
    permutations = []
    dfs(nums, [], permutations)
    return permutations
def dfs(nums, path, res):
    """Record *path* when *nums* is exhausted; otherwise branch on each remaining element."""
    if not nums:
        res.append(path)
        # no early return needed: the loop below is a no-op for empty *nums*
    i = 0
    while i < len(nums):
        print(f"i is {i} / nums is {nums} / path = {path}")
        remaining = nums[:i] + nums[i + 1:]
        dfs(remaining, path + [nums[i]], res)
        i += 1
# Demo: both implementations on [1, 2, 3]; dfs also traces its recursion.
print(permute([1,2,3]))
print(permute_dfs([1,2,3]))
| true
|
b091773ab37b6f0e9134226192d1610b0dd5cd38
|
Python
|
mich2k/OOP
|
/Python/M2/set.py
|
UTF-8
| 1,027
| 3.609375
| 4
|
[] |
no_license
|
def basics():
    """Demonstrate core set operations: literals, add/update, discard vs remove, frozenset."""
    print(type({}))   # {} is the dict literal, so this prints the dict type
    print(type({1}))  # a non-empty brace literal is a set
    sample = {1}
    sample.add(2)
    # update() preserves uniqueness: the duplicate 2 is absorbed silently.
    sample.update([2, 4, 5])
    print(sample)
    # discard() is the forgiving removal: a missing element is ignored.
    sample.discard(2)
    try:
        sample.remove(2)  # remove() raises KeyError when the element is absent
    except KeyError:
        print('the set remove method returned an error indeed')
    # Sets shine for ensemble operations (union, intersection, differences).
    # frozenset is the immutable variant, usable e.g. as a dict key.
    immutable_sample = frozenset([1, 2, 3])
    return
def main_set():
    """Entry point for the set demo: print a header and run the walkthrough."""
    print('Set M2')
    # Key set properties:
    #  - elements must be immutable (hashable)
    #  - insertion order is not preserved
    #  - membership ('in') is O(1), far faster than on lists
    #  - the container is mutable but never keeps duplicates
    basics()
    return
| true
|
74844e5fa28ee916ebf58e517729ff860d5fc9c3
|
Python
|
Oscaria/stock-prediction
|
/代码/个股走势预测/newsMatrix.py
|
UTF-8
| 2,056
| 2.609375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 24 22:35:04 2019
@author: wuzix
"""
import pandas as pd
import numpy as np
import json
import csv
# Build a per-day event-count matrix ("news matrix") from classifier predictions.
labels = json.loads(open('../新闻事件分类器/训练结果/trained_results_1594750510/labels.json').read())
#file='D:\\毕业设计\\代码\\newtry\\新闻事件预测\\格力新闻矩阵.csv'
# CSV header: a date column followed by one column per event label.
col=[]
col.append('日期')
for i in range (len(labels)):
    col.append(labels[i])
#file='D:\\毕业设计\\代码\\newtry\\预测结果\\predicted_results_1582632892\\predictions_all_2020-03-04.csv'
#cf=pd.read_csv(file,encoding='utf-8',sep='|')
#date=cf['Date'][0]
#print(date)
#df.to_csv('D:\\毕业设计\\代码\\newtry\\新闻事件预测\\中兴新闻矩阵.csv')
with open('../新闻事件分类器/预测结果/predicted_results_1594750510/predictions_all_2020-07-15-3.csv','r',encoding='utf-8') as f :
    reader =pd.read_csv(f,sep='|') # originally sep = ','
    # BUGFIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
    # use positional .iloc for column access instead.
    date=reader.iloc[:,2]
    descript=reader.iloc[:,1]
    predicted=reader.iloc[:,0]
    length=len(date)
    alldate=[]
    alldate.append(date[0])
    numdate=[]
    numdate.append(0)
    save=[]
    with open ('./中兴新闻矩阵_3.csv','a+',newline='', encoding = 'utf-8')as cf:
        writer=csv.writer(cf)
        writer.writerow(col)
        # Record the row index at which each new date starts.
        for i in range(1,length):
            if date[i]!=date[i-1]:
                alldate.append(date[i])
                numdate.append(i)
        # Count predicted labels within each date's row range.
        # NOTE(review): rows from numdate[-1] to the end (the final date) are
        # never counted — confirm whether the last day should be included.
        for k in range(1,len(numdate)):
            count=[0]*len(labels)
            a=numdate[k-1]
            b=numdate[k]
            for i in range(a,b):
                for j in range(len(labels)):
                    if predicted[i]==labels[j]:
                        count[j]+=1
            save.append(count)
        # Write one row per date: [date, count_per_label...].
        for i in range(0,len(save)):
            total=[]
            total.append(alldate[i])
            for j in range(len(save[i])):
                total.append(save[i][j])
            writer.writerow(total)
| true
|
c64eb9f1a53459d7dfe298a1f018bce3ae9280b3
|
Python
|
bhavana10/InstaBot
|
/instabot.py
|
UTF-8
| 19,088
| 2.984375
| 3
|
[] |
no_license
|
import requests,urllib
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
'''
GLOBAL VARIABLE TO STORE BASE URL AND ACCESS TOKEN OF THE USER.
'''
# SECURITY NOTE(review): a live OAuth access token is hard-coded in source;
# it should be revoked and loaded from the environment instead.
APP_ACCESS_TOKEN = '1718452521.5a7caf4.ff77bf36d87b4737911ee28c72a5c190'
# Root endpoint of the (legacy) Instagram REST API v1.
BASE_URL = 'https://api.instagram.com/v1/'
'''
FUNCTION DECLARATION TO FETCH OWN DETAILS. IT DO NOT ACCEPT ANY INPUT PARAMETER.
'''
def self_info():
    '''Fetch and print the authenticated user's profile counters (Python 2).'''
    request_url = (BASE_URL + 'users/self/?access_token=%s') % (APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    # get call to fetch user details
    user_info = requests.get(request_url).json()
    if user_info['meta']['code'] == 200:
        if len(user_info['data']):
            print 'Username: %s' % (user_info['data']['username'])
            print 'No. of followers: %s' % (user_info['data']['counts']['followed_by'])
            print 'No. of people you are following: %s' % (user_info['data']['counts']['follows'])
            print 'No. of posts: %s' % (user_info['data']['counts']['media'])
        else:
            print 'User does not exist!'
    else:
        print 'Status code other than 200 received!'
'''
FUNCTION TO GET THE USER ID FROM THE GIVEN USERNAME.
'''
def get_user_id( insta_username ) :
    '''Return the Instagram user id for *insta_username*, or None when no match.

    Exits the process on a non-200 API response.
    '''
    request_url = (BASE_URL + 'users/search?q=%s&access_token=%s') % ( insta_username, APP_ACCESS_TOKEN)
    user_info = requests.get(request_url).json()
    if user_info['meta']['code'] == 200:
        if len(user_info['data']):
            # Search may return several candidates; take the first match.
            return user_info['data'][0]['id']
        else:
            return None
    else:
        print 'Status code other than 200 received!'
        exit()
'''
FUNCTION DECLARATION TO FETCH DETAILS OF THE GIVEN USERNAME. IT ACCEPT USERNAME AS INPUT PARAMETER.
'''
def get_user_info(insta_username):
    '''Print profile counters for the given username; exits if the user is unknown.'''
    user_id = get_user_id(insta_username)
    if user_id == None:
        print 'User does not exist!'
        exit()
    request_url = (BASE_URL + 'users/%s?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    user_info = requests.get(request_url).json()
    if user_info['meta']['code'] == 200:
        if len(user_info['data']):
            print 'Username: %s' % (user_info['data']['username'])
            print 'No. of followers: %s' % (user_info['data']['counts']['followed_by'])
            print 'No. of people you are following: %s' % (user_info['data']['counts']['follows'])
            print 'No. of posts: %s' % (user_info['data']['counts']['media'])
        else:
            print 'There is no data for this user!'
    else:
        print 'Status code other than 200 received!'
'''
FUNCTION DECLARATION TO GET THE ID OF THE RECENT POST OF A USER BY USERNAME
'''
def get_post_id(insta_username):
    '''Return the media id of the user's most recent post; exits on any failure.'''
    user_id = get_user_id(insta_username)
    if user_id == None:
        print 'User does not exist!'
        exit()
    request_url = (BASE_URL + 'users/%s/media/recent/?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
    #print 'GET request url : %s' % (request_url)
    user_media = requests.get(request_url).json()
    if user_media['meta']['code'] == 200:
        if len(user_media['data']):
            # Most recent post is first in the 'data' list.
            return user_media['data'][0]['id']
        else:
            print 'There is no recent post of the user!'
            exit()
    else:
        print 'Status code other than 200 received!'
        exit()
'''
FUNCTION DECLARATION TO GET YOUR RECENT POST
'''
def get_own_post():
    '''Download the authenticated user's most recent post image to <media_id>.jpeg.'''
    request_url = (BASE_URL + 'users/self/media/recent/?access_token=%s') % (APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    own_media = requests.get(request_url).json()
    if own_media['meta']['code'] == 200:
        if len(own_media['data']):
            image_name = own_media['data'][0]['id'] + '.jpeg'
            image_url = own_media['data'][0]['images']['standard_resolution']['url']
            # Save the image into the working directory named by its media id.
            urllib.urlretrieve(image_url, image_name)
            print 'Your image has been downloaded!'
            print "Recent post id: " + own_media['data'][0]['id']
        else:
            print 'Post does not exist!'
    else:
        print 'Status code other than 200 received!'
'''
FUNCTION DECLARATION TO GET THE RECENT POST OF A USER BY USERNAME
'''
def get_user_post(insta_username):
    '''Download the most recent post image of *insta_username* to <media_id>.jpeg.'''
    user_id = get_user_id(insta_username)
    if user_id == None:
        print 'User does not exist!'
        exit()
    request_url = (BASE_URL + 'users/%s/media/recent/?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    user_media = requests.get(request_url).json()
    if user_media['meta']['code'] == 200:
        if len(user_media['data']):
            # NOTE(review): get_post_id re-fetches the same endpoint just to
            # echo the id — a redundant network round trip.
            media_id = get_post_id(insta_username)
            image_name = user_media['data'][0]['id'] + '.jpeg'
            image_url = user_media['data'][0]['images']['standard_resolution']['url']
            #download the recent post
            urllib.urlretrieve(image_url, image_name)
            print 'Your image has been downloaded!'
            print 'Recent Post ID: ' + media_id
        else:
            print 'Post does not exist!'
    else:
        print 'Status code other than 200 received!'
'''
FUNCTION SEARCH A POST HAVING MINIMUM LIKE OF A USER
'''
def post_with_min_like(insta_username):
user_id = get_user_id(insta_username)
if user_id == None:
print 'User does not exist!'
exit()
request_url = (BASE_URL + 'users/%s/media/recent/?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
print 'GET request url : %s' % (request_url)
user_media = requests.get(request_url).json()
if user_media['meta']['code'] == 200:
if len(user_media['data']):
min = user_media['data'][0]['likes']['count']
for i in range(len(user_media['data'])):
if min > user_media['data'][i]['likes']['count']:
min = user_media['data'][i]['likes']['count']
pos= i
print "Likes on post : %d" %user_media['data'][pos]['likes']['count']
print "Image URL :" + user_media['data'][pos]['images']['standard_resolution']['url']
print "Post ID : " + user_media['data'][pos]['id'] + "\n"
else:
print 'There is no post of the user!'
exit()
else:
print 'Status code other than 200 received!'
exit()
'''
FUNCTION TO SEARCH A POST BY INPUTING CAPTION
'''
def get_post_by_caption(word,insta_username):
    # Print every recent post whose caption contains `word`.
    user_id = get_user_id(insta_username)
    if user_id == None:
        print 'User does not exist!'
        exit()
    request_url = (BASE_URL + 'users/%s/media/recent/?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    user_media = requests.get(request_url).json()
    # flag records whether at least one caption matched.
    flag = False
    if user_media['meta']['code'] == 200:
        if len(user_media['data']):
            for i in range(len(user_media['data'])):
                # NOTE(review): 'caption' can be null for caption-less
                # posts, which would raise TypeError here -- confirm
                # against the API response.
                caption_text = user_media['data'][i]['caption']['text']
                if word in caption_text:
                    print "Caption on the post : " + user_media['data'][i]['caption']['text']
                    print "Image URL :" + user_media['data'][i]['images']['standard_resolution']['url']
                    print "Post ID : " + user_media['data'][i]['id'] +"\n"
                    flag = True
            if(flag == False):
                print "No caption related post found"
        else:
            print 'There is no required post of the user!'
            exit()
    else:
        print 'Status code other than 200 received!'
        exit()
'''
FUNCTION TO TRY AND CHOOSE THE POST IN A CREATIVE WAY,
1- WITH MINIMUM NUMBER LIKES
2-WHOSE CAPTION HAS A PARTICULAR TEXT
'''
def try_creative_ways(insta_username):
print "Choose a way for getting a post of a user:"
print "1- with minimum number likes"
print "2-whose caption has a particular text"
print "3- exit"
while True:
choice= raw_input("Enter your choice:")
choice=int(choice)
if choice==1:
post_with_min_like(insta_username)
elif choice == 2:
word = raw_input("Enter a particular text to be searched in caption")
get_post_by_caption(word,insta_username)
elif choice == 3:
break
else:
print "Invalid Choice"
'''
FUNCTION TO GET THE LIST OF USERS LIKED THE RECENT POST OF THE USER
'''
def get_like_list(insta_username):
    # List the usernames that liked the user's most recent post.
    media_id = get_post_id(insta_username)
    request_url = (BASE_URL + 'media/%s/likes?access_token=%s') % (media_id,APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    like_list = requests.get(request_url).json()
    if like_list['meta']['code']==200:
        if len(like_list['data']):
            print "Post liked by :"
            for i in range(len(like_list['data'])):
                # 1-based numbering for display.
                print "%d)" %(i+1)+ like_list['data'][i]['username']
        else:
            print "No one liked the recent post of user"
    else:
        print 'Status code other than 200 received!'
'''
FUNCTION DECLARATION TO LIKE THE RECENT POST OF A USER
'''
def like_a_post(insta_username):
    # POST a like on the user's most recent post using the app token.
    media_id = get_post_id(insta_username)
    request_url = (BASE_URL + 'media/%s/likes') % (media_id)
    payload = {"access_token": APP_ACCESS_TOKEN}
    print 'POST request url : %s' % (request_url)
    post_a_like = requests.post(request_url, payload).json()
    if post_a_like['meta']['code'] == 200:
        print 'You successfully liked the post!'
    else:
        print 'Failed to like the post. Try again!'
'''
FUNCTION TO FETCH THE COMMENTS ON A POST OF GIVEN USERNAME.
'''
def get_comment_list(insta_username):
media_id = get_post_id(insta_username)
request_url = (BASE_URL + 'media/%s/comments?access_token=%s') % (media_id, APP_ACCESS_TOKEN)
print 'GET request url : %s' % (request_url)
comment_list = requests.get(request_url).json()
if comment_list['meta']['code'] == 200:
if len(comment_list['data']):
print "Post liked by :"
for i in range(len(comment_list['data'])):
print "%d)" % (i + 1) + comment_list['data'][i]['text']
else:
print "No one commented the recent post of user"
else:
print 'Status code other than 200 received!'
'''
FUNCTION DECLARATION TO MAKE A COMMENT ON THE RECENT POST OF THE USER
'''
def post_a_comment(insta_username):
    # Prompt for comment text and POST it on the most recent post.
    media_id = get_post_id(insta_username)
    comment_text = raw_input("Your comment: ")
    payload = {"access_token": APP_ACCESS_TOKEN, "text" : comment_text}
    request_url = (BASE_URL + 'media/%s/comments') % (media_id)
    print 'POST request url : %s' % (request_url)
    make_comment = requests.post(request_url, payload).json()
    if make_comment['meta']['code'] == 200:
        print "Successfully added a new comment!"
    else:
        print "Unable to add comment. Try again!"
'''
FUNCTION DECLARATION TO MAKE DELETE NEGATIVE COMMENTS FROM THE RECENT POST
'''
def delete_negative_comment(insta_username):
    # Fetch all comments on the most recent post and delete those that
    # TextBlob's Naive Bayes analyzer classifies as negative.
    media_id = get_post_id(insta_username)
    request_url = (BASE_URL + 'media/%s/comments/?access_token=%s') % (media_id, APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    comment_info = requests.get(request_url).json()
    if comment_info['meta']['code'] == 200:
        if len(comment_info['data']):
            #Here's a naive implementation of how to delete the negative comments :)
            for i in range(len(comment_info['data'])):
                comment_id = comment_info['data'][i]['id']
                comment_text = comment_info['data'][i]['text']
                # Sentiment classification; p_neg/p_pos are the class
                # probabilities from the Naive Bayes analyzer.
                blob = TextBlob(comment_text, analyzer=NaiveBayesAnalyzer())
                if (blob.sentiment.p_neg > blob.sentiment.p_pos):
                    print 'Negative comment : %s' % (comment_text)
                    delete_url = (BASE_URL + 'media/%s/comments/%s/?access_token=%s') % (media_id, comment_id, APP_ACCESS_TOKEN)
                    print 'DELETE request url : %s' % (delete_url)
                    delete_info = requests.delete(delete_url).json()
                    if delete_info['meta']['code'] == 200:
                        print 'Comment successfully deleted!\n'
                    else:
                        print 'Unable to delete comment!'
                else:
                    print 'Positive comment : %s\n' % (comment_text)
        else:
            print 'There are no existing comments on the post!'
    else:
        print 'Status code other than 200 received!'
'''
FUCTION TO SEARCH A WORD IN COMMENT AND DELETE THE PERTICULAR COMMENT OF A USER
'''
def search_delete_comment(word,insta_username):
    # Delete every comment on the most recent post that contains `word`.
    media_id = get_post_id(insta_username)
    request_url = (BASE_URL + 'media/%s/comments/?access_token=%s') % (media_id, APP_ACCESS_TOKEN)
    print 'GET request url : %s' % (request_url)
    comment_info = requests.get(request_url).json()
    if comment_info['meta']['code'] == 200:
        if len(comment_info['data']):
            #Here's a naive implementation of how to delete the negative comments :)
            for i in range(len(comment_info['data'])):
                comment_id = comment_info['data'][i]['id']
                comment_text = comment_info['data'][i]['text']
                # Case-sensitive substring match against the comment body.
                if(word in comment_text):
                    print "Comment found:" + comment_text
                    delete_url = (BASE_URL + 'media/%s/comments/%s/?access_token=%s') % (media_id, comment_id, APP_ACCESS_TOKEN)
                    print 'DELETE request url : %s' % (delete_url)
                    delete_info = requests.delete(delete_url).json()
                    if delete_info['meta']['code'] == 200:
                        print 'Comment successfully deleted!\n'
                    else:
                        print 'Unable to delete comment!'
                else:
                    print '%s not found in the comment : ' %(word) +comment_text
        else:
            print 'There are no existing comments on the post!'
    else:
        print 'Status code other than 200 received!'
'''
FUNCTION TO RETURN A POST ID OF A PERTICULAR POST OF A USER
'''
def get_total_post(insta_username):
user_id = get_user_id(insta_username)
if user_id == None:
print 'User does not exist!'
exit()
request_url = (BASE_URL + 'users/%s?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
print 'GET request url : %s' % (request_url)
user_info = requests.get(request_url).json()
if user_info['meta']['code'] == 200:
if len(user_info['data']):
print '%s has %s total no. of posts.' % (user_info['data']['username'],user_info['data']['counts']['media'])
else:
print 'There is no data for this user!'
else:
print 'Status code other than 200 received!'
request_url = (BASE_URL + 'users/%s/media/recent/?access_token=%s') % (user_id, APP_ACCESS_TOKEN)
# print 'GET request url : %s' % (request_url)
user_media = requests.get(request_url).json()
user_info = requests.get(request_url).json()
if user_media['meta']['code'] == 200:
if len(user_media['data']):
post_number=raw_input("Choose a post's number:")
post_number=int(post_number)
return user_media['data'][post_number-1]['id']
else:
print 'There is no post of the user!'
exit()
else:
print 'Status code other than 200 received!'
exit()
'''
FUNCTION TO GET A USER'S POST ID, AND ITERATE THROUGH THE COMMENTS
'''
def get_post_comments(post_id):
request_url = (BASE_URL + 'media/%s/comments?access_token=%s') % (post_id, APP_ACCESS_TOKEN)
print 'GET request url : %s' % (request_url)
comment_list = requests.get(request_url).json()
if comment_list['meta']['code'] == 200:
if len(comment_list['data']):
print "Post liked by :"
for i in range(len(comment_list['data'])):
print "%d)" % (i + 1) + comment_list['data'][i]['text']
else:
print "No one commented the recent post of user"
else:
print 'Status code other than 200 received!'
'''
************************************************ M E N U **********************************************
'''
def start_bot():
    # Interactive main loop: print the menu, read a letter, dispatch to
    # the matching handler; option "m" exits the program.
    while True:
        print '\n'
        print 'Hey! Welcome to instaBot!'
        print 'Here are your menu options:'
        print "a.Get your own details"
        print "b.Get details of a user by username"
        print "c.Get your own recent post"
        print "d.Get the recent post of a user by username"
        print "e. Get the post in a creative way"
        print "f.Get a list of people who have liked the recent post of a user"
        print "g.Like the recent post of a user"
        print "h.Get a list of comments on the recent post of a user"
        print "i.Make a comment on the recent post of a user"
        print "j.Delete negative comments from the recent post of a user"
        print "k.Search a word and delete the comment from the recent post of a user"
        print "l.Get a user's posts, and iterate through the comments"
        print "m.Exit"
        choice=raw_input("Enter you choice: ")
        if choice=="a":
            self_info()
        elif choice=="b":
            insta_username = raw_input("Enter the username of the user: ")
            get_user_info(insta_username)
        elif choice=="c":
            get_own_post()
        elif choice=="d":
            insta_username = raw_input("Enter the username of the user: ")
            get_user_post(insta_username)
        elif choice == "e":
            insta_username = raw_input("Enter the username of the user: ")
            try_creative_ways(insta_username)
        elif choice=="f":
            insta_username = raw_input("Enter the username of the user: ")
            get_like_list(insta_username)
        elif choice=="g":
            insta_username = raw_input("Enter the username of the user: ")
            like_a_post(insta_username)
        elif choice=="h":
            insta_username = raw_input("Enter the username of the user: ")
            get_comment_list(insta_username)
        elif choice=="i":
            insta_username = raw_input("Enter the username of the user: ")
            post_a_comment(insta_username)
        elif choice=="j":
            insta_username = raw_input("Enter the username of the user: ")
            delete_negative_comment(insta_username)
        elif choice=='k':
            insta_username = raw_input("Enter the username of the user: ")
            word= raw_input("Enter the word to be searched in the comments:")
            search_delete_comment(word,insta_username)
        elif choice=='l':
            # Option l chains two helpers: pick a post, then list its comments.
            insta_username = raw_input("Enter the username of the user: ")
            post_id = get_total_post(insta_username)
            get_post_comments(post_id)
        elif choice=="m":
            exit()
        else:
            print "wrong choice"
'''
CALLING THE START_BOT() TO GET THE MENU AND PERFORM VARIOUS OPERATIONS
'''
# Script entry point: launch the interactive menu loop.
start_bot()
| true
|
80df4033ba15ac98d8345d89ed7c25acf2e66e1f
|
Python
|
chainsmokers-hackaton/IoT-Service-Vendor-Server
|
/ServerApp/DBWrapper.py
|
UTF-8
| 1,347
| 2.578125
| 3
|
[] |
no_license
|
import sqlite3
# Query templates for the client database.
# NOTE(review): these build SQL via %-string interpolation, which is
# vulnerable to SQL injection if a uuid/client_id ever comes from an
# untrusted source -- prefer parameterized queries (? placeholders).
SELECT_QUERY_BASE = "SELECT * FROM %s"
SELECT_MOBILE_TOKEN_BY_CLIENT_ID = SELECT_QUERY_BASE + " WHERE client_id='%s'"
SELECT_CLIENT_ID_BY_AP_UUID = SELECT_QUERY_BASE + " WHERE ap_uuid='%s'"
SELECT_AP_IP_PORT_BY_AP_UUID = SELECT_QUERY_BASE + " WHERE ap_uuid='%s'"
class DBWrapper:
    """Thin wrapper around the sqlite3 client/AP database.

    Table layout (inferred from usage -- confirm against the schema):
      ap_info(client_id, ap_uuid, ip, port)
      client_info(client_id, mobile_token)
    """

    def __init__(self, dbPath="./client.db"):
        # check_same_thread=False: the server touches the DB from
        # multiple request-handler threads.
        self._con = sqlite3.connect(dbPath, check_same_thread=False)
        self._cursor = self._con.cursor()

    def select_mobile_token_by_ap_uuid(self, uuid):
        """Return the mobile tokens of every client attached to AP *uuid*."""
        # Security fix: parameterized queries instead of the module-level
        # %-interpolated templates, which were open to SQL injection.
        self._cursor.execute("SELECT * FROM ap_info WHERE ap_uuid=?", (uuid,))
        ap_infos = self._cursor.fetchall()
        mobile_tokens = list()
        for ap_info in ap_infos:
            # ap_info[0] is the client_id owning this AP row.
            self._cursor.execute(
                "SELECT * FROM client_info WHERE client_id=?", (ap_info[0],))
            for client_info in self._cursor.fetchall():
                # client_info[1] is the mobile push token.
                mobile_tokens.append(client_info[1])
        return mobile_tokens

    def select_ap_ip_port_by_ap_uuid(self, uuid):
        """Return (ip, port) of the first AP row matching *uuid*, or None."""
        self._cursor.execute("SELECT * FROM ap_info WHERE ap_uuid=?", (uuid,))
        rows = self._cursor.fetchall()
        for row in rows:
            return row[2], row[3]

    def __del__(self):
        # Close cursor and connection; guard against a partially
        # constructed instance (e.g. connect() raised in __init__).
        try:
            self._cursor.close()
            self._con.close()
        except Exception:
            pass
| true
|
7211e03cc680eeddcdaa28edd1feb569e5177856
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2983/59140/290380.py
|
UTF-8
| 437
| 3.15625
| 3
|
[] |
no_license
|
# Count adjacent swaps needed to rearrange the string into a palindrome.
# NOTE(review): n is read but never used -- presumably the declared
# length of s.
n=int(input())
s=input()
# temp = number of characters occurring an odd number of times; a string
# is rearrangeable into a palindrome only when at most one such
# character exists.
temp=0
for i in s:
    if s.count(i)%2==1:temp+=1
if temp%2==0 and temp!=0:
    print("Impossible")
else:
    # Greedily pair characters from the outside in, accumulating the
    # number of adjacent swaps in `step`.
    step=0
    while len(s)>1:
        if s.count(s[0:1])==1:
            # Unique character: it must end up in the exact middle.
            step += len(s) // 2
            s=s[len(s)//2:len(s)//2+1]+s[1:len(s)//2]+s[0:1]+s[len(s)//2+1:]
        else:
            # Move the rightmost copy of s[0] to the far right end,
            # then strip the matched outer pair.
            step += len(s) - s.rfind(s[0:1]) - 1
            s=s[1:s.rfind(s[0:1])]+s[s.rfind(s[0:1])+1:]
    print(step)
| true
|
0cb6b7a1bed0a7998e1a7b1470ddea2dd55ba652
|
Python
|
s-Akhil-krishna/Competitive-coding
|
/GeeksforGeeks/Anagram of String.py
|
UTF-8
| 318
| 2.84375
| 3
|
[] |
no_license
|
from collections import Counter
def remAnagram(str1, str2):
    """Return the minimum number of character deletions needed to make
    str1 and str2 anagrams of each other."""
    freq1 = Counter(str1)
    freq2 = Counter(str2)
    # Counter subtraction keeps only positive surpluses, so the two
    # directed differences together cover every character to delete.
    surplus_in_first = freq1 - freq2
    surplus_in_second = freq2 - freq1
    return sum(surplus_in_first.values()) + sum(surplus_in_second.values())
| true
|
d4a5faf6472c551f3cd8b3d398ed38cc1da0029f
|
Python
|
yang03265/jason
|
/networking/rip router/rip_router.py
|
UTF-8
| 6,866
| 2.875
| 3
|
[] |
no_license
|
from sim.api import *
from sim.basics import *
'''
Create your RIP router in this file.
'''
class RIPRouter (Entity):
    """Distance-vector (RIP-style) router.

    Routing state:
      table         -- destination -> {first_hop: distance}
      firstHopPorts -- directly connected neighbor -> local port number
    A distance of 100 acts as "infinity" (unreachable / poisoned).
    """
    def __init__(self, tablea = None):
        # `tablea` is unused but kept so existing callers keep working.
        self.table = {}
        self.firstHopPorts = {}

    def handle_rx (self, packet, port):
        """Dispatch on packet type: discovery (link up/down), routing
        update, or ordinary data packet to forward."""
        if isinstance(packet, DiscoveryPacket):
            # handling discovery packets- link up or link down
            if packet.is_link_up:
                # New neighbor: directly reachable at distance 1.
                self.table[packet.src] = {}
                self.table[packet.src][packet.src] = 1
                self.firstHopPorts[packet.src] = port
                self.send_updates_to_neighbors()
            else:
                # Link down: drop packet.src as a first hop for every
                # destination; only a removal of the current shortest
                # hop requires re-advertising.
                routingTable_updated = False
                for dest in self.table.keys():
                    if packet.src in self.table[dest]:
                        shortestHop = self.shortest_hop(dest)
                        if shortestHop == packet.src:
                            routingTable_updated = True
                        del self.table[dest][packet.src]
                if packet.src in self.firstHopPorts:
                    del self.firstHopPorts[packet.src]
                    routingTable_updated = True
                if routingTable_updated:
                    self.send_updates_to_neighbors()
        elif isinstance(packet, RoutingUpdate):
            # Merge the neighbor's advertised distances into our table.
            # (Cleanup: removed the never-read `deledes`/`destDelete`
            # locals from the original implementation.)
            routingTable_updated = False
            destinations = []
            for destination in packet.all_dests():
                if destination == self:
                    continue
                destinations.append(destination)
                if packet.get_distance(destination) < 100:
                    if destination not in self.table:
                        # Brand-new destination.
                        self.table[destination] = {}
                        self.table[destination][packet.src] = 1 + packet.get_distance(destination)
                        routingTable_updated = True
                    else:
                        if packet.src not in self.table[destination]:
                            # New first hop for a known destination.
                            self.table[destination][packet.src] = 1 + packet.get_distance(destination)
                            shortestHop = self.shortest_hop(destination)
                            if shortestHop == packet.src:
                                routingTable_updated = True
                        else:
                            # Existing first hop: only accept improvements.
                            if 1 + packet.get_distance(destination) < self.table[destination][packet.src]:
                                self.table[destination][packet.src] = 1 + packet.get_distance(destination)
                                routingTable_updated = True
                else:
                    # Advertised as unreachable (poisoned): forget this hop.
                    if destination in self.table:
                        if packet.src in self.table[destination]:
                            del self.table[destination][packet.src]
            # Destinations the neighbor no longer advertises at all are
            # implicitly withdrawn through it.
            for dest in self.table.keys():
                if dest not in destinations:
                    if packet.src in self.table[dest]:
                        shortestHop = self.shortest_hop(dest)
                        if shortestHop == packet.src:
                            routingTable_updated = True
                        del self.table[dest][packet.src]
            if routingTable_updated:
                self.send_updates_to_neighbors()
        else:
            # for normal packets, just send the packet to the specified port
            if packet.dst != self:
                self.send_or_forward_packets(packet, port)

    def shortest_hop(self, destination):
        """Return the best first hop toward `destination`, breaking
        distance ties by the lowest port number; None if unreachable."""
        shortest_dis = 100
        shortestHop = None
        for firstHop in self.table[destination].keys():
            if self.table[destination][firstHop] < shortest_dis:
                shortestHop = firstHop
                shortest_dis = self.table[destination][firstHop]
            elif self.table[destination][firstHop] == shortest_dis and shortest_dis != 100:
                # Tie-break on the smaller local port number.
                if self.firstHopPorts[shortestHop] > self.firstHopPorts[firstHop]:
                    shortestHop = firstHop
                    shortest_dis = self.table[destination][firstHop]
        return shortestHop

    def send_or_forward_packets(self, packet, port):
        # Forward a data packet toward its destination, unless that
        # would send it straight back out the port it arrived on.
        firstHop = self.shortest_hop(packet.dst)
        if firstHop != None:
            if port != self.firstHopPorts[firstHop]:
                self.send(packet, self.firstHopPorts[firstHop])

    def find_shortest_paths(self):
        # returns a list of shortest paths with (destination, distance,
        # first_hop) as each element node; unreachable entries omitted.
        shortestList = []
        for destination in self.table.keys():
            shortestDistance = 100
            shortestfirstHop = None
            for firstHop in self.table[destination].keys():
                if self.table[destination][firstHop] < shortestDistance:
                    shortestDistance = self.table[destination][firstHop]
                    shortestfirstHop = firstHop
                elif self.table[destination][firstHop] == shortestDistance and shortestDistance != 100:
                    if self.firstHopPorts[shortestfirstHop] > self.firstHopPorts[firstHop]:
                        shortestfirstHop = firstHop
                        shortestDistance = self.table[destination][firstHop]
            if shortestDistance != 100:
                shortestList.append((destination, shortestDistance, shortestfirstHop))
        return shortestList

    def send_updates_to_neighbors(self):
        """Advertise current shortest paths to every neighbor, applying
        split horizon with poisoned reverse (distance 100 back through
        the neighbor a route uses)."""
        shortestList = self.find_shortest_paths()
        for firstHop in self.firstHopPorts.keys():
            shortestDistanceList = []
            for (destination, shortestDistance, shortestfirstHop) in shortestList:
                if destination == firstHop:
                    continue
                dist = shortestDistance
                if shortestfirstHop == firstHop:
                    dist = 100
                shortestDistanceList.append((destination, dist))
            packet = RoutingUpdate()
            for (dest, dist) in shortestDistanceList:
                # (Cleanup: removed the leftover debug print here.)
                packet.add_destination(dest, dist)
            self.send(packet, self.firstHopPorts[firstHop])
| true
|
9dc2b50c3bd606a4b2af5b41e404b950d7a315d1
|
Python
|
bansheerubber/dungeon-generator
|
/generator.py
|
UTF-8
| 7,201
| 2.734375
| 3
|
[] |
no_license
|
import time
import random
import math
from file import File
from roomtype import RoomType
from PIL import Image
from room import Room
from chunk import get_chunk
from a_star import a_star
class Generator:
    """Procedural dungeon generator.

    Places jittered rows of rooms, connects them with hallways, repairs
    and prunes disconnected clusters, then appends a final boss room.
    Most builder methods return self so calls can be chained.
    """
    def __init__(self):
        # Registered room templates and difficulty bands.
        self.room_types = []
        self.difficulties = []
        self.difficulties_map = {}
        self.reset()
    def get_chunk(self, position):
        # Spatial-hash lookup used during room placement.
        return get_chunk(position, chunk_map=self.chunk_map)
    def add_room_type(self, roomtype):
        """Register a room template; returns it so calls can chain."""
        self.room_types.append(roomtype)
        return roomtype
    def add_difficulty(self, range, difficulty):
        # Associate a depth range with a difficulty setting.
        self.difficulties.append(range)
        self.difficulties_map[range] = difficulty
    def reset(self):
        """Clear all generated state so generate() can run again."""
        self.chunk_map = {}
        self.rooms = set()
        self.room_map = {}
        self.hallways = set()
        self.hallway_map = {}
        self.collections = []
        self.color_by_difficulty = True
        self.difficulty_colors = {}
    def generate(self, width, rows):
        """Build a dungeon of `rows` rows with `width` rooms per row."""
        self.reset()
        # place row of rooms
        y = 0
        max_y = 0
        start = time.time()
        room_index = 0
        for row in range(0, rows):
            # Each new row starts between the previous row's baseline
            # and its lowest placed room.
            percent = 0.5
            y = math.floor((y * percent) + (max_y * (1 - percent)))
            max_y = 0
            for x in range(0, width):
                # Jitter the position and keep sampling templates until
                # one fits there.
                position = (int(x + random.randint(-5, 5) + 5), int(y + random.randint(-5, 5) + 5))
                room_type = random.sample(self.room_types, 1)[0]
                while room_type.can_place(position) == False:
                    room_type = random.sample(self.room_types, 1)[0]
                room = Room(position, room_type, self)
                if room.position[1] > max_y:
                    max_y = room.position[1]
        # (Removed a commented-out "tendril" placement experiment that
        # extended the dungeon's end with extra room chains.)
        print(f"Created {len(self.rooms)} rooms in {int((time.time() - start) * 1000)}ms")
        start = time.time()
        for room in self.rooms:
            room.place_hallways()
        print(f"Created {len(self.hallways)} hallways in {int((time.time() - start) * 1000)}ms")
        start = time.time()
        # repair rooms
        # Find the biggest connected collection of rooms so far.
        largest_collection_count = 0
        largest_collection = None
        for collection in self.collections:
            if len(collection.rooms) > largest_collection_count:
                largest_collection_count = len(collection.rooms)
                largest_collection = collection
        deleted_rooms = set()
        fixed_rooms = 0
        for room in self.rooms:
            if room.collection != largest_collection:
                # Try to attach stray rooms to the main cluster with
                # short hallways.
                room.place_hallways(max_dist=15)
                if room.collection != None and len(room.collection.rooms) >= largest_collection_count:
                    largest_collection = room.collection
                    largest_collection_count = len(room.collection.rooms)
                if room.collection == largest_collection:
                    fixed_rooms = fixed_rooms + 1
        # final pruning
        for room in self.rooms:
            if room.collection != largest_collection:
                deleted_rooms.add(room)
        for room in deleted_rooms:
            room.destroy()
        print(f"Deleted {len(deleted_rooms)} rooms and fixed {fixed_rooms} in {int((time.time() - start) * 1000)}ms")
        # find the room furthest down and create the boss room there
        Boss = self.add_room_type(
            RoomType(
                (10, 10),
                name="Final Boss",
                is_special=True,
            )
            .add_color((255, 0, 0))
        )
        furthest_y = 0
        furthest_y_room = None
        for room in self.rooms:
            if room.position[1] + room.size[1] > furthest_y:
                furthest_y = room.position[1] + room.size[1]
                furthest_y_room = room
        room = Room((furthest_y_room.position[0] + random.randint(-2, 2), furthest_y + 2), Boss, self)
        room.place_hallways(max_dist=50)
        # give rooms indices, used for file saving
        room_index = 0
        for room in self.rooms:
            room.index = room_index
            room_index = room_index + 1
            if len(room.hallways) == 0:
                # NOTE(review): this fires for ANY unconnected room, not
                # just the boss room, despite the message -- confirm intent.
                print("FINAL BOSS HAS NO CONNECTIONS")
        for room_type in self.room_types:
            print(f"{room_type.name}: {len(room_type.rooms)}")
        return self
    def save(self, file_name, blockland=False):
        """Serialize room types, rooms, hallways and room connections to
        a file; `blockland` selects the alternative output format."""
        file = File()
        for room_type in self.room_types:
            room_type.serialize(file)
        file.write_section()
        # Rooms are written in index order so connection references
        # below resolve deterministically.
        rooms = list(self.rooms)
        rooms.sort(key=lambda room: room.index)
        for room in rooms:
            room.serialize(file)
        file.write_section()
        for hallway in self.hallways:
            hallway.serialize(file)
        file.write_section()
        # write room connections
        for room in rooms:
            for neighbor in room.connected_rooms:
                file.write(neighbor.index, 4)
            file.write_break()
        file.write_section()
        if blockland == False:
            file.save(file_name)
        else:
            file.save_blockland(file_name)
        return self
    def save_image(self, file_name):
        """Render rooms and hallways to an image file."""
        # Canvas bounds = furthest room extents plus a 10px margin.
        image_x = 0
        image_y = 0
        for room in self.rooms:
            if room.position[0] + room.size[0] > image_x:
                image_x = room.position[0] + room.size[0]
            if room.position[1] + room.size[1] > image_y:
                image_y = room.position[1] + room.size[1]
        image = Image.new("RGB", (image_x + 10, image_y + 10), color=(255,255,255,0))
        for room in self.rooms:
            room.draw(image)
        for hallway in self.hallways:
            hallway.draw(image)
        image.save(file_name)
        return self
    def _color_path(self, path):
        # Highlight every room on the path and the hallway between each
        # consecutive pair of rooms.
        for index in range(1, len(path)):
            previous_room = path[index - 1]
            room = path[index]
            room.overwrite_color = (255, 100, 0)
            if previous_room in room.hallway_map:
                room.hallway_map[previous_room].overwrite_color = (255, 100, 0)
    # tries to path from spawn to the boss room
    def shortest_path(self):
        """Color the A* path from the Spawn room to the Final Boss room."""
        # find the boss room
        boss = None
        for room in self.rooms:
            if room.room_type.name == "Final Boss":
                boss = room
                break
        # find the spawn room
        spawn = None
        for room in self.rooms:
            if room.room_type.name == "Spawn":
                spawn = room
                break
        path = a_star(spawn, boss)
        if path != None:
            self._color_path(path)
            print(f"Path traverses {len(path)} rooms")
        else:
            print("Path not found")
        return self
    # tries to path to every single occurance of this room type on its way to the boss
    def path_to_all_room_types(self, room_types):
        """Color a path visiting (a random subset of) every room of the
        given types on the way from Spawn to the Final Boss.

        room_types -- iterable of (RoomType, percent_chance) pairs; a
        room of that type is kept with roughly `percent_chance`% odds.
        """
        rooms = []
        chances = {}
        for (room_type, chance) in room_types:
            rooms = rooms + list(room_type.rooms)
            chances[room_type] = chance
        # Visit rooms in top-to-bottom order.
        rooms.sort(key=lambda room: room.position[1])
        # remove random rooms
        # NOTE(review): removing from `rooms` while iterating it skips
        # the element following each removal -- confirm this is acceptable.
        for room in rooms:
            if room.room_type in chances and chances[room.room_type] < random.randint(0, 100):
                rooms.remove(room)
        # find the spawn room
        spawn = None
        for room in self.rooms:
            if room.room_type.name == "Spawn":
                spawn = room
                break
        rooms.insert(0, spawn)
        # find the boss room
        boss = None
        for room in self.rooms:
            if room.room_type.name == "Final Boss":
                boss = room
                break
        rooms.append(boss)
        # Chain A* segments between consecutive waypoints, skipping any
        # waypoint an earlier segment already passed through.
        total_path = []
        last_room = rooms[0]
        for index in range(1, len(rooms)):
            room = rooms[index]
            if room not in total_path:
                path = a_star(last_room, room)
                if path != None:
                    total_path = total_path + path
                    last_room = room
        print(f"Path traverses {len(total_path)} rooms")
        self._color_path(total_path)
        return self
| true
|
7e3c56cbb6bd986d947267fcb722cfbd2167a881
|
Python
|
egeyosunkaya/floppy-birdie
|
/src/game.py
|
UTF-8
| 5,247
| 2.84375
| 3
|
[] |
no_license
|
import pygame
import logging
import random
from enum import Enum
from pygame import init
from pygame.math import Vector2
from pygame import Rect
from commands import JumpCommand
from background_state import BackgroundState
from bird_state import BirdState
from global_vars import GlobalVars
from collision_checker import CollisionChecker
from configuration import Configuration
from menu import Menu
class GameState:
    """Mutable state of one play-through: bird, background, collision
    checking and score."""
    def __init__(self):
        self.world_size = GlobalVars.get_world_size()
        self.bird_state = BirdState()
        self.background_state = BackgroundState()
        self.collision_checker = CollisionChecker(self.background_state, self.bird_state)
        self.is_alive = True
        self.score = 0
    def update(self, move_command):
        # Advance bird and background one tick, then test for collisions.
        self.bird_state.update(move_command)
        self.background_state.update()
        if self.collision_checker.check_collision():
            self.is_alive = False
    def get_bird_position(self):
        # Bird position in world (cell) coordinates.
        return self.bird_state.bird_position
class GameStatus(Enum):
    """Top-level application state used to switch between menus and play."""
    START_MENU = 1
    INGAME_MENU = 2
    STARTED = 3
class Game:
    """Owns the pygame window, the menu and the main game loop."""
    def __init__(self):
        pygame.init()
        pygame.display.set_caption("Floppy Birdie")
        self.clock = pygame.time.Clock()
        self.game_state = GameState()
        self.move_command = None
        self.cell_size = GlobalVars.get_cell_size()
        # Window size in pixels = world size (cells) * cell size.
        self.game_window = pygame.display.set_mode(
            (int(self.cell_size.x * self.game_state.world_size.x),
            int(self.cell_size.y * self.game_state.world_size.y))
        )
        self.running = True
        self.game_status = GameStatus.START_MENU
        self.window_size = self.game_state.world_size.elementwise() * self.cell_size
        self.game_menu = Menu(self.start_game, self.quit_game)
    def start_game(self):
        # Menu callback: reset state and enter gameplay.
        print("Start Game Called")
        self.game_menu.menu.disable()
        self.game_status = GameStatus.STARTED
        self.game_state = GameState()
        self.running = True
    def quit_game(self):
        # Menu callback: leave the main loop.
        print("Quit Game Called")
        self.game_menu.menu.disable()
        self.running = False
    def processInput(self):
        # Translate pygame events into at most one move command per frame;
        # Escape pauses into the in-game menu, Space/W makes the bird jump.
        self.move_command = None
        for event in pygame.event.get():
            if event.type == pygame.constants.QUIT:
                self.running = False
                break
            elif event.type == pygame.constants.KEYDOWN:
                if event.key == pygame.constants.K_ESCAPE:
                    if self.game_status is GameStatus.STARTED:
                        self.game_status = GameStatus.INGAME_MENU
                    break
                elif event.key == pygame.constants.K_SPACE or event.key == pygame.constants.K_w:
                    self.move_command = JumpCommand()
    def update(self):
        # Only advance the simulation while actually playing; return to
        # the start menu once the bird dies.
        if self.game_status is GameStatus.STARTED:
            self.game_state.update(self.move_command)
            if self.game_state.is_alive is False:
                self.game_status = GameStatus.START_MENU
    def draw_background(self):
        # Blit the scrolling background sprite at the window origin.
        self.game_window.blit(
            self.game_state.background_state.background_image.sprite,
            Vector2(0,0),
            self.game_state.background_state.background_image.texture_rect
        )
    def draw_game(self):
        # Draw Background
        self.draw_background()
        # Draw Bird
        self.game_window.blit(
            self.game_state.bird_state.bird_sprite,
            self.game_state.get_bird_position().elementwise() * self.cell_size,
            self.game_state.bird_state.texture_rect
        )
        # Draw Pipes
        for pipe_tuple in self.game_state.background_state.pipe_list:
            for pipe in pipe_tuple:
                self.game_window.blit(
                    pipe.sprite,
                    pipe.location.elementwise() * self.cell_size,
                    pipe.texture_rect
                )
        # Collision Debug
        # When debug mode is on, outline the collision boxes.
        if Configuration.is_debug_mode_enabled():
            pygame.draw.rect(self.game_window, color=(0,0,255), rect= self.game_state.bird_state.get_collision_box())
            for pipe_tuple in self.game_state.background_state.pipe_list:
                for pipe in pipe_tuple:
                    pygame.draw.rect(self.game_window, color=(0,0,0), rect=pipe.get_collision_box())
    def draw_menu(self):
        # Draw Background
        self.game_window.blit(
            self.game_state.background_state.background_image.sprite,
            Vector2(0,0),
            self.game_state.background_state.background_image.texture_rect
        )
        # pygame-menu takes over the loop while the menu is enabled.
        self.game_menu.menu.enable()
        self.game_menu.menu.mainloop(self.game_window, bgfun=self.draw_background)
    def render(self):
        # Choose what to draw from the current status, then flip buffers.
        if self.game_status is GameStatus.STARTED:
            self.draw_game()
        elif self.game_status is GameStatus.INGAME_MENU:
            self.draw_menu()
        elif self.game_status is GameStatus.START_MENU:
            self.draw_menu()
        pygame.display.update()
    def run(self):
        # Classic fixed-rate loop: input -> update -> render at 60 FPS.
        while self.running:
            self.processInput()
            self.update()
            self.render()
            self.clock.tick(60)
| true
|
060f447f63d51c86feb6ea26b90a0bf08cd0950f
|
Python
|
joostlek/python_school
|
/Practice Exercise 1/Practice Exercise 1_3.py
|
UTF-8
| 171
| 2.515625
| 3
|
[] |
no_license
|
# Practice exercise: simple assignments, a list, and multiple assignment.
a = 6
b = 7
# Average of a and b (float division).
c = (a + b) / 2

# Office supplies inventory.
inventaris = ['papier', 'nietjes', 'pennen']

# Name parts via tuple unpacking; the tussenvoegsel (infix) is empty here.
voornaam, tussenvoegsel, achternaam = 'Joost', '', 'Lekkerkerker'
mijnnaam = voornaam + achternaam
| true
|
56855f70448ed1f4b0a2a84f20fc7892c4f7d64e
|
Python
|
jinju-lee/Python-study
|
/05-13plus.py
|
UTF-8
| 62
| 2.65625
| 3
|
[] |
no_license
|
import re

# Greedy pattern: an 'a', one or more characters, then a 'c'.
r = re.compile("a.+c")
match = r.search("abbfffc")
print(match)
| true
|
38ea932382f7d33ea64a42d53ded985510e9ff36
|
Python
|
heqiangsc/article_spider
|
/article_spider/spiders/cnblog.py
|
UTF-8
| 2,223
| 2.515625
| 3
|
[] |
no_license
|
import re
import scrapy
import datetime
from scrapy.http import Request
from urllib import parse
from article_spider.items import CnBlogArticleItem, ArticleItemLoader
from article_spider.utils.common import get_md5
class CnBlogSpider(scrapy.Spider):
    """Spider for the cnblogs.com "picked" article listing."""
    name = "cnblog"
    allowed_domains = ["www.cnblogs.com"]
    start_urls = ['https://www.cnblogs.com/pick/']
    def parse(self, response):
        """
        1. Extract each article URL from the listing page, let scrapy
           download it, and parse it in parse_detail.
        2. Extract the next-page URL and feed it back to parse.
        """
        # Extract all article URLs on the listing page and schedule them
        # for download; parse_detail handles each response.
        post_nodes = response.xpath('//*[@id="post_list"]/article')
        for post_node in post_nodes:
            post_url = post_node.css("::attr(href)").extract_first("")
            yield Request(url=parse.urljoin(response.url, post_url), callback=self.parse_detail)
        # Follow the next listing page (currently disabled).
        #next_url = response.xpath('//*[@id="paging_block"]/div/a//@href')[-1].extract()
        #if next_url:
        #    yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)
    def parse_detail(self, response):
        """Populate a CnBlogArticleItem from an article page."""
        # Load the item through an item loader.
        #cb_post_title_url span
        item_loader = ArticleItemLoader(item=CnBlogArticleItem(), response=response)
        item_loader.add_css("title", "#cb_post_title_url span::text")
        item_loader.add_value("url", response.url)
        # url_object_id: stable MD5 key derived from the URL.
        item_loader.add_value("url_object_id", get_md5(response.url))
        item_loader.add_css("create_date", "#post-date::text")
        item_loader.add_css("fav_nums", "#post_view_count::text")
        item_loader.add_css("comment_nums", "#post_comment_count::text")
        ##item_loader.add_css("fav_nums", ".bookmark-btn::text")
        ##item_loader.add_css("tags", "p.entry-meta-hide-on-mobile a::text")
        item_loader.add_css("content", "#cnblogs_post_body")
        item_loader.add_css("content_image_url", "#cnblogs_post_body>p>img::attr(src)")
        item_loader.add_value("content_replace_flag", 0)
        article_item = item_loader.load_item()
        yield article_item
| true
|
c157fd1ceee0495d1f8659c83b2aee30d5bff6e0
|
Python
|
84ace/esp32_smart_keezer
|
/software/old_stuff/other/i2c_scanner.py
|
UTF-8
| 3,134
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
# I2C scanner in MicroPython
# Prints the decimal and hexadecimal address of every device connected
# to the I2C bus.
# https://projetsdiy.fr - https://diyprojects.io (Dec. 2017)
import machine
from time import sleep
# Open the I2C bus on the ESP32 default pins (SCL=22, SDA=21).
i2c = machine.I2C(scl=machine.Pin(22), sda=machine.Pin(21))
print('Scan i2c bus...')
devices = i2c.scan()
if len(devices) == 0:
    print("No i2c device !")
else:
    print('i2c devices found:',len(devices))
    for device in devices:
        # Report each responding address in decimal and hex.
        print("Decimal address: ",device," | Hexa address: ",hex(device))
# NOTE(review): the bus is re-opened here with the same pins; the
# instance created above would have sufficed.
i2c = machine.I2C(scl=machine.Pin(22), sda=machine.Pin(21))
def adc_setup():
    # Configure the external ADC at I2C address 0x1d.
    # Register meanings assumed from usage -- confirm against the
    # ADC's datasheet.
    i2c.writeto_mem(0x1d, 0x07, b'\x01')
    #i2c.writeto_mem(0x1d, 0x0B, b'\x00') # defaults to 0
    i2c.writeto_mem(0x1d, 0x00, b'\x01')
def i2c_adc_temp():
    # Read the 2-byte temperature register (0x27) and assemble a 9-bit
    # value with 0.5-degree resolution.
    data = i2c.readfrom_mem(0x1d, 0x27, 2)
    high_byte = int(data[0])
    low_byte = int(data[1])
    value = (high_byte << 1 | low_byte >> 7) / 2
    return value
def i2c_adc_voltage_in_x(channel):
    """Read one ADC voltage channel, print derived voltages, return the raw count.

    NOTE(review): batt_1_voltage / batt_2_voltage / batt_2_current are not
    defined anywhere in this file, so calling this raises NameError; and when
    `channel` matches none of them, `register` is unbound. Needs fixing.
    NOTE(review): the scaled battery voltage is only printed — the *raw*
    12-bit count is what gets returned. Confirm that is intentional.
    """
    if channel == batt_1_voltage:
        register = 0x20
    elif channel == batt_2_voltage:
        register = 0x21
    elif channel == batt_2_current:
        register = 0x22
    data = i2c.readfrom_mem(0x1d, register, 2)
    #print(data)
    high_byte = int(data[0])
    low_byte = int(data[1])
    # Assemble the 12-bit conversion result from the two register bytes.
    value = (high_byte << 4 | low_byte >> 1)
    # Scale by the 2.56 V full-scale range over 4096 counts.
    adc_voltage = 2.56 / 4096 * value
    print("ADC Voltage: ", adc_voltage)
    # Resistor divider (105k over 20k) maps battery voltage into ADC range.
    battery_voltage_divider = ((105000+20000)/20000)
    battery_voltage = battery_voltage_divider * adc_voltage
    # NOTE(review): presumably an empirical calibration factor — confirm.
    error = 0.9796905222
    print("Battery Voltage: ", battery_voltage * error)
    return value
# ADC result registers for channels IN0..IN6; `i` is the shared channel
# cursor mutated (as a global) by i2c_adc_voltage_in1.
registers = [0x20,0x21, 0x22, 0x23, 0x24, 0x25, 0x26]
i = 0
def i2c_adc_voltage_in1():
    """Cycle through ADC channels 0 and 1 forever, printing scaled voltages.

    NOTE(review): the `while True` never breaks, so this function never
    returns; the bare `False` in the else branch is a no-op statement
    (probably `return False` or `break` was intended). It is only ever
    referenced from a commented-out call site below.
    """
    global i
    while True:
        if i < 2:
            data = i2c.readfrom_mem(0x1d, registers[i], 2)
            #print(data)
            high_byte = int(data[0])
            low_byte = int(data[1])
            value = (high_byte << 4 | low_byte >> 1)
            print(i, value)
            adc_voltage = 2.56 / 4096 * value
            print("ADC Voltage: ", adc_voltage)
            battery_voltage_divider = ((105000+20000)/20000)
            battery_voltage = battery_voltage_divider * adc_voltage
            error = 0.9796905222
            print("Battery Voltage: ", battery_voltage * error)
            i += 1
            sleep(1)
        else:
            i = 0
            False  # no-op; see NOTE in the docstring
def i2c_adc_current():
    """Read ADC channel 2 (register 0x22) and print the derived current.

    NOTE(review): prints the module-level `i` alongside the value; that
    counter is unrelated to this reading (likely copied from
    i2c_adc_voltage_in1).
    """
    data = i2c.readfrom_mem(0x1d, 0x22, 2)
    print(data)
    high_byte = int(data[0])
    low_byte = int(data[1])
    value = (high_byte << 4 | low_byte >> 1)
    print(i, value)
    adc_voltage = 2.56 / 4096 * value
    print("ADC Voltage: ", adc_voltage)
    # NOTE(review): presumably a current-sense output centered at 1.63 V
    # with 0.044 V per amp — confirm against the sensor datasheet.
    crossover_point = 1.63
    current = (adc_voltage - crossover_point) / 0.044
    print("ADC Current: ", current)
# Main polling loop: configure the ADC once, then print temperature and
# current once per second (voltage readouts are currently commented out).
adc_setup()
while True:
    print(i2c_adc_temp(), "C")
    print("")
    #print("batt1")
    #print(i2c_adc_voltage_in0())
    #print("batt2")
    #print(i2c_adc_voltage_in1())
    i2c_adc_current()
    print("")
    sleep(1)
| true
|
749acd62e9c8bc2a2ec6824d46fbb6b798802961
|
Python
|
xueshijun/0.MachineLearningWorkSpace
|
/MachingLearningInAction/com/MapReduce/mpMeanMapper.py
|
UTF-8
| 1,246
| 2.984375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
#Name :
#Author : Xueshijun
#MailTo : xueshijun_2010@163.com / 324858038@qq.com
#QQ : 324858038
#Blog : http://blog.csdn.net/xueshijun666
#Created on Thu Mar 03 09:26:08 2016
#Version: 1.0
#-------------------------------------------------------------------------------
import sys
from numpy import mat, mean, power
'''
该mapper首先按行读取所有的输人并创建一组对应的浮点数,然后得到数组的长度并创建NumPy矩阵。
再对所有的值进行平方,最后将均值和平方后的均值发送出去。这些值将用于计算全局的均值和方差
一个好的习惯是向标准错误输出发送报告。如果某作业10分钟内没有报告输出,则将Hadoop 中止。
'''
def read_input(file):
    """Yield every line of *file* with trailing whitespace removed."""
    for raw_line in file:
        yield raw_line.rstrip()
# Driver (Python 2): read one float per line from stdin, then emit one
# tab-separated record "<count>\t<mean>\t<mean of squares>" for the reducer.
input = read_input(sys.stdin)#creates a list of input lines
input = [float(line) for line in input] #overwrite with floats
numInputs = len(input)
input = mat(input)
sqInput = power(input,2)
#output size, mean, mean(square values)
print "%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput)) #calc mean of columns
# Heartbeat on stderr so Hadoop does not kill a long-running task.
print >> sys.stderr, "report: still alive"
| true
|
2fbdafa2fc90337cb1e5febee126ba42c2ac4241
|
Python
|
ZarulHanifah/index_combination_software
|
/game.py
|
UTF-8
| 4,417
| 2.828125
| 3
|
[] |
no_license
|
import sys
import pygame
from index_combination.world_constants import *
import index_combination.support_functions as sf
class Game:
    """Pygame app: a start menu loop followed by the grid-editing loop.

    NOTE(review): both loops run inside __init__; quitting is only possible
    through myquit()'s sys.exit(), since running_game is never set to False.
    """
    def __init__(self):
        pygame.init()
        pygame.display.set_icon(pygame.image.load(LOGO_PATH))
        pygame.display.set_caption(SOFTWARE_CAPTION)
        self.surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), pygame.RESIZABLE)
        self.bigger_font = pygame.font.SysFont('Calibri', 30)
        self.smaller_font = pygame.font.SysFont('Calibri', 15)
        self.grid = sf.initiate_grid()
        self.running_menu = True
        self.running_game = True
        self.dragging = False
        # Clickable menu buttons (x, y, w, h).
        self.button_menu_samplesheet = pygame.Rect(50, 100, 200, 50)
        self.button_start_now = pygame.Rect(50, 200, 200, 50)
        # Menu loop runs until a button transitions us into the game loop.
        while self.running_menu:
            self.handle_events_menu()
            self.draw_menu()
        while self.running_game:
            self.handle_events_game()
            self.draw_game()
    def myquit(self):
        """Hard exit: tear down pygame and terminate the process."""
        pygame.quit()
        sys.exit()
    def handle_events_menu(self):
        """Process menu events: quit, or one of the two menu buttons."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # self.running_menu = False
                self.myquit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                mx, my = pygame.mouse.get_pos()
                if self.button_menu_samplesheet.collidepoint((mx, my)):
                    # NOTE(review): bare except silently swallows any failure
                    # while reading the sample sheet.
                    try:
                        self.grid = sf.choose_samplesheet_mark_used(self.grid)
                        self.running_menu = False
                    except:
                        pass
                if self.button_start_now.collidepoint((mx, my)):
                    self.running_menu = False
    def handle_events_game(self):
        """Process in-game events: quit, and click/drag grid edits.

        Left-drag toggles cells between 0 and 10 (100 stays fixed);
        middle-drag sets cells to 100 (or clears a 100 back to 0).
        NOTE(review): the `return False` after myquit() is unreachable in
        practice because myquit() calls sys.exit().
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sf.print_indices(self.grid)
                self.myquit()
                return False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # Remember where the drag started; the rectangle is applied
                # on MOUSEBUTTONUP.
                self.start_row, self.start_column = sf.given_mouse_get_rowcolumn()
                current_value = sf.given_mouse_get_value(self.grid)
                if event.button == CLICK_BUTTON["LEFT"]:
                    self.dragging = True
                    try:
                        pass
                    except IndexError as error:
                        print(f"Out of bounds:\n{error}")
                elif event.button == CLICK_BUTTON["MIDDLE"]:
                    self.dragging = True
                    try:
                        pass
                        # if current_value != 100:
                        #     self.grid[self.start_row][self.start_column] = 100
                        # elif current_value == 100:
                        #     self.grid[self.start_row][self.start_column] = 0
                    except IndexError as error:
                        print(f"Out of bounds:\n{error}")
            elif event.type == pygame.MOUSEBUTTONUP:
                self.dragging = False
                self.end_row, self.end_column = sf.given_mouse_get_rowcolumn()
                # Normalize the drag rectangle's corners.
                try:
                    row1, row2 = sorted([self.start_row, self.end_row])
                    column1, column2 = sorted([self.start_column, self.end_column])
                except:
                    pass
                if event.button == CLICK_BUTTON["LEFT"]:
                    try:
                        for row in range(row1, row2 + 1):
                            for column in range(column1, column2 + 1):
                                try:
                                    current_value = self.grid[row][column]
                                    if current_value == 100:
                                        pass
                                    elif current_value == 10:
                                        self.grid[row][column] = 0
                                    elif current_value == 0:
                                        self.grid[row][column] = 10
                                except IndexError as error:
                                    pass
                    except:
                        pass
                elif event.button == CLICK_BUTTON["MIDDLE"]:
                    try:
                        for row in range(row1, row2 + 1):
                            for column in range(column1, column2 + 1):
                                try:
                                    current_value = self.grid[row][column]
                                    if current_value == 100:
                                        self.grid[row][column] = 0
                                    elif current_value == 10:
                                        self.grid[row][column] = 100
                                    elif current_value == 0:
                                        self.grid[row][column] = 100
                                except IndexError as error:
                                    pass
                    except:
                        pass
                        # print("Out of bounds")
            elif event.type == pygame.MOUSEMOTION:
                if self.dragging:
                    pass
    def draw_menu(self):
        """Render the title, the two buttons and their labels."""
        self.surface.fill(BLACK)
        sf.draw_text("SYL INDEX COMBINATION SOFTWARE", self.bigger_font, WHITE, self.surface, 20, 20)
        pygame.draw.rect(self.surface, RED, self.button_menu_samplesheet)
        pygame.draw.rect(self.surface, RED, self.button_start_now)
        sf.draw_text("READ SAMPLE SHEET", self.smaller_font, WHITE, self.surface, 55, 120)
        sf.draw_text("START NOW", self.smaller_font, WHITE, self.surface, 55, 220)
        pygame.display.flip()
    def draw_game(self):
        """Render the grid, the grid lines and the index labels."""
        self.surface.fill(BLACK)
        sf.draw_world_grid(self.surface, self.grid)
        sf.draw_world_lines(self.surface)
        sf.put_index_labels(self.surface, self.smaller_font)
        pygame.display.flip()
if __name__ == "__main__":
    # Construct-and-run: Game.__init__ drives both the menu and game loops.
    Game()
| true
|
292565acd11e3c3c17bb5a458bcf9ae43a7b3849
|
Python
|
FWWorks/Assn1_DSP
|
/logger.py
|
UTF-8
| 1,323
| 2.90625
| 3
|
[] |
no_license
|
import logging
import time
# Cache of already-configured loggers, keyed by their log-file path.
loggers = {}
def get_logger(log_file):
    """Return a logger that writes to *log_file* and to the console.

    The logger is configured once per file path and cached, so repeated
    calls with the same path return the same object without duplicating
    handlers.
    """
    if log_file in loggers:
        return loggers[log_file]
    logger = logging.getLogger(log_file)
    logger.setLevel(logging.INFO)
    # Shared formatter for both the file and the console handler.
    # fmt = "%(asctime)-15s %(levelname)s %(filename)s %(lineno)d %(process)d %(message)s"
    formatter = logging.Formatter(
        "%(asctime)-15s %(levelname)s %(message)s",
        "%Y-%m-%d %H:%M:%S",
    )
    for handler in (logging.FileHandler(log_file, 'w'), logging.StreamHandler()):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    loggers[log_file] = logger
    return logger
def get_time_stamp():
    """Return the current local time in two string forms.

    Returns a tuple ``(time_stamp, stamp)`` where:
      * ``time_stamp`` looks like ``2019-02-05 00:21:00.254`` (millisecond
        precision),
      * ``stamp`` is the same instant with all separators removed, e.g.
        ``20190205002100254`` (17 digits), suitable for file names.
    Both are printed as a side effect, matching the original behavior.

    Bug fix: the module previously used ``time`` without importing it,
    so this function raised NameError.
    """
    now = time.time()
    data_head = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
    # Fractional seconds, expressed as zero-padded milliseconds.
    data_secs = (now - int(now)) * 1000
    time_stamp = "%s.%03d" % (data_head, data_secs)
    print(time_stamp)
    # Strip "-", " ", ":" and "." to get the compact digits-only form.
    date_part, time_part = time_stamp.split()
    stamp = (date_part.replace("-", "") + time_part.replace(":", "")).replace(".", "")
    print(stamp)
    return time_stamp, stamp
|
3d113871439e4e9451c2976d378315d2f7e5ed99
|
Python
|
razib764/Treelib
|
/boxplots.py
|
UTF-8
| 1,095
| 3.546875
| 4
|
[] |
no_license
|
"""
The following method creates boxplots of the variance of data for the number of
nodes for 10 different birthrates with fixed maximum time over 100 iterations
per birth rate
"""
import dendropy
import matplotlib.pyplot as plt
from ete3 import Tree, TreeStyle
def box_plot():
    """Boxplot the distribution of leaf counts from simulated birth-death trees.

    For birth rates 0.2..0.9 (step 0.1; death rate is always birth - 0.1)
    simulate trees capped at max_time=5 and boxplot the leaf-count samples,
    one box per birth rate.

    NOTE(review): the file header says 100 iterations over 10 rates, but the
    loops actually run 2000 iterations over 8 rates — confirm intent.
    NOTE(review): `plot += [[num_avg]]` nests each sample list one level
    deeper than plt.boxplot expects; `plot += [num_avg]` is likely meant.
    NOTE(review): xticks lists 20 positions but only 8 groups are plotted.
    """
    birth_rate = 0.2 #initial value
    death_rate = 0.1
    rate = [] #list of rates will be populated
    plot = [] #list of a list of variances will be populated
    while birth_rate < 1:
        # round() guards against float drift from the += 0.1 increments
        birth_rate = round(birth_rate,1)
        death_rate = round(death_rate,1)
        rate += [birth_rate]
        i = 0
        num_avg = [] #leaf counts at this birth rate (2000 iterations)
        while i<2000:
            t = dendropy.model.birthdeath.birth_death_tree(birth_rate,death_rate,max_time=5)
            num_avg += [len(t.leaf_nodes())]
            i += 1
        plot += [[num_avg]]
        birth_rate += 0.1
        death_rate += 0.1
    plt.boxplot(plot)
    plt.xticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],rate)
    plt.show()
box_plot()
#variance mathematically
#
| true
|
a57234e7c510a643f5051b3f7936fec196567043
|
Python
|
Jordan-Voss/CA117-Programming2
|
/middle_011.py
|
UTF-8
| 225
| 3.453125
| 3
|
[] |
no_license
|
import sys
def mid(s):
    """Return the middle character of *s* for odd lengths, otherwise the
    message 'No middle character!' (even lengths, including empty)."""
    length = len(s)
    if length % 2 == 0:
        return 'No middle character!'
    return s[length // 2]
def main():
    # Echo the middle character (or the even-length message) per stdin line.
    for line in sys.stdin:
        ml = mid(line.strip())
        print(ml)
if __name__ == '__main__':
    main()
| true
|
f16c9c9092e0b9d828e3e7ff0e619d3dab42cde2
|
Python
|
mcgaw/psychic-garbanzo
|
/5/week1/airline_crews/airline_crews.py
|
UTF-8
| 5,727
| 3.3125
| 3
|
[] |
no_license
|
# python3
import collections
class Edge:
    """A directed edge with a capacity and the flow currently pushed on it."""
    def __init__(self, u, v, capacity):
        self.u = u                  # tail vertex
        self.v = v                  # head vertex
        self.capacity = capacity
        self.flow = 0               # edges start empty
    def __repr__(self):
        return f'<u: {self.u} v: {self.v} cap: {self.capacity} flow: {self.flow}>'
# This class implements a bit unusual scheme for storing edges of the graph,
# in order to retrieve the backward edge for a given edge quickly.
class FlowGraph:
    """Flow network that interleaves edges so each forward edge sits at an
    even index and its residual (backward) twin at the following odd index;
    hence ``edge_id ^ 1`` always addresses the paired edge."""
    def __init__(self, n):
        # Flat list of all edges (forward and backward, interleaved).
        self.edges = []
        # Per-vertex adjacency lists holding indices into self.edges.
        self.graph = [[] for _ in range(n)]
    def add_edge(self, from_, to, capacity):
        """Add a forward edge and its zero-capacity backward twin."""
        self.graph[from_].append(len(self.edges))
        self.edges.append(Edge(from_, to, capacity))
        self.graph[to].append(len(self.edges))
        self.edges.append(Edge(to, from_, 0))
    def size(self):
        """Number of vertices."""
        return len(self.graph)
    def get_ids(self, from_):
        """Edge ids incident to *from_*."""
        return self.graph[from_]
    def get_edge(self, id):
        return self.edges[id]
    def add_flow(self, id, flow):
        """Push *flow* on edge *id*; ``id ^ 1`` is always its twin."""
        self.edges[id].flow += flow
        self.edges[id ^ 1].flow -= flow
    def find_edge(self, from_, to):
        """Return the first edge from *from_* to *to*, or None."""
        for edge_id in self.graph[from_]:
            candidate = self.edges[edge_id]
            if candidate.v == to:
                return candidate
def find_augement_path(graph, from_, to):
    """BFS for an augmenting path from *from_* to *to* in the residual graph.

    Returns the path as a list of edge ids, or None when the sink is
    unreachable. (The "augement" spelling is kept for existing callers.)
    Relies on FlowGraph's layout: a backward edge always sits at
    forward-edge-id + 1, so ``graph.edges[idx - 1]`` is the forward twin of
    any zero-capacity edge.
    """
    q = collections.deque()
    visited = [False] * len(graph.graph)
    augmented_path = None
    q.appendleft((from_, []))
    while len(q) > 0:
        (vert, path) = q.pop()
        if vert == to:
            augmented_path = path
            break
        if visited[vert]:
            continue
        visited[vert] = True
        for idx in graph.graph[vert]:
            edge = graph.edges[idx]
            # skip self-loops
            if edge.u == edge.v:
                continue
            # skip forward edges whose flow is maxed out
            if edge.capacity == edge.flow and edge.capacity != 0:
                continue
            # check for 'reverse edge' with no flow to reverse
            if edge.capacity == 0 and graph.edges[idx-1].flow == 0:
                continue
            # extend the path with a copy so queued paths stay independent
            node = (edge.v, list(path))
            node[1].append(idx)
            q.appendleft(node)
    return augmented_path
def augment_flow(graph, path):
    """Return the bottleneck capacity along *path* (a list of edge ids)."""
    bottleneck = float('Inf')
    for edge_id in path:
        edge = graph.edges[edge_id]
        if edge.capacity == 0:
            # Residual edge: we can only undo flow on its forward twin.
            headroom = graph.edges[edge_id - 1].flow
        else:
            headroom = edge.capacity - edge.flow
        bottleneck = min(bottleneck, headroom)
    return bottleneck
def update_augmented(graph, path, flow):
    """Apply *flow* units along *path*, keeping forward edges in sync.

    Traversing a zero-capacity (residual) edge cancels flow on its forward
    twin; traversing a forward edge adds flow and mirrors the new amount
    onto its backward twin.
    """
    for edge_id in path:
        edge = graph.edges[edge_id]
        if edge.capacity == 0:
            # Residual edge: subtract from the forward twin at id - 1.
            forward = graph.edges[edge_id - 1]
            forward.flow -= flow
        else:
            edge.flow += flow
            graph.edges[edge_id + 1].flow = edge.flow
def max_flow(graph, from_, to):
    """Ford-Fulkerson: push flow along augmenting paths until none remain,
    then return the total flow leaving the source *from_*."""
    while True:
        path = find_augement_path(graph, from_, to)
        if path is None or not path:
            break
        bottleneck = augment_flow(graph, path)
        # An augmenting path must always admit positive flow.
        assert bottleneck != 0
        update_augmented(graph, path, bottleneck)
    # Total flow = sum of flow on the source's outgoing (forward) edges.
    return sum(graph.edges[edge_id].flow for edge_id in graph.graph[from_])
def airline_crews(num_flight, num_crews, adj_matrix):
    """Match flights to crews via max flow on a bipartite network.

    :param num_flight: number of flights (rows of adj_matrix)
    :param num_crews: number of crews (columns of adj_matrix)
    :param adj_matrix: adj_matrix[i][j] is 1 iff crew j can work flight i
    :return: list of length num_flight; entry i is the 1-based crew index
             assigned to flight i, or -1 when the flight is unassigned

    Bug fixes: the body previously read the module-level globals ``n`` and
    ``m`` instead of its parameters, so it only worked when called from this
    script's __main__ block; and the pairing scan could match a residual
    edge back to the source (those also end up with flow == 1 here), so it
    now only considers capacity-1 forward edges.
    """
    # Node layout: 0 = source, 1..num_flight = flights,
    # num_flight+1..num_flight+num_crews = crews, last node = sink.
    num_nodes = 2 + num_flight + num_crews
    graph = FlowGraph(num_nodes)
    crew_offset = 1 + num_flight
    sink_node = num_nodes - 1
    for row_idx, row in enumerate(adj_matrix):
        flight_node = 1 + row_idx
        graph.add_edge(0, flight_node, 1)
        for col_idx, edge in enumerate(row):
            crew_node = col_idx + crew_offset
            if edge == 0:
                continue
            # don't add duplicate edges
            if graph.find_edge(crew_node, sink_node) is None:
                graph.add_edge(crew_node, sink_node, 1)
            if graph.find_edge(flight_node, crew_node) is None:
                graph.add_edge(flight_node, crew_node, 1)
    # Saturate the network; assignments are read off the edge flows below.
    max_flow(graph, 0, sink_node)
    # A forward flight->crew edge carrying flow marks an assignment.
    pairings = []
    for node in range(1, 1 + num_flight):
        for edge_idx in graph.graph[node]:
            edge = graph.edges[edge_idx]
            if edge.capacity == 1 and edge.flow == 1:
                pairings.append(edge.v - crew_offset + 1)
                break
        else:
            pairings.append(-1)
    return pairings
if __name__ == '__main__':
    # stdin: "n m" then an n-row 0/1 compatibility matrix with m columns.
    n, m = map(int, input().split())
    adj_matrix = [list(map(int, input().split())) for i in range(n)]
    pairings = airline_crews(n, m, adj_matrix)
    print(' '.join([str(pairing) for pairing in pairings]))
| true
|
9d54f7ac06ed620f9e8d26138de3d1dcd878cad7
|
Python
|
Derek-Cartwright-Jr/Data-Structures-And-Algorithms
|
/Linear_Structures/linkedlist.py
|
UTF-8
| 4,714
| 4.25
| 4
|
[] |
no_license
|
class Node:
    """Singly-linked-list node holding a payload and a next pointer."""
    def __init__(self, data):
        self.data = data
        self.next = None
    def __repr__(self):
        return str(self.data)
    def get_data(self):
        """Return the payload stored in this node."""
        return self.data
    def get_next(self):
        """Return the following node, or None at the tail."""
        return self.next
    def set_next(self, node):
        """Link this node to *node*."""
        self.next = node
class LinkedList:
    """Singly linked list with head insertion, search, reverse and merge sort.

    Fixes over the previous version: ``append`` no longer crashes on an
    empty list and keeps ``length`` in sync; ``pop`` handles empty and
    single-element lists; ``search`` no longer raises on a miss;
    ``__contains__`` returns a real boolean (it used to return the always-
    truthy ``(found, data)`` tuple); ``sort`` actually performs the merge
    sort (it previously called the non-existent ``merge`` method, discarded
    the recursive results, and ``_merge`` had a typo and broken ordering);
    the mutable default argument is gone.
    """
    def __init__(self, iterator=()):
        # Items are head-inserted, so they end up in reverse iterator order
        # (unchanged behavior).
        self.head = None
        self.length = 0
        for item in iterator:
            self.add_head(item)
    def __len__(self):
        """Number of elements (kept in sync by all mutators)."""
        return self.length
    def __contains__(self, data):
        """True iff *data* is in the list."""
        return self.search(data)[0]
    def __repr__(self):
        current, items = self.head, []
        while current:
            items.append(str(current))
            current = current.get_next()
        return '->'.join(items)
    def get_head(self):
        """Return the first node (None when empty)."""
        return self.head
    def get_tail(self):
        """Return the last node (None when empty)."""
        current = self.head
        while current and current.get_next():
            current = current.get_next()
        return current
    def add_head(self, data):
        """Insert *data* at the front of the list."""
        new_node = Node(data)
        new_node.set_next(self.head)
        self.head = new_node
        self.length += 1
    def append(self, data):
        """Insert *data* at the end of the list (works on an empty list)."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        else:
            current = self.head
            while current.get_next():
                current = current.get_next()
            current.set_next(new_node)
        self.length += 1
    def pop(self):
        """Remove the last element; returns a message on an empty list."""
        if self.head is None:
            return "No elements in the list"
        if self.head.get_next() is None:
            # Single element: the head itself is the tail.
            self.head = None
            self.length -= 1
            return
        prev = None
        current = self.head
        while current.get_next():
            prev = current
            current = current.get_next()
        prev.set_next(None)
        self.length -= 1
    def remove(self, data):
        """Remove the first node holding *data*, if present."""
        if len(self) == 0:
            return "No elements in the list"
        prev = None
        current = self.head
        found = False
        while current and not found:
            if current.get_data() == data:
                found = True
            else:
                prev = current
                current = current.next
        if found:
            self.length -= 1
            if prev is None:
                self.head = self.head.get_next()
            else:
                prev.next = current.get_next()
    def search(self, data):
        """Return (found, value); value is None when *data* is absent."""
        current = self.head
        while current:
            if current.get_data() == data:
                return True, current.get_data()
            current = current.get_next()
        return False, None
    def reverse(self):
        """Reverse the list in place and return the new head."""
        prev = None
        current = self.head
        while current:
            current.next, current, prev = prev, current.next, current
        self.head = prev
        return self.head
    def sort(self):
        """Return a LinkedList of this list's items in ascending order."""
        if self.head is None or len(self) == 1:
            return self
        left, right = self._split()
        return self._merge(left.sort(), right.sort())
    def _merge(self, left, right):
        """Merge two sorted lists into a new sorted LinkedList."""
        result = LinkedList()
        node_l, node_r = left.head, right.head
        while node_l and node_r:
            if node_l.get_data() <= node_r.get_data():
                result.add_head(node_l.get_data())
                node_l = node_l.get_next()
            else:
                result.add_head(node_r.get_data())
                node_r = node_r.get_next()
        while node_l:
            result.add_head(node_l.get_data())
            node_l = node_l.get_next()
        while node_r:
            result.add_head(node_r.get_data())
            node_r = node_r.get_next()
        # Items were head-inserted, so flip them into ascending order.
        result.reverse()
        return result
    def _split(self):
        """Split into (front half, back half), preserving element order."""
        middle = len(self) // 2
        left_list = LinkedList()
        right_list = LinkedList()
        current = self.head
        count = 0
        while count < middle:
            left_list.add_head(current.get_data())
            current = current.get_next()
            count += 1
        while current:
            right_list.add_head(current.get_data())
            current = current.get_next()
        # add_head reversed each half; restore original order.
        left_list.reverse()
        right_list.reverse()
        return left_list, right_list
""" Running some tests """
def test():
L = LinkedList(range(0, 100, 10))
print(L)
print(len(L)) # testing __len__ function
L.pop() # testing pop method
print(L)
L.add_head(23) # testing add_head method
print(23 in L) # testing __contains__
print(L.search(23)) # testing search method
print(L)
L.remove(23) # testing remove method
print(L)
L.reverse() # testing reverse method
print(L)
print(L.get_head()) # testing get_head method
print(L.get_tail()) # testing get_tail method
L.append(25) # testing append method
print(L)
def main():
    # Entry point: just run the smoke tests.
    test()
if __name__ == "__main__":
    main()
| true
|
70621613684910fc55dfc56083f4df501a4d21fc
|
Python
|
siva6160/num
|
/becomde2-5.py
|
UTF-8
| 328
| 3.625
| 4
|
[] |
no_license
|
import math
def isarmstrong(num):
    """Print whether *num* equals the sum of the cubes of its digits.

    Prints "Arm Strong Number" or "Not Arm Strong Number" — the same output
    as before. Fixed to loop once per digit instead of once per unit of
    *num* (the old ``for i in range(1, num+1)`` did O(num) iterations), and
    to use integer arithmetic instead of ``math.pow`` floats.
    """
    total = 0
    remaining = num
    while remaining > 0:
        digit = remaining % 10
        total += digit ** 3
        remaining //= 10
    if num == total:
        print("Arm Strong Number")
    else:
        print("Not Arm Strong Number")
# Read an integer from stdin and classify it.
num=int(input(""))
isarmstrong(num)
| true
|
d0d6ea080b212df22c07b4e2ac3cf67354a4db98
|
Python
|
icicleling/learnPython
|
/10io/jsonStudent.py
|
UTF-8
| 372
| 3.21875
| 3
|
[] |
no_license
|
import json
class Student(object):
    """Plain record of a student's name, age and score."""
    def __init__(self, name, age, score):
        self.name = name
        self.age = age
        self.score = score
def dict2student(d):
    """Rebuild a Student from the dict produced by JSON deserialization."""
    return Student(name=d['name'], age=d['age'], score=d['score'])
# Round-trip demo: serialize via the instance __dict__, then restore a
# Student with the object_hook.
s =Student('Bob',20,88)
s_dumps=json.dumps(s,default=lambda obj:obj.__dict__)
print(s_dumps)
print(json.loads(s_dumps, object_hook=dict2student))
| true
|
ba5daf41503020944a27f117f70fc4088c5678d3
|
Python
|
Zhenye-Na/leetcode
|
/python/186.reverse-words-in-a-string-ii.py
|
UTF-8
| 1,928
| 4.375
| 4
|
[
"MIT"
] |
permissive
|
# 186. Reverse Words in a String II
# Description
# Given an input character array, reverse the array word by word.
# A word is defined as a sequence of non-space characters.
# The input character array does not contain leading or trailing spaces
# and the words are always separated by a single space.
# Example
# Example1
# Input: s = "the sky is blue"
# Output: "blue is sky the"
# Example2
# Input: "a b c"
# Output: "c b a"
# Challenge
# Could you do it in-place without allocating extra space?
class Solution:
    """
    @param str: a string
    @return: return a string

    In-place (O(1) extra space over the char buffer) solution: reverse each
    maximal run of letters, then reverse the whole buffer — the double
    reversal restores the letters but flips the word order.
    """
    def reverseWords(self, string):
        chars = list(string)
        n = len(chars)
        def _flip(lo, hi):
            # Reverse chars[lo..hi] in place.
            while lo < hi:
                chars[lo], chars[hi] = chars[hi], chars[lo]
                lo += 1
                hi -= 1
        # First pass: reverse every maximal run of letters.
        pos = 0
        while pos < n:
            # Skip separators to the start of the next word.
            while pos < n and not chars[pos].isalpha():
                pos += 1
            end = pos
            while end < n and chars[end].isalpha():
                end += 1
            _flip(pos, end - 1)
            pos = end
        # Second pass: reverse the entire buffer.
        _flip(0, n - 1)
        return "".join(chars)
class Solution:
    """
    @param str: a string
    @return: return a string

    Split-based solution: reverse each space-separated token, then reverse
    the whole joined string; the double reversal flips only the word order.
    """
    def reverseWords(self, str):
        flipped_tokens = [token[::-1] for token in str.split(" ")]
        return " ".join(flipped_tokens)[::-1]
| true
|
4e1e1c1ecdf4c3fd5c083da8094a11fa2fed337c
|
Python
|
gkahn13/gcg-old
|
/src/gcg/policies/tf/bnn/probabilistic_backprop/network_layer.py
|
UTF-8
| 3,106
| 3
| 3
|
[] |
no_license
|
import math
class Network_layer:
    """One probabilistic-backprop network layer with Gaussian weights.

    Holds the mean (m_w) and variance (v_w) of each weight plus a point
    estimate (w), and propagates input means/variances through the linear
    map and an optional ReLU moment-matching step.

    NOTE(review): this file only imports ``math`` — the ``theano`` and
    ``tf`` names used below are undefined here, and the code mixes Theano
    shared variables with a ``tf.*`` API, so it cannot run as-is.
    """
    def __init__(self, m_w_init, v_w_init, non_linear = True):
        # We create the theano variables for the means and variances
        self.m_w = theano.shared(value = m_w_init.astype(theano.config.floatX),
            name='m_w', borrow = True)
        self.v_w = theano.shared(value = v_w_init.astype(theano.config.floatX),
            name='v_w', borrow = True)
        self.w = theano.shared(value = m_w_init.astype(theano.config.floatX),
            name='w', borrow = True)
        # We store the type of activation function
        self.non_linear = non_linear
        # We store the number of inputs
        self.n_inputs = theano.shared(float(m_w_init.shape[ 1 ]))
    @staticmethod
    def n_pdf(x):
        # Standard normal probability density function.
        return 1.0 / tf.sqrt(2 * math.pi) * tf.exp(-0.5 * x**2)
    @staticmethod
    def n_cdf(x):
        # Standard normal cumulative distribution function (via erf).
        return 0.5 * (1.0 + tf.erf(x / tf.sqrt(2.0)))
    @staticmethod
    def gamma(x):
        # Ratio n_pdf(x) / n_cdf(-x) — an inverse-Mills-type term.
        return Network_layer.n_pdf(x) / Network_layer.n_cdf(-x)
    @staticmethod
    def beta(x):
        return Network_layer.gamma(x) * (Network_layer.gamma(x) - x)
    def output_probabilistic(self, m_w_previous, v_w_previous):
        """Propagate input means/variances through the layer (moment matching)."""
        # We add an additional deterministic input with mean 1 and variance 0
        m_w_previous_with_bias = \
            tf.concatenate([ m_w_previous, tf.alloc(1, 1) ], 0)
        v_w_previous_with_bias = \
            tf.concatenate([ v_w_previous, tf.alloc(0, 1) ], 0)
        # We compute the mean and variance after the linear operation
        m_linear = tf.dot(self.m_w, m_w_previous_with_bias) / tf.sqrt(self.n_inputs)
        v_linear = (tf.dot(self.v_w, v_w_previous_with_bias) + \
            tf.dot(self.m_w**2, v_w_previous_with_bias) + \
            tf.dot(self.v_w, m_w_previous_with_bias**2)) / self.n_inputs
        if (self.non_linear):
            # We compute the mean and variance after the ReLU activation
            alpha = m_linear / tf.sqrt(v_linear)
            gamma = Network_layer.gamma(-alpha)
            # Series approximation of gamma, used when -alpha is large to
            # avoid numerical blow-up.
            gamma_robust = -alpha - 1.0 / alpha + 2.0 / alpha**3
            gamma_final = tf.switch(tf.lt(-alpha, tf.fill(alpha, 30)), gamma, gamma_robust)
            v_aux = m_linear + tf.sqrt(v_linear) * gamma_final
            m_a = Network_layer.n_cdf(alpha) * v_aux
            v_a = m_a * v_aux * Network_layer.n_cdf(-alpha) + \
                Network_layer.n_cdf(alpha) * v_linear * \
                (1 - gamma_final * (gamma_final + alpha))
            return (m_a, v_a)
        else:
            return (m_linear, v_linear)
    def output_deterministic(self, output_previous):
        """Propagate a point estimate through the layer (no variances)."""
        # We add an additional input with value 1
        output_previous_with_bias = \
            tf.concatenate([ output_previous, tf.alloc(1, 1) ], 0) / \
            tf.sqrt(self.n_inputs)
        # We compute the mean and variance after the linear operation
        a = tf.dot(self.w, output_previous_with_bias)
        if (self.non_linear):
            # We compute the ReLU activation
            a = tf.switch(tf.lt(a, tf.fill(a, 0)), tf.fill(a, 0), a)
        return a
| true
|
781d235d7e1bbf606de6cd754461ccb0e55c4251
|
Python
|
shoma0987/At_coder_code
|
/atc176/D_176.py
|
UTF-8
| 49
| 3.1875
| 3
|
[] |
no_license
|
# Print the first two entries of the sample list (4 and 5).
a = [4,5,6]
for value in a[:2]:
    print(value)
| true
|
ad7144966587513d0b2b9fdfcf4193d7ccca8701
|
Python
|
bdwilliamson/spvim_supplementary
|
/code/sims/run_shapley_sim_once.py
|
UTF-8
| 9,762
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# run through simulation one time
def do_one(n_train, n_test, p, m, measure_type, binary, gamma, cor, V, conditional_mean = "nonlinear", estimator_type = "tree"):
    """
    Run the simulation one time for a given set of parameters.

    @param n_train: training-set sample size
    @param n_test: test-set sample size
    @param p: dimension (number of features)
    @param m: number of subsets to sample for SGD.
              NOTE(review): unused — the body sizes its subset sample with
              gamma * n_train instead. Confirm and remove or wire up.
    @param measure_type: variable importance measure ("r_squared" selects
        R^2 / reg:linear; anything else selects AUC / binary:logistic)
    @param binary: is the outcome binary?
    @param gamma: the constant multiplied by n for sampling subsets
    @param cor: the correlation (only used if p > 10)
    @param V: folds for cross-fitting
    @param conditional_mean: type of conditional mean ("linear" or "nonlinear")
    @param estimator_type: the type of estimator to fit ("tree" for boosted
        stumps, otherwise a linear model)
    @return a 7-tuple:
        shapley_vals: the estimated Shapley values (clipped to [0, 1])
        shapley_ics: the influence curves for the Shapley values
        shap_values: prediction-level SHAP values on the test set
        num_subsets: number of unique subsets sampled (Z.shape[0])
        v: the (non-negative) measures of predictiveness
        p_values: p-values of the Shapley hypothesis tests
        hyp_tests: hypothesis test decisions
    """
    # import standard libraries
    import numpy as np
    from xgboost import XGBRegressor
    from sklearn.linear_model import LinearRegression
    import shap
    from sklearn.model_selection import GridSearchCV
    from warnings import warn
    # import user-defined functions
    import data_generator as dg
    import measures_of_predictiveness as mp
    import utils as uts
    import get_influence_functions as gif
    import compute_ic as ci
    import get_shapley_value as gsv
    import shapley_hyp_test as sht
    # generate data
    if conditional_mean == "nonlinear":
        if binary:
            func_name = "ten_variable_binary_conditional_mean"
        else:
            func_name = "ten_variable_continuous_conditional_mean"
    else:
        func_name = "lm_conditional_mean"
    beta = np.array([1, 0, 1.2, 0, 1.05, 0] + [0] * (p - 6))
    if measure_type == "r_squared":
        measure_func = mp.r_squared
        objective_function = 'reg:linear'
    else:
        measure_func = mp.auc
        objective_function = 'binary:logistic'
    data_gen = dg.DataGenerator(func_name, n_train, n_test, p, binary, beta, cor)
    draw = data_gen.create_data()
    # split the training data 25/75: draw_0 for the null predictiveness,
    # draw_1 for estimating the Shapley values
    folds_outer = np.random.choice(a = np.arange(2), size = draw.y_train.shape[0], replace = True, p = np.array([0.25, 0.75]))
    draw_0 = dg.Dataset(x_train = draw.x_train[folds_outer == 0, :], y_train = draw.y_train[folds_outer == 0], x_test = None, y_test = None)
    draw_1 = dg.Dataset(x_train = draw.x_train[folds_outer == 1, :], y_train = draw.y_train[folds_outer == 1], x_test = None, y_test = None)
    # set up args for xgboost
    # use the cross-validated selector to get the number of trees
    ntrees_tree = np.array([50, 100, 250, 500, 1000, 1500, 2000, 2500, 3000])
    lambdas_tree = np.array([1e-3, 1e-2, 1e-1, 1, 5, 10])
    param_grid_tree = [{'n_estimators': ntrees_tree, 'reg_alpha': lambdas_tree}]
    # estimate full regression
    if estimator_type == "tree":
        cv_tree = GridSearchCV(XGBRegressor(objective = objective_function, max_depth = 1, verbosity = 0, learning_rate = 1e-2, reg_lambda = 0), param_grid = param_grid_tree, cv = 5)
        cv_tree.fit(draw.x_train, np.ravel(draw.y_train))
        ensemble_tree = XGBRegressor(objective = objective_function, max_depth = 1, verbosity = 0, reg_lambda = 0, learning_rate = 1e-2, n_estimators = cv_tree.best_params_['n_estimators'], reg_alpha = cv_tree.best_params_['reg_alpha'])
        ensemble = ensemble_tree
        print("Num. est. in boosted tree: " + str(cv_tree.best_params_['n_estimators']))
    else:
        ensemble = LinearRegression(fit_intercept = False)
    # get a list of n subset sizes, Ss, Zs
    max_subset = np.array(list(range(p)))
    sampling_weights = np.append(np.append(1, [uts.choose(p - 2, s - 1) ** (-1) for s in range(1, p)]), 1)
    subset_sizes = np.random.choice(np.arange(0, p + 1), p = sampling_weights / sum(sampling_weights), size = draw.x_train.shape[0] * gamma, replace = True)
    S_lst_all = [np.sort(np.random.choice(np.arange(0, p), subset_size, replace = False)) for subset_size in list(np.sort(subset_sizes))]
    # only need to continue with the unique subsets S
    Z_lst_all = [np.in1d(max_subset, S).astype(np.float64) for S in S_lst_all]
    Z, z_counts = np.unique(np.array(Z_lst_all), axis = 0, return_counts = True)
    Z_lst = list(Z)
    Z_aug_lst = [np.append(1, z) for z in Z_lst]
    S_lst = [max_subset[z.astype(bool).tolist()] for z in Z_lst]
    if estimator_type == "tree":
        # tune hyperparameters once per distinct subset *size* and reuse
        # them for every subset of that size
        cv_tree_small = GridSearchCV(XGBRegressor(objective = objective_function, max_depth = 1, verbosity = 0, learning_rate = 1e-2, reg_lambda = 0), param_grid = param_grid_tree, cv = 5)
        all_s_sizes = [len(s) for s in S_lst[1:]]
        s_sizes = np.unique(all_s_sizes)
        all_best_tree_lst = [None] * len(S_lst[1:])
        all_best_lambda_lst = [None] * len(S_lst[1:])
        for i in range(s_sizes.shape[0]):
            indx = all_s_sizes.index(s_sizes[i])
            this_s = S_lst[1:][indx]
            cc_i = (np.sum(np.isnan(draw_1.x_train[:, this_s]), axis = 1) == 0)
            these_best_params = cv_tree_small.fit(draw_1.x_train[:, this_s][cc_i, :], np.ravel(draw_1.y_train[cc_i])).best_params_
            all_indices = [index for index, value in enumerate(all_s_sizes) if value == s_sizes[i]]
            all_best_tree_lst = [these_best_params['n_estimators'] if x in all_indices else all_best_tree_lst[x] for x in range(len(all_best_tree_lst))]
            all_best_lambda_lst = [these_best_params['reg_alpha'] if x in all_indices else all_best_lambda_lst[x] for x in range(len(all_best_lambda_lst))]
        ensemble_funcs = [XGBRegressor(objective = objective_function, max_depth = 1, verbosity = 0, reg_lambda = 0, reg_alpha = all_best_lambda_lst[i], learning_rate = 1e-2, n_estimators = all_best_tree_lst[i]) for i in range(len(all_best_tree_lst))]
    else:
        ensemble_funcs = [ensemble for i in range(len(S_lst[1:]))]
    # get v, preds, ic for each unique S
    preds_none = np.repeat(np.mean(draw_1.y_train), draw_1.x_train.shape[0])
    v_none = measure_func(draw_1.y_train, preds_none)
    ic_none = ci.compute_ic(draw_1.y_train, preds_none, measure_type)
    # get v, preds, ic for the remaining non-null groups
    v_lst, preds_lst, ic_lst, folds = zip(*(mp.cv_predictiveness(draw_1, S_lst[1:][i], measure_func, ensemble_funcs[i], V = V, stratified = binary, na_rm = False) for i in range(len(S_lst[1:]))))
    v_lst_all = [v_none] + list(v_lst)
    ic_lst_all = [ic_none] + list(ic_lst)
    # set up Z, v, W, G, c_n matrices
    Z = np.array(Z_aug_lst)
    # constrain v >= 0
    v = np.maximum(np.array(v_lst_all), 0)
    W = np.diag(z_counts / np.sum(z_counts))
    G = np.vstack((np.append(1, np.zeros(p)), np.ones(p + 1) - np.append(1, np.zeros(p))))
    c_n = np.array([v_none, v_lst_all[len(v_lst)] - v_none])
    # do constrained least squares (via the KKT system for the weighted LS
    # problem with equality constraints G theta = c_n)
    A_W = np.sqrt(W).dot(Z)
    v_W = np.sqrt(W).dot(v)
    kkt_matrix = uts.create_kkt_matrix(A_W, G)
    ls_matrix = np.vstack((2 * A_W.transpose().dot(v_W.reshape((len(v_W), 1))), c_n.reshape((c_n.shape[0], 1))))
    ls_solution = np.linalg.pinv(kkt_matrix).dot(ls_matrix)
    shapley_vals = ls_solution[0:(p + 1), :]
    # get relevant objects
    shapley_ics = gif.shapley_influence_function(Z, z_counts, W, v, shapley_vals, G, c_n, np.array(ic_lst_all), measure_func.__name__)
    # if any shapley values are < 0, make zero and print a warning
    if any(shapley_vals < 0):
        if any(shapley_vals[1:] < 0):
            warn("At least one estimated shapley value is < 0. Setting to 0.")
        shapley_vals = np.maximum(shapley_vals, 0)
    if any(shapley_vals > 1):
        if any(shapley_vals[1:] > 1):
            warn("At least one estimated shapley value is > 1. Setting to 1.")
        shapley_vals = np.minimum(shapley_vals, 1)
    # do hypothesis test
    # get the null predictiveness on a separate split
    preds_none_0 = np.repeat(np.mean(draw_0.y_train), draw_0.x_train.shape[0])
    v_none_0 = measure_func(draw_0.y_train, preds_none_0)
    ic_none_0 = ci.compute_ic(draw_0.y_train, preds_none_0, measure_type)
    sigma_none_0 = np.sqrt(np.mean((ic_none_0) ** 2)) / np.sqrt(np.sum(draw_0.y_train.shape[0]))
    # get the shapley values + null predictiveness on the first split
    shapley_vals_plus = shapley_vals + shapley_vals[0]
    sigmas_one = [np.sqrt(gsv.shapley_se(shapley_ics, i, gamma) ** 2 + sigma_none_0 ** 2) for i in range(1, p + 1)]
    test_statistics, p_values, hyp_tests = sht.shapley_hyp_test(shapley_vals_plus[1:], v_none_0, sigmas_one, sigma_none_0, level = 0.05, p = p)
    # get variable importance using SHAP values
    if estimator_type == "tree":
        mod = XGBRegressor(objective = objective_function, learning_rate = 1e-2, reg_lambda = 0, max_depth = 1, n_estimators = cv_tree.best_params_['n_estimators'], reg_alpha = cv_tree.best_params_['reg_alpha'], verbosity = 0)
        mod.fit(draw.x_train, draw.y_train)
        explainer = shap.TreeExplainer(mod)
    else:
        mod = LinearRegression(fit_intercept = False)
        mod.fit(draw.x_train, draw.y_train)
        explainer = shap.LinearExplainer((np.ravel(mod.coef_), 0), draw.x_train, feature_dependence = 'correlation', nsamples = 500)
    shap_values = explainer.shap_values(draw.x_test)
    # return the population shapley values and averaged prediction-level shapley values
    return shapley_vals, shapley_ics, shap_values, Z.shape[0], v, p_values, hyp_tests
| true
|
47d21df3bce23c51524b9bf9d0ee55003a285236
|
Python
|
seanchoi/algorithms
|
/CyclicRotation/cyclicRotation.py
|
UTF-8
| 1,030
| 4.53125
| 5
|
[] |
no_license
|
"""
An array A consisting of N integers is given. Rotation of the array means
that each element is shifted right by one index, and the last element of the array
is moved to the first place. For example, the rotation of array A = [3, 8, 9, 7, 6]
is [6, 3, 8, 9, 7] (elements are shifted right by one index and 6 is moved to the first place).
For example, given
A = [3, 8, 9, 7, 6]
K = 3
the function should return [9, 7, 6, 3, 8]. Three rotations were made:
[3, 8, 9, 7, 6] -> [6, 3, 8, 9, 7]
[6, 3, 8, 9, 7] -> [7, 6, 3, 8, 9]
[7, 6, 3, 8, 9] -> [9, 7, 6, 3, 8]
For another example, given
A = [0, 0, 0]
K = 1
the function should return [0, 0, 0]
"""
def cyclicRotation(a, k = 0):
    """Rotate list ``a`` to the right by ``k`` positions; print and return it.

    BUG FIX: the original collected the popped tail elements in the wrong
    order (e.g. [3,8,9,7,6], k=3 produced [6,7,9,3,8] instead of the
    documented [9,7,6,3,8]) and mis-handled k >= len(a).

    :param a: list to rotate (left unmodified)
    :param k: number of right-rotation steps (k % len(a) is used)
    :return: the rotated list, or None for an empty input (nothing printed,
             matching the original behaviour for []).
    """
    if a == []:
        return
    k %= len(a)  # rotating by a multiple of len(a) is a no-op
    rotated = a[-k:] + a[:-k]
    print(rotated)
    return rotated


cyclicRotation([1,2,3,4], 4)
cyclicRotation([0,0,0], 1)
cyclicRotation([3,8,9,7,6], 3)
cyclicRotation([], 0)
| true
|
3ef6273e4a25c6f76b68f90ac77abc6ef7e477f9
|
Python
|
psydok/booka_db
|
/src/connection.py
|
UTF-8
| 259
| 2.578125
| 3
|
[] |
no_license
|
import sqlite3
class _Connection(object):
"""Подключение БД"""
def __init__(self):
self.conn = sqlite3.connect("test_booka01.db") # или :memory: чтобы сохранить в RAM
self.cursor = self.conn.cursor()
| true
|
1577c3d178a8cce10e2aa29375df719829f1f61b
|
Python
|
mahlikag/Capstone_Code_Final
|
/Cleaning/cluster.py
|
UTF-8
| 2,608
| 3.34375
| 3
|
[] |
no_license
|
"""importing the required packages"""
import pandas as pd
from pylab import rcParams
import seaborn as sb
import matplotlib.pyplot as plt
import numpy as np
import math as m
import sklearn
from sklearn.cluster import DBSCAN
from collections import Counter
"""this function creates the clusters from DBSCAN"""
def creating_clusters(filename):
    """Run DBSCAN per year on city clearance-rate data and collect plot inputs.

    :param filename: path to an Excel workbook with one sheet per year
                     ('2007' .. '2016')
    :return: dict with keys 'colors' (cluster labels), 'x' (populations),
             'y' (average clearance rates), 'location' ("City, State"
             strings) -- one list entry per year.
    """
    years = ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016']
    x = []
    y = []
    location = []
    colors = []
    # Names of the three clearance-rate columns that get averaged per city.
    rate_cols = ['Murder/Nonnegligent Manslaughter Clearance Rate',
                 'Aggravated Assault Clearance Rate',
                 'Robbery Clearance Rate']
    for yr in years:
        # BUG FIX: the original used sheet_name=eval('yr'); eval is unnecessary
        # (and a bad habit) -- the loop variable already is the sheet name.
        df = pd.read_excel(filename, sheet_name=yr)
        table = df.iloc[:, [0, 1, 2, 9, 13, 17]]
        # Population plus the three clearance rates, dropping incomplete rows.
        data = df.iloc[:, [0, 9, 13, 17]].dropna()
        # Row numbers are kept as a feature column because the original fitted
        # DBSCAN on them; preserved so clustering output stays identical.
        data['row_num'] = np.arange(len(data))
        # Vectorized replacement for the original O(n^2) per-row .loc lookup
        # loop; same (a + b + c) / 3 arithmetic applied column-wise.
        data['average'] = (data[rate_cols[0]] + data[rate_cols[1]] + data[rate_cols[2]]) / 3
        # The DBSCAN model (eps chosen on the population scale).
        model = DBSCAN(eps=15000, min_samples=3).fit(data)
        print(yr + 's model is: ')
        print(model)
        print()
        outliers_df = pd.DataFrame(data)
        # The groups of clusters
        print(yr + 's grouping is: ')
        print(Counter(model.labels_))
        print()
        t = table.dropna()
        print('The outlier cities for ' + yr + ' are: ')
        print()
        # DBSCAN labels noise points as -1; print the city name for each one.
        for pop in outliers_df[model.labels_ == -1]['Total Population']:
            print(t.loc[data['Total Population'] == pop]['City'].to_string())
        # Stash x/y values, labels and locations for the later plots.
        colors.append(model.labels_)
        x.append(data.iloc[:, 0])
        y.append(data.iloc[:, 5])
        location.append(t.iloc[:, 1] + ", " + t.iloc[:, 2])
        print()
    return {'colors': colors, 'x': x, 'y': y, 'location': location}
| true
|
5fb8328d54b6eb0c2e619065871a2e2289df7b97
|
Python
|
KomissarovSemyon/Course_work_database
|
/schema/init.py
|
UTF-8
| 1,184
| 2.78125
| 3
|
[] |
no_license
|
import json
from collections import defaultdict
import psycopg2
import sys
def load_country_tuples(en_path="countries_en.json", ru_path="countries_ru.json"):
    """Yield (country_code, name_ru, name_en) triples merged from two files.

    Each JSON file must hold a list of [code, name] pairs.  A code present in
    only one file yields None for the missing language.  The file paths are
    now parameters (with the original defaults) so callers can point the
    loader elsewhere; encoding is pinned to UTF-8 for portability.
    """
    with open(en_path, encoding="utf-8") as f:
        en = json.load(f)
    with open(ru_path, encoding="utf-8") as f:
        ru = json.load(f)

    merged = defaultdict(lambda: [None, None])
    for entry in ru:
        merged[entry[0]][0] = entry[1]  # Russian name in slot 0
    for entry in en:
        merged[entry[0]][1] = entry[1]  # English name in slot 1

    for code, names in merged.items():
        yield code, names[0], names[1]
# Open connection: the single CLI argument is a libpq connection string.
if len(sys.argv) != 2:
    print('usage: {} pg-conn'.format(sys.argv[0]))
    exit(1)

conn = psycopg2.connect(sys.argv[1])
cur = conn.cursor()

# Initialize Schema: kino.sql is executed as one script on the new cursor.
with open('kino.sql') as f:
    cur.execute(f.read())

# Fill Timezones straight from PostgreSQL's own catalog view; rows already
# present are skipped via the named unique constraint.
cur.execute('''
INSERT INTO timezones (name, utc_offset)
SELECT name, utc_offset
FROM pg_timezone_names
ON CONFLICT ON CONSTRAINT timezones_name_key DO NOTHING
''')

# Fill countries: each (code, ru, en) tuple is escaped server-side with
# cursor.mogrify before being joined into the VALUES list, so the string
# interpolation below is injection-safe.
cur.execute('''
INSERT INTO countries (country_code, name_ru, name_en)
VALUES
{}
ON CONFLICT (country_code) DO NOTHING
'''.format(
    ', '.join(
        cur.mogrify("(%s,%s,%s)", c).decode() for c in load_country_tuples())
    )
)

# Close connection gracefully: commit first, then release cursor and connection.
conn.commit()
cur.close()
conn.close()
| true
|
a86ddcaa52e65a24f81a68b6b681fc66285c6bb8
|
Python
|
google/pytype
|
/pytype/pytd/transforms_test.py
|
UTF-8
| 2,562
| 2.78125
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
"""Tests for transforms.py."""
import textwrap
from pytype.pytd import transforms
from pytype.pytd.parse import parser_test_base
import unittest
class TestTransforms(parser_test_base.ParserTest):
    """Tests the code in transforms.py."""

    def test_remove_mutable_list(self):
        # Simple test for RemoveMutableParameters, with simplified list class.
        # The `self = Union[T, T2]` mutation on append should collapse so that
        # append simply takes the class's own type parameter T.
        src = textwrap.dedent("""
            from typing import Union
            T = TypeVar('T')
            T2 = TypeVar('T2')
            class TrivialList(typing.Generic[T], object):
              def append(self, v: T2) -> NoneType:
                self = Union[T, T2]
            class TrivialList2(typing.Generic[T], object):
              def append(self, v: T2) -> NoneType:
                self = Union[T, T2]
              def get_first(self) -> T: ...
        """)
        expected = textwrap.dedent("""
            T = TypeVar('T')
            T2 = TypeVar('T2')
            class TrivialList(typing.Generic[T], object):
              def append(self, v: T) -> NoneType: ...
            class TrivialList2(typing.Generic[T], object):
              def append(self, v: T) -> NoneType: ...
              def get_first(self) -> T: ...
        """)
        ast = self.Parse(src)
        ast = transforms.RemoveMutableParameters(ast)
        self.AssertSourceEquals(ast, expected)

    def test_remove_mutable_dict(self):
        # Test for RemoveMutableParameters, with simplified dict class.
        # Both key/value mutations and the extra TypeVars (T, K2, V2) should
        # collapse onto the class parameters K and V.
        src = textwrap.dedent("""
            from typing import Union
            K = TypeVar('K')
            V = TypeVar('V')
            T = TypeVar('T')
            K2 = TypeVar('K2')
            V2 = TypeVar('V2')
            class MyDict(typing.Generic[K, V], object):
              def getitem(self, k: K, default: T) -> Union[V, T]: ...
              def setitem(self, k: K2, value: V2) -> NoneType:
                self = dict[Union[K, K2], Union[V, V2]]
              def getanykeyorvalue(self) -> Union[K, V]: ...
              def setdefault(self, k: K2, v: V2) -> Union[V, V2]:
                self = dict[Union[K, K2], Union[V, V2]]
        """)
        expected = textwrap.dedent("""
            from typing import Union
            K = TypeVar('K')
            V = TypeVar('V')
            T = TypeVar('T')
            K2 = TypeVar('K2')
            V2 = TypeVar('V2')
            class MyDict(typing.Generic[K, V], object):
              def getitem(self, k: K, default: V) -> V: ...
              def setitem(self, k: K, value: V) -> NoneType: ...
              def getanykeyorvalue(self) -> Union[K, V]: ...
              def setdefault(self, k: K, v: V) -> V: ...
        """)
        ast = self.Parse(src)
        ast = transforms.RemoveMutableParameters(ast)
        self.AssertSourceEquals(ast, expected)


if __name__ == "__main__":
    unittest.main()
| true
|
9dcfbd51b17744c4f6cd51b8ea93a34b6aa0ac57
|
Python
|
mkaminskas/beyond_accuracy
|
/frameworkMetrics/diversity.py
|
UTF-8
| 4,345
| 3.21875
| 3
|
[] |
no_license
|
'''
Created on 12 Feb 2015
@author: mkaminskas
'''
from utils import config
def getListDiversity(training_data, item_list, method):
    """Average pairwise distance of a recommendation list
    [McClave & Smyth, 2001]: diversity(R) = SUM over pairs of dist(v_i, v_j),
    normalised by |R| * (|R| - 1).

    :param training_data: the training data object (trainData.TrainData)
    :param item_list: (id, score) tuples of recommended items
    :param method: distance flavour, 'div_r' (ratings) or 'div_c' (content)
    :return: diversity of the item list as a float
    """
    pair_distance_total = 0.0
    for idx in range(len(item_list) - 1):
        current_id = item_list[idx][0]
        remaining = item_list[idx + 1:]
        # Sum (not average) of distances from this item to every later item,
        # so each unordered pair is counted exactly once.
        pair_distance_total += _getItemDiversity(training_data, current_id, remaining, method, average=False)
    return 2 * pair_distance_total / (len(item_list) * (len(item_list) - 1))
def _getItemDiversity(training_data, item_id, item_list, method, average=True):
'''
item diversity score, as defined by [McClave & Smyth, 2001]: diversity(i,R) = SUM{j in R}{dist(v_i, v_j)} / |R|
where dist(v_i, v_j) is the distance between the items' rating vectors [McClave & Smyth, 2001], or content label vectors [Ziegler et al. 2005]
may be extended to include item factor vectors [Vargas and Castells, 2011], or binary rating vectors [Kelly & Bridge, 2006]
@param training_data: the training data object
@type training_data: trainData.TrainData
@param item_id: ID of item whose diversity we need to compute
@type item_id: string
@param item_list: the list of (id, score) tuples of items against which we compute the item's diversity
@type item_list: list
@param method: the method used to compute item diversity {div_r, div_c}
@type method: string
@param average: the flag to disable averaging of the diversity sum (needed for getListDiversity method)
@type average: bool
@return: diversity of the item with respect to the item_list
@rtype: float
'''
if len(item_list) >= 1:
item_index = training_data.getItemIndex(item_id)
distance_sum = 0.0
for i_id, _ in item_list:
i_index = training_data.getItemIndex(i_id)
if method == 'div_r':
# need to convert the cosine similarity in [-1, 1] to distance in [0, 1]:
# 1 - [(sim - min) / (max - min)] = (1 - sim) / 2
distance_sum += (1.0 - training_data.item_similarity_matrix[item_index, i_index]) / 2.0
elif method == 'div_c':
a = set(config.ITEM_DATA[item_id]['labels'])
b = set(config.ITEM_DATA[i_id]['labels'])
c = a.intersection(b)
distance_sum += 1 - ( float(len(c)) / (len(a) + len(b) - len(c)) )
# elif method == 'diversity_factors':
# # need to divide the distance by 2.0 because there are negative values in vectors and therefore cosine ranges in [-1,1]
# distance_sum += spatial.distance.cosine(Q[:, item_index], Q[:, i_index]) / 2.0
#
# elif method == 'diversity_ratings_bool':
# distance_sum += frameworkMetrics.getHammingDistance(training_data.matrix.getcol(item_index).indices, training_data.matrix.getcol(i_index).indices)
else:
raise ValueError('Wrong diversity computation method. Choose between div_r and div_c')
if average:
return distance_sum / len(item_list)
else:
return distance_sum
else:
return 1.0
# def getHammingDistance(vector_1, vector_2):
# '''
# calculate the (normalized) Hamming distance between two sparse vectors,
# i.e., the number of positions where one vector has a non-zero value and the other doesn't, divided by the union of non-zero positions
# '''
#
# v_1 = set(vector_1)
# v_2 = set(vector_2)
#
# return float(len(v_1 | v_2) - len(v_1 & v_2)) / len(v_1 | v_2)
| true
|
15823d24a08e1628dc36918157990db00f971724
|
Python
|
DanielSammon576/DCU-College-Work
|
/CA117/readnum_022.py
|
UTF-8
| 373
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
def main():
    """Read stdin lines, reporting non-numbers until a numeric line appears.

    Prints "<line> is not a number" for every non-digit line; on the first
    all-digit line prints a thank-you and stops.
    """
    entries = [line.strip() for line in sys.stdin]
    for entry in entries:
        if entry.isdigit():
            print("Thank you for {:}".format(entry))
            break
        print("{:} is not a number".format(entry))


if __name__ == '__main__':
    main()
| true
|
db1e6367c46ca9121ceec1b9cb4db28f2fb32438
|
Python
|
profhall/py-slack-bot
|
/storefinderbot.py
|
UTF-8
| 4,169
| 2.515625
| 3
|
[] |
no_license
|
from slackclient import SlackClient
import os, time, json
from play import markets
from THD_MD.getMarkets import listMarkets
"""
Psuedocode
#if event is a message, then...
#if message contains botid
#if message contains 'directions'
#ask to what store
#and the starting destination
#call getStore func
#call api & return results
#if message contains 'help'
#show menu with example commands
#if message contains 'markets'
#call getMarkets func and list markets in pages or scrollable list
#if message contains 'stores' or 'locations'
#askk what market
#call get locations func
"""
#SLACK_VERIFICATION_TOKEN = os.environ['V-TOKEN']
SLACK_BOT_USER_TOKEN = os.environ['BOT_USER_TOKEN']
slack_client = SlackClient(SLACK_BOT_USER_TOKEN)

# Materialise every market option up front for the Slack select menu.
# BUG FIX: the original advanced the generator twice per iteration
# ("for market in mktss: markets.append(next(mktss))"), which skipped every
# other market and could raise StopIteration mid-loop; list() consumes it once.
mktss = markets()
markets = list(mktss)

link = '<https://media.makeameme.org/created/that-would-be-3dsosw.jpg|That would be great>'

if slack_client.rtm_connect():
    # Poll the RTM stream forever, handling one batch of events per second.
    while True:
        events = slack_client.rtm_read()
        for event in events:
            print('New Event:',event['type'])
            # if event is a message, then...
            if(event['type'] == 'message' and 'text' in event and 'channel' in event ):
                # Ignore messages produced by bots (including ourselves) to
                # avoid reply loops.
                if('bot_id' not in event):
                    print('This is a message from: '+ event['user'])
                    user = event['user']
                    print('The message reads "'+ event['text']+ '"')
                    channel = event['channel']
                    text = event['text']
                    # Meme reply -- but never to a message that already
                    # contains the link, or we'd answer our own joke.
                    if 'that would be great' in text.lower() and link not in text:
                        slack_client.api_call(
                            'chat.postMessage',
                            channel=channel,
                            text=link,
                            as_user='true:'
                        )
                    # if message mentions the bot's user ID
                    if ('UC5ERJZUP' in text ):
                        slack_client.api_call('chat.postMessage',channel=channel,text="you rang <@" + user + ">?",as_user='true:')
                    # if message contains 'directions'
                    if ('directions' in text):
                        # ask what market the store is in via a select menu
                        slack_client.api_call('chat.postMessage',
                            channel=channel,
                            text="What market is the store in <@" + user + ">?",
                            attachments= [{
                                "text": "Choose a game to play",
                                "fallback": "If you could read this message, you'd be choosing something fun to do right now.",
                                "color": "#3AA3E3",
                                "attachment_type": "default",
                                "callback_id": "game_selection",
                                "actions": [
                                    {
                                        "name": "markets",
                                        "text": "Which Market",
                                        "type": "select",
                                        # markets will go here
                                        "options": markets
                                    }
                                ]
                            }]
                        )
                    # call getmarkets and list markets
                    # and the starting destination
                    # call getStore func
                    # call api & return results
        #print('No event:',slack_client.rtm_read())
        time.sleep(1)
else:
    print("Connection failed. Invalid Slack token or bot ID")
| true
|
5bf54e3fe8b74c080b6df2e42c02b255bc3b1d6d
|
Python
|
blejdfist/NeuBot
|
/tests/TestArguments.py
|
UTF-8
| 2,923
| 3.28125
| 3
|
[] |
no_license
|
import unittest
from models.arguments import Arguments
class TestArguments(unittest.TestCase):
    """Exercises tokenisation, quoting and escaping in models.arguments.Arguments."""

    def setUp(self):
        # No shared fixtures; Arguments instances are constructed inline.
        pass

    def tearDown(self):
        pass

    def testArgumentLengths(self):
        # Quoted spans collapse into a single argument; extra whitespace is ignored.
        self.assertEqual(len(Arguments("a b c")), 3)
        self.assertEqual(len(Arguments(" a b c")), 3)
        self.assertEqual(len(Arguments(" 'a b' c d ")), 3)
        self.assertEqual(len(Arguments("'a b' \"c d\" \"e f\"")), 3)

    def testArgumentValues(self):
        a = Arguments("a b c")
        self.assertEqual(a[1], 'b')

        # Mixed single/double quotes and a leading tab.
        a = Arguments("\tapa 'b c d' epa \"f g\" l h i j")
        self.assertEqual(a[0], 'apa')
        self.assertEqual(a[1], 'b c d')
        self.assertEqual(a[2], 'epa')
        self.assertEqual(a[3], 'f g')
        self.assertEqual(a[4], 'l')
        self.assertEqual(a[5], 'h')
        # get_args_after returns the raw remainder of the input string,
        # quoting preserved.
        self.assertEqual(a.get_args_after(4), 'h i j')
        self.assertEqual(a.get_args_after(2), '\"f g\" l h i j')

    def testBoundaryChecks(self):
        # Indexing past either end must raise IndexError.
        a = Arguments("a b c 'd e f' 'g h'")
        self.assertRaises(IndexError, a.__getitem__, 5)
        self.assertRaises(IndexError, a.__getitem__, -6)

    def testEscapeCharacters(self):
        # Backslash-escaped quotes and backslashes parse to literal characters.
        a = Arguments("a b c \\\"")
        self.assertEqual(a[3], '"')
        a = Arguments("a b c \"\\\"\"")
        self.assertEqual(a[3], '"')
        a = Arguments("\\\"")
        self.assertEqual(a[0], '"')
        a = Arguments("\\\\")
        self.assertEqual(a[0], '\\')
        # "\n" is an invalid escape: no arguments, original text preserved.
        a = Arguments("\\n")
        self.assertEqual(str(a), "\\n")
        self.assertRaises(IndexError, a.__getitem__, 0)
        a = Arguments("a \"Quoted \\\"string\\\"\"")
        self.assertEqual(a[1], 'Quoted "string"')
        # NOTE(review): the next construction has no assertion -- it only
        # verifies that parsing does not raise; confirm whether one was intended.
        a = Arguments("Hello \"Quoted\\\"\"")

    def testInvalidEscapeSequences(self):
        # Invalid escapes parse to zero arguments but keep the original string.
        invalid_sequences = [
            "Hello \\",  # Escape character at last position
            "Hello\\nthere",
            "Hello\\q",
        ]
        for sequence in invalid_sequences:
            a = Arguments(sequence)
            self.assertEqual(len(a), 0)
            self.assertEqual(str(a), sequence)

    def testSlices(self):
        # Slicing returns plain lists of parsed arguments.
        a = Arguments("a b c d e")
        self.assertEqual(a[1:4], ["b", "c", "d"])
        a = Arguments("'a b' c 'd f g' h i j")
        self.assertEqual(a[1:4], ["c", "d f g", "h"])

    def testEmptyString(self):
        a = Arguments("")
        self.assertEqual(str(a), "")
        self.assertEqual(len(a), 0)
        self.assertEqual(a[0:], [])

    def testInvalidQuotedString(self):
        # Unbalanced quotes also yield zero arguments with the input preserved.
        invalid_quoted_strings = [
            'This string is missing the last "quote',
            'This string is missing the last \'quote',
            '"This string has unmatched \'quotes',
        ]
        for s in invalid_quoted_strings:
            a = Arguments(s)
            self.assertEqual(len(a), 0)
            self.assertEqual(str(a), s)
| true
|
55ef9f74f6115ef191c428fa36ed6a864f2ec405
|
Python
|
Hunt66/holbertonschool-higher_level_programming
|
/0x01-python-if_else_loops_functions/9-print_last_digit.py~
|
UTF-8
| 119
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
def print_last_digit(number):
    """Print the last digit of ``number`` (no newline) and return it.

    BUG FIX: Python's floored modulo gives e.g. -98 % 10 == 2, but the last
    digit of -98 is 8; taking abs() first yields the absolute last digit for
    negative inputs while leaving non-negative inputs unchanged.
    """
    digit = abs(number) % 10
    print(digit, end='')
    return digit
| true
|
30ac3704777b1e156217f0311b992e54d90e5191
|
Python
|
tahsinalamin/leetcode_problems
|
/leetcode-my-solutions/58_length_of_last_word.py
|
UTF-8
| 333
| 3.875
| 4
|
[] |
no_license
|
"""
Author: Sikder Tahsin Al-Amin
Problem: Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.
Input: "Hello World"
Output: 5
"""
def lengthofLastWord(s):
    """Return the length of the last whitespace-separated word in ``s``.

    Returns 0 when the string contains no words at all (empty or all spaces).
    """
    words = s.split()
    return len(words[-1]) if words else 0
| true
|
16a6a27beeceb42ab7ce22262083e4e87807778f
|
Python
|
ilya-il/projecteuler.net
|
/p005.py
|
UTF-8
| 535
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# coding: utf-8
# IL 30.10.2017
"""
ProjectEuler Problem 5
"""
__author__ = 'ilya_il'
import time
def get_number2(init):
    """Print and return the smallest positive integer evenly divisible by
    every number in 1..init (i.e. lcm(1..init)).

    BUG FIX + performance: the original brute-forced multiples of 10, which
    was slow (~3.5 s for init=20) and wrong for init < 5 (it assumed the
    answer was a multiple of 10).  The exact LCM is computed incrementally
    via gcd instead.  The result is also returned for programmatic use;
    the print is kept for backward compatibility.
    """
    from math import gcd  # local import keeps the file's import block untouched

    result = 1
    for k in range(2, init + 1):
        # lcm(result, k) = result * k / gcd(result, k)
        result = result * k // gcd(result, k)
    print(result)
    return result
# Expected answer for init=20: 232792560 (original brute force took ~3.46 s).
st = time.time()
get_number2(20)
# Report wall-clock time for the computation above.
print("--- %s seconds ---" % (time.time() - st))
| true
|
48f0ed85bda0a4668197aa2be60bafbb98f9114c
|
Python
|
amulya444/CBIR-CNN-SVM
|
/packages/Model/BaseModel.py
|
UTF-8
| 614
| 2.671875
| 3
|
[] |
no_license
|
class BaseModel:
    """Abstract base for Keras-style models.

    Subclasses must implement initModel() and assign ``self.model``;
    compile() and save() then delegate to that model object.
    """

    def __init__(self, kernels=None, optimizer='adadelta', loss='categorical_crossentropy'):
        # BUG FIX: the original used a mutable default (kernels={}), which is
        # a single dict shared by every instance created without an explicit
        # kernels argument; None + fresh dict gives each instance its own.
        self.optimizer = optimizer
        self.loss = loss
        self.model = None
        self.kernels = {} if kernels is None else kernels
        self.initModel()

    def initModel(self):
        """Build ``self.model``; must be provided by subclasses."""
        raise NotImplementedError

    def setKernel(self, kernels):
        """Replace the kernel configuration dict."""
        self.kernels = kernels

    def compile(self):
        """Compile the underlying model with the configured optimizer/loss."""
        self.model.compile(
            optimizer=self.optimizer,
            loss=self.loss,
            metrics=['accuracy']
        )

    def save(self, path):
        """Persist the underlying model to *path*."""
        self.model.save(path)
| true
|
24ec8ae24f07be54f2990722b4eb9769a1b354ba
|
Python
|
preetanshu2508/Python-Django
|
/practicepython +tkinter/python/fhandlind2.py
|
UTF-8
| 517
| 3.046875
| 3
|
[] |
no_license
|
''''x=input("enter file name")
f=open(x,"w+")
st=int(input("Enter No of students"))
for i in range(1,st+1):
rn=input("enter your roll no")
sn=input("Enter student name")
m=input("Enter Your marks")
f.write((str)(st)+'\n')
f.write('Roll no'+rn+'\n')
f.write('Name'+sn+'\n')
f.write('Marks'+m+'\n')
print("file saved")
f.close()'''
'''f=open("dd.txt","rt+")
p=f.read(20)
for i in p:
if i=='a'or i=='i'or i=='o' or i=='u':
print("vowels",i)
f.close()
'''
| true
|
8ffec07a2469e54e54ac82f7bd7b93eca2332a00
|
Python
|
Panamera-Turbo/MyPython
|
/loop/while.py
|
UTF-8
| 754
| 4.09375
| 4
|
[] |
no_license
|
'''
while-loop practice: counting, user-controlled exit, a flag variable,
and notes on break/continue.
'''
# Count from 0 to 4, printing each value plus 10.
number = 0
while number < 5:
    print(number+10)
    number += 1

# Let the user choose when to quit: echo input until the user types 'wq'.
print('\n--------------------------------\n第一次实验')
a = '\n输入的内容会被重复'
a += '\nwq保存退出:'
message = ''
while message != 'wq':
    message = input(a)
    if message != 'wq':
        print(message)

print('--------------------------------\n第二次实验')
# Same loop, but controlled with a boolean flag instead of the sentinel test.
active = True
a = '\n输入的内容会被重复'
a += '\nwq保存退出:'
while active:
    message = input(a)
    if message == 'wq':
        active = False
    else:
        print(message)

print('-----------------------------')
# break exits the loop entirely
# continue skips to the next iteration
# both behave like their C counterparts
| true
|
1680d3efcc43460259b5b3876429a843f3dd79c5
|
Python
|
tonymtran36/Python-Basics
|
/Homework/Assignment3.py
|
UTF-8
| 2,591
| 3.828125
| 4
|
[] |
no_license
|
#Question 7 a ---------------------------------------------------------------------------------------------------
def findDiv7():
    """Print every number in [1500, 2700] divisible by both 7 and 5,
    comma-separated on one line."""
    hits = (value for value in range(1500, 2701) if value % 7 == 0 and value % 5 == 0)
    for value in hits:
        print(value, end=", ")
    print()


findDiv7()
#Question 7 b ---------------------------------------------------------------------------------------------------
def convert(temp_str):
    """Convert a temperature string like '60C' or '45F' to the other scale
    and print the result (truncated to an integer).

    BUG FIX: the original parsed only the first two characters of the number
    (temp_str[:2]), so any temperature with three or more digits such as
    '100C' was converted incorrectly; everything before the trailing unit
    letter is used now.
    """
    degree = temp_str[-1]
    temp = int(temp_str[:-1])
    if(degree=="C"):
        print(f"{temp_str} is {(int)((temp*(9/5))+32)} in Fahrenheit")
    elif(degree=="F"):
        print(f"{temp_str} is {(int)((temp-32)*(5/9))} in Celcius")


convert("60C")
convert("45F")
#Question 7 c ---------------------------------------------------------------------------------------------------
import random
def guess():
    """Interactive guessing game: pick a random target in [1, 9] and prompt
    until the user matches it.

    Note: int(x) raises ValueError on non-numeric input -- the game does not
    guard against that.
    """
    target = random.randint(1, 9)
    while(True):
        x=input("Guess a number between 1 and 9\n")
        if int(x)==target:
            print("Well Done!")
            break
        else:
            print("Try Again")

# Interactive driver -- blocks waiting for user input.
guess()
#Question 7 d ---------------------------------------------------------------------------------------------------
def stars():
    """Print a star triangle growing to width 4, then shrinking from 5 to 1.

    Each row is the pattern "* " repeated, so the first (width 0) row is blank.
    """
    for width in range(5):
        print("* " * width)
    for width in range(5, 0, -1):
        print("* " * width)


stars()
#Question 7 e ---------------------------------------------------------------------------------------------------
def reverse(str):
    """Print the characters of *str* in reverse order.

    (Parameter keeps its original name for caller compatibility, even though
    it shadows the builtin.)
    """
    print("".join(reversed(str)))
# Interactive driver: reverse whatever the user types (blocks on stdin).
reverse(input("Enter a string to be reversed\n"))
#Question 7 f ---------------------------------------------------------------------------------------------------
def count(lst):
    """Return an (even_count, odd_count) pair for the integers in *lst*."""
    even_total = sum(1 for value in lst if value % 2 == 0)
    # Everything that is not even is odd, so no second pass is needed.
    return even_total, len(lst) - even_total


numbers = (1,2,3,4,5,6,7,8,9)
print(count(numbers))
#Question 7 g ---------------------------------------------------------------------------------------------------
def printType(list):
    """Print each element of *list* followed by its Python type.

    (Parameter keeps its original name for caller compatibility, even though
    it shadows the builtin.)
    """
    for element in list:
        print(f"{element} : {type(element)}")


datalist = [1452, 11.23, 1+2j, True, 'w3resource', (0, -1), [5, 12], {"class":'V', "section":'A'}]
printType(datalist)
#Question 7 h ---------------------------------------------------------------------------------------------------
def except_three_six():
    """Print 0..5 space-separated on one line, skipping 3 and 6.

    (6 never occurs in range(6); the check mirrors the exercise statement.)
    """
    for value in range(6):
        if value in (3, 6):
            continue
        print(f"{value} ", end="")


except_three_six()
| true
|
7f82b9c36058f01fbdf1613167fefe424872b903
|
Python
|
shubhampachori12110095/Chatbot-Keras-TransferLearning
|
/Process_WhatsAppData_2.py
|
UTF-8
| 6,067
| 2.6875
| 3
|
[] |
no_license
|
##############################################################################################################################################
# AUTHOR: KUNAL PALIWAL
# EMAIL ID: kupaliwa@syr.edu
# COURSE: ARTIFICAL NEURAL NETWORKS
# This file is responsible for processing our dataset and building padded inputs and outputs for training our model
##############################################################################################################################################
import numpy as np
np.random.seed(0)
import pandas as pd
import os
from os import path
import csv
import nltk
import itertools
import operator
import pickle
from keras.preprocessing import sequence
from scipy import sparse, io
from numpy.random import permutation
import re
import tensorflow
print(tensorflow.__version__)
class NN:
    """Pre-processing pipeline for chatbot training data.

    Reads raw conversation lines, writes cleaned question/answer files, and
    builds padded, integer-encoded sequences for a Keras seq2seq model.
    """

    # --------------------------------< Initializing parameters (Constructor) >-------------------------------
    def __init__(self):
        # File names used throughout the pipeline (all relative to the cwd).
        self.questions_file = 'questions'
        self.answers_file = 'answers'
        self.vocabulary_file = 'vocabulary_file'
        self.padded_questions_file = 'padded_questions'
        self.padded_answers_file = 'padded_answers'
        # Token substituted for out-of-vocabulary words.
        self.unknown_token = 'something'
        self.vocabulary_size = 7000
        self.max_features = self.vocabulary_size
        # Sequences are cut/padded to these lengths (in words).
        self.maxlen_input = 50
        self.maxlen_output = 50  # cut texts after this number of words

    # --------------------------------< Extracting question and answers from our Whatsapp dataset >-------------------------------
    def extract_question_answers(self):
        """Parse 'training_data' and write cleaned 'questions'/'answers' files.

        Each answer line is paired with its two preceding (cleaned) lines,
        joined, as the question context.
        """
        text = open('training_data','r')
        q = open('questions', 'w')
        a = open('answers', 'w')

        pre_pre_previous_raw=''
        pre_previous_raw=''
        previous_raw=''
        person = ' '
        previous_person=' '

        # l1 -> l2 are parallel lists: contraction expansion and punctuation
        # normalisation pairs applied in order.
        l1 = ['won’t','won\'t','wouldn’t','wouldn\'t','’m', '’re', '’ve', '’ll', '’s','’d', 'n’t', '\'m', '\'re', '\'ve', '\'ll', '\'s', '\'d', 'can\'t', 'n\'t', 'B: ', 'A: ', ',', ';', '.', '?', '!', ':', '. ?', ', .', '. ,', 'EOS', 'BOS', 'eos', 'bos']
        l2 = ['will not','will not','would not','would not',' am', ' are', ' have', ' will', ' is', ' had', ' not', ' am', ' are', ' have', ' will', ' is', ' had', 'can not', ' not', '', '', ' ,', ' ;', ' .', ' ?', ' !', ' :', '? ', '.', ',', '', '', '', '']
        # Characters/sequences replaced by a plain space.
        l3 = ['-', '_', ' *', ' /', '* ', '/ ', '\"', ' \\"', '\\ ', '--', '...', '. . .']

        for i, raw_word in enumerate(text):
            # '+++$+++' delimits speaker metadata; grab the speaker tag and
            # strip the markers from the line.
            pos = raw_word.find('+++$+++')
            if pos > -1:
                person = raw_word[pos+7:pos+10]
                raw_word = raw_word[pos+8:]
            # NOTE(review): when find() returns -1 the slice below still runs
            # once with pos == -1 (raw_word[1:]), dropping the first character
            # of lines that contained a marker -- confirm this is intended.
            while pos > -1:
                pos = raw_word.find('+++$+++')
                raw_word = raw_word[pos+2:]
            raw_word = raw_word.replace('$+++','')
            previous_person = person

            # Apply the normalisation tables, then lower-case the line.
            for j, term in enumerate(l1):
                raw_word = raw_word.replace(term,l2[j])
            for term in l3:
                raw_word = raw_word.replace(term,' ')
            raw_word = raw_word.lower()

            if i>0 :
                # Question = the two previous lines joined; answer = this line.
                q.write(pre_previous_raw[:-1] + ' ' + previous_raw[:-1]+ '\n') # python will convert \n to os.linese
                a.write(raw_word[:-1]+ '\n')

            # Slide the three-line history window forward.
            pre_pre_previous_raw = pre_previous_raw
            pre_previous_raw = previous_raw
            previous_raw = raw_word

        q.close()
        a.close()

    # --------------------------------< Padding the question and anwer / input and output generated above >----------------
    def pad_question_answers(self):
        """Tokenise the question/answer files, map words to vocabulary indices
        and pickle fixed-length padded index sequences."""
        print ("Reading the context data...")
        q = open(self.questions_file, 'r')
        questions = q.read()
        print ("Reading the answer data...")
        a = open(self.answers_file, 'r')
        answers = a.read()
        all = answers + questions
        print ("Tokenazing the answers...")
        paragraphs_a = [p for p in answers.split('\n')]
        paragraphs_b = [p for p in all.split('\n')]
        # Wrap every line in begin/end-of-sentence markers.
        paragraphs_a = ['BOS '+p+' EOS' for p in paragraphs_a]
        paragraphs_b = ['BOS '+p+' EOS' for p in paragraphs_b]
        paragraphs_b = ' '.join(paragraphs_b)
        tokenized_text = paragraphs_b.split()
        paragraphs_q = [p for p in questions.split('\n') ]
        tokenized_answers = [p.split() for p in paragraphs_a]
        tokenized_questions = [p.split() for p in paragraphs_q]

        # The vocabulary is a pickled list of (word, count) pairs, most
        # frequent first; index order defines the integer encoding.
        vocab = pickle.load(open(self.vocabulary_file, 'rb'))

        index_to_word = [x[0] for x in vocab]
        index_to_word.append(self.unknown_token)
        word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
        print ("Using vocabulary of size %d." % self.vocabulary_size)
        print ("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))

        # Replacing all words not in our vocabulary with the unknown token:
        for i, sent in enumerate(tokenized_answers):
            tokenized_answers[i] = [w if w in word_to_index else self.unknown_token for w in sent]
        for i, sent in enumerate(tokenized_questions):
            tokenized_questions[i] = [w if w in word_to_index else self.unknown_token for w in sent]

        # Creating the training data (integer-encoded sequences):
        X = np.asarray([[word_to_index[w] for w in sent] for sent in tokenized_questions])
        Y = np.asarray([[word_to_index[w] for w in sent] for sent in tokenized_answers])

        # Questions are pre-padded (default), answers post-padded, both to
        # fixed lengths.
        Q = sequence.pad_sequences(X, maxlen = self.maxlen_input)
        A = sequence.pad_sequences(Y, maxlen = self.maxlen_output, padding='post')

        with open(self.padded_questions_file, 'wb') as q:
            pickle.dump(Q, q)
        with open(self.padded_answers_file, 'wb') as a:
            pickle.dump(A, a)
# --------------------------------< Main method >----------------
# Runs the full pipeline: extract Q/A pairs, then pad and pickle them.
if __name__ == "__main__":
    print('testing')
    c_processData = NN()
    c_processData.extract_question_answers()
    c_processData.pad_question_answers()
    # parse_whatsapp()
| true
|
9c6fc9e13e64190eba7cf82ed1edcc48434eb4b5
|
Python
|
StevenMMortimer/one-r-package-a-day
|
/script.py
|
UTF-8
| 3,911
| 2.75
| 3
|
[] |
no_license
|
# script.py
from os import environ
from os.path import join, dirname
from dotenv import load_dotenv
import re
import pandas
from TwitterAPI import TwitterAPI, TwitterPager
# create .env file path
try:
    # __file__ is undefined when this code is pasted into an interactive
    # session; that is the only failure mode here, so catch NameError
    # specifically instead of the original bare except, which would also
    # have hidden genuine bugs.
    dotenv_path = join(dirname(__file__), '.env')
except NameError:
    # Fall back to the current working directory when run interactively.
    dotenv_path = '.env'
# load file from the path
load_dotenv(dotenv_path)

if __name__ == "__main__":
    # connect to api using the four OAuth credentials from the environment
    api = TwitterAPI(consumer_key=environ['TWITTER_CONSUMER_KEY'],
                     consumer_secret=environ['TWITTER_CONSUMER_SECRET'],
                     access_token_key=environ['TWITTER_ACCESS_TOKEN'],
                     access_token_secret=environ['TWITTER_ACCESS_TOKEN_SECRET'])

    # scrape all prior tweets to check which packages I've already tweeted
    SCREEN_NAME = 'RLangPackage'
    pager = TwitterPager(api,
                         'statuses/user_timeline',
                         {'screen_name': SCREEN_NAME, 'count': 100})

    # parse out the package name that occurs before the hyphen at the beginning
    previous_pks = []
    for item in pager.get_iterator(wait=3.5):
        if 'text' in item:
            this_pkg = re.sub("^([A-Za-z0-9.]+) - (.*)", "\\1", item['text'])
            previous_pks.append(this_pkg)

    # add packrat, it wasn't formatted correctly when it tweeted
    previous_pks.append('packrat')

    # convert the package names to a dataframe
    prev_df = pandas.DataFrame({'name': previous_pks})
    # NOTE(review): set_index returns a new frame and the result is discarded
    # here (and below) -- confirm whether inplace=True was intended.
    prev_df.set_index('name')

    # load the data I've compiled on R packages
    url = "https://raw.githubusercontent.com/StevenMMortimer/one-r-package-a-day/d94392d7abb9a7ade71c75e77c4284ad6e350969/r-package-star-download-data.csv"
    all_df = pandas.read_csv(url)
    all_df.set_index('name')

    # do an "anti join" to throw away previously tweeted rows
    all_df = pandas.merge(all_df, prev_df, how='outer', indicator=True)
    all_df = all_df[all_df['_merge'] == 'left_only']

    # focus on packages in middle ground of downloads and stars
    filtered_df = all_df[all_df['github_url'].notnull()]
    # NOTE(review): '|' binds tighter than '<', so this evaluates
    # (notnull() | stars) < 1000; the intent was probably
    # df.stars.notnull() & (df.stars < 1000) -- confirm before changing.
    filtered_df = filtered_df.loc[lambda df: df.stars.notnull() | df.stars < 1000]
    filtered_df = filtered_df[filtered_df['downloads'].notnull()]
    filtered_df = filtered_df.loc[lambda df: df.downloads < 1000000]

    # randomly select one of the remaining rows
    selected_pkg = filtered_df.sample(1)

    # pull out the name and description to see if we need
    # to truncate because of Twitter's 280 character limit
    prepped_name = selected_pkg.iloc[0]['name']
    prepped_desc = re.sub(r'\s+', ' ',
                          selected_pkg.iloc[0]['description']).strip()

    # determine how many urls are in the description
    # since Twitter shortens or expands all URLs to 23 chars
    # NOTE(review): "\b" in this non-raw string is a backspace character, not
    # a regex word boundary -- the pattern probably wants r"https|http|\bwww|<www".
    urls_count = len(re.findall("https|http|\bwww|<www", prepped_desc))
    name_len = len(prepped_name)
    desc_len = len(prepped_desc)

    # determine the max length of the description
    # 280 tweet char max
    # then minus 3 for " - "
    # then minus 9 for the " #rstats " hashtag
    # then minus the number of urls plus one github url
    # times 23 because all links are counted as 23 chars
    max_len = (280 - 3 - ((urls_count + 1) * 23) - 9 - name_len)

    # truncate the description to the max length if needed
    if desc_len <= max_len:
        prepped_desc = prepped_desc[0:desc_len]
    else:
        # minus extra 3 for the added "..."
        prepped_desc = prepped_desc[0:(max_len - 3)] + "..."

    # cobble together the tweet text
    TWEET_TEXT = prepped_name + " - " + prepped_desc + \
        " #rstats " + selected_pkg.iloc[0]['github_url']
    print(TWEET_TEXT)

    # tweet it out to the world!
    r = api.request('statuses/update', {'status': TWEET_TEXT})
    print('SUCCESS' if r.status_code == 200 else 'PROBLEM: ' + r.text)
| true
|