blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
49cb23b1e913593aed82077ac848ea23dfa073fe | Python | LucianoBI/POO | /assets/provas/prova-1/questao1/Main.py | UTF-8 | 323 | 3.109375 | 3 | [] | no_license | from Pessoa import Pessoa
from CalculadoraAfinidade import CalculadoraAfinidade
# Build two people and ask the calculator for their affinity score.
# NOTE(review): Pessoa(name, age) and calcular_afinidade's semantics live in
# the imported modules -- confirm there.
pessoa1 = Pessoa("Coutinho", 27)
pessoa2 = Pessoa("Neymar", 26)
calculadora = CalculadoraAfinidade()
afinidade = calculadora.calcular_afinidade(pessoa1,pessoa2)
print("Afinidade entre %s e %s = %d " % (pessoa1.nome, pessoa2.nome, afinidade)) | true |
52e8055f04087945bcdcd69d4dd0a23de2ce0b2a | Python | AlessandroFC15/Python | /instagramProfilePic.py | UTF-8 | 875 | 3.4375 | 3 | [] | no_license | import urllib
def get_page(username):
    """Fetch the HTML of an Instagram profile page; return "" on failure.

    Python 2 code: uses urllib.urlopen, which raises IOError on network or
    HTTP failures.
    """
    url = "https://instagram.com/" + username
    try:
        return urllib.urlopen(url).read()
    except IOError:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # programming errors; urllib.urlopen signals failures via IOError.
        return ""
def download_pic (url, name):
    # Thin Python 2 wrapper: save the resource at `url` to local file `name`.
    urllib.urlretrieve(url, name)
def get_profile_pic (username):
    """Download `username`'s Instagram profile picture to '<username>.jpg' (Python 2)."""
    page = get_page(username)
    if page == "":
        print "# FAILED TO DOWNLOAD #".center(100)
        return
    # Locate the og:image meta tag; the picture URL starts 11 characters after
    # ":image" (presumably skipping '" content="' -- TODO confirm against the
    # page markup) and runs to the next double quote.
    place = page.find(":image")
    begin_link = place + len(":image") + 11
    end_quote = page[begin_link:].find('"')
    end_quote = begin_link + end_quote
    profile_link = page[begin_link:end_quote]
    download_pic (profile_link, username + ".jpg")
    print "|| SUCCESSFUL DOWNLOAD ||".center(100)
    print
# Main Program (Python 2: raw_input / print statements).
# Repeatedly prompt for a username and download its profile picture;
# entering "x" (case-insensitive) exits.
print ".: PICTURE DOWNLOAD INSTAGRAM :.".center(100)
print
while True:
    username = raw_input(">> Insira o nome do usuario (X para finalizar): ").lower()
    if username == "x":
        break
    get_profile_pic(username)
| true |
b776231401368a9a9d9182b581b7c3683b769b3a | Python | Jaredbartley123/Lab-5 | /targetmark.py | UTF-8 | 1,188 | 4.09375 | 4 | [] | no_license | def calculateTargetMark(mm,tp):
    # Coerce inputs to int: callers pass strings read from input().
    mm=int(mm)
    tp=int(tp)
    # Marks needed = maximum marks * (target percentage / 100); true division,
    # so the result is a float.
    tm=mm*(tp/100)
    return tm
# Interactive loop: read max mark and target percentage, report the marks
# needed, and repeat until the user asks to exit.
while True:
    try:
        # Student inputs maximum mark of the assignment.
        maxMarks = input("Please enter the maximum mark of the assignment ... ")
        # BUG FIX: the converted value was assigned to a different name
        # (`maxmarks`), so the validated int was discarded and maxMarks stayed
        # a string (calculateTargetMark re-converted it, masking the typo).
        maxMarks = int(maxMarks)
    except ValueError:
        print("Please enter a valid Base 10 integer")
        continue
    try:
        # Student inputs the target percentage they desire.
        targetPercent = input("Please enter your target success percentage ... ")
        targetPercent = int(targetPercent)
    except ValueError:
        print("Please enter a valid Base 10 integer")
        continue
    if targetPercent > 100 or targetPercent <= 0:
        print("Please enter a valid Base 10 Integer between 1 and 100")
        continue
    # Calculate the number of marks required to achieve the target percentage.
    targetMark = calculateTargetMark(maxMarks, targetPercent)
    # Print result.
    print(targetMark, "marks out of", maxMarks, "are required to achieve", targetPercent, "%")
    val = input("Would you like to exit? (Y/N) ")
    if val == "y" or val == "Y":
        break
| true |
f73a5270c7320eecd02013bd0204eea9c5064450 | Python | joelwitherspoon/PythonCrashCourse | /die_visual.py | UTF-8 | 717 | 3.40625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 15:06:47 2020
@author: jwitherspoon
"""
from die import Die
import pygal
# Create a die (presumably six-sided by default -- confirm in die.py).
die = Die()
# Roll it 1000 times and record every result.
results = []
for roll_num in range(1000):
    result = die.roll()
    results.append(result)
# Analyze: count how often each face (1..num_sides) came up.
frequencies = []
for value in range(1,die.num_sides+1):
    frequency = results.count(value)
    frequencies.append(frequency)
# Visualize the counts as a pygal bar chart and save to SVG.
hist = pygal.Bar()
hist.title = "Results of rolling on D6 1,000 times"
hist.x_labels = ['1','2','3','4','5','6']
hist.x_title = "Result"
hist.y_title = "Frequency of the result"
hist.add('D6',frequencies)
hist.render_to_file('die_visual.svg')
| true |
2933d405ac2815be2a0a555620f015fa02e7382f | Python | lch1000m/Code_Snippets | /python/multiprocessing/pgm2.py | UTF-8 | 198 | 2.703125 | 3 | [] | no_license |
import time, os
def run_program():
    """Print a start marker, idle for 30 seconds, then print an end marker."""
    start_msg, end_msg = 'pgm start!', 'pgm end!'
    print(start_msg)
    time.sleep(30)
    print(end_msg)
if __name__ == '__main__':
    # Report this process's PID so it can be observed/killed externally.
    print('pid is {0}'.format(os.getpid()))
    run_program()
| true |
00c0cc23d0a35ea0b645a9c4546b8ca6404b8745 | Python | marannan/repo_1 | /OS/p2/xv6/mlfq2.py | UTF-8 | 461 | 2.6875 | 3 | [] | no_license | __author__ = 'Ashok Marannan'
import matplotlib.pyplot as plt
# MLFQ scheduling trace: each series plots one process's priority-queue level
# over time (markers distinguish P1/P2/P3).
plt.xlabel("Time -->")
plt.ylabel("Priority Queue -->")
plt.plot([1,2,3,4,5,6,7,8,9,10,11], [0,1,1,2,2,2,2,3,3,3,3],marker="*",label="P1")
plt.plot([3,7,8,11,12,13,14,19], [0,1,1,2,2,2,2,3],marker="^",label="P2")
plt.plot([4,9,10,15,16,17,18,20,21,22], [0,1,1,2,2,2,2,3],marker="o",label="P3") if False else plt.plot([4,9,10,15,16,17,18,20,21,22], [0,1,1,2,2,2,2,3,3,3],marker="o",label="P3")
plt.legend()
#plt.axis([0, 6, 0, 20])
plt.xlim(0,30)
plt.yticks([0,1,2,3,4])
plt.savefig('mlfq_4')
66617b63bfe1e55bf4379f0d4c94645ef6cfb9ad | Python | chrisvonbienias/HeritageAddon | /curvature.py | UTF-8 | 3,715 | 2.625 | 3 | [] | no_license | import bpy
import bmesh
from mathutils import Vector
from collections import defaultdict
import numpy as np
import colorsys
class HERITAGE_OT_CheckCurvature(bpy.types.Operator):
    """Compute the minimal and maximal discrete edge curvature of the active
    mesh and store them on the object as curv_data[0] (min) and
    curv_data[1] (max)."""
    bl_idname = "heritage.check_curvature"
    bl_label = "Calculate curvature"
    bl_description = "Calculate minimal and maximal mesh curvature"

    @classmethod
    def poll(cls, context):
        # Only available when some object is active.
        return bpy.context.active_object

    def execute(self, context):
        obj = bpy.context.active_object
        bm = bmesh.new()
        bm.from_mesh(obj.data)
        edges = bm.edges
        edges.ensure_lookup_table()
        cmax = -10
        cmin = 10
        for e in edges:
            p1 = e.verts[0].co
            p2 = e.verts[1].co
            n1 = e.verts[0].normal
            n2 = e.verts[1].normal
            # Directional curvature estimate at each endpoint: 2 * n.(p - q) / |p - q|
            curva1 = 2 * n1.dot(p1 - p2)
            curva2 = 2 * n2.dot(p2 - p1)
            # BUG FIX: the guards were inverted (`if not (...).length`), which
            # divided by zero exactly on degenerate edges and skipped the
            # normalization for every regular edge.  Now matches the correct
            # form used in HERITAGE_OT_ColorCurvature.
            if (p1 - p2).length:
                curva1 = curva1 / (p1 - p2).length
            if (p2 - p1).length:
                curva2 = curva2 / (p2 - p1).length
            curva1 = round(curva1, 3)
            curva2 = round(curva2, 3)
            if max(curva1, curva2) > cmax:
                cmax = max(curva1, curva2)
            if min(curva1, curva2) < cmin:
                cmin = min(curva1, curva2)
        obj.curv_data[0] = round(cmin, 2)
        obj.curv_data[1] = round(cmax, 2)
        return {'FINISHED'}
class HERITAGE_OT_ColorCurvature(bpy.types.Operator):
    """Paint per-loop vertex colors ('Curvature' layer) from the mean absolute
    edge curvature around each vertex, mapped green (flat) to red (curved)."""
    bl_idname = "heritage.color_curvature"
    bl_label = "Color curvature"
    bl_description = "Color mesh with curvature data"

    @classmethod
    def poll(cls, context):
        # Only available when some object is active.
        return bpy.context.active_object

    def execute(self, context):
        obj = bpy.context.active_object.data
        bm = bmesh.new()
        bm.from_mesh(obj)
        # Create the 'Curvature' vertex-color layer on first use.
        if not ('Curvature' in obj.vertex_colors.keys()):
            obj.vertex_colors.new(name = 'Curvature')
            obj.vertex_colors['Curvature'].active = True
        col = obj.vertex_colors['Curvature']
        col.active = True
        edges = bm.edges
        faces = bm.faces
        verts = bm.verts
        verts.ensure_lookup_table()
        faces.ensure_lookup_table()
        edges.ensure_lookup_table()
        # verts_dict: vertex index -> [edge index, |curvature|] pairs
        # vertex_map: vertex index -> face loops touching that vertex
        verts_dict = defaultdict(list)
        vertex_map = defaultdict(list)
        for f in faces:
            for v_ix, l_ix in zip(f.verts, f.loops):
                vertex_map[v_ix.index].append(l_ix)
        for e in edges:
            v1 = e.verts[0]
            v2 = e.verts[1]
            p1 = v1.co
            p2 = v2.co
            n1 = v1.normal
            n2 = v2.normal
            # Directional curvature estimate at each endpoint: 2 * n.(p - q) / |p - q|
            curva1 = 2 * n1.dot(p1 - p2)
            curva2 = 2 * n2.dot(p2 - p1)
            if (p1 - p2).length :
                curva1 = curva1 / (p1 - p2).length
            if (p2 - p1).length :
                curva2 = curva2 / (p2 - p1).length
            curva1 = round(curva1, 3)
            curva2 = round(curva2, 3)
            #Add v1
            verts_dict[v1.index].append([e.index, abs(curva1)])
            #Add v2
            verts_dict[v2.index].append([e.index, abs(curva2)])
        for v, edge in verts_dict.items():
            # Mean absolute curvature over all edges incident to this vertex.
            average = 0
            for e in edge:
                average += e[1]
            average /= len(edge)
            # Map [0, 1] curvature to hue [0.333 (green), 0 (red)].
            h = np.interp(average, [0, 1.0], [0.333, 0])
            color = colorsys.hsv_to_rgb(h, 1.0, 1.0)
            color += (1.0,)  # alpha channel
            for l in vertex_map[v]:
                col.data[l.index].color = color
        return {'FINISHED'}
| true |
cabf42ba31a685f1e110f2e6ce42b289b7ece9f2 | Python | andy1li/adventofcode | /2018/day22_cave.py | UTF-8 | 2,804 | 3.34375 | 3 | [] | no_license | # https://adventofcode.com/2018/day/22
from advent import get_neighbor_items, iterate
from typing import NamedTuple
import networkx
import numpy as np
class Pos(NamedTuple):
    """An immutable (x, y) grid coordinate."""
    x: int
    y: int
def scan_cave(depth, target, MOD=20183):
    """Build the region-type grid for AoC 2018 day 22.

    Erosion levels are computed over a grid extended past the target
    (3x wide, 2x tall) so part 2 has room to route around it; the returned
    array holds erosion % 3 (per the puzzle: 0 = rocky, 1 = wet, 2 = narrow).
    """
    rows, cols = target.y * 2, target.x * 3
    erosion = np.zeros((rows, cols), dtype=int)
    # Geologic index is x*16807 on the top row and y*48271 on the left column.
    erosion[0, :] = (np.arange(cols) * 16807 + depth) % MOD
    erosion[:, 0] = (np.arange(rows) * 48271 + depth) % MOD
    # Elsewhere it is the product of the erosion levels above and to the left.
    for row in range(1, rows):
        for col in range(1, cols):
            erosion[row, col] = (erosion[row, col - 1] * erosion[row - 1, col] + depth) % MOD
    # The target has geologic index 0, so its erosion level is just the depth.
    # NOTE(review): cells right/below the target were already derived from the
    # unpatched value -- identical to the original implementation.
    erosion[target.y, target.x] = depth
    return erosion % 3
def fst_star(cave, target):
    """Part 1: total risk = sum of region types over the rectangle (0,0)..target."""
    region = cave[:target.y + 1, :target.x + 1]
    return region.sum()
def snd_star(cave, target):
    """Part 2: fewest minutes to reach the target carrying the torch.

    Builds a graph with one node per (region, tool) pair: switching tools
    inside a region costs 7 minutes, moving to an adjacent region costs 1.
    Valid tools per region type: 0 -> gear/torch, 1 -> gear/neither,
    2 -> torch/neither; a move edge exists for each tool valid in both regions.
    """
    G = networkx.Graph()
    for y, x, val in iterate(cave):
        # Tool-switch edges within a single region (7 minutes).
        if val == 0: G.add_edge((y, x, 'gear'), (y, x, 'torch'), weight=7)
        if val == 1: G.add_edge((y, x, 'gear'), (y, x, 'neither'), weight=7)
        if val == 2: G.add_edge((y, x, 'torch'), (y, x, 'neither'), weight=7)
        # Movement edges (1 minute), one per tool shared by both regions.
        for (ny, nx), nval in get_neighbor_items(cave, y, x):
            if val == 0 and nval == 0:
                G.add_edge((y, x, 'gear'), (ny, nx, 'gear'), weight=1)
                G.add_edge((y, x, 'torch'), (ny, nx, 'torch'), weight=1)
            if val == 0 and nval == 1:
                G.add_edge((y, x, 'gear'), (ny, nx, 'gear'), weight=1)
            if val == 0 and nval == 2:
                G.add_edge((y, x, 'torch'), (ny, nx, 'torch'), weight=1)
            if val == 1 and nval == 0:
                G.add_edge((y, x, 'gear'), (ny, nx, 'gear'), weight=1)
            if val == 1 and nval == 1:
                G.add_edge((y, x, 'gear'), (ny, nx, 'gear'), weight=1)
                G.add_edge((y, x, 'neither'), (ny, nx, 'neither'), weight=1)
            if val == 1 and nval == 2:
                G.add_edge((y, x, 'neither'), (ny, nx, 'neither'), weight=1)
            if val == 2 and nval == 0:
                G.add_edge((y, x, 'torch'), (ny, nx, 'torch'), weight=1)
            if val == 2 and nval == 1:
                G.add_edge((y, x, 'neither'), (ny, nx, 'neither'), weight=1)
            if val == 2 and nval == 2:
                G.add_edge((y, x, 'torch'), (ny, nx, 'torch'), weight=1)
                G.add_edge((y, x, 'neither'), (ny, nx, 'neither'), weight=1)
    # Shortest path from the mouth (torch equipped) to the target (torch equipped).
    return networkx.shortest_path_length(
        G, (0, 0, 'torch'),
        (target.y, target.x, 'torch'),
        weight='weight'
    )
if __name__ == '__main__':
    # Worked example from the puzzle statement (depth 510, target (10, 10)).
    cave = scan_cave(510, Pos(10, 10))
    assert fst_star(cave, Pos(10, 10)) == 114
    assert snd_star(cave, Pos(10, 10)) == 45
    # Personal puzzle input.
    cave = scan_cave(10689, Pos(11, 722))
    print(fst_star(cave, Pos(11, 722)))
    print(snd_star(cave, Pos(11, 722)))
60e82367887d127aa441291f059f6e91b5b3823b | Python | sweekar52/APS-2020 | /Daily-Codes/togglingCases.py | UTF-8 | 163 | 3.71875 | 4 | [] | no_license | char = input("Input the character whose case has to be toggled ")
asci = ord(char)
# ASCII letters differ only in bit 5 (0x20): setting it lower-cases,
# clearing it upper-cases.
if asci < 97 :
    # Below 'a' (97): treat as uppercase, set bit 5 -> lowercase.
    asci |= 32
else :
    # Otherwise treat as lowercase, clear bit 5 (95 == ~32 & 0x7F) -> uppercase.
    asci &= 95
char = chr(asci)
print(char)
b26cf9fe1ce0d046915104e1cbca34dc516521ed | Python | wiktorski/data-intensive-systems-ed1 | /multiline_input.py | UTF-8 | 805 | 2.53125 | 3 | [] | no_license | from mrjob.job import MRJob
import random
class MRMultilineInput(MRJob):
    """mrjob step that reassembles multi-line email messages into
    (message_id, body) pairs from line-oriented input."""

    def mapper_init(self):
        # Per-mapper state: current message id, whether we are inside the
        # body (past the headers), and the accumulated body lines.
        self.message_id = ''
        self.in_body = False
        self.body = []

    def mapper(self, _, line):
        line = line.strip()
        if line.find('Message-ID:') == 0:
            # 'Message-ID: <...>' -> keep only the id between '<' and '>'.
            self.message_id = line[13:len(line)-1]
        if not line and not self.in_body:
            # First blank line separates headers from the body.
            self.in_body=True
        if line.find('From general') == 0 and self.in_body:
            # 'From general' marks the start of the next message (presumably an
            # mbox-style separator -- confirm input format): emit the finished one.
            yield self.message_id, ''.join(self.body)
            self.message_id = ''
            self.body = []
            self.in_body = False
        if self.in_body:
            self.body.append(line)
if __name__ == '__main__':
    # Standard mrjob entry point: parse args and run the job.
    MRMultilineInput.run()
d8d038f0862006370b1771b6d2993370403117f9 | Python | lmlmsan/pyblog | /www/hannuota.py | UTF-8 | 370 | 3.59375 | 4 | [] | no_license | #! /usr/local/bin/python3.7
# -*- coding: utf-8 -*-
#汉诺塔规则:
#每次只能挪动一个,且大盘在下小盘在上,借助中间盘从开始盘挪到终点盘
def mov(n, a, b, c):
    """Tower of Hanoi: print the moves for n disks from peg a to peg c via b.

    BUG FIX: the original only handled n <= 2 with hard-coded prints and had a
    stray module-level `else:` (a syntax error).  This restores the classic
    recursion while producing the exact same output for n in {0, 1, 2}.
    """
    if n == 0:
        print("不能输入为0")
    elif n == 1:
        print(a + " --> " + c)
    else:
        mov(n - 1, a, c, b)      # move n-1 disks out of the way, onto b
        print(a + " --> " + c)   # move the largest disk
        mov(n - 1, b, a, c)      # move the n-1 disks on top of it


# Demo driver preserved from the original script.
mov(2, 'A', 'B', 'C')
| true |
f140d478f5b5f903b9334f1cbf0af3d2b9d5ec08 | Python | hppRC/nlp100knocks | /chapter2/17.py | UTF-8 | 168 | 3.28125 | 3 | [] | no_license | with open("popular-names.txt") as f:
first_columns = set(line.strip().split("\t")[0] for line in f.readlines())
print(*sorted(first_columns), sep="\n", end="")
| true |
4fc6bd0a14c0cafc513527aeb5bb163be3e7ec97 | Python | ajagdev/seng480a | /python/p3.py | UTF-8 | 4,302 | 2.5625 | 3 | [] | no_license | import tkinter
import visualization
import time
import random
from multiprocessing import *
from visualization import TrafficVisualization
# Generates a Poisson process for the four triggers: each fires with a mean
# inter-arrival time of 20 seconds, capped at 30 seconds.
# If nonDeterminism is false this just idles.
def triggers(ew, ns, ew_sensor, ns_sensor, nonDeterminism):
    # Timers are in milliseconds; rate 0.00005/ms gives a 20000 ms mean.
    ew_ped_timer = min(random.expovariate(0.00005), 30000) #Should give 1 instance every 20000 milliseconds
    ns_ped_timer = min(random.expovariate(0.00005), 30000)
    ew_sensor_timer = min(random.expovariate(0.00005), 30000)
    ns_sensor_timer = min(random.expovariate(0.00005), 30000)
    #triggers:
    while (True):
        if nonDeterminism.value:
            # A timer reaching zero fires its shared flag and is re-drawn.
            if (ew_ped_timer <= 0):
                ew.value = True
                ew_ped_timer = min(random.expovariate(0.00005), 30000)
            if (ns_ped_timer <= 0):
                ns.value = True
                ns_ped_timer = min(random.expovariate(0.00005), 30000)
            if (ew_sensor_timer <= 0):
                ew_sensor.value = True
                ew_sensor_timer = min(random.expovariate(0.00005), 30000)
            if (ns_sensor_timer <= 0):
                ns_sensor.value = True
                ns_sensor_timer = min(random.expovariate(0.00005), 30000)
            # Sleep until the earliest timer is due, then advance all timers.
            sleep_time = min(ew_ped_timer,ns_ped_timer,ew_sensor_timer,ns_sensor_timer)
            ew_ped_timer -= sleep_time
            ns_ped_timer -= sleep_time
            ew_sensor_timer -= sleep_time
            ns_sensor_timer -= sleep_time
            time.sleep(sleep_time/1000.0)
        else:
            # Deterministic mode: poll cheaply until toggled back on.
            time.sleep(0.1)
# Shared simulation state (module globals, mutated by mainLoop).
timer = -1        # countdown of the light-change sequence; -1 means idle
EW = 'green'      # east-west traffic light colour
NS = 'red'        # north-south traffic light colour
NS_ped = 'red'    # pedestrian signal colours
EW_ped = 'red'
# multiprocessing.Value flags shared with the triggers() child process.
EW_ped_button = Value('b', False)
NS_ped_button = Value('b', False)
EW_sensor = Value('b', False)
NS_sensor = Value('b', False)
nonDeterminism = Value('b', False)
def mainLoop(vis, singleStep=False):
    """Drive the traffic-light state machine against the visualization.

    `timer` counts down a 5-step light-change sequence (-1 = idle); a sequence
    starts when a sensor fires for the red direction or a pedestrian button is
    pressed.  With singleStep=True, performs exactly one state-machine step and
    returns (for unit testing); otherwise also polls the GUI ten times per step
    at 10 Hz.
    NOTE(review): nesting below the #ew_green/#ns_green labels was
    reconstructed -- those lines are taken to run regardless of the
    pedestrian-button branch; confirm against the original layout.
    """
    #Added for ease of unit testing
    global timer
    global EW
    global NS
    global NS_ped
    global EW_ped
    global EW_ped_button
    global NS_ped_button
    global EW_sensor
    global NS_sensor
    global nonDeterminism
    #loop:
    while True:
        if (timer >= 0):
            timer = timer - 1
        # At t=3 pedestrian greens start flashing/turning yellow.
        if (timer == 3):
            if (EW_ped == 'green'):
                EW_ped = 'yellow'
            if (NS_ped == 'green'):
                NS_ped = 'yellow'
        #ped_yellow:
        # At t=2 the currently-green traffic direction turns yellow.
        if (timer == 2):
            if (EW == 'red'):
                NS = 'yellow'
            else:
                EW = 'yellow'
        #t0:
        # At t=0 the directions swap; a pending ped button for the newly
        # green direction also turns its pedestrian signal green.
        if (timer == 0):
            EW_ped = 'red'
            NS_ped = 'red'
            if (EW == 'red'):
                NS = 'red'
                if (EW_ped_button.value == True):
                    EW_ped_button.value = False
                    #ped1:
                    EW_ped = 'green'
                #ew_green:
                EW_sensor.value = False
                EW = 'green'
            else:
                EW = 'red'
                if (NS_ped_button.value == True):
                    NS_ped_button.value = False
                    #ped2:
                    NS_ped = 'green'
                #ns_green:
                NS_sensor.value = False
                NS = 'green'
        #sensor:
        # While idle, any waiting car on the red direction or any ped button
        # kicks off a new 5-step change sequence.
        if (timer == -1):
            if ((EW == 'green' and NS_sensor.value == True) or \
                (NS == 'green' and EW_sensor.value == True) or \
                NS_ped_button.value == True or \
                EW_ped_button.value == True):
                #timer_reset:
                timer = 5
        if not singleStep:
            # Refresh the GUI and poll user input 10 times per machine step.
            for i in range(0,10):
                vis.setEWLights(EW)
                vis.setNSLights(NS)
                vis.setEWPedLights(EW_ped)
                vis.setNSPedLights(NS_ped)
                vis.setPedButtonVisible('EW', EW_ped_button.value)
                vis.setPedButtonVisible('NS', NS_ped_button.value)
                vis.setSensorVisible('NS', NS_sensor.value)
                vis.setSensorVisible('EW', EW_sensor.value)
                vis.readClick()
                if vis.checkQuit():
                    return
                if vis.checkToggle():
                    nonDeterminism.value = not nonDeterminism.value
                    vis.setToggleMode(nonDeterminism.value)
                if vis.checkEWPed():
                    EW_ped_button.value = True
                if vis.checkNSPed():
                    NS_ped_button.value = True
                if vis.checkEWCar():
                    EW_sensor.value = True
                if vis.checkNSCar():
                    NS_sensor.value = True
                time.sleep(0.1)
        else:
            return
# Triggers run in a different process, so we must assert that only the parent
# process runs the main code.
if __name__ == '__main__':
    # This child process fires events based on a Poisson distribution.
    p = Process(target=triggers, args=(EW_ped_button, NS_ped_button, EW_sensor, NS_sensor, nonDeterminism))
    p.daemon = True  # ensure the trigger process dies with the GUI
    p.start()
    vis = TrafficVisualization(True, True)
    mainLoop(vis)
    vis.close()
| true |
df51d188ceffbf8119ac425702b0747684e5d244 | Python | Kristin-ZLCHEN/spektral | /spektral/layers/ops.py | UTF-8 | 15,047 | 2.859375 | 3 | [
"MIT"
] | permissive | import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.ops.linalg.sparse import sparse as tfsp
# Integer codes for the data modes handled by this module (see autodetect_mode).
modes = {
    'S': 1,    # Single (rank(a)=2, rank(b)=2)
    'M': 2,    # Mixed (rank(a)=2, rank(b)=3)
    'iM': 3,   # Inverted mixed (rank(a)=3, rank(b)=2)
    'B': 4,    # Batch (rank(a)=3, rank(b)=3)
    'UNK': -1  # Unknown
}
################################################################################
# Graph-related ops
################################################################################
def normalize_A(A):
    """
    Computes symmetric normalization of A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k.
    """
    D = degrees(A)
    # D^(-1/2) A D^(-1/2): divide rows and columns by sqrt(degree); epsilon
    # guards against division by zero for isolated nodes.
    D = tf.sqrt(D)[:, None] + K.epsilon()
    perm = (0, 2, 1) if K.ndim(A) == 3 else (1, 0)
    output = (A / D) / transpose(D, perm=perm)
    return output
def degrees(A):
    """
    Computes the degrees of each node in A (row-wise sum over the last axis),
    dealing with sparse A and batch mode automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k - 1.
    """
    reduce_sum = tf.sparse.reduce_sum if K.is_sparse(A) else tf.reduce_sum
    return reduce_sum(A, axis=-1)
def degree_matrix(A, return_sparse_batch=False):
    """
    Computes the degree matrix of A, deals with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :param return_sparse_batch: if operating in batch mode, return a
    SparseTensor. Note that the sparse degree Tensor returned by this function
    cannot be used for sparse matrix multiplication afterwards.
    :return: SparseTensor of rank k.
    """
    D = degrees(A)
    batch_mode = K.ndim(D) == 2
    N = tf.shape(D)[-1]
    batch_size = tf.shape(D)[0] if batch_mode else 1
    # Diagonal (i, i) index pairs, tiled once per batch element.
    inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
    if batch_mode:
        if return_sparse_batch:
            # Prepend a batch coordinate to each diagonal index.
            # NOTE(review): `repeats` is built in float32 while repeat()
            # compares it against an int range -- confirm the dtypes are
            # compatible at runtime.
            outer_index = repeat(
                tf.range(batch_size), tf.ones(batch_size) * tf.cast(N, tf.float32)
            )
            indices = tf.concat([outer_index[:, None], inner_index], 1)
            dense_shape = (batch_size, N, N)
        else:
            # Dense batch of diagonal matrices.
            return tf.linalg.diag(D)
    else:
        indices = inner_index
        dense_shape = (N, N)
    indices = tf.cast(indices, tf.int64)
    values = tf.reshape(D, (-1, ))
    return tf.SparseTensor(indices, values, dense_shape)
################################################################################
# Sparse utils
################################################################################
def sp_matrix_to_sp_tensor(x):
    """
    Converts a Scipy sparse matrix to a SparseTensor.
    :param x: a Scipy sparse matrix (or anything scipy.sparse.coo_matrix accepts).
    :return: a SparseTensor (with indices in canonical row-major order).
    """
    if not hasattr(x, 'tocoo'):
        try:
            x = sp.coo_matrix(x)
        except Exception as e:
            # Was a bare `except:`; chain the original cause so conversion
            # failures remain debuggable.
            raise TypeError('x must be convertible to scipy.coo_matrix') from e
    else:
        x = x.tocoo()
    out = tf.SparseTensor(
        indices=np.array([x.row, x.col]).T,
        values=x.data,
        dense_shape=x.shape
    )
    # TF requires canonically ordered indices for many sparse ops.
    return tf.sparse.reorder(out)
def sp_batch_to_sp_tensor(a_list):
    """
    Converts a list of Scipy sparse matrices to a rank 3 SparseTensor.
    :param a_list: list of Scipy sparse matrices with the same shape.
    :return: SparseTensor of rank 3.
    """
    tensor_data = []
    for i, a in enumerate(a_list):
        # BUG FIX: the original read `a.tocoo().data` but then accessed
        # `a.row`/`a.col` on the *unconverted* matrix, which fails for any
        # non-COO format (e.g. CSR). Convert once and use it throughout.
        a = a.tocoo()
        values = a.data
        row = a.row
        col = a.col
        # Batch coordinate: index of this matrix in the list.
        batch = np.ones_like(col) * i
        tensor_data.append((values, batch, row, col))
    # Concatenate per-matrix (values, batch, row, col) into flat arrays.
    tensor_data = list(map(np.concatenate, zip(*tensor_data)))
    out = tf.SparseTensor(
        indices=np.array(tensor_data[1:]).T,
        values=tensor_data[0],
        dense_shape=(len(a_list), ) + a_list[0].shape
    )
    return out
def dense_to_sparse(x):
    """
    Converts a Tensor to a SparseTensor keeping only the non-zero entries.
    :param x: a Tensor.
    :return: a SparseTensor.
    """
    nonzero = tf.where(tf.not_equal(x, 0))
    return tf.SparseTensor(
        nonzero,
        tf.gather_nd(x, nonzero),
        tf.shape(x, out_type=tf.int64),
    )
################################################################################
# Matrix multiplication
################################################################################
def filter_dot(fltr, features):
    """
    Wrapper for matmul_A_B, specifically used to compute the matrix multiplication
    between a graph filter and node features.
    :param fltr: the graph filter; rank 2 in single/mixed mode, rank 3 in
    batch mode (see autodetect_mode).
    :param features: the node features (N x F in single mode, batch x N x F in
    mixed and batch mode).
    :return: the filtered features.
    """
    mode = autodetect_mode(fltr, features)
    if mode == modes['S'] or mode == modes['B']:
        # Same-rank operands: a plain (possibly sparse) matmul suffices.
        return dot(fltr, features)
    else:
        # Mixed mode
        return mixed_mode_dot(fltr, features)
def dot(a, b, transpose_a=False, transpose_b=False):
    """
    Dot product between a and b along innermost dimensions, for a and b with
    same rank. Supports both dense and sparse multiplication (including
    sparse-sparse).
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with same rank as a.
    :param transpose_a: bool, transpose innermost two dimensions of a.
    :param transpose_b: bool, transpose innermost two dimensions of b.
    :return: Tensor or SparseTensor with rank 2 or 3.
    """
    a_is_sparse_tensor = isinstance(a, tf.SparseTensor)
    b_is_sparse_tensor = isinstance(b, tf.SparseTensor)
    # CSRSparseMatrix supports sparse-sparse matmul, which tf.sparse does not.
    if a_is_sparse_tensor:
        a = tfsp.CSRSparseMatrix(a)
    if b_is_sparse_tensor:
        b = tfsp.CSRSparseMatrix(b)
    out = tfsp.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
    # Convert a sparse result back to a regular SparseTensor.
    if hasattr(out, 'to_sparse_tensor'):
        return out.to_sparse_tensor()
    return out
def mixed_mode_dot(a, b):
    """
    Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
    works for both dense and sparse input filters.
    :param a: rank 2 Tensor or SparseTensor.
    :param b: rank 3 Tensor or SparseTensor.
    :return: rank 3 Tensor or SparseTensor.
    """
    # Fold b's batch dimension into its last axis so a single 2-D
    # (sparse-friendly) matmul can be used, then unfold and restore order.
    s_0_, s_1_, s_2_ = K.int_shape(b)
    B_T = transpose(b, (1, 2, 0))    # (N, F, batch)
    B_T = reshape(B_T, (s_1_, -1))   # (N, F * batch)
    output = dot(a, B_T)
    output = reshape(output, (s_1_, s_2_, -1))
    output = transpose(output, (2, 0, 1))  # back to (batch, N, F)
    return output
def matmul_A_B(a, b):
    """
    Computes A * B, dealing automatically with sparsity and data modes.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    mode = autodetect_mode(a, b)
    if mode == modes['M']:
        # Mixed mode (rank(a)=2, rank(b)=3)
        output = mixed_mode_dot(a, b)
    elif mode == modes['iM']:
        # Inverted mixed (rank(a)=3, rank(b)=2): flatten a's batch dimension,
        # multiply, then restore it.
        _, s_1_a, s_2_a = K.int_shape(a)
        _, s_1_b = K.int_shape(b)
        a_flat = reshape(a, (-1, s_2_a))
        output = dot(a_flat, b)
        output = reshape(output, (-1, s_1_a, s_1_b))
    else:
        # Single (rank(a)=2, rank(b)=2) and batch (rank(a)=3, rank(b)=3) mode
        output = dot(a, b)
    return output
def matmul_AT_B(a, b):
    """
    Computes A.T * B, dealing automatically with sparsity and data modes.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    mode = autodetect_mode(a, b)
    if mode == modes['S'] or mode == modes['M']:
        # Single (rank(a)=2, rank(b)=2)
        # Mixed (rank(a)=2, rank(b)=3)
        a_t = transpose(a)
    elif mode == modes['iM'] or mode == modes['B']:
        # Inverted mixed (rank(a)=3, rank(b)=2)
        # Batch (rank(a)=3, rank(b)=3): transpose the innermost two axes only.
        a_t = transpose(a, (0, 2, 1))
    else:
        raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(
            K.ndim(a), K.ndim(b)
        ))
    # Delegate the mode-aware multiplication to matmul_A_B.
    return matmul_A_B(a_t, b)
def matmul_A_BT(a, b):
    """
    Computes A * B.T, dealing automatically with sparsity and data modes.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    mode = autodetect_mode(a, b)
    if mode == modes['S'] or mode == modes['iM']:
        # Single (rank(a)=2, rank(b)=2)
        # Inverted mixed (rank(a)=3, rank(b)=2)
        b_t = transpose(b)
    elif mode == modes['M'] or mode == modes['B']:
        # Mixed (rank(a)=2, rank(b)=3)
        # Batch (rank(a)=3, rank(b)=3): transpose the innermost two axes only.
        b_t = transpose(b, (0, 2, 1))
    else:
        raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(
            K.ndim(a), K.ndim(b)
        ))
    # Delegate the mode-aware multiplication to matmul_A_B.
    return matmul_A_B(a, b_t)
def matmul_AT_B_A(a, b):
    """
    Computes A.T * B * A, dealing automatically with sparsity and data modes.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    # Compose the two mode-aware products: (A.T B) then (. A).
    return matmul_A_B(matmul_AT_B(a, b), a)
################################################################################
# Ops related to the modes of operation (single, mixed, batch)
################################################################################
def autodetect_mode(a, b):
    """
    Return a code identifying the mode of operation (single, mixed, inverted
    mixed and batch), given a and b. See `ops.modes` for meaning of codes.
    :param a: Tensor or SparseTensor.
    :param b: Tensor or SparseTensor.
    :return: mode of operation as an integer code.
    """
    # Map (rank(a), rank(b)) pairs straight to their mode code; anything
    # else (including unknown ranks reported as None) is 'UNK'.
    rank_to_mode = {
        (2, 2): modes['S'],
        (3, 2): modes['iM'],
        (2, 3): modes['M'],
        (3, 3): modes['B'],
    }
    return rank_to_mode.get((K.ndim(a), K.ndim(b)), modes['UNK'])
################################################################################
# Misc ops
################################################################################
def transpose(a, perm=None, name=None):
    """
    Transposes a according to perm, dealing automatically with sparsity.
    :param a: Tensor or SparseTensor with rank k.
    :param perm: permutation indices of size k.
    :param name: name for the operation.
    :return: Tensor or SparseTensor with rank k.
    """
    if perm is None:
        # Make the 2-D default explicit so the output shape is preserved.
        perm = (1, 0)
    op = tf.sparse.transpose if K.is_sparse(a) else tf.transpose
    return op(a, perm=perm, name=name)
def reshape(a, shape=None, name=None):
    """
    Reshapes a according to shape, dealing automatically with sparsity.
    :param a: Tensor or SparseTensor.
    :param shape: new shape.
    :param name: name for the operation.
    :return: Tensor or SparseTensor.
    """
    op = tf.sparse.reshape if K.is_sparse(a) else tf.reshape
    return op(a, shape=shape, name=name)
def matrix_power(a, k):
    """
    If a is a square matrix, computes a^k. If a is a rank 3 Tensor of square
    matrices, computes the exponent of each inner matrix.
    :param a: Tensor or SparseTensor with rank 2 or 3. The innermost two
    dimensions must be the same.
    :param k: int, the exponent to which to raise the matrices.
    :return: Tensor or SparseTensor with same rank as the input.
    """
    # Repeated left-multiplication; k - 1 products yield a^k starting from a.
    result = a
    for _ in range(k - 1):
        result = matmul_A_B(a, result)
    return result
################################################################################
# Custom ops
################################################################################
def repeat(x, repeats):
    """
    Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D
    tensors).
    :param x: rank 1 Tensor;
    :param repeats: rank 1 Tensor with same shape as x, the number of
    repetitions for each element;
    :return: rank 1 Tensor, of shape `(sum(repeats), )`.
    """
    # Tile every element up to the largest repeat count, then mask away the
    # excess copies per element and flatten.
    x = tf.expand_dims(x, 1)
    longest = tf.reduce_max(repeats)
    tiled = tf.tile(x, [1, longest])
    keep = tf.less(tf.range(longest), tf.expand_dims(repeats, 1))
    return tf.reshape(tf.boolean_mask(tiled, keep), [-1])
def segment_top_k(x, I, ratio, top_k_var):
    """
    Returns indices to get the top K values in x segment-wise, according to
    the segments defined in I. K is not fixed, but it is defined as a ratio of
    the number of elements in each segment.
    :param x: a rank 1 Tensor;
    :param I: a rank 1 Tensor with segment IDs for x;
    :param ratio: float, ratio of elements to keep for each segment;
    :param top_k_var: a tf.Variable created without shape validation (i.e.,
    `tf.Variable(0.0, validate_shape=False)`);
    :return: a rank 1 Tensor containing the indices to get the top K values of
    each segment in x.
    """
    I = tf.cast(I, tf.int32)
    num_nodes = tf.math.segment_sum(tf.ones_like(I), I)  # Number of nodes in each graph
    cumsum = tf.cumsum(num_nodes)  # Cumulative number of nodes (A, A+B, A+B+C)
    cumsum_start = cumsum - num_nodes  # Start index of each graph
    n_graphs = tf.shape(num_nodes)[0]  # Number of graphs in batch
    max_n_nodes = tf.reduce_max(num_nodes)  # Order of biggest graph in batch
    batch_n_nodes = tf.shape(I)[0]  # Number of overall nodes in batch
    to_keep = tf.math.ceil(ratio * tf.cast(num_nodes, tf.float32))
    to_keep = tf.cast(to_keep, I.dtype)  # Nodes to keep in each graph
    # Position of each node inside a padded (n_graphs * max_n_nodes) layout.
    index = tf.range(batch_n_nodes)
    index = (index - tf.gather(cumsum_start, I)) + (I * max_n_nodes)
    y_min = tf.reduce_min(x)
    dense_y = tf.ones((n_graphs * max_n_nodes,))
    # subtract 1 to ensure that filler values do not get picked
    dense_y = dense_y * tf.cast(y_min - 1, dense_y.dtype)
    dense_y = tf.cast(dense_y, top_k_var.dtype)
    # top_k_var is a variable with unknown shape defined in the elsewhere
    top_k_var.assign(dense_y)
    # Scatter x into the padded grid, then sort each graph's row descending.
    dense_y = tf.tensor_scatter_nd_update(top_k_var, index[..., None], tf.cast(x, top_k_var.dtype))
    dense_y = tf.reshape(dense_y, (n_graphs, max_n_nodes))
    perm = tf.argsort(dense_y, direction='DESCENDING')
    perm = perm + cumsum_start[:, None]
    perm = tf.reshape(perm, (-1,))
    # Keep only the first `to_keep` sorted positions of each graph's row.
    to_rep = tf.tile(tf.constant([1., 0.]), (n_graphs,))
    rep_times = tf.reshape(tf.concat((to_keep[:, None], (max_n_nodes - to_keep)[:, None]), -1), (-1,))
    mask = repeat(to_rep, rep_times)
    perm = tf.boolean_mask(perm, mask)
    return perm
| true |
70f8ecf35547b101a4b82f026c8e8c749c558ebd | Python | stephenbradshaw/pentesting_stuff | /clients/tcp_sender.py | UTF-8 | 1,231 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | import socket
import ssl
def send_receive(host: str, port: int, filename: str, wrap_ssl: bool=False, sni_hostname: str=None, timeout: int=5):
    '''Connect to host on given TCP port, with optional ssl wrapping, send data from provided filename, and return response'''
    client_socket = socket.socket()
    client_socket.settimeout(timeout)
    if wrap_ssl:
        # Pentesting tool: certificate verification is intentionally disabled.
        context = ssl._create_unverified_context()
        if sni_hostname:
            client_socket = context.wrap_socket(client_socket, server_hostname=sni_hostname)
        else:
            client_socket = context.wrap_socket(client_socket)
    out = b''
    try:
        client_socket.connect((host, port))
        # FIX: close the file handle (was leaked) and use sendall so the whole
        # payload is transmitted even if send() would accept only part of it.
        with open(filename, 'rb') as payload:
            client_socket.sendall(payload.read())
        while True:
            try:
                d = client_socket.recv(1024)
            except (TimeoutError, ConnectionResetError):
                break
            except OSError:
                # FIX: was a bare `except:` that swallowed everything,
                # including KeyboardInterrupt; only socket/SSL errors end
                # the read loop now.
                break
            if len(d) < 1:
                break  # peer closed the connection
            out += d
    finally:
        # FIX: close the socket even when connect/send raises.
        client_socket.close()
    return out
| true |
2ee25a438eee2ad91512155a8b0809ba0d14ebdc | Python | jjspetz/FCC-backend-projects | /URLshortener/main.py | UTF-8 | 1,374 | 3.28125 | 3 | [] | no_license | import random
import validators
from flask import Flask, redirect
app = Flask(__name__)
# 1. create variable for storage ****
# 2. /new/<url> route assigns short random url and saves url into storage ****
# 3. random url in storage redirect to original page ****
# 4. url validator ****
# 5. add basic homepage
# 6. deploy somewhere
# Declare the in-memory URL registries (lost on restart): long URL -> short
# code, and short code -> long URL.  Code 1000 is pre-seeded as a placeholder.
urls = {'thispageshome': 1000};
reverse_urls = {1000: 'thispageshome'}
# Allocate an unused short code.
def url_creator():
    """Return a random 4-digit code (1001-9999) not already registered."""
    # 1000 is the pre-seeded homepage entry, so the loop always draws at
    # least once; redraw until the code is free.
    code = 1000
    while code in reverse_urls:
        code = random.randint(1001, 9999)
    return code
# routes
@app.route('/')
def homepage():
    # Placeholder landing page (see TODO item 5 above).
    return 'Homepage'
@app.route('/new/<path:url>')
def save_url(url=None):
    """Validate `url`, assign it a random 4-digit short code, register both
    directions in the in-memory maps, and show the mapping."""
    # validate url
    if validators.url(url):
        # Already shortened: point the user at the existing short path.
        if url in urls:
            return 'website already present. Try path - /' + str(urls[url])
        short = url_creator()
        urls[url] = short
        reverse_urls[short] = url
        # print(urls)
        # displays results on screen
        return str({'original': url, 'short': short})
    return 'Invalid URL'
@app.route('/<int:page>')
def redirect_url(page=None):
    """Redirect a numeric short-code path to its registered original URL."""
    # checks if path is a valid saved url
    if page in reverse_urls:
        return redirect(reverse_urls[page])
    return 'Invalid Path'
| true |
8958021b4dd17d83804f84e52ff68a2af3c84722 | Python | djchain/ICHI2019-Hospital-NLP | /train_text.py | UTF-8 | 3,701 | 2.515625 | 3 | [] | no_license | """
Created on Jan 17, 2019
Group activity label classification based on text data.
Using transformer structure (self-attention) without any fusion models.
Experiment is based on 67 trauma cases, input samples is sentence-level data.
@author: Yue Gu, Ruiyu Zhang, Xinwei Zhao
"""
from __future__ import print_function
from keras.models import Model
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import GlobalMaxPooling1D
from keras.optimizers import Adam
from data_preprocessing import data
from transformer import Attention
from transformer import Position_Embedding
# Parameter setting
# Load the hospital dataset and merge the unclear labels into one bucket.
gakki = data(path=r'E:/Yue/Entire Data/CNMC/hospital_data')
saving_path = r'E:/Yue/Entire Data/CNMC/'
gakki.unclear_lbl.append('Monitor Vital Signs')
gakki.auto_process(merge_unclear=True)
gakki.label_mode = 'lower_10'
# Hyperparameters: 11 classes, 6 stacked self-attention layers of
# 8 heads x 16 dims each.
num_class = 11
epoch = 2000
batch_size = 32
head_num = 8
head_size = 16
# Model Architecture
# Text data
# define text input and shape (sentences padded/truncated to 30 tokens)
text_input = Input(shape=(30,))
# word embedding (initialised from the precomputed matrix, fine-tuned)
em_text = Embedding(len(gakki.word_dic) + 1, 200, weights=[gakki.get_embed_matrix()], trainable=True)(text_input)
x = Position_Embedding()(em_text)
# Six transformer-style blocks: self-attention -> batch-norm -> dropout.
x = Attention(head_num, head_size)([x, x, x])
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
x = Attention(head_num, head_size)([x, x, x])
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
x = Attention(head_num, head_size)([x, x, x])
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
x = Attention(head_num, head_size)([x, x, x])
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
x = Attention(head_num, head_size)([x, x, x])
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
x = Attention(head_num, head_size)([x, x, x])
x = BatchNormalization()(x)
# Collapse the sequence dimension before the classifier head.
x = GlobalMaxPooling1D()(x)
# decision-making: small MLP head ending in a softmax over the classes.
x = Dense(32)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.25)(x)
x = Dense(16)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
prediction = Dense(num_class, activation='softmax')(x)
print('prediction shape: ', prediction.shape)
text_model = Model(inputs=text_input, outputs=prediction)
# optimizer
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# NOTE(review): `adam` is constructed above but never used -- the model is
# compiled with the string 'sgd' (default SGD settings). Confirm which
# optimizer was intended.
text_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
text_model.summary()
if __name__ == "__main__":
    # Text model training: each "epoch" re-samples a balanced train/test
    # split and runs a single Keras fit epoch on it.
    text_acc = 0
    for i in range(epoch):
        # data loader (balance data); audio tensors are fetched but unused here.
        test_label, test_text, test_audio_left, test_audio_right = gakki.get_tester(average=True)
        train_label, train_text, train_audio_left, train_audio_right = gakki.get_trainer(average=True)
        print('text branch, epoch: ', str(i))
        text_model.fit(train_text,
                       train_label,
                       batch_size=batch_size,
                       epochs=1,
                       verbose=1)
        # callbacks=[TensorBoard(log_dir=saving_path+'\\hospital_data\\analyze\\log_dir\\')])
        loss_t, acc_t = text_model.evaluate(test_text,
                                            test_label,
                                            batch_size=batch_size,
                                            verbose=0)
        print('epoch: ', str(i))
        print('loss_a', loss_t, ' ', 'acc_a', acc_t)
        gakki.write_epoch_acc(i, acc_t, name='Text')
        # Track the best test accuracy seen so far.
        if acc_t >= text_acc:
            text_acc = acc_t
            """
            if i >= 0:
                text_model.save_weights(saving_path + 'text_transformer_weights.h5')
            """
    print('final_acc: ', text_acc)
| true |
b271c0c6f96e1e9b3d652f44e79dfe950db1de8f | Python | mamemilk/acrc | /プログラミングコンテストチャレンジブック_秋葉,他/src/2-5-1_01_abc126_d.py | UTF-8 | 505 | 3.109375 | 3 | [] | no_license | # https://atcoder.jp/contests/abc126/tasks/abc126_d
import sys
# Kept for compatibility; the iterative DFS below no longer relies on it.
sys.setrecursionlimit(100000)

N = int(input())
G = [[] for _ in range(N + 1)]      # adjacency list: G[u] = [(v, weight), ...]
color = [-1 for _ in range(N + 1)]  # -1 = unvisited, otherwise 0 or 1


for _ in range(N - 1):
    u, v, w = map(int, input().split())
    G[u].append((v, w))
    G[v].append((u, w))


def dfs(v, c):
    """Color the tree so nodes at even edge-weight distance share a color.

    Uses an explicit stack instead of recursion: a path-shaped tree with
    N up to 1e5 would overflow the C stack even with a raised Python
    recursion limit.
    """
    stack = [(v, c)]
    while stack:
        node, col = stack.pop()
        if color[node] != -1:
            continue
        color[node] = col
        for nxt, w in G[node]:
            if color[nxt] == -1:
                # An even edge keeps the color, an odd edge flips it.
                stack.append((nxt, col if w % 2 == 0 else 1 - col))


dfs(1, 0)
print(*color[1:], sep='\n')
cce7850656d0350233b706eb17c8b26862a6bb36 | Python | bernardli/leetcode | /51-100/80_Remove_Duplicates_from_Sorted_Array_II.py | UTF-8 | 728 | 3.359375 | 3 | [] | no_license | from typing import List
class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Compact sorted `nums` in place so each value keeps at most two
        copies; return the new logical length."""
        if not nums:
            return 0
        write = 0           # index of the last element kept so far
        run_value = nums[0]
        run_length = 1      # length of the current run of equal values
        for read in range(1, len(nums)):
            value = nums[read]
            if value == run_value:
                run_length += 1
            else:
                run_value = value
                run_length = 1
            # Keep only the first two occurrences of each run.
            if run_length <= 2:
                write += 1
                nums[write] = value
        return write + 1

    def run(self):
        """Print the results for the two sample inputs."""
        for sample in ([1, 1, 1, 2, 2, 3], [0, 0, 1, 1, 1, 1, 2, 3, 3]):
            print(self.removeDuplicates(sample))
foo = Solution()
foo.run()
| true |
6e8c866d6bf7178db5ca3d760ccd9094a3ba56c5 | Python | JRC1995/Tweet-Disaster-Keyphrase | /Data/data_analysis.py | UTF-8 | 1,393 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | import json
count_data = {}
total_count = 0
train_count = 0
dev_count = 0
test_count = 0
def count(filename, key):
    """Tally tweet sources from a JSON-lines file into the module counters.

    `key` names which split counter ("train"/"dev"/"test") to increment
    once per line.
    """
    global count_data
    global train_count
    global dev_count
    global test_count
    global total_count
    with open(filename) as fh:
        for raw in fh:
            source = json.loads(raw)["source"]
            # Per-source tally (dict.get avoids the explicit membership test).
            count_data[source] = count_data.get(source, 0) + 1
            total_count += 1
            if key == "train":
                train_count += 1
            elif key == "dev":
                dev_count += 1
            elif key == "test":
                test_count += 1
            else:
                print("This shouldn't be happening.")
def display():
    """Print overall split sizes followed by per-source tweet counts."""
    global count_data
    global train_count
    global dev_count
    global test_count
    global total_count
    print("\n\n")
    print("Total Data: ", total_count)
    print("Total Training Data: ", train_count)
    print("Total Validation Data: ", dev_count)
    print("Total Test Data: ", test_count)
    print("\n\n")
    for source, n in count_data.items():
        print("{}: {}".format(source, n))
    print("\n\n")
count("disaster_tweet_id_train.json", "train")
count("disaster_tweet_id_dev.json", "dev")
count("disaster_tweet_id_test.json", "test")
display()
| true |
6dd3a4f2726e5724634d7380eaaed9879c6f374c | Python | jmrinaldi/epigenetics-software | /Epigenetics/WaveGenerator/Utilities/AlignedReadObjPET.py | UTF-8 | 798 | 3.125 | 3 | [] | no_license | '''
Created on 2013-01-28
@author: fejes
'''
class AlignedReadObjPET():
    '''An object to hold PET aligned read objects, mainly from BAM files'''
    def __init__(self, chr_id, le, re, read1, read2):
        '''Store the chromosome id, the left/right end coordinates and the
        two reads (read2 may be None for an unpaired record).'''
        self.left_end = le
        self.right_end = re
        self.chromosome_id = chr_id
        self.read1 = read1
        self.read2 = read2

    @staticmethod
    def type():
        '''Return "AlignedReadObjPET" as a string when asked what type of object this is.'''
        return "AlignedReadObjPET"

    def is_pet(self):
        '''Return True when this record actually holds a read pair, i.e.
        read2 is present; the previous if/else was a verbose spelling of
        this single boolean expression.'''
        return self.read2 is not None
| true |
2fdb78dfcc8ccff568731f691750b056df64bab9 | Python | camilapulido/course-material | /exercices/080/solution.py | UTF-8 | 250 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 23 18:42:03 2014
@author: Camila
"""
data = ["a", "b", "c", "d", "e", "f", "g", "h"]
# Print the concatenation of every pair of distinct letters with i < f.
# Because the inner range starts at i + 1, f can never equal i, so the
# original `if i != f` guard was redundant and has been removed.
for i in range(len(data)):
    for f in range(i + 1, len(data)):
        print(data[i] + data[f])
4e9d5f36db7b9e40b5a80e394ba752f6214371af | Python | Barbapapazes/dungeons-dragons | /utils/hud.py | UTF-8 | 6,911 | 2.75 | 3 | [
"MIT"
] | permissive | import pygame as pg
from os import path
from sprites.player import Player
from config.window import TILESIZE, WIDTH, HEIGHT
from config.colors import RED, GREEN, CYAN, DARKGREY, GOLD, BLACK
from config.sprites import ITEMS, PLAYER_MAX_HP, PLAYER_MAX_MP
from utils.container import Container
class Hud:
    """In-game heads-up display: health/mana/xp bars for the active and
    inactive players, plus the panel shortcut buttons along the top edge."""
    def __init__(self, game):
        # `game` gives access to turn_manager (players) and asset folders.
        self.game = game
        self.button_dict = self.create_buttons_dict()
        self.buttons = list()
        # Side length in pixels of one square HUD button.
        self._size = WIDTH // 20 + WIDTH // 100
        self.create_buttons()
    def draw_healthbars(self, screen):
        """Draw health bars for all players: a large bar at the bottom for
        the character currently in vision, small stacked bars on the right
        for the others.
        Args:
            screen (Surface)
        """
        player = self.game.turn_manager.get_vision_character()
        health = player.health if player is not None else 0
        # Background (red) then foreground (green) scaled by current health.
        pg.draw.rect(screen, RED,
                     (WIDTH // 3, 15*HEIGHT // 16, WIDTH // 3, HEIGHT // 36))
        pg.draw.rect(screen, GREEN, (WIDTH // 3, 15 * HEIGHT // 16, ((WIDTH // 3)
                                                                     * health) // PLAYER_MAX_HP, HEIGHT // 36))
        for i in range(1, len(self.game.turn_manager.players)):
            pg.draw.rect(screen, RED,
                         (21.5*WIDTH // 24, (10 + i) * HEIGHT // 16, WIDTH // 12, HEIGHT // 64))
        # Second pass fills the small bars, skipping the vision character.
        i = 1
        for p in self.game.turn_manager.players:
            if p != self.game.turn_manager.get_vision_character():
                pg.draw.rect(screen, GREEN, (21.5 * WIDTH // 24, (10 + i) * HEIGHT // 16,
                                             ((WIDTH // 12) * p.health) // PLAYER_MAX_HP, HEIGHT // 64))
                i += 1
    def draw_manabars(self, screen):
        """Draw the mana bars (same layout as the health bars, in cyan).
        Args:
            screen (Surface)
        """
        player = self.game.turn_manager.get_vision_character()
        mana = player.MP if player is not None else 0
        pg.draw.rect(screen, DARKGREY,
                     (WIDTH // 3, 15.5*HEIGHT // 16, WIDTH // 3, HEIGHT // 66))
        pg.draw.rect(screen, CYAN, (WIDTH // 3, 15.5*HEIGHT // 16, ((WIDTH // 3)
                                                                    * mana) // PLAYER_MAX_MP, HEIGHT // 68))
        for i in range(1, len(self.game.turn_manager.players)):
            pg.draw.rect(screen, DARKGREY,
                         (21.5*WIDTH // 24, (10.25 + i)*HEIGHT // 16, WIDTH // 12, HEIGHT // 128))
        i = 1
        for p in self.game.turn_manager.players:
            if p != self.game.turn_manager.get_vision_character():
                pg.draw.rect(screen, CYAN, (21.5*WIDTH // 24, (10.25 + i)*HEIGHT // 16,
                                            ((WIDTH // 12)*p.MP) // PLAYER_MAX_MP, HEIGHT // 128))
                i += 1
    def draw_xpbar(self, screen):
        """Draw the xp bars (gold); xp is treated as a 0-100 percentage.
        Args:
            screen (Surface)
        """
        player = self.game.turn_manager.get_vision_character()
        xp = player.xp if player is not None else 0
        # NOTE(review): background is drawn at 14.75*HEIGHT//16 but the gold
        # fill at 14.5*HEIGHT//16 -- the two rows differ; confirm intended.
        pg.draw.rect(screen, DARKGREY,
                     (WIDTH // 3, 14.75*HEIGHT // 16, WIDTH // 3, HEIGHT // 128))
        pg.draw.rect(screen, GOLD, (WIDTH // 3, 14.5*HEIGHT // 16, ((WIDTH // 3)
                                                                    * xp) // 100, HEIGHT // 128))
        for i in range(1, len(self.game.turn_manager.players)):
            pg.draw.rect(screen, DARKGREY,
                         (21.5*WIDTH // 24, (9.9 + i)*HEIGHT // 16, WIDTH // 12, HEIGHT // 254))
        i = 1
        for p in self.game.turn_manager.players:
            if p != self.game.turn_manager.get_vision_character():
                pg.draw.rect(screen, GOLD, (21.5*WIDTH // 24, (10 + i)*HEIGHT // 16,
                                            ((WIDTH // 12)*p.xp) // 100, HEIGHT // 64))
                i += 1
    def draw_shapes(self, screen):
        """Draw the decorative frame image behind the main bar area.
        Args:
            screen (Surface)
        """
        # Image is reloaded from disk on every frame -- presumably cheap
        # enough here, but a candidate for caching.
        bar_img = pg.image.load(path.join("assets", "img", "bar.png")).convert_alpha()
        bar_img = pg.transform.scale(bar_img, (3 * WIDTH // 8, 7 * HEIGHT // 128))
        screen.blit(bar_img, (15*WIDTH // 48, 14.90 * HEIGHT // 16))
    def create_buttons_dict(self):
        """Return the static description of the four HUD buttons: the game
        state each opens and the icon image path."""
        return {
            "quests": {
                "state": "menu",
                "on_click": None,
                "image": path.join(self.game.img_folder, "items", "book_01g.png"),
                "rect": None
            },
            "inventory": {
                "state": "inventory",
                "on_click": None,
                "image": path.join(self.game.assets_folder, "sprites", "chest", "3.png"),
                "rect": None
            },
            "stats": {
                "state": "stats",
                "on_click": None,
                "image": path.join(self.game.img_folder, "items", "cotton_01a.png"),
                "rect": None
            },
            "map": {
                "state": "map",
                "on_click": None,
                "image": path.join(self.game.img_folder, "location.png"),
                "rect": None
            }
        }
    def create_buttons(self):
        """Instantiate one HudButton per entry, laid out right-aligned
        along the top edge, and attach each button's icon and name."""
        max_x = WIDTH
        min_x = WIDTH - self._size*len(self.button_dict)
        for _x in range(min_x, max_x, self._size):
            self.buttons.append(HudButton(_x, 0, self._size, BLACK))
        # Relies on dict insertion order matching the button positions.
        for i, (k, _) in enumerate(self.button_dict.items()):
            self.buttons[i].item = pg.image.load(self.button_dict[k]["image"]).convert_alpha()
            self.buttons[i].set_name(k)
    def get_all_buttons(self):
        """Return the list of HudButton instances."""
        return self.buttons
    def draw_all_buttons(self, screen):
        """Draw every button's square and its icon."""
        for b in self.get_all_buttons():
            b.draw(screen)
            b.draw_image(screen)
    def get_relate_button_state(self, mouse_pos):
        """Get the name of the button under the mouse, if any.
        Args:
            mouse_pos (tuple): x, y
        Returns:
            str: the pressed button's name, or None when no button was hit
        """
        for button in self.get_all_buttons():
            if button.rect.collidepoint(mouse_pos):
                return button.name
    def draw(self, screen):
        """Draw the whole HUD: bars, buttons and the background frame.
        """
        self.draw_healthbars(screen)
        self.draw_manabars(screen)
        self.draw_xpbar(screen)
        self.draw_all_buttons(screen)
        self.draw_shapes(screen)
class HudButton(Container):
    """One square HUD shortcut button; the base square drawing and the
    x/y/size/item attributes come from Container."""

    def set_name(self, name):
        """Remember which HUD panel this button opens."""
        self.name = name

    def draw_image(self, screen):
        """Blit the button's icon, scaled down and centered in the square."""
        self.offset = 14  # pixel margin kept around the icon
        if not self.item:
            return
        icon = pg.transform.scale(
            self.item, (self.size - self.offset, self.size - self.offset))
        screen.blit(icon, (self.x + self.offset // 2, self.y + self.offset // 2))
| true |
2974e8c24549e95fd7a18b68e1a8db9f74352f37 | Python | xbw1266/self_driving_rc | /train_data/train.py | UTF-8 | 3,096 | 2.65625 | 3 | [] | no_license | import csv
import cv2
import numpy as np
# Recorded driving data: one CSV row per captured frame.
filepath = '/home/bowen/data_new/'
filename = 'record.csv'
lines = []
# Key pressed -> [left_motor, right_motor] target speeds.
key_map = {"a": [15, 30], "w": [25, 25], "s": [-25,-25], "d": [30, 15], " ": [0, 0]}
with open(filepath + filename) as f:
    reader = csv.reader(f)
    for line in reader:
        lines.append(line)
images = []
measurements_Y1 = []
measurements_Y2 = []
# Load every frame with its two motor targets, plus a horizontally
# flipped copy of each frame (augmentation) with mirrored labels.
for idx, line in enumerate(lines):
    img_path = line[1]
    image = cv2.imread(img_path)
    images.append(image)
    measurement_Y1 = key_map[line[-1]][0]
    measurement_Y2 = key_map[line[-1]][1]
    measurements_Y1.append(measurement_Y1)
    measurements_Y2.append(measurement_Y2)
    image_extra = cv2.flip(image, 1)
    # Show one before/after pair as a visual sanity check.
    if idx == 1:
        cv2.imshow('img_after', image_extra)
        cv2.imshow('img_before', image)
        k = cv2.waitKey(0)
        if k == 27:
            cv2.destroyAllWindows()
    images.append(image_extra)
    # Labels for the mirrored frame: straight stays straight, left and
    # right swap, stop becomes forward (as in the original code).
    if line[-1] == 'w':
        measurements_Y1.append(key_map[line[-1]][0])
        measurements_Y2.append(key_map[line[-1]][1])
    elif line[-1] == 'a':
        new_key = 'd'
        measurements_Y1.append(key_map[new_key][0])
        measurements_Y2.append(key_map[new_key][1])
    elif line[-1] == 'd':
        new_key = 'a'
        measurements_Y1.append(key_map[new_key][0])
        measurements_Y2.append(key_map[new_key][1])
    elif line[-1] == ' ':
        measurements_Y1.append(25)
        measurements_Y2.append(25)
    elif line[-1] == 's':
        # BUG FIX: a flipped image is appended for *every* row, but 's'
        # (reverse) rows previously appended no labels, desynchronising
        # images and labels. Reverse is left/right symmetric, so the
        # mirrored frame keeps the same labels.
        measurements_Y1.append(key_map['s'][0])
        measurements_Y2.append(key_map['s'][1])
# # now adding data agumentation:
# images.append(cv2.flip(image, 1))
# measurements.append(180-measurement)
from data_preprocess import Img_process
from matplotlib import pyplot as plt
print('image loaded, the dimension is {}'.format(image.shape))
if image.shape != (160,320,3):
    print('The dimension is not comptiable, resizing ...')
# Derive the HSV edge image actually fed to training for every frame.
for idx, image in enumerate(images):
    image = Img_process(image)
    image_resized = image.resize(320, 160)
    image_gray = image.hsv()
    image_blur = image.blur()
    image_edge = image.detect()
    if idx == 1:
        plt.subplot(131), plt.imshow(image_gray, 'hsv'), plt.title('hsv image')
        plt.subplot(132), plt.imshow(image_blur, 'hsv'), plt.title('blurred image')
        plt.subplot(133), plt.imshow(image_edge, 'hsv'), plt.title('hsv edge image')
        plt.show()
    images[idx] = image_edge
X_train = np.array(images)
Y_train1 = np.array(measurements_Y1)
Y_train2 = np.array(measurements_Y2)
from RCNN import Mycnn
my_model = Mycnn(X_train, Y_train1, Y_train2)
my_model.train()
#
## the following is the structures of the CNN:
#from keras.models import Sequential
#from keras.layers import Flatten, Dense, Lambda
#from keras.layers.convolutional import Convolution2D
#from keras.layers.pooling import MaxPooling2D
#
#
#model = Sequential()
#model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)) # normalize the data and centralize the data
#model.add(Convolution2D(6,5,5,activation='relu'))
#model.add(MaxPooling2D())
#model.add(Convolution2D(6,5,5,activation='relu'))
#model.add(MaxPooling2D())
#model.add(Flatten())
#model.add(Dense(120))
#model.add(Dense(84))
#model.add(Dense(1))
#
#model.compile(loss='mse', optimizer='adam')
#model.fit(X_train, Y_train, validation_split=0.2, shuffle=True, nb_epoch=7)
#
#model.save('model.h5')
#
| true |
3acf8dc82abdfcdffd259d984f0fbbbc10059aca | Python | ModelOriented/MAIR | /scripts/citations_graph.py | UTF-8 | 2,193 | 2.96875 | 3 | [] | no_license | # get all arxiv ids
# fetch metadata from Semantic Scholar for all of them and dump it
# visualize the citation graph
# what is most cited?
# What are the most frequent authors by name/id?
import json
import os
import time
from collections import Counter
import semanticscholar as sch
GRAPH_JSON = "../data/arxiv_dump/citations_graph.json"
SEMANTIC_SCHOLAR_PAPER_BASE_URL = "https://api.semanticscholar.org/arXiv"
PAPERS_METADATA_PATH = "../data/arxiv_dump/semantic_scholar.json"
def get_paper_ids():
    """Collect arXiv ids from the source dump directory: skip tarballs and
    strip the version suffix (everything from the first 'v')."""
    sources = "../data/arxiv_dump/sources"
    return {
        name.split("v")[0]
        for name in os.listdir(sources)
        if not name.endswith("tar.gz")
    }
def fetch_paper_information(arxiv_id):
    """Fetch one paper from Semantic Scholar; return (paper, cited arXiv ids)."""
    print("Fetching: {}".format(arxiv_id))
    time.sleep(2.5)  # crude rate limiting for the public API
    paper = sch.paper(
        "arXiv:{}".format(arxiv_id), timeout=10, include_unknown_references=True
    )
    try:
        cited = [
            ref["arxivId"] for ref in paper["references"] if ref["arxivId"] is not None
        ]
    except KeyError:
        # Paper payload without a usable "references" list.
        cited = []
    return paper, cited
def build_citations_graph(paper_ids):
    """Fetch metadata for every id, then dump the citation graph and the
    raw paper data as pretty-printed JSON."""
    graph = {}
    papers_data = {}
    for i, arxiv_id in enumerate(paper_ids):
        # Progress line every 20 documents.
        if i % 20 == 19:
            print("Fetched {}/{} documents".format(i, len(paper_ids)))
        papers_data[arxiv_id], graph[arxiv_id] = fetch_paper_information(arxiv_id)

    def _dump(path, payload):
        with open(path, "w") as handle:
            json.dump(payload, handle, indent=2)

    _dump(GRAPH_JSON, graph)
    _dump(PAPERS_METADATA_PATH, papers_data)
def calculate_graph_stats():
    """Print every paper cited more than 10 times (most cited first),
    with its title when we have metadata for it."""
    with open(GRAPH_JSON, "r") as f:
        graph = json.load(f)
    with open(PAPERS_METADATA_PATH, "r") as f:
        metadata = json.load(f)

    citation_counts = Counter()
    for cited_ids in graph.values():
        citation_counts.update(cited_ids)

    for paper_id, n_citations in citation_counts.most_common():
        if n_citations <= 10:
            continue
        title = metadata[paper_id]["title"] if paper_id in metadata else ""
        print(paper_id, n_citations, paper_id in graph, title)
if __name__ == "__main__":
paper_ids = get_paper_ids()
# build_citations_graph(list(paper_ids))
calculate_graph_stats()
| true |
5e72821d37e5dd6c277cd9de5ccdb446c71be772 | Python | shaojim12/my_leetcode | /91.decode-ways.py | UTF-8 | 538 | 2.859375 | 3 | [] | no_license | #
# @lc app=leetcode id=91 lang=python3
#
# [91] Decode Ways
#
# @lc code=start
class Solution:
    def numDecodings(self, s: str) -> int:
        """Count decodings of digit string `s` where 1->A ... 26->Z.

        dp[i] = number of ways to decode the first i characters.
        Fix: removed a leftover `print(i)` debug statement from the loop.
        """
        if not s:
            return 0
        dp = [0] * (len(s) + 1)
        dp[0] = 1
        dp[1] = 1 if s[0] != '0' else 0
        for i in range(1, len(s)):
            # Single-digit decode: valid unless the digit is '0'.
            if s[i] != '0':
                dp[i + 1] = dp[i]
            # Two-digit decode: the pair must form a value in 10..26.
            two = int(s[i - 1:i + 1])
            if 10 <= two <= 26:
                dp[i + 1] += dp[i - 1]
        return dp[-1]
# @lc code=end
| true |
3474378aa70150e4b54a45b0ba55400470c2165d | Python | cash2one/5173.com | /monitor/test/b64.py | UTF-8 | 646 | 2.78125 | 3 | [] | no_license | import base64
import time
import datetime
a = ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
c = 'YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWF\r\nhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYW\r\nFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFh'
# NOTE(review): Python 2 code -- bare `print` statements below; also
# base64.encodestring/decodestring are the long-deprecated aliases that
# were removed in Python 3.9 (use encodebytes/decodebytes there).
# encodestring inserts "\n" line breaks in its output; decodestring
# ignores embedded "\r\n" while decoding.
b = base64.encodestring(a)
d = base64.decodestring(c)
#t = datetime.datetime.today()
print b
print d
#print time.gmtime(0x4ba315b5)
#print t.microsecond
#print time.ctime(0x4B9DC621)
#print b
#print time.time()
25adb355927e76e915a63c44c9c0de9e5a8c846e | Python | KunalSharma3197/Flappy-Bird | /bird/Bird.py | UTF-8 | 1,892 | 3.390625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Class representing bird in the game
import pygame
from pygame.locals import *
class Bird(pygame.sprite.Sprite) :
    """Flappy-bird player sprite: gravity, click-to-flap and a 3-frame
    wing animation with rotation based on vertical velocity."""
    def __init__(self, x, y, flying, game_over) :
        # Sprite setup: load the three animation frames bird1..bird3.png.
        pygame.sprite.Sprite.__init__(self)
        self.images = []
        self.index = 0      # current animation frame
        self.counter = 0    # frames since the last animation advance
        for i in range(1, 4) :
            img = pygame.image.load(f"./images/bird{i}.png")
            self.images.append(img)
        self.image = self.images[self.index]
        self.rect = self.image.get_rect()
        self.rect.center = [x, y]
        self.vel = 0            # vertical velocity (positive = falling)
        self.clicked = False    # debounce flag for the mouse button
        self.flying = flying
        self.game_over = game_over
    def update(self) :
        """Advance one frame: apply gravity, process clicks and animate."""
        if self.flying == True :
            # Applying gravity (velocity capped at 8; 576 is the floor y).
            self.vel += 0.5
            if self.vel > 8 :
                self.vel = 8
            # print(self.vel)
            if self.rect.bottom < 576 :
                self.rect.y += int(self.vel)
        # Jump on mouse clicks
        if self.game_over == False :
            # `clicked` makes each press count once (edge-triggered flap).
            if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False :
                self.clicked = True
                self.vel = -10
            if pygame.mouse.get_pressed()[0] == 0 and self.clicked == True :
                self.clicked = False
            # Handle animation
            self.counter += 1
            flap_cooldown = 5
            if self.counter > flap_cooldown :
                self.counter = 0
                self.index += 1
                if self.index == len(self.images) :
                    self.index = 0
                self.image = self.images[self.index]
                # Rotate the bird
                self.image = pygame.transform.rotate(self.images[self.index], self.vel * -2)
        else :
            # Game over: nose-dive pose.
            self.image = pygame.transform.rotate(self.images[self.index], -90)
| true |
2e271acb8f05aa8ecc637943b2b244e13a961d3e | Python | python402837617/python2 | /联手.py | UTF-8 | 815 | 3.03125 | 3 | [] | no_license | from enum import IntEnum,unique
from 类 import Human
class Sb(Human):
    """Demo subclass of Human with a class-level `sex` attribute."""

    sex = 18

    def __init__(self, name, age):
        # Delegate field initialisation to the Human base class.
        super().__init__(name, age)

    def sb_name(self):
        """Print this instance's name."""
        print(self.name)

    @classmethod
    def sb_sex(cls):
        """Print the class attribute `sex`."""
        print(cls.sex)
@unique
class shitang(IntEnum):
    """Canteen ("shitang") identifiers: first/second/third dining hall.
    @unique forbids two members sharing a value."""
    yst=1
    est=2
    sst=3
def zsq(f):
    """Decorator that prints an announcement before delegating to `f`.

    Improvements over the original: the wrapper forwards arbitrary
    arguments, propagates f's return value (previously discarded), and
    functools.wraps preserves f's name/docstring for introspection.
    Backward compatible: zero-arg callables behave exactly as before.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        print("jyf is a boy")
        return f(*args, **kwargs)
    return wrapper
#@zsq
def wb(b):
    """Return a closure that increments the captured counter and prints it."""
    def bump():
        nonlocal b
        b += 1
        print(b)
    return bump
@zsq
def hss():
    """Demo of map/reduce/filter over a small int list; prints the results."""
    values = [0, 1, 2, 3, 4, 4, 2, 3, 0, 0, 0]
    incremented = map(lambda x: x + 1, values)
    from functools import reduce
    # Left fold with seed -1: -1 - v0 - v1 - ...
    folded = reduce(lambda x, y: x - y, values, -1)
    nonzero = filter(lambda x: x != 0, values)
    print(list(incremented), folded, list(nonzero))
sb1=Sb("lch",20)
print(sb1.name+"is"+shitang.yst.name)
f=wb(5)
f()
hss()
| true |
14ef524c1e72391412da3120bc30352f1521f3a2 | Python | VikMaamaa/python-projects | /Project 7/curse.py | UTF-8 | 869 | 3.3125 | 3 | [] | no_license | import random
Noun = ('Goat','Fool','Thief','Bastard','Dog','Fish','Idiot','asshole','bitch','nigga','empty brain','gold digger')
Adjective = ('Ugly', 'poor', 'Sweet', 'Red', 'Black', 'Bitter',
'wonderful', 'gentle', 'unfortunate', 'sleepy', 'foolish', 'quick',
'Crazy','stupid','rotten','stubborn')
print("-"*60 +"\n\t\tProgram to generate random curses\n" + "-"*60)
NumCurses = int(input("\nEnter number of curses: "))
# Emit NumCurses random curses, each using two *distinct* adjectives.
for _ in range(NumCurses):
    # random.sample draws two different indices in one step, replacing the
    # original retry-by-recursion helper (goback), which redefined a
    # recursive closure on every loop iteration and could in principle
    # exhaust the stack on a long run of identical draws.
    ad1, ad2 = random.sample(range(len(Adjective)), 2)
    n = random.randrange(len(Noun))
    print("You", Adjective[ad1], Adjective[ad2], Noun[n])
    print("+"*60 + "\n"+"_"*60 +"\n")
| true |
cdb95feed58cf60a4a09c7b147aae4a9f6bf666b | Python | abhay27chauhan/Python-concepts | /operator.py | UTF-8 | 1,183 | 4.875 | 5 | [] | no_license | '''
# 1 operations in python
2 + 3
7 - 4
3 * 2
10 % 2
6 / 3 -> 2.0 (always give float)
2 ** 3 -> 2*2*2
10 * 'str' -> 'strstrstrstrstrstrstrstrstrstr'
# 2 operators precedence -
1. () -> parentheses
2. ** -> exponents
3. * / -> both have same precedence, anyone of the two operators coming 1st in
equation will be given priority.
4. + - -> both have same precedence, anyone of the two operators coming 1st in
equation will be given priority.
'''
# Example
# Interactive tip calculator: read the bill, tip percentage and party
# size, then print each person's share.
print("Welcome to the tip calculator!")
bill = float(input("What was the total bill? $")) # string converted to float
tip = int(input("How much tip would you like to give? 10, 12, or 15? "))
people = int(input("How many people to split the bill?"))
tip_as_percent = tip / 100
total_tip_amount = bill * tip_as_percent
total_bill = bill + total_tip_amount
bill_per_person = total_bill / people
# First (instructional) variant: numeric rounding, may print "12.0".
final_amount = round(bill_per_person, 2) # round-off to 2 decimal places
#If you want the final_amount to always have 2 decimal places.
#e.g. $12 becomes $12.00
# You can do this instead of line 33
# This string-format variant deliberately overwrites the value above.
final_amount = "{:.2f}".format(bill_per_person)
print(f"Each person should pay: ${final_amount}")
| true |
3c613a16faa3ee20f5280f0f4d667b9e6f88e72b | Python | AjinkyaTaranekar/AlgorithmX | /Codeforces/118/B.py | UTF-8 | 241 | 3.46875 | 3 | [] | no_license | n = int(input())
# Print the digit diamond for size n: row i holds 0..top-1 ascending,
# then top..1 descending, then a final 0; rows -n and n are the tips.
for i in range(-n, n+1):
    top = n - abs(i)
    # Left padding: one print of the literal per indent level.
    # NOTE(review): correctness of the alignment depends on the exact
    # number of spaces inside the literal below (each digit+separator is
    # two columns wide) -- verify, whitespace may have been mangled.
    for j in range(abs(i)):
        print(" ", end="")
    for j in range(top):
        print(j, end=" ")
    for j in range(top, 0, -1):
        print(j, end=" ")
    print(0)
| true |
991a5a4b0cc82cb4e4cbaf5bf120c0150d5049a6 | Python | cr21/neuralNetwork | /nn/conv/lenet.py | UTF-8 | 1,069 | 2.53125 | 3 | [] | no_license | from tensorflow.keras.layers import Conv2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras import backend as K
class Lenet():
    """LeNet-style CNN builder: two conv/pool stages and one dense hidden
    layer, ending in a softmax over `classes` outputs."""

    @staticmethod
    def build(height, width, depth, classes):
        """Return an uncompiled LeNet model.

        height/width/depth describe the input image; the channel axis is
        placed according to the active Keras image data format.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        # BUG FIX: image_data_format is a *function*; the original compared
        # the function object itself to the string, which is always False,
        # so channels_first inputs were silently mis-shaped.
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        model.add(Conv2D(20, (5, 5), padding='same', input_shape=inputShape))
        model.add(Activation('relu'))
        model.add(MaxPool2D((2, 2), strides=(2, 2)))
        # input_shape is only meaningful on the first layer, so it is not
        # repeated here.
        model.add(Conv2D(50, (5, 5), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPool2D((2, 2), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation('relu'))
        model.add(Dense(classes))
        model.add(Activation('softmax'))
        return model
| true |
82edb74d29671c73df9f1f7254b52f516b1d33e5 | Python | sparckix/BlurConcurrentProgramming | /blur.py | UTF-8 | 3,406 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | '''Archivo de ayuda para el procesamiento de la imagen. Hay que tener en cuenta que se trata de una simulacion y que por tanto
se usa memoria compartida. En un caso real distribuido la imagen podria estar en un servidor central y cada nodo cogeria su "trozo", por ejemplo'''
import sys
import Image, numpy, ImageFilter
# Exactly one image path argument must be supplied.
if len(sys.argv) != 2:
    sys.exit('ERROR. Uso: python dsFiltrado.py images/imagen')
try:
    img = Image.open(sys.argv[1])
except Exception:
    # Narrowed from a bare `except:`, which would also swallow
    # SystemExit and KeyboardInterrupt.
    sys.exit("ERROR. No se ha podido abrir la imagen")
imgArr = numpy.asarray(img) # readonly
# Width and height in pixels.
imgWidth = imgArr.shape[1]
imgHeight = imgArr.shape[0]
'''SE DEJA COMENTADO. NO RESULTO VIABLE.
# blur radius in pixels
radius = 2
# blur window length in pixels
windowLen = radius*2+1
#simple box/window blur
def doblur(imgArr, width, height, k, l):
# create array for processed image based on input image dimensions
k = l
imgB = numpy.zeros((imgHeight,width,3),numpy.uint8)
imgC = numpy.zeros((imgHeight,width,3),numpy.uint8)
# blur horizontal row by row
for ro in range(1,height):
# RGB color values
totalR = 0
totalG = 0
totalB = 0
# calculate blurred value of first pixel in each row
for rads in range(-radius, radius+1):
if (rads) >= 0 and (rads) <= width-1:
totalR += imgArr[ro,rads][0]/windowLen
totalG += imgArr[ro,rads][1]/windowLen
totalB += imgArr[ro,rads][2]/windowLen
imgB[ro,l] = [totalR,totalG,totalB]
# calculate blurred value of the rest of the row based on
# unweighted average of surrounding pixels within blur radius
# using sliding window totals (add incoming, subtract outgoing pixels)
for co in range(l,width):
if (co-radius-1) >= 0:
totalR -= imgArr[ro,co-radius-1][0]/windowLen
totalG -= imgArr[ro,co-radius-1][1]/windowLen
totalB -= imgArr[ro,co-radius-1][2]/windowLen
if (co+radius) <= width-1:
totalR += imgArr[ro,co+radius][0]/windowLen
totalG += imgArr[ro,co+radius][1]/windowLen
totalB += imgArr[ro,co+radius][2]/windowLen
# put average color value into imgB pixel
imgB[ro,co] = [totalR,totalG,totalB]
# blur vertical
for co in range(l,width):
totalR = 0
totalG = 0
totalB = 0
for rads in range(-radius, radius+1):
if (rads) >= 0 and (rads) <= height-1:
totalR += imgB[rads,co][0]/windowLen
totalG += imgB[rads,co][1]/windowLen
totalB += imgB[rads,co][2]/windowLen
imgC[0,co] = [totalR,totalG,totalB]
for ro in range(1,height):
if (ro-radius-1) >= 0:
totalR -= imgB[ro-radius-1,co][0]/windowLen
totalG -= imgB[ro-radius-1,co][1]/windowLen
totalB -= imgB[ro-radius-1,co][2]/windowLen
if (ro+radius) <= height-1:
totalR += imgB[ro+radius,co][0]/windowLen
totalG += imgB[ro+radius,co][1]/windowLen
totalB += imgB[ro+radius,co][2]/windowLen
imgC[ro,co] = [totalR,totalG,totalB]
return imgB
# number of times to run blur operation
blurPasses = 1
# temporary image array for multiple passes
imgTmp = imgArr'''
| true |
5126769342ac1c4d27ad95f9967011d09b2006e5 | Python | jfsawyer88/CodeEval | /2-Moderate/076-StringRotation/StringRotation.py | UTF-8 | 501 | 3.1875 | 3 | [] | no_license | ## CodeEval
## String Rotation
import sys
# For each "s,t" input line, print True when s is a rotation of t.
with open(sys.argv[1], 'r') as f:
    for line in f:
        line = line.strip()
        if line:
            parts = line.split(',')
            s, t = parts[0], parts[1]
            # s is a rotation of t iff the lengths match and s appears in
            # t doubled: t+t contains every rotation of t as a substring.
            # This is O(n) per line instead of the original O(n^2) scan,
            # and drops Python-2-only xrange. Empty strings still report
            # False, matching the original behaviour.
            if s and len(s) == len(t) and s in t + t:
                sys.stdout.write('True\n')
            else:
                sys.stdout.write('False\n')
| true |
7ea8dcfc70253ef558495255b24113f76151f2be | Python | horseno/userStudy | /process_result/parse_result.py | UTF-8 | 1,521 | 2.828125 | 3 | [] | no_license | import os
import pandas as pd
import pre_process
import numpy as np
'''Retrieve data from DB and parse result to determine rejections.
Rules:
- If all the responses are the same.
- If there are more than 3 occurrences with rt<=500ms => reject
- If there are more than 5 occurrences with obvious wrong answers.
- If the error rate is higher than 50% (worse than random guesses)
'''
data_path = "./out/"
approve_list= "approve_list"
reject_list= "reject_list"
def get_seq(df):
    """Return |targetR - variableR| for the TEST-phase rows of df, as a list.

    Computes the distances as a plain expression instead of writing a
    "distance" column onto a filtered frame (the original triggered
    pandas SettingWithCopy warnings by mutating a slice).
    """
    test_rows = df[df['phase'] == "TEST"]
    return (test_rows['targetR'] - test_rows['variableR']).abs().tolist()
# NOTE(review): Python 2 script (bare `print` statements below).
# Pipeline: pre-process raw results, read the rejected assignment ids,
# then average the last 24 trial distances per approved file into the
# JND bucket for that file's targetR.
pre_process.pre_process(data_path,approve_list,reject_list)
reject_f = open(reject_list, 'r')
rejected = reject_f.readline().split(" ")
JND_dic = {0.2:[],0.3:[],0.4:[],0.5:[],0.6:[],0.7:[]}
for fileName in os.listdir(data_path):
    assignmentID = fileName.split(".")[0]
    if not assignmentID in rejected:
        dataframe = pd.read_csv(data_path+fileName)
        # get_seq filters to TEST rows again; this pre-filter is redundant
        # but harmless.
        dataframe = dataframe.loc[dataframe['phase'] == "TEST"]
        targetR = round(dataframe.iloc[0]['targetR'],2)
        distance = np.array(get_seq(dataframe))
        #print "targetR " + str(targetR)
        JND_dic[targetR].append(distance[-24:].mean())
# Report each targetR bucket and its mean JND when non-empty.
for i in JND_dic:
    print i
    print JND_dic[i]
    if len(JND_dic[i]):
        print sum(JND_dic[i])/float(len(JND_dic[i]))
#print JND_dic
if __name__=="__main__":
    pass
| true |
e93407ef888a1760b724682a5e489870c3c4b7df | Python | rhorrace/CS445-MachineLearning | /HW2/experiment2.py | UTF-8 | 2,355 | 3.25 | 3 | [] | no_license | # Robert Horrace
# 967743553
import numpy as np
import network as N
# begin function declarations
# Convert actual to output targets
def identity(n):
    """Return an n x n target matrix: 0.9 on the diagonal, 0.1 elsewhere
    (soft one-hot targets for the output units)."""
    eye = np.identity(n, dtype=float)
    return np.where(eye == 1, 0.9, 0.1)
# end convert
# Begin Assignment 2 program
#epochs
epochs = 50
# Number of inputs
num_inputs = 785
# Number of Hidden/Output perceptrons
H_n = 50
O_n = 10
network = N.Network(num_inputs, H_n, O_n)
# Read from training data csv
training = np.genfromtxt('mnist_train.csv', dtype=int, delimiter=',')
# Read from testing data csv
testing = np.genfromtxt('mnist_test.csv', dtype=int, delimiter=',')
# number of training inputs
num_training = np.size(training,axis=0)
# Number of testing outputs
num_testing = np.size(testing,axis=0)
# Shuffle training data by row
np.random.shuffle(training)
# capture actual values
train_actuals = np.array(training[:,0], dtype=int)
training = np.delete(training,0,1)
test_actuals = np.array(testing[:,0], dtype = int)
testing = np.delete(testing,0,1)
O_t = identity(O_n)
# scale data for smaller weights
training = np.divide(training, 255.0)
testing = np.divide(testing, 255.0)
# Epoch 0
train_predictions = network.predict(training)
test_predictions = network.predict(testing)
print("Epoch 0):")
# Display training accuracy
print("Training data Accuracy: %.2f %%" % (np.sum(train_predictions == train_actuals) / num_training * 100.0))
# Display testing accuracy
print("Testing data Accuracy: %.2f %%" % (np.sum(test_predictions == test_actuals) / num_testing * 100.0))
# Epochs
# One backprop pass over every training sample per epoch, reporting
# train/test accuracy after each epoch.
for i in range(epochs):
    print("Epoch %d):" % (i+1))
    # Training
    # FIX: the inner loop previously reused `i`, shadowing the epoch
    # counter; a distinct index avoids that foot-gun.
    for j in range(num_training):
        t = O_t[train_actuals[j]]
        network.back_prop(training[j,:], t)
    # end training
    train_predictions = network.predict(training)
    test_predictions = network.predict(testing)
    # display training accuracy
    print("Training data Accuracy: %.2f %%" % (np.sum(train_predictions == train_actuals) / num_training * 100.0))
    # Display testing accuracy
    print("Testing data Accuracy: %.2f %%" % (np.sum(test_predictions == test_actuals) / num_testing * 100.0))
# end epoch for loop
# Confusion matrix
# rows = actual digit (0-9), columns = predicted digit, tallied from the
# final epoch's test-set predictions.
confusion = np.zeros(100, dtype=int).reshape((10,10))
for i in range(num_testing):
    confusion[test_actuals[i],test_predictions[i]] += 1
print("Confusion matrix:\n", confusion)
| true |
a4991f9b9cfbab1a9dd23d4849dc9b15d0ac47c9 | Python | zzxuanyuan/machine-learning-project | /merge_preempt.py | UTF-8 | 951 | 2.890625 | 3 | [] | no_license | # This file merges LightPreempted, HeavyPreempted and Weird(A job id only occurs once but the last job activity was Busy)
# into Preempted. The output file will be used for classification
import sys
import csv
preemptDict = dict()
lines = list()
with open(sys.argv[1], 'r') as fr:
for row in csv.reader(fr):
if(row[-1] == 'LightPreempted'):
row[-1] = 'Preempted'
if row[0] not in preemptDict:
preemptDict[row[0]] = 0
row[-2] = preemptDict[row[0]]
else:
preemptDict[row[0]] += 1
row[-2] = preemptDict[row[0]]
elif(row[-1] == 'HeavyPreempted'):
row[-1] = 'Preempted'
if row[0] not in preemptDict:
preemptDict[row[0]] = 0
row[-2] = preemptDict[row[0]]
else:
preemptDict[row[0]] += 1
row[-2] = preemptDict[row[0]]
elif(row[-1] == 'Weird'):
row[-1] = 'Preempted'
lines.append(row)
with open(sys.argv[2], 'w') as fw:
for row in lines:
csv.writer(fw).writerow(row)
fr.close()
fw.close()
| true |
528d56a0a74db01e931e26f3dbc10d99ed0cac48 | Python | sudeepigntion/machine_learning | /Language Recognizer/Init.py | UTF-8 | 2,028 | 3.265625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
stop = stopwords.words('english')
# print(stop)
train = pd.read_csv("reviews.csv")
# Note *
# We shall make use if the Ney York Times user comments (from Kaggle Datasets)
# Once we create the language classifier, we will use other data
# Until then, let's rely on an English natural language source
print(train.head())
# now we first put everything to lower case
train["Summary_lower"] = train["Summary"].str.lower()
train["Summary_no_punctuation"] = train['Summary_lower'].astype(str).str.replace('[^\w\s]','')
# lets check how the text looks like now! well everything is lowercase and no ugly characters
print(train["Summary_no_punctuation"].head())
Tf_train = train
train['Summary_no_punctuation'] = train['Summary_no_punctuation'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
Tf_train['Summary_no_punctuation'] = train['Summary_no_punctuation'].fillna("fillna")
print(Tf_train['Summary_no_punctuation'].head())
# We first assign our current data frame to another to keep track of our work then we read the first sentence and count words that result to be 21.
print(Tf_train['Summary_no_punctuation'][1])
print(Tf_train['Summary_no_punctuation'][1].count(' '))
max_features = 5000 # we set maximum number of words to 5000
maxlen = 100 # and maximum sequence length to 100
tok = tf.keras.preprocessing.text.Tokenizer(num_words=max_features) # Tokenizer step
tok.fit_on_texts(list(Tf_train['Summary_no_punctuation'])) # Fit to cleaned text
Tf_train = tok.texts_to_sequences(list(Tf_train['Summary_no_punctuation'])) # this is how we create sequences
print(type(Tf_train))
print(len(Tf_train[1]))
print(Tf_train[1])
# Lets execute the pad steps
Tf_train = tf.keras.preprocessing.sequence.pad_sequences(Tf_train, maxlen=maxlen)
print(len(Tf_train[1]))
print(Tf_train[1])
print(train['Summary_no_punctuation'][1])
Tf_train = pd.DataFrame(Tf_train)
print(Tf_train.head()) | true |
38fedaa30d0e0dd3725af37d40ee57740ab16bf5 | Python | ttanay/GSFacebookLeads | /src/db_io.py | UTF-8 | 3,773 | 2.75 | 3 | [] | no_license | import sqlite3
import time
def open_connection(db_name='gs_leads.db'):
conn = sqlite3.connect(db_name)
return conn
def create_table_leads(conn):
sql_statement = '''CREATE TABLE IF NOT EXISTS leads(
fb_profile_link TEXT UNIQUE NOT NULL,
name TEXT NULL,
emails TEXT NULL,
booking_agent TEXT NULL,
contact_address TEXT NULL,
phone TEXT NULL,
category TEXT NULL,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
);
'''
with conn:
conn.execute(sql_statement)
return
def lead_exists(conn, fb_profile_link):
sql_statement = '''SELECT count(*) FROM leads WHERE fb_profile_link = ?;'''
cur = conn.cursor()
cur.execute(sql_statement, (fb_profile_link,))
if cur.fetchall() == [(0,)]:
return False
else:
return True
def add_leads_to_db(conn, fb_profile_link, name, emails, booking_agent, contact_address, phone, category):
if lead_exists(conn, fb_profile_link):
return
else:
sql_statement = '''INSERT INTO leads(fb_profile_link, name, emails, booking_agent, contact_address, phone, category) VALUES(:fb_profile_link,:name, :emails,:booking_agent,:contact_address,:phone, :category);'''
data = {
'fb_profile_link': fb_profile_link,
'name': name,
'emails': str(emails),
'booking_agent': booking_agent,
'contact_address': contact_address,
'phone': phone,
'category': category
}
with conn:
conn.execute(sql_statement, data)
return
def get_all_leads(conn):
sql_statement = '''SELECT * FROM leads;'''
cur = conn.cursor()
cur.execute(sql_statement)
result = cur.fetchall()
cur.close()
return result
def create_table_unexplored(conn):
sql_statement = '''CREATE TABLE IF NOT EXISTS unexplored(
fb_profile_link TEXT NOT NULL,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
);'''
with conn:
conn.execute(sql_statement)
return
def get_all_unexplored_leads(conn):
sql_statement = '''SELECT fb_profile_link FROM unexplored ORDER BY timestamp;'''
cur = conn.cursor()
cur.execute(sql_statement)
result = cur.fetchall()
cur.close()
return result
def get_unexplored_lead(conn):
sql_statement = '''SELECT fb_profile_link FROM unexplored ORDER BY timestamp LIMIT 1;'''
cur = conn.cursor()
cur.execute(sql_statement)
result = cur.fetchone()
cur.close()
return result
def add_to_unexplored_leads(conn, list_of_fb_profile_link):
for fb_profile_link in list_of_fb_profile_link:
sql_statement = '''SELECT count(*) FROM unexplored WHERE fb_profile_link = ?'''
cur = conn.cursor()
cur.execute(sql_statement, fb_profile_link)
if cur.fetchall() != [(0,)]:
list_of_fb_profile_link.remove(fb_profile_link)
sql_statement = '''INSERT INTO unexplored(fb_profile_link) VALUES(?);'''
with conn:
conn.executemany(sql_statement, list_of_fb_profile_link)
return
def delete_all_from_unexplored(conn):
sql_statement = '''DELETE FROM unexplored;'''
with conn:
conn.execute(sql_statement)
return
def delete_from_unexplored(conn, list_of_fb_profile_link):
sql_statement = '''DELETE FROM unexplored WHERE fb_profile_link = ?;'''
with conn:
conn.executemany(sql_statement, list_of_fb_profile_link)
return
def close_connection(conn):
conn.close()
| true |
6a64a664d63ce7d5a33d37a25b62fb857366cc43 | Python | dc-liska/python-challenge | /PyBank/main.py | UTF-8 | 1,825 | 3.109375 | 3 | [] | no_license | #main.py for PyBank, by David Liska
import os
import csv
csvpath = "C:/Users/Lightman/Documents/Boot Camp/Assignments/03-Python/Instructions/PyBank/Resources/budget_data.csv"
monthcount =0
cashtotal =0
prevRowValue =0
profitChange =0
runningChange =0
averageChange =0
topProfits =0
topLoss =0
changes = []
with open(csvpath, newline="") as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvfile)
for row in csvreader:
monthcount += 1
cashtotal = cashtotal + int(row[1])
if monthcount >1:
profitChange= int(row[1]) -prevRowValue
changes.append(profitChange)
if profitChange >topProfits:
topProfits =profitChange
topProfitMonth = str(row[0])
if profitChange <topLoss:
topLoss =profitChange
topLossMonth = str(row[0])
else:
pass
prevRowValue = int(row[1])
averageChange = float(sum(changes)/len(changes))
#****Terminal output****
print("\nFinancial Analysis\n-------------------------")
print("Total Months: " + str(monthcount))
print("Total: $" + str(cashtotal))
print("Average Change: $" + str(round(averageChange,2)))
print("Greatest Increase in Profits: " + topProfitMonth + " ($"+ str(topProfits)+")")
print("Greatest Decrease in Profits: " + topLossMonth + " ($"+ str(topLoss)+")")
#****File output****
fileHandler= open('pyBankOutput.txt','w')
fileHandler.write('Financial Analysis\n------------------------\nTotal Months: ' + str(monthcount) +
'\nTotal: $' + str(cashtotal) +
'\nAverage Change: $' + str(round(averageChange,2))+
'\nGreatest Increase in Profits: ' + topProfitMonth + ' ($'+ str(topProfits)+')' +
'\nGreatest Decrease in Profits: ' + topLossMonth + ' ($'+ str(topLoss)+')')
fileHandler.close()
| true |
f4db2f7a71676afb368ed100c4fd01ae38d465ec | Python | Latishfaction/python-OOP-practice | /encapsulation/nameMangling-privateFeils.py | UTF-8 | 962 | 4.75 | 5 | [] | no_license | '''Encapsulation: Name Mangling and Private Feilds '''
'''Private Feilds: To make the initialization to private variable so that we can not access it directly but we can access it from function and make use of that variable and also through "name mangling"'''
'''Name Mangling: It is th method to access the variables in private feilds'''
class Employee:
def __init__(self):
#used __(underscore-before variable name) to make the variables private
self.__name="Josh"
self.__id=7
def display(self):
#accessing private feild variable by using them in a method
self.__name='Kash'
print(self.__name)
print(self.__id)
emp=Employee()
print('"Traditional Method"')
emp.display() #calling display (method) to show the private feilds variable
print()
print('"Name Mangling"')
print(emp._Employee__name) #accessing through Name Mangling
print(emp._Employee__id) #accessing through Name Mangling | true |
963e02cb026c38a49cbd4609a8959f7196564fbd | Python | frigid-sll/python | /python实习代码/_1one_month/15/last.py | UTF-8 | 257 | 2.9375 | 3 | [] | no_license | class A(object):
b=2
def __init__(self):
self.a=1
@classmethod
def q(cls):
cls.b=3
@staticmethod
def w():
A.b=3
A.a=4
z=A()
print(A.b)
z.q()
print(A.b)
print(z.a)
z.w()
print(z.b)
print(z.a)
print(A.a) | true |
dd9ef8ec2f5ce4c87a96da8c5388e0e77d4d0e88 | Python | suhaibani/WS-Evaluation | /evaluator.py | UTF-8 | 1,904 | 2.75 | 3 | [] | no_license | # coding: utf-8
import numpy as np
from collections import defaultdict
from Queue import PriorityQueue
import sys
from scipy import spatial
from scipy import stats
def load_benchmark(filename, all_words):
scores = {}
for line in open(filename):
w1, w2, score = line.strip().split()
w1 = w1.lower()
w2 = w2.lower()
all_words.add(w1)
all_words.add(w2)
scores[(w1,w2)]=float(score)
return scores
def extract_vectors(corpus_path, all_words):
vectors = {}
for line in open(corpus_path):
first_space = line.find(' ')
word = line[:first_space]
if word in all_words:
data = line.strip().split()
vectors[word] = np.array(map(float, data[1:]))
vectors[word] /= np.linalg.norm(vectors[word])
return vectors
def get(vectors, word):
if word in vectors:
return vectors[word]
else:
return np.zeros(300)
all_words = set()
benches = ["MC-pairs-EN-30.txt", "MEN-pairs-EN-3000.txt",
"RG-pairs-EN-65.txt", "RW-pairs-EN-2034.txt", "SCWS-pairs-EN-2023.txt",
"SimLex-pairs-EN.txt", "WS-pairs-EN-353.txt"]
for bench in benches:
load_benchmark("sim_benchs/" + bench, all_words)
#change the vector filename here
#vectors = extract_vectors("vectors.my", all_words)
def evaluate(vectors):
for bench in benches:
#change the folder with benchmarks here
bench1 = load_benchmark("sim_benchs/" + bench, all_words)
gold_val = []
my_val = []
for (w1,w2), sc in bench1.iteritems():
#if w1 in vectors and w2 in vectors:
gold_val.append(sc)
scor = get(vectors, w1).dot(get(vectors, w2))
my_val.append(scor)
print bench, stats.spearmanr(gold_val, my_val)[0]
#change vectors file
evaluate(extract_vectors(sys.argv[1], all_words))
| true |
21d87fe80ea20ee1546936ec8b9ad7a1940e5aed | Python | touchstoneanalytics/Biterm_Topic_Modeling_BigQuery_skashif | /Biterm_Topic_Modeling_BigQuery/src/extract_tweets.py | UTF-8 | 1,653 | 3.359375 | 3 | [] | no_license | """ Code to extract tweets for a particular company """
import os
import sys
import pandas as pd
class Tweets_Extractor():
def __init__(self):
"""
Checking whether the tweet file exists or not
"""
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self.file_path = os.path.join(self.dir_path, "../input/tweets.csv")
if not os.path.isfile(self.file_path):
print("Input File {} doesn't exist".format(self.file_path))
sys.exit()
#df.to_csv(os.path.join(self.dir_path, '../input/headlines.txt'), index=False)
def extract_tweets(self, company_twitter_id):
""" Function to get tweets for a particular company
Returns:
DataFrame: containing the tweets for that particular company
"""
tweets_df = pd.read_csv(self.file_path)
print('Total # of tweets: {}'.format(len(tweets_df)))
print(tweets_df.dtypes)
inbound_tweets = tweets_df.loc[(tweets_df['inbound'] == True) & (tweets_df['in_response_to_tweet_id'].isnull())]
self.relevant_tweets = inbound_tweets.loc[inbound_tweets['text'].str.contains(company_twitter_id)]
print('Relevant Number of Tweets: {}'.format(len(self.relevant_tweets)))
def save_tweets(self, output_file_name):
self.relevant_tweets.to_csv(os.path.join(self.dir_path, '../input/' + output_file_name), index=False)
if __name__ == "__main__":
tw = Tweets_Extractor()
tw.extract_tweets('@AppleSupport')
tw.save_tweets('apple_complaints.csv')
#tw.extract_tweets('@AmazonHelp')
#tw.save_tweets('amazon.csv') | true |
a678a7b634bfc52c6aebdc6915bd28e84164c510 | Python | mccarvik/python_for_finance | /dx/portfolio.py | UTF-8 | 13,750 | 3.03125 | 3 | [] | no_license | import math
from .frame import *
import scipy.optimize as sco
import scipy.interpolate as sci
from pandas_datareader import data as web
class mean_variance_portfolio(object):
'''
Class to implement the mean variance portfolio theory of Markowitz
'''
def __init__(self, name, mar_env):
self.name = name
try:
self.symbols = mar_env.get_list('symbols')
self.start_date = mar_env.pricing_date
except:
raise ValueError('Error parsing market environment.')
self.number_of_assets = len(self.symbols)
try:
self.final_date = mar_env.get_constant('final date')
except:
self.final_date = dt.date.today()
try:
self.source = mar_env.get_constant('source')
except:
self.source = 'google'
try:
self.weights = mar_env.get_constant('weights')
except:
self.weights = np.ones(self.number_of_assets, 'float')
self.weights /= self.number_of_assets
try:
weights_sum = sum(self.weights)
except:
msg = 'Weights must be an iterable of numbers.'
raise TypeError(msg)
if round(weights_sum, 6) != 1:
raise ValueError('Sum of weights must be one.')
if len(self.weights) != self.number_of_assets:
msg = 'Expected %s weights, got %s'
raise ValueError(msg % (self.number_of_assets,
len(self.weights)))
self.load_data()
self.make_raw_stats()
self.apply_weights()
def __str__(self):
string = 'Portfolio %s \n' % self.name
string += len(string) * '-' + '\n'
string += 'return %10.3f\n' % self.portfolio_return
string += 'volatility %10.3f\n' % math.sqrt(self.variance)
string += 'Sharpe ratio %10.3f\n' % (self.portfolio_return /
math.sqrt(self.variance))
string += '\n'
string += 'Positions\n'
string += 'symbol | weight | ret. con. \n'
string += '--------------------------- \n'
for i in range(len(self.symbols)):
string += '{:<6} | {:6.3f} | {:9.3f} \n'.format(
self.symbols[i], self.weights[i], self.mean_returns[i])
return string
def load_data(self):
'''
Loads asset values from the web.
'''
self.data = pd.DataFrame()
# if self.source == 'yahoo' or self.source == 'google':
for sym in self.symbols:
try:
self.data[sym] = web.DataReader(sym, self.source,
self.start_date,
self.final_date)['Close']
except:
print('Can not find data for source %s and symbol %s.'
% (self.source, sym))
print('Will try other source.')
try:
if self.source == 'yahoo':
source = 'google'
if self.source == 'google':
source = 'yahoo'
self.data[sym] = web.DataReader(sym, source,
self.start_date,
self.final_date)['Close']
except:
msg = 'Can not find data for source %s and symbol %s'
raise IOError(msg % (source, sym))
self.data.columns = self.symbols
# To do: add more sources
def make_raw_stats(self):
'''
Computes returns and variances
'''
self.raw_returns = np.log(self.data / self.data.shift(1))
self.mean_raw_return = self.raw_returns.mean()
self.raw_covariance = self.raw_returns.cov()
def apply_weights(self):
'''
Applies weights to the raw returns and covariances
'''
self.returns = self.raw_returns * self.weights
self.mean_returns = self.returns.mean() * 252
self.portfolio_return = np.sum(self.mean_returns)
self.variance = np.dot(self.weights.T,
np.dot(self.raw_covariance * 252, self.weights))
def test_weights(self, weights):
'''
Returns the theoretical portfolio return, portfolio volatility
and Sharpe ratio for given weights.
Please note:
The method does not set the weight.
Parameters
==========
weight: iterable,
the weights of the portfolio content.
'''
weights = np.array(weights)
portfolio_return = np.sum(self.raw_returns.mean() * weights) * 252
portfolio_vol = math.sqrt(
np.dot(weights.T, np.dot(self.raw_covariance * 252, weights)))
return np.array([portfolio_return, portfolio_vol,
portfolio_return / portfolio_vol])
def set_weights(self, weights):
'''
Sets new weights
Parameters
==========
weights: interable
new set of weights
'''
try:
weights = np.array(weights)
weights_sum = sum(weights).round(3)
except:
msg = 'weights must be an interable of numbers'
raise TypeError(msg)
if weights_sum != 1:
raise ValueError('Sum of weights must be one')
if len(weights) != self.number_of_assets:
msg = 'Expected %s weights, got %s'
raise ValueError(msg % (self.number_of_assets,
len(weights)))
self.weights = weights
self.apply_weights()
def get_weights(self):
'''
Returns a dictionary with entries symbol:weights
'''
d = dict()
for i in range(len(self.symbols)):
d[self.symbols[i]] = self.weights[i]
return d
def get_portfolio_return(self):
'''
Returns the average return of the weighted portfolio
'''
return self.portfolio_return
def get_portfolio_variance(self):
'''
Returns the average variance of the weighted portfolio
'''
return self.variance
def get_volatility(self):
'''
Returns the average volatility of the portfolio
'''
return math.sqrt(self.variance)
def optimize(self, target, constraint=None, constraint_type='Exact'):
'''
Optimize the weights of the portfolio according to the value of the
string 'target'
Parameters
==========
target: string
one of:
Sharpe: maximizes the ratio return/volatility
Vol: minimizes the expected volatility
Return: maximizes the expected return
constraint: number
only for target options 'Vol' and 'Return'.
For target option 'Return', the function tries to optimize
the expected return given the constraint on the volatility.
For target option 'Vol', the optimization returns the minimum
volatility given the constraint for the expected return.
If constraint is None (default), the optimization is made
without concerning the other value.
constraint_type: string, one of 'Exact' or 'Bound'
only relevant if constraint is not None.
For 'Exact' (default) the value of the constraint must be hit
(if possible), for 'Bound', constraint is only the upper/lower
bound of the volatility or return resp.
'''
weights = self.get_optimal_weights(target, constraint, constraint_type)
if weights is not False:
self.set_weights(weights)
else:
raise ValueError('Optimization failed.')
def get_capital_market_line(self, riskless_asset):
'''
Returns the capital market line as a lambda function and
the coordinates of the intersection between the captal market
line and the efficient frontier
Parameters
==========
riskless_asset: float
the return of the riskless asset
'''
x, y = self.get_efficient_frontier(100)
if len(x) == 1:
raise ValueError('Efficient Frontier seems to be constant.')
f_eff = sci.UnivariateSpline(x, y, s=0)
f_eff_der = f_eff.derivative(1)
def tangent(x, rl=riskless_asset):
return f_eff_der(x) * x / (f_eff(x) - rl) - 1
left_start = x[0]
right_start = x[-1]
left, right = self.search_sign_changing(
left_start, right_start, tangent, right_start - left_start)
if left == 0 and right == 0:
raise ValueError('Can not find tangent.')
zero_x = sco.brentq(tangent, left, right)
opt_return = f_eff(zero_x)
cpl = lambda x: f_eff_der(zero_x) * x + riskless_asset
return cpl, zero_x, float(opt_return)
def get_efficient_frontier(self, n):
'''
Returns the efficient frontier in form of lists containing the x and y
coordinates of points of the frontier.
Parameters
==========
n : int >= 3
number of points
'''
if type(n) is not int:
raise TypeError('n must be an int')
if n < 3:
raise ValueError('n must be at least 3')
min_vol_weights = self.get_optimal_weights('Vol')
min_vol = self.test_weights(min_vol_weights)[1]
min_return_weights = self.get_optimal_weights('Return',
constraint=min_vol)
min_return = self.test_weights(min_return_weights)[0]
max_return_weights = self.get_optimal_weights('Return')
max_return = self.test_weights(max_return_weights)[0]
delta = (max_return - min_return) / (n - 1)
if delta > 0:
returns = np.arange(min_return, max_return + delta, delta)
vols = list()
rets = list()
for r in returns:
w = self.get_optimal_weights('Vol', constraint=r,
constraint_type='Exact')
if w is not False:
result = self.test_weights(w)[:2]
rets.append(result[0])
vols.append(result[1])
else:
rets = [max_return, ]
vols = [min_vol, ]
return np.array(vols), np.array(rets)
def get_optimal_weights(self, target, constraint=None,
constraint_type='Exact'):
if target == 'Sharpe':
def optimize_function(weights):
return -self.test_weights(weights)[2]
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
elif target == 'Vol':
def optimize_function(weights):
return self.test_weights(weights)[1]
cons = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}, ]
if constraint is not None:
d = dict()
if constraint_type == 'Exact':
d['type'] = 'eq'
d['fun'] = lambda x: self.test_weights(x)[0] - constraint
cons.append(d)
elif constraint_type == 'Bound':
d['type'] = 'ineq'
d['fun'] = lambda x: self.test_weights(x)[0] - constraint
cons.append(d)
else:
msg = 'Value for constraint_type must be either '
msg += 'Exact or Bound, not %s' % constraint_type
raise ValueError(msg)
elif target == 'Return':
def optimize_function(weights):
return -self.test_weights(weights)[0]
cons = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}, ]
if constraint is not None:
d = dict()
if constraint_type == 'Exact':
d['type'] = 'eq'
d['fun'] = lambda x: self.test_weights(x)[1] - constraint
cons.append(d)
elif constraint_type == 'Bound':
d['type'] = 'ineq'
d['fun'] = lambda x: constraint - self.test_weights(x)[1]
cons.append(d)
else:
msg = 'Value for constraint_type must be either '
msg += 'Exact or Bound, not %s' % constraint_type
raise ValueError(msg)
else:
raise ValueError('Unknown target %s' % target)
bounds = tuple((0, 1) for x in range(self.number_of_assets))
start = self.number_of_assets * [1. / self.number_of_assets, ]
result = sco.minimize(optimize_function, start,
method='SLSQP', bounds=bounds, constraints=cons)
if bool(result['success']) is True:
new_weights = result['x'].round(6)
return new_weights
else:
return False
def search_sign_changing(self, l, r, f, d):
if d < 0.000001:
return (0, 0)
for x in np.arange(l, r + d, d):
if f(l) * f(x) < 0:
ret = (x - d, x)
return ret
ret = self.search_sign_changing(l, r, f, d / 2.)
return ret
if __name__ == '__main__':
ma = market_environment('ma', dt.date(2010, 1, 1))
ma.add_constant('symbols', ['AAPL', 'GOOG', 'MSFT', 'DB'])
ma.add_constant('final date', dt.date(2014, 3, 1))
port = mean_variance_portfolio('My Portfolio', ma) | true |
f82175b3af4f6fa5350d4387b8a392afeb2c39d0 | Python | leehyehwan/Learning_Python | /gorvernment_newpost_bot/new_post_bot.py | UTF-8 | 8,139 | 2.75 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
from slacker import Slacker
from private.tokens import slack as slack_token
def post_bot1():
# 고용노동부
# 제목 확인을 위한 파서
req = requests.get('http://www.moel.go.kr/news/notice/noticeList.do')
req.encoding = 'utf-8'
html = req.text
soup = BeautifulSoup(html, 'html.parser')
posts = soup.select('a.b_tit')
post_list = []
post_title_list = []
before_title_list = []
# 슬랙 토큰
slack = Slacker(slack_token)
attachments_dict = dict()
# 내용을 깨끗하게 확인하기 위해 공백을 제거
for i in range(0,10):
post = str(posts[i])
post = post.replace('\n', '')
post = post.replace('\r', '')
post = post.replace('\t', '')
post_list.append(post)
# 기존의 10개 리스트를 불러옴
with open('latest.txt', 'r', encoding='utf-8') as f_read:
befores = f_read.readlines()
# 기존의 10개와 실행당시의 10개를 비교할 수 있도록 타이틀 분류
for i in range(0,10):
post_title = post_list[i][251:-12]
post_title = post_title.strip('\n')
post_title = post_title.strip('</a>')
post_title_list.append(post_title)
before_title = befores[i][251:-13]
before_title = before_title.strip('\n')
before_title = before_title.strip('</a>')
before_title_list.append(before_title)
# 중복 게시글 확인 후 중복되지 않는 것은 슬랙으로 알림을 보냅니다.
for i in range(0,10):
if before_title_list[0] != post_title_list[i] and before_title_list[1] != post_title_list[i] \
and before_title_list[2] != post_title_list[i] and before_title_list[3] != post_title_list[i] \
and before_title_list[4] != post_title_list[i] and before_title_list[5] != post_title_list[i] \
and before_title_list[6] != post_title_list[i] and before_title_list[7] != post_title_list[i] \
and before_title_list[8] != post_title_list[i] and before_title_list[9] != post_title_list[i] :
URL = post_list[i][23:175]
TITLE = post_list[i][251:-12]
# slack bot setting
attachments_dict['pretext'] = "고용노동부 사이트에 새로운 공지사항이 등록되었습니다. :mememe:"
attachments_dict['title'] = TITLE
attachments_dict['title_link'] = "http://www.moel.go.kr"+URL
# slack bot에서 내용도 확인할 수 있도록 한번 더 parser합니다.
req2 = requests.get("http://www.moel.go.kr"+URL)
req2.encoding = 'utf-8'
html2 = req2.text
soup2 = BeautifulSoup(html2, 'html.parser')
post_detail = soup2.select('div.b_content')
post_detail = str(post_detail.pop(0))
post_detail = post_detail.strip('<div class="b_content">\n')
# 신규 내용을 new_post.txt에 저장합니다.
with open('new_post.txt', 'w', encoding='utf-8') as f_write2:
f_write2.write(post_detail)
# new_post.txt에 들어있는 내용을 표현하기 예쁘게 가공합니다.(공백제거, 태그제거 등)
post_detail_inrow =[]
post_datail_toslack = ''
with open('new_post.txt', 'r', encoding='utf-8') as f_read2:
rows = f_read2.readlines()
for row in rows :
row = row.strip()
row = row.strip('<br/>')
row = row.strip('<div style="text-align: center;">')
row = row.strip('<div style="text-align: right;">')
row = row.strip('<div>')
row = row.strip('</div>')
row = row.strip('</')
row = row.strip('<strong>')
row = row.strip('</strong>')
row = row.strip('ong>')
row = row.strip('</stro')
if row != '':
post_detail_inrow.append(row)
# 해당 내용이 마지막줄이 아니면 개행하고, 마지막줄이면 넘어갑니다.
count_inrow = len(post_detail_inrow)
for idx, row in enumerate(post_detail_inrow) :
post_datail_toslack = post_datail_toslack+str(row)
if idx < count_inrow-1 :
post_datail_toslack = post_datail_toslack+'\n'
attachments_dict['fallback'] = "for_ government new post_to_ slack bot"
attachments_dict['text'] = post_datail_toslack
attachments_dict['mrkdwn_in'] = ["text", "pretext"]
attachments = [attachments_dict]
# slack bot으로 알림을 보냅니다.
slack.chat.post_message(channel='# 98_알림_정부정책', attachments=attachments, as_user=True)
print(TITLE)
# 이번에 가져온 새로운 10개의 내용을 저장합니다.
with open('latest.txt', 'w', encoding='utf-8') as f_write:
for i in range(0,10):
f_write.write(post_list[i])
f_write.write('\n')
print('new_post_bot Done')
def post_bot2():
# 기업마당당
# 제목 확인을 위한 파서
req = requests.get('http://www.bizinfo.go.kr/see/seea/selectSEEA100.do')
req.encoding = 'utf-8'
html = req.text
soup = BeautifulSoup(html, 'html.parser')
posts = soup.select('td.txtAgL')
post_list = []
post_title_list = []
before_list = []
for i in posts:
post_title_list.append(i.text)
# 슬랙 토큰
slack = Slacker(slack_token)
attachments_dict = dict()
# 내용을 깨끗하게 확인하기 위해 공백을 제거
for i in range(0,20):
post = str(post_title_list[i])
post = post.replace('\n', '')
post = post.replace('\r', '')
post = post.replace('\t', '')
post = post.rstrip()
post_list.append(post)
# 기존의 20개 리스트를 불러옴
with open('latest2.txt', 'r', encoding='utf-8') as f_read:
befores = f_read.readlines()
for i in range(0,20):
before = str(befores[i])
before = before.replace(' \n', '')
before_list.append(before)
# 중복 게시글 확인 후 중복되지 않는 것은 슬랙으로 알림을 보냅니다.
for i in range(0,20):
if before_list[0] != post_list[i] and before_list[1] != post_list[i] \
and before_list[2] != post_list[i] and before_list[3] != post_list[i] \
and before_list[4] != post_list[i] and before_list[5] != post_list[i] \
and before_list[6] != post_list[i] and before_list[7] != post_list[i] \
and before_list[8] != post_list[i] and before_list[9] != post_list[i] \
and before_list[10] != post_list[i] and before_list[11] != post_list[i] \
and before_list[12] != post_list[i] and before_list[13] != post_list[i] \
and before_list[14] != post_list[i] and before_list[15] != post_list[i] \
and before_list[16] != post_list[i] and before_list[17] != post_list[i] \
and before_list[18] != post_list[i] and before_list[19] != post_list[i]:
# slack bot setting
attachments_dict['pretext'] = "기업마당 사이트에 새로운 공지사항이 등록되었습니다. :mememe:"
attachments_dict['title'] = post_list[i]
attachments_dict['title_link'] = "http://www.bizinfo.go.kr/see/seea/selectSEEA100.do"
attachments = [attachments_dict]
# slack bot으로 알림을 보냅니다.
slack.chat.post_message(channel='# 98_알림_정부정책', attachments=attachments, as_user=True)
print(post_list[i])
# 이번에 가져온 새로운 20개의 내용을 저장합니다.
with open('latest2.txt', 'w', encoding='utf-8') as f_write:
for i in range(0,20):
f_write.write(post_list[i])
f_write.write('\n')
print('new_post_bot2 Done') | true |
fc2697965e4049777392a4c2f67cb92528309b03 | Python | kiyoshiWK/nlp_100_python | /007/nlp100_007.py | UTF-8 | 463 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
def template(args_list):
if len(args_list) != 3:
sys.exit('ERROR: num of args is not 3. please set 3 args.')
else:
return str(args_list[0]) + '時の' + str(args_list[1]) + 'は' + str(args_list[2])
def main(args_list):
print(template(args_list))
if __name__ == '__main__':
args_list = []
args_list.append(12)
args_list.append('気温')
args_list.append(22.4)
main(args_list) | true |
eeae148c3681c07a4c3983297847bb7a250cd1b5 | Python | mberkay/computer-graphics | /lights/light.py | UTF-8 | 1,415 | 2.734375 | 3 | [] | no_license | # CENG 487 Assignment5 by
# Mustafa Berkay Özkan
# StudentId: 230201005
# 12 2019
import math
from OpenGL.GL import *
from transform import Transform
from vec3d import Vec3d
class Light:
count = 0 # Static light counter
def __init__(self, ambient, diffuse, specular, transform: Transform):
if Light.count is 7:
raise Exception("Can not create more than 8 light")
else:
self.gl_light = getattr(OpenGL.GL, f"GL_LIGHT{Light.count}")
Light.count += 1
self.ambient = ambient
self.diffuse = diffuse
self.specular = specular
self.transform = transform
self.is_enabled = True
self.animation_count = 0
def animate(self):
# self.transform.position = Vec3d(10 * math.cos(time.time()), 3, 10 * math.sin(time.time()))
self.transform.position = Vec3d(10 * math.cos(self.animation_count), 3, 10 * math.sin(self.animation_count))
self.animation_count = (self.animation_count + 0.005) % 360
self.transform.look_at(Vec3d(0, 0, 0))
# self.transform.rotate(0, time.time(), 0)
@property
def direction(self):
return self.transform.forward
def enable(self):
glEnable(self.gl_light)
def disable(self):
glDisable(self.gl_light)
# TODO Maybe make Light Abstract Using abc library
def draw(self):
pass
def light(self):
pass
| true |
0ef236b94fa785e36fd5bcfee6891610e2f0d9bb | Python | sssilvar/funny_coding | /vtk/00_cube_render.py | UTF-8 | 1,054 | 3.078125 | 3 | [] | no_license | import vtk
from vtk.util.colors import azure
# 1. Source
# Generate polygon data for a cube: cube
cube = vtk.vtkCubeSource()
# 2. Mapper
# Create a mapper for the cube data: cube_mapper
cube_mapper = vtk.vtkPolyDataMapper()
cube_mapper.SetInputConnection(cube.GetOutputPort())
# 3. Actor
# Connect the mapper to an actor: cube_actor
cube_actor = vtk.vtkActor()
cube_actor.SetMapper(cube_mapper)
cube_actor.GetProperty().SetColor(azure)
# 4. Renderer
# Create a renderer and add the cube actor to it
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.1, 0.2, 0.4)
renderer.AddActor(cube_actor)
# 5. Render Window
# Create a render window
render_window = vtk.vtkRenderWindow()
render_window.SetWindowName('Simple VTK scene')
render_window.SetSize(400, 400)
render_window.AddRenderer(renderer)
# 6. Interactor
# Create an interactor
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(render_window)
# Initialize de interactor and start the
# rendering loop
interactor.Initialize()
render_window.Render()
interactor.Start()
| true |
993641ac55c9cae5425a108f02b8be99a1c121a4 | Python | marcmir70/Python3 | /PythonPro PythonBirds+Django/novo/exercicios/Pkg2.1.10_Atributo_Complexo/pessoa..py | UTF-8 | 722 | 3.765625 | 4 | [] | no_license | class Pessoa:
def __init__(self, *filhos, nome=None, idade=35):
    """Create a person with an optional name, an age and any number of children."""
    self.nome = nome
    self.idade = idade
    self.filhos = list(filhos)
def cumprimentar(self):
    """Return a greeting containing the person's name and this object's id."""
    quem = self.nome
    return f'Olá, {quem} -id:{id(self)}'
if __name__ == '__main__':
    # Quick demo: build a small family tree and print each member.
    mell = Pessoa(nome='Mell', idade=26)
    thu = Pessoa(nome='Thu', idade=21)
    marcelo = Pessoa(mell, thu, nome='Marcelo')  # mell and thu are marcelo's children
    print(Pessoa.cumprimentar(marcelo))
    print(id(marcelo))
    print(marcelo.cumprimentar())
    print(marcelo.nome)
    print(marcelo.idade)
    print('filhos...')
    for filho in marcelo.filhos:
        print(f' - id:{id(filho)}, nome:{filho.nome}, {filho.idade} anos')
| true |
349ce64b6e4a9543126731e14f5b5f221abbe947 | Python | jesadrperez/Fundamentals-of-Computing | /An Introduction to Interactive Programming in Python/Mini-project #6 - Blackjack.py | UTF-8 | 6,738 | 3.3125 | 3 | [] | no_license | # Mini-project #6 - Blackjack
#http://www.codeskulptor.org/#user42_yUWSsWhf55_12.py
import simplegui
import random

# Card sprite sheet (936x384, source: jfitz.com) and card-back image.
CARD_SIZE = (72, 96)
CARD_CENTER = (36, 48)
card_images = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png")
CARD_BACK_SIZE = (72, 96)
CARD_BACK_CENTER = (36, 48)
card_back = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png")

# Global game state: round-in-progress flag, status message, [wins, losses].
in_play = False
outcome = "Test"
score = [0, 0]

# Card domain constants: suit codes, rank codes, blackjack point values.
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# define card class
class Card:
def __init__(self, suit, rank):
if (suit in SUITS) and (rank in RANKS):
self.suit = suit
self.rank = rank
else:
self.suit = None
self.rank = None
print "Invalid card: ", suit, rank
def __str__(self):
return self.suit + self.rank
def get_suit(self):
return self.suit
def get_rank(self):
return self.rank
def draw(self, canvas, pos):
card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),
CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))
canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)
class Hand:
    """A blackjack hand: an ordered collection of Card objects."""

    def __init__(self):
        self.cards = []

    def __str__(self):
        s = 'There are ' + str(len(self.cards)) + ' cards in the hand: '
        for card in self.cards:
            s += str(card) + ' '
        return s

    def add_card(self, card):
        self.cards.append(card)

    def get_value(self):
        # Count aces as 11 first, then demote one ace at a time to 1 while busting.
        # NOTE(review): nesting reconstructed -- indentation was lost in the dump.
        num_aces = 0
        total = 0
        for card in self.cards:
            if card.get_rank() == 'A':
                total += 11
                num_aces += 1
            else:
                total += VALUES[card.get_rank()]
        while num_aces > 0:
            if total > 21:
                total -= 10
            num_aces -= 1
        return total

    def draw(self, canvas, pos):
        # First six cards on one row; overflow wraps onto a second row.
        row1 = 0
        row2 = 0
        for card in self.cards:
            if row1 < 6:
                card.draw(canvas, [pos[0] + row1 * CARD_SIZE[0], pos[1]])
            else:
                card.draw(canvas, [pos[0] + row2 * CARD_SIZE[0],
                                   pos[1] + CARD_SIZE[1]])
                row2 += 1
            row1 += 1
# define deck class
class Deck:
    """A full 52-card deck built from every suit/rank combination."""

    def __init__(self):
        self.cards = []
        for suit in SUITS:
            for rank in RANKS:
                self.cards.append(Card(suit, rank))

    def shuffle(self):
        # In-place shuffle via the standard library.
        random.shuffle(self.cards)

    def deal_card(self):
        # Take the top (last) card off the deck.
        card = self.cards[-1]
        self.cards.pop(-1)
        return card

    def __str__(self):
        s = 'Deck contains ' + str(len(self.cards)) + " cards: "
        for card in self.cards:
            s += str(card) + ' '
        return s
#define event handlers for buttons
def deal():
    """Start a new round: fresh shuffled deck, two cards each."""
    global outcome, in_play, deck, player_hand, dealer_hand
    deck = Deck()
    deck.shuffle()
    player_hand = Hand()
    dealer_hand = Hand()
    player_hand.add_card(deck.deal_card())
    player_hand.add_card(deck.deal_card())
    dealer_hand.add_card(deck.deal_card())
    dealer_hand.add_card(deck.deal_card())
    if in_play:
        # Dealing mid-round counts as forfeiting the current hand.
        score[1] += 1
        outcome = 'You lose!'
    in_play = True
    outcome = "Hit or stand?"
def hit():
    """Give the player one more card; detect a bust."""
    global in_play, outcome, score
    if in_play and (player_hand.get_value() < 22):
        player_hand.add_card(deck.deal_card())
        if player_hand.get_value() > 21:
            outcome = 'You busted! New deal?'
            in_play = False
            score[1] += 1
def stand():
# if hand is in play, repeatedly hit dealer until his hand has value 17 or more
# assign a message to outcome, update in_play and score
global in_play, outcome, score
print ''
if in_play:
while dealer_hand.get_value() < 17:
dealer_hand.add_card(deck.deal_card())
if dealer_hand.get_value() > 21:
outcome = 'Dealer busted! New deal?'
score[0] += 1
else:
if dealer_hand.get_value() < player_hand.get_value():
outcome = 'You won! New deal?'
score[0] += 1
else:
outcome = 'You lose! New deal?'
score[1] += 1
in_play = False
else:
outcome = 'You busted! New Deal?'
score[1] += 1
# draw handler
def draw(canvas):
    """Frame handler: both hands, the title banner, status text and scores."""
    dealer_hand.draw(canvas, [CARD_SIZE[0], 0])
    player_hand.draw(canvas, [CARD_SIZE[0], 400])
    # Vertical 'BLACKJACK' banner: first five letters black, the rest red.
    for idx, letter in enumerate('BLACKJACK'):
        color = 'Black' if idx < 5 else 'Red'
        canvas.draw_text(letter, (5, 75 + idx * 60), 75, color)
    canvas.draw_text(outcome, (75, 300), 50, 'Black')
    if in_play:
        # Hide the dealer's hole card while the round is still live.
        canvas.draw_image(card_back, CARD_BACK_CENTER, CARD_BACK_SIZE,
                          (CARD_BACK_CENTER[0] + CARD_SIZE[0], CARD_BACK_CENTER[1]), CARD_BACK_SIZE)
    canvas.draw_text('Won: ' + str(score[0]), (500, 450), 30, 'Black')
    canvas.draw_text('Lost: ' + str(score[1]), (500, 480), 30, 'Black')
# initialization frame
# Build the 600x600 game window with a green felt background.
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")
# Wire up the three control buttons and the per-frame draw callback.
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)
# Deal the first hand before the frame starts rendering.
deal()
frame.start() | true |
7bd69f76ff69975f5c3c7911cf05477ea4222a59 | Python | Rickmaru/Jinryu_analysis | /s3_ul.py | UTF-8 | 2,509 | 2.546875 | 3 | [] | no_license | #author;R.Kunimoto, TAKENAKA co.
#coding:utf-8
import boto3
import os
import re
import time
import sys
from uhurusys_json_trans import main_jsontrans

seiki_csv = re.compile("TRJ.+csv")

# `path` is the directory where pedestrian-flow CSV logs are written; `pathj`
# is where the JSON for the uhuru system is emitted.  While this script runs
# it keeps uploading the newest CSV (and regenerated JSON) to AWS S3.  It
# requires uhurusys_json_trans.py to live in the same directory; comment out
# the JSON section below to stop the automatic JSON refresh.
path = "C:\\Program Files\\LaserRadar\\LOG\\CsvLog\\TRJ(ActCtl)"
pathj = 'C:\Program Files\LaserRadar\LOG\\CsvLog'

s3 = boto3.resource('s3')

i = 0
while True:
    # Poll the log directory twice, one second apart, to spot a new file.
    dirl = os.listdir(path)
    time.sleep(1)
    dirltemp = os.listdir(path)
    sa = list(set(dirltemp) - set(dirl))
    if len(sa) == 1:
        sys.stdout.write("\ryes")
        sys.stdout.flush()
        # NOTE(review): this uploads dirl[-1], not the newly appeared file in
        # `sa`; os.listdir order is arbitrary -- confirm this is intended.
        fn = dirl[-1]
        print(path + "\\" + fn.replace("'", ""))
        data = open(path + "\\" + fn.replace("'", ""), 'rb')
        # Bucket name / key prefix determine the upload destination.
        s3.Bucket('yahoo.lodge').put_object(Key='rowdata/' + fn, Body=data)
        data.close()
        # Regenerate and upload the JSON consumed by the uhuru system.
        main_jsontrans(fn, path, pathj)
        dataj = open(pathj + "\\" + "trajects.json", 'rb')
        s3.Bucket('yahoo.lodge').put_object(Key='statics/trajects.json', Body=dataj)
        dataj.close()
    else:
        i += 1
        sys.stdout.write("\r %d" % i)
        sys.stdout.flush()
print("fin")
| true |
cf14392e41bf45d3ff0adacba8f4ec7475e80e26 | Python | SunJingpeng6/MachineLearning | /kd_tree.py | UTF-8 | 4,135 | 3.609375 | 4 | [] | no_license | import numpy as np
# 最近邻 kd tree
class KdNode():
    """One node of a kd-tree (nearest-neighbour search tree)."""

    def __init__(self, dom_element, left=None, right=None):
        # The n-dimensional sample point stored at this node.
        self.dom_element = dom_element
        # kd-subtree for the half-space on the left of the splitting plane.
        self.left = left
        # kd-subtree for the half-space on the right of the splitting plane.
        self.right = right
class KdTree():
    """kd-tree for nearest-neighbour search.

    Builds a binary tree that recursively partitions k-dimensional space;
    `search(x)` returns the stored sample closest (Euclidean) to x.
    See Li Hang, "Statistical Learning Methods", ch. 3 (k-NN / kd-trees).

    Parameters
    ----------
    dataset : array-like of shape (n_samples, n_dims)
    """

    def __init__(self, dataset):
        self.root = self._create(dataset)

    def _create(self, dataset, depth=0):
        """Recursively build KdNodes, cycling the split axis with depth."""
        if len(dataset):
            m, n = np.shape(dataset)   # m samples, n dimensions
            mid_index = m // 2         # median position after sorting
            axis = depth % n           # axis used to split at this depth
            sorted_dataset = self._sort(dataset, axis)
            node = KdNode(sorted_dataset[mid_index])
            node.left = self._create(sorted_dataset[:mid_index], depth + 1)
            node.right = self._create(sorted_dataset[mid_index + 1:], depth + 1)
            return node
        # Empty partition -> no child (implicit None).

    def _sort(self, dataset, axis):
        """Return the dataset ordered by its `axis`-th coordinate."""
        index = np.argsort(dataset[:, axis])
        return dataset[index]

    def print_tree(self, node=None):
        """Print every node's point, pre-order, starting from the root."""
        if not node:
            node = self.root
        print(node.dom_element)
        if node.left:
            self.print_tree(node.left)
        if node.right:
            self.print_tree(node.right)

    def search(self, x):
        """Return the stored sample nearest to the query point x.

        BUG FIX: the query point is now passed explicitly to _travel;
        previously _travel read a *global* variable named ``x``, so search()
        only worked when the caller happened to define such a global.
        """
        self.nearest_point = None
        self.nearest_value = float('inf')
        self._travel(self.root, x)
        return self.nearest_point

    def _travel(self, node, x, depth=0):
        """Descend to the leaf region containing x, then backtrack."""
        if node is not None:
            n = len(x)
            axis = depth % n
            # Recurse towards the half-space that contains x.
            if x[axis] < node.dom_element[axis]:
                self._travel(node.left, x, depth + 1)
            else:
                self._travel(node.right, x, depth + 1)
            # On the way back up, check whether this node is a better match.
            dist_node_x = self._dist(x, node.dom_element)
            if self.nearest_point is None or self.nearest_value > dist_node_x:
                self.nearest_point = node.dom_element
                self.nearest_value = dist_node_x
            # If the splitting plane is closer than the best distance found,
            # the other half-space may still hold a nearer point -- search it.
            if abs(x[axis] - node.dom_element[axis]) < self.nearest_value:
                if x[axis] < node.dom_element[axis]:
                    self._travel(node.right, x, depth + 1)
                else:
                    self._travel(node.left, x, depth + 1)

    def _dist(self, x1, x2):
        """Euclidean distance between two points."""
        return np.sqrt(((np.array(x1) - np.array(x2)) ** 2).sum())
if __name__ == '__main__':
    # Small 2-D demo: build the tree, dump it, then query one point.
    dataset = np.array([[2, 3],
                        [5, 4],
                        [9, 6],
                        [4, 7],
                        [8, 1],
                        [7, 2]])
    kdtree = KdTree(dataset)
    kdtree.print_tree()
    x = np.array([3, 4.5])
    nearest_point = kdtree.search(x)
    print(nearest_point)
| true |
1c97996db37eb8099a982b09736bd49738dcdb11 | Python | SrinathAkkem/ATM | /login.py | UTF-8 | 778 | 3.03125 | 3 | [] | no_license | from menu2 import clear_screen, menu2
def login(acc_list):
    """Prompt for credentials, open the user's menu on a match, then persist.

    On a failed match the function clears the screen and recurses to ask
    again.  On success it rewrites Accounts.txt (tab-separated fields, one
    account per line).

    FIX: the accounts file is now opened with a context manager -- the
    original opened it and never closed it, leaking the handle.
    """
    login_id = input('Please enter your information (to return, click "Ctrl+C"). \n>>ID: ')
    login_password = input('>>Password: ')
    found = False
    for account in acc_list:
        # account[0] is the id, account[2] the password.
        if account[0] == login_id and account[2] == login_password:
            found = True
            clear_screen()
            menu2(account)
            break
    if not found:
        clear_screen()
        print('Invalid credentials')
        login(acc_list)
    else:
        print('Saving changes...')
        with open('Accounts.txt', 'w') as acc_file:
            for acc in acc_list:
                for elements in acc:
                    acc_file.write("%s\t" % elements)
                acc_file.write('\n')
| true |
6bb9b28f275c4b6fac50d0285545ed8ce1317241 | Python | iotTesting/led | /python code/led python/UdemyLedButton.py | UTF-8 | 1,093 | 2.90625 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time

# Pin assignments (BCM numbering).
pwmPin = 18
ledPin = 23
butPin = 17

# Duty cycle used for LED brightness control (percent).
duty = 75

# GPIO setup.
# BUG FIX: was `GPOI.setmode(...)` -- a typo that raised NameError at start-up.
GPIO.setmode(GPIO.BCM)
GPIO.setup(ledPin, GPIO.OUT)
GPIO.setup(pwmPin, GPIO.OUT)
GPIO.setup(butPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# 200 is the PWM frequency in Hz.
pwm = GPIO.PWM(pwmPin, 200)
GPIO.output(ledPin, GPIO.LOW)
# BUG FIX: was `pwn.start(duty)` -- another typo (NameError).
pwm.start(duty)

#################### LOOP #####################
try:
    while 1:
        if GPIO.input(butPin):
            # Button not pressed (pull-up reads high): steady PWM, plain LED off.
            pwm.ChangeDutyCycle(duty)
            GPIO.output(ledPin, GPIO.LOW)
        else:
            # Button pressed: blink the plain LED and alternate the PWM duty.
            pwm.ChangeDutyCycle(duty)
            GPIO.output(ledPin, GPIO.HIGH)
            time.sleep(0.5)
            pwm.ChangeDutyCycle(100 - duty)
            GPIO.output(ledPin, GPIO.LOW)
            time.sleep(0.5)
except KeyboardInterrupt:
    # Ctrl+C: stop PWM and release the GPIO pins cleanly.
    pwm.stop()
    GPIO.cleanup()
| true |
e4ffc31db05a5432481119c32384842c6a8989b3 | Python | jgpstuart/Kattis-Solutions | /busnumbers.py | UTF-8 | 590 | 3.1875 | 3 | [] | no_license | stops = int(input())
bus = [int(x) for x in input().split()]
answer = []
bus.sort()

# Walk the sorted numbers tracking the length of the current consecutive run;
# when a run ends, emit "a", "a b" or "a-b" depending on its length.
run_len = 0
pos = 0
while pos < len(bus):
    if pos + 1 == len(bus):
        nxt = bus[pos]
    else:
        nxt = bus[pos + 1]
    if bus[pos] + 1 == nxt:
        run_len += 1
    if bus[pos] + 1 != nxt:
        if run_len == 0:
            answer.append(str(bus[pos]))
        if run_len == 1:
            answer.append(str(bus[pos - 1]))
            answer.append(str(bus[pos]))
        if run_len > 1:
            answer.append(str(bus[pos - run_len]) + "-" + str(bus[pos]))
        run_len = 0
    pos += 1

for part in answer:
    print(part, end=" ")
01c46bc9f88c9866560d0d5e715eec62e25096cf | Python | sun1638650145/RetinaNet | /RetinaNet/model/feature_pyramid_network.py | UTF-8 | 2,631 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | from tensorflow.keras import layers
from tensorflow.keras.activations import relu
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
def get_backbone():
"""获取ResNet50的骨架网络, 返回ResNet50提取的特征信息."""
backbone = ResNet50(include_top=False, input_shape=[None, None, 3])
c3_output, c4_output, c5_output = [
backbone.get_layer(layer_name).output
for layer_name in ['conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out']
]
return Model(inputs=backbone.inputs, outputs=[c3_output, c4_output, c5_output])
class FeaturePyramidNetwork(layers.Layer):
"""特征金字塔网络(基于ResNet50实现).
Reference:
- [Lin, T. Y. , et al., 2017](https://arxiv.org/abs/1612.03144v2)
"""
def __init__(self):
"""初始化特征金字塔网络."""
super(FeaturePyramidNetwork, self).__init__(name='FeaturePyramidNetwork')
self.backbone = get_backbone()
self.conv_c3_1x1 = layers.Conv2D(filters=256, kernel_size=1, strides=1, padding='same')
self.conv_c4_1x1 = layers.Conv2D(filters=256, kernel_size=1, strides=1, padding='same')
self.conv_c5_1x1 = layers.Conv2D(filters=256, kernel_size=1, strides=1, padding='same')
self.conv_p3_3x3 = layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='same')
self.conv_p4_3x3 = layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='same')
self.conv_p5_3x3 = layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='same')
self.conv_c6_3x3 = layers.Conv2D(filters=256, kernel_size=3, strides=2, padding='same')
self.conv_p6_3x3 = layers.Conv2D(filters=256, kernel_size=3, strides=2, padding='same')
self.upsample_2x = layers.UpSampling2D(size=2)
def call(self, inputs, training=False, **kwargs):
"""实例化特征金字塔网络.
Args:
inputs: tf.Tensor,
输入网络层.
training: bool, default=False,
网络是否可训练.
Returns:
特征金字塔的五个尺度输出.
"""
C3, C4, C5 = self.backbone(inputs, training=training)
P5 = self.conv_c5_1x1(C5)
P5 = self.conv_p5_3x3(P5)
P4 = self.conv_c4_1x1(C4)
P4 += self.upsample_2x(P5)
P4 = self.conv_p4_3x3(P4)
P3 = self.conv_c3_1x1(C3)
P3 += self.upsample_2x(P4)
P3 = self.conv_p3_3x3(P3)
P6 = self.conv_c6_3x3(C5)
P7 = self.conv_p6_3x3(relu(P6)) # 使用relu激活.
return P3, P4, P5, P6, P7
| true |
600939929846b57b5aa8543ff8a7a7774ed8827c | Python | jongchan-kim524/Python-Selenium | /create_100_kakao.py | UTF-8 | 2,417 | 2.984375 | 3 | [] | no_license | from selenium import webdriver
import time
import sys
import os

# Collect account credentials and how many itineraries to create.
kakao_email = input('카카오 이메일: ')
kakao_pwd = input('카카오 비밀번호: ')
n = int(input('몇개나 만들까요?: '))

driver = webdriver.Chrome()

# Choose target environment: 0 = production, 1 = staging, 2 = dev.
stg_dev = input('운영이면 0, stg면 1을, dev면 2를 눌러주세요')
if stg_dev == '0':
    url = 'https://www.tripcody.com/itinerary/182592'
elif stg_dev == '1':
    url = 'https://stg.tripcody.com/itinerary/182592'
elif stg_dev == '2':
    url = 'https://dev.tripcody.com/itinerary/182592'

# NOTE(review): `url` above is never used -- the production URL is hard-coded.
driver.get('https://www.tripcody.com/itinerary/182592')
time.sleep(3)

def new_schedule(schedule_name):
    # Create one itinerary named `schedule_name` by clicking through the UI.
    # "Create new schedule" button.
    driver.find_element_by_xpath('//*[@id="__layout"]/div/div[3]/div[2]/button').click()
    time.sleep(2)
    # Schedule title input.
    driver.find_element_by_xpath('//*[@id="__layout"]/div/div[3]/div/div[2]/input').send_keys(schedule_name)
    # Pick start and end dates in the calendar widget.
    driver.find_element_by_xpath('//*[@id="__layout"]/div/div[3]/div/div[2]/section/div[2]/div[17]/div/div[3]').click()
    driver.find_element_by_xpath('//*[@id="__layout"]/div/div[3]/div/div[2]/section/div[2]/div[34]/div/div[3]').click()
    # Confirm creation.
    driver.find_element_by_xpath('//*[@id="bottom-button"]/button').click()
    time.sleep(2)
    # Back to the schedule list.
    driver.find_element_by_xpath('//*[@id="__layout"]/div/div[1]/div/div[2]/header/div/button/span').click()
    time.sleep(2)

# Log in with Kakao once before the creation loop.
# NOTE(review): indentation was lost in the source dump; this login block is
# reconstructed as top-level code (run once), not part of new_schedule().
driver.find_element_by_xpath('//*[@id="__layout"]/div/div[3]/div/div[4]/div[2]/div').click()
time.sleep(2)
# //*[@id="__layout"]/div/div[3]/div/div[3]/div[2]/div
driver.find_element_by_id('id_email_2').send_keys(kakao_email)
driver.find_element_by_id('id_password_3').send_keys(kakao_pwd)
time.sleep(2)
driver.find_element_by_xpath('//*[@id="login-form"]/fieldset/div[8]/button[1]').click()
time.sleep(5)

for i in range(n):
    new_schedule(str(i + 1))
0abd2027899bfbf4067983413495e3aba6da1b60 | Python | Bogdan-Moskovchenko/Programming-Basics | /homeworks/Bogdan.Moskovchenko_Bogdan-Moskovchenko/Homework-2/Homework-2.py | UTF-8 | 1,211 | 3.984375 | 4 | [] | no_license | #intro
# Intro banner.
print('Our WORLD OF SEACREATURES welcomes you newcomer')
print('This game is for people who loves to observe all kinds of seadwellers')
print('For yours and ours conveniece please input some personal information')

# Primary details.
name = input('What is your name?\n')
surname = input('What is your surname?\n')
age = int(input('How old are you?\n'))
creature = input('What kind of seacreatures do you want to observe the most?\n')

result = "Your name is %s %s, you are %i years old and you are fond of %s" % (name, surname, age, creature)

# Extra details for better communication with the client.
print('For better communication with our clients we would like to ask for some additional information')
hobby = input('what kind of hoobby do you have?\n')
pet = input('Do you have a pet?\n')
colour = input('what is your favorite colour\n')

# Nest the extra answers under the primary record.
dict_a = {
    'Name': name,
    'Surname': surname,
    'Age': age,
    'Creature': creature,
}
dict_b = {
    'Hobby': hobby,
    'Pet': pet,
    'Colour': colour,
}
dict_a['additional information'] = dict_b
print(dict_a)

# Persist the summary line.
with open('Seatamers.txt', 'w') as f:
    f.write(result)
print(result)
print(' We welcome you newcomer\n Your journey begins right now') | true |
5ba7112e90631b98c6582c1565d1e75aecf2e5fe | Python | adldotori/DeepKnowledgeTracing | /main.py | UTF-8 | 7,149 | 2.546875 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from torchvision import datasets, transforms
import pandas as pd
import argparse
from tqdm import tqdm
from sklearn import metrics
import numpy as np
# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class DKT(nn.Module):
    """Deep Knowledge Tracing: a 2-layer LSTM over (question, correctness)
    one-hot pairs, decoded to per-question correctness probabilities."""

    def __init__(self, input_size, hidden_size, dropout=0.0):
        super().__init__()
        # Input is the concatenation of two one-hot vectors of size input_size.
        self.seq_model = nn.LSTM(2 * input_size, hidden_size, num_layers=2, dropout=dropout)
        self.decoder = nn.Sequential(
            nn.Linear(hidden_size, input_size),
            nn.Sigmoid(),
        )

    def forward(self, seq_in):
        hidden_states, _ = self.seq_model(seq_in)
        return self.decoder(hidden_states)
def model_test():
    """Smoke test: run one random batch through DKT and print the output shape."""
    batch_size, input_size, hidden_size, seq_len = 2, 18, 200, 3
    model = DKT(input_size, hidden_size).to(device)
    sample = torch.randint(0, seq_len, (1, batch_size, 2 * input_size), dtype=torch.float32).to(device)
    print(model(sample).shape)
class Dataset(data.Dataset):
    """ASSISTments 'builder' dataset.

    Each record is three CSV lines (count, question ids, answers); every item
    becomes a (max_sqlen, 2 * max_prob) tensor: the question one-hot
    concatenated with a correctness-masked copy of the same one-hot.
    """

    def __init__(self, mode):
        self.data = self.read_csv(f'data/assistments/builder_{mode}.csv')

    def read_csv(self, file):
        records = []
        self.max_prob = 0
        self.max_sqlen = 0
        with open(file, 'r') as f:
            # Records come in triples: header line, question line, answer line.
            while f.readline():
                prob = [int(i) for i in f.readline().split(',')[:-1]]
                ans = [int(i) for i in f.readline().split(',')[:-1]]
                records.append((prob, ans))
                for i in prob:
                    if self.max_prob < i + 1:
                        self.max_prob = i + 1
                if self.max_sqlen < len(prob):
                    self.max_sqlen = len(prob)
        final_data = []
        for prob, ans in records:
            prob = torch.tensor(prob).unsqueeze(1)
            prob_onehot = torch.zeros(self.max_sqlen, self.max_prob)
            prob_onehot.scatter_(1, prob, 1)
            # Zero out rows answered incorrectly in the "correct" half.
            correct = prob_onehot.clone()
            for i in range(len(ans)):
                if ans[i] == 0:
                    correct[i] = torch.zeros(self.max_prob)
            final_data.append(torch.cat([prob_onehot, correct], axis=1))
        return final_data

    def __getitem__(self, i):
        return self.data[i]

    def __len__(self):
        return len(self.data)
def data_test():
    """Smoke test: load the training split and print its dimensions."""
    dataset = Dataset('train')
    print(dataset.max_prob)
    print(dataset.max_sqlen)
class Trainer():
    """Builds the model/data pipeline and drives training, evaluation and
    greedy question-sequence optimisation for the DKT model."""

    def __init__(self, args, hidden_size):
        self.args = args
        self.train_dataset = Dataset('train')
        self.data_loader = data.DataLoader(
            self.train_dataset, batch_size=self.args.batch_size
        )
        self.max_sqlen = self.train_dataset.max_sqlen
        self.input_size = self.train_dataset.max_prob
        self.hidden_size = hidden_size
        self.model = DKT(self.input_size, self.hidden_size).to(device)
        self.optimizer = optim.Adam(self.model.parameters())
        self.loss = nn.BCELoss()

    def train(self):
        """BCE training over answered questions only; saves a checkpoint."""
        # self.model.load_state_dict(torch.load(f'{self.args.name}.pt'))
        for epoch in range(self.args.epoch):
            pbar = tqdm(self.data_loader)
            loss_sum = 0
            for batch in pbar:
                # (batch, seq, feat) -> (seq, batch, feat) for the LSTM.
                seq = batch.to(device).permute(1, 0, 2)
                output = self.model(seq[:-1])
                # Mask: positions where a question was actually asked at t+1.
                label = (seq[:, :, :seq.shape[2] // 2] == 1)[1:].to(device)
                output = torch.where(label, output, torch.tensor(0.))
                ans = torch.where(label, seq[1:, :, seq.shape[2] // 2:], torch.tensor(-1.))
                loss = self.loss(output[output > 0], ans[ans != -1])
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                loss_sum += loss
                pbar.set_description(f'Loss : {loss:.2f}')
            print(f'Loss : {loss_sum/len(self.data_loader):.2f}')
        # NOTE(review): indentation was lost in the source dump; the original
        # may have checkpointed once per epoch instead of once after training.
        torch.save(self.model.state_dict(), f'{self.args.name}.pt')

    def infer(self):
        """Evaluate the saved checkpoint on the test split and print ROC-AUC."""
        self.model.load_state_dict(torch.load(f'{self.args.name}.pt'))
        self.model.eval()
        self.test_dataset = Dataset('test')
        self.data_loader = torch.utils.data.DataLoader(
            self.test_dataset, batch_size=1
        )
        y_true = []
        y_pred = []
        for batch in self.data_loader:
            seq = batch.to(device).permute(1, 0, 2)
            output = self.model(seq[:-1])
            label = (seq[:, :, :seq.shape[2] // 2] == 1)[1:].to(device)
            output = torch.where(label, output, torch.tensor(0.))
            ans = torch.where(label, seq[1:, :, seq.shape[2] // 2:], torch.tensor(-1.))
            y_pred += output[output > 0].data.numpy().tolist()
            y_true += ans[ans != -1].data.numpy().tolist()
        print(metrics.roc_auc_score(np.array(y_true), np.array(y_pred)))

    def seq_op(self, seq, length):
        """Greedily extend (prob, ans) by `length` questions, each time picking
        the question whose expected post-answer mastery is highest."""
        self.model.load_state_dict(torch.load(f'{self.args.name}.pt'))
        self.model.eval()
        prob, ans = seq
        for i in range(length):
            emb = self._emb(prob, ans)
            output = self.model(emb)
            pred_cor = output[-1]
            max_reward = -1
            max_idx = -1
            for j in range(self.input_size):
                # Expected mean mastery if question j is answered right/wrong.
                emb = self._emb(prob + [j], ans + [1])
                reward_1 = self.model(emb)[-1].mean()
                emb = self._emb(prob + [j], ans + [0])
                reward_0 = self.model(emb)[-1].mean()
                reward = reward_1 * pred_cor[0, j] + reward_0 * (1 - pred_cor[0, j])
                if reward > max_reward:
                    max_reward = reward
                    max_idx = j
            print(max_reward.data, max_idx)
            prob = prob + [max_idx]
            ans = ans + [1 if pred_cor[0, max_idx] > 0.5 else 0]

    def _emb(self, prob, ans):
        """Encode (question ids, answers) into a (seq, 1, 2*input_size) tensor."""
        prob = torch.tensor(prob).unsqueeze(1)
        prob_onehot = torch.zeros(len(prob), self.input_size)
        prob_onehot.scatter_(1, prob, 1)
        correct = prob_onehot.clone()
        for i in range(len(ans)):
            if ans[i] == 0:
                correct[i] = torch.zeros(self.input_size)
        emb = torch.cat([prob_onehot, correct], axis=1)
        emb.unsqueeze_(1)
        return emb
def get_args():
    """Parse command-line options for the trainer."""
    ap = argparse.ArgumentParser(description='Trainer')
    ap.add_argument('--name', type=str, default='base')
    ap.add_argument('-e', '--epoch', type=int, default=10)
    ap.add_argument('-b', '--batch_size', type=int, default=100)
    ap.add_argument('-m', '--mode', type=str, default='infer')
    return ap.parse_args()
def main():
    """Entry point: build the trainer and dispatch on the --mode flag."""
    args = get_args()
    trainer = Trainer(args, 200)
    if args.mode == 'train':
        trainer.train()
    elif args.mode == 'infer':
        trainer.infer()
    elif args.mode == 'seq_op':
        trainer.seq_op(([1, 1, 2, 2, 2], [1, 1, 0, 0, 0]), 5)
if __name__ == '__main__':
# model_test()
# data_test()
main() | true |
b9364f0ac0571d9e8e6d23f439e114f978b8a915 | Python | malmgrens4/discord_bot | /src/utils/format_helper.py | UTF-8 | 812 | 3.078125 | 3 | [] | no_license | def format_number(value):
return str('{:,}'.format(value))
def discord_display_at_username(user_id):
    """Return Discord's at-mention markup for the given user id."""
    return '<@!%s>' % user_id
def create_display_table(headers, rows, col_length=15):
    """Render headers and rows as a fixed-width ASCII table string.

    Each cell is '|' + value, left-justified to col_length; a '=' rule is
    drawn under the header and after the body.  Body lines get a trailing
    '|', header lines do not (matching the original layout).
    """
    sep = '|'
    header_cells = []
    rule = ''
    for header in headers:
        header_cells.append((sep + str(header)).ljust(col_length))
        rule += ''.ljust(col_length, '=')
    header_line = ''.join(header_cells) + '\n'
    rule += '\n'
    body = ''
    for row in rows:
        line = ''
        for value in row:
            line += (sep + str(value)).ljust(col_length)
        body += line + sep + '\n'
    return header_line + rule + body + rule
35fd5c9e98a0f4a04f013bbae84f41b81c70c4df | Python | SoadB/100DaysOfCode-Python | /Day_31.py | UTF-8 | 417 | 3.859375 | 4 | [] | no_license | # Ex1
def my_function():
    """Ex1: a function with no parameters."""
    print("Hello in my function")

my_function()
print("-------------------------")

def my_fun(fname):
    """Ex2: a function with one positional parameter."""
    print(fname + " Refsnes")

my_fun("Email")
my_fun("Tobies")
my_fun("linus")
print("-------------------------")

def fun_country(country="Norway"):
    """Ex3: a function with a default parameter value."""
    print("I am from " + country)

fun_country("Yemen")
fun_country("Swedan")
fun_country()
print("-------------------------")
| true |
6f420493023c59e27f1acad40f1dd990e644a2f9 | Python | ginsstaahh/455-reactor-design-project | /37280147 30639141 Oregonator model for BZ reaction.py | UTF-8 | 8,464 | 3.25 | 3 | [] | no_license |
# coding: utf-8
# ## Introduction:
#
# BZ reaction was first discovered by a Russian scientist Belousov. In this reaction the system undergoes a reaction with an oscillatory change of color. For a long time his findings were dismissed, since people were uncomfortable with an idea that system may go back and forth from the thermodynamical equilibrium. It was much later when Zhabotinsky confirmed the findings of Belousov, the reaction became so famous reaction, that UBC CHBE department decided to use that to show off CHBE research to the high school students.
#
# In[1]:
from IPython.display import YouTubeVideo
# Embed the BZ-reaction demo video in the notebook output.
YouTubeVideo('wxSa9BMPwow')
# The overall BZ-reaction is the oxidation of malonic
# acid by $BrO^{–}_3$ to form $CO_2$ and $H_2O$:
#
# $ 3 CH_2 (CO_2H)_2 + 4 BrO^{-}_3 \to 4 Br^{-} + 9 CO_2 + 6 H_2 O$
#
# It looks simple, but don't be deceived by the looks. Below are shown all the reactions happening in the system.
# In[4]:
from IPython.display import Image
from IPython.core.display import HTML
# Show the BZ core reaction scheme diagram (fetched from scholarpedia).
Image(url= "http://www.scholarpedia.org/w/images/5/5f/BZ_Core_scheme.gif", width=200, height=200)
# ## Mathematical model
#
# The simplest model to explain the BZ reaction is the oregonator model [A. Garza 2011]. In this model the important chemical species are
# \begin{align}
# A&= {\rm BrO_3^-}\ ,& \ P&= {\rm HOBr} \ ,& \ B&={\rm oxidizable\
# organic\ species} \nonumber \\
# X&= {\rm HBrO_2} \ ,& \ Y&= {\rm Br^-} \ ,& \ Z&={\rm Ce}^{4+}
# \end{align}
#
#
# and its dynamics is described by the scheme
#
# \begin{align}
# &A+Y \stackrel{k_1}{\longrightarrow} X+P \ (1),& \ X+Y&
# \stackrel{k_2}{\longrightarrow} 2P \ (2),&
# \ A+X& \stackrel{k_3}{\longrightarrow} 2X+2Z \ (3), \nonumber \\
# &2X \stackrel{k_4}{\longrightarrow} A+P \ (4),& \ B+Z&
# \stackrel{k_5}{\longrightarrow} \frac{f}{2} Y \ . (5)&
# \end{align}
#
#
# The first two reactions describe the consumption of bromide
# Br$^-$, whereas the last three ones model the buildup of HBrO$_2$
# and Ce$^{4+}$ that finally leads to bromide recovery, and then to
# a new cycle.
#
# By assuming that the bromate concentration $[A]$
# remains constant as well as $[B]$, and noting that $P$ enters only
# as a passive product of the dynamics, the law of mass action leads
# to
#
#
#
# $\frac{dX}{dt} = k_1 A Y - k_2 X Y + k_3 A X - 2 k_4 X^2$
#
# $\frac{dY}{dt} = -k_1 A Y - k_2 X Y + k_5 \frac{f}{2} B Z$
#
# $\frac{dZ}{dt} = 2 k_3 A X - k_5 B Z$
#
#
#
# In chemical engineering we always want to bring our variables to dimensionless values. We can do that here by rescaling time and concentrations:
#
# $x= \dfrac{X}{X_0} $,
# $y= \dfrac{Y}{Y_0}$,
# $z= \dfrac{Z}{Z_0}$,
# $\tau= \dfrac{t}{t_0}$
#
# where the scaling factors are:
#
#
# $X_0=\frac{k_3 A}{2 k_4}$
#
# $Y_0 = \frac{k_3 A}{k_2} $
#
# $Z_0 = \frac{(k_3 A)^2}{k_4 k_5 B}$
#
# $t_0 = \frac{1}{k_5 B} $
#
# $\epsilon_1=\frac{k_5 B}{k_3 A} $
#
# $\epsilon_2= \frac{2 k_4 k_5 B}{k_2 k_3 A} $
#
# $q= \frac{2 k_1 k_4}{k_2 k_3}$
#
# In terms of these variables, the model reads (*)
#
# $\epsilon_1 \frac{dx}{d\tau} = q y -x y +x(1-x)$
#
# $\epsilon_2 \frac{dy}{d\tau} = -q y - x y + f z $
#
# $\frac{dz}{d\tau} = x - z $
#
# At certain combination of the parameter values for the system it shows an oscillatory behaviour.
#
# Use these parameters for this model:
#
# $\epsilon_1 = 9.9 × 10^{−3}$,
# $\epsilon_2 = 1.98×10^{−5}$,
# $q = 7.62×10^{−5}$,
# $f = 1$,
# $x(t = 0) = 0$,
# $y(t = 0) = 0.001$,
# $z(t = 0) = 0$.
#
#
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
get_ipython().run_line_magic('matplotlib', 'inline')
from scipy.integrate import odeint
from mpl_toolkits.mplot3d import Axes3D
from ipywidgets import interact
from IPython.display import clear_output, display, HTML
def BZrxn(A, B, x_init, y_init, z_init, P_init):
    """Integrate the Oregonator model of the BZ reaction and plot the results.

    A, B are the (constant) BrO3-/organic-species concentrations; x/y/z_init
    are the dimensionless initial intermediate concentrations.  P_init is
    accepted for the slider interface but not used (P always starts at 0).
    """
    # Rate constants.
    k1 = 1.28
    k2 = 2.4 * 10**6
    k3 = 33.6
    k4 = 2400
    k5 = 1
    # Dimensionless parameters of the scaled model.
    e1 = 9.9 * 10**-3
    e2 = 1.98 * 10**-5
    q = 7.62 * 10**-5
    f = 1.
    # Scaling factors between dimensionless and physical quantities.
    X0 = k3 * A / (2 * k4)
    Y0 = k3 * A / k2
    Z0 = (k3 * A)**2 / (k4 * k5 * B)
    t0 = 1 / (k5 * B)
    t_init = 0
    t_final = 50
    N = 10000
    t_array = np.linspace(t_init, t_final, N)

    def ode_xyzp(dim_list, t_array):
        # Right-hand side of the scaled Oregonator ODEs plus the product rate.
        x = dim_list[0]
        y = dim_list[1]
        z = dim_list[2]
        dxdtau = (q * y - x * y + x * (1 - x)) / e1
        dydtau = (-q * y - x * y + f * z) / e2
        dzdtau = x - z
        dpdtau = k1 * A * y * Y0 + 2 * k2 * x * X0 * y * Y0 + k4 * (x * X0)**2
        return [dxdtau, dydtau, dzdtau, dpdtau]

    # Fourth state (product) starts at 0; P_init is ignored by the solver.
    dim_num_list = odeint(ode_xyzp, [x_init, y_init, z_init, 0], t_array)
    x_num = dim_num_list[:, 0]
    y_num = dim_num_list[:, 1]
    z_num = dim_num_list[:, 2]
    p_num = dim_num_list[:, 3]
    # Convert back to physical concentrations.
    X = x_num * X0
    Y = y_num * Y0
    Z = z_num * Z0
    P = p_num * t0

    # Figure 0: dimensionless variables on a log scale.
    plt.figure(0)
    plt.plot(t_array, np.log(x_num), 'r--', label="x")
    plt.plot(t_array, np.log(y_num), 'b--', label="y")
    plt.plot(t_array, np.log(z_num), 'g--', label="z")
    plt.xlabel('tau')
    plt.ylabel("log of dimensionless values")

    # Figure 1: all physical concentrations together.
    plt.figure(1)
    plt.plot(t_array, X, 'r--', label="X")
    plt.plot(t_array, Y, 'b--', label="Y")
    plt.plot(t_array, Z, 'g--', label="Z")
    plt.plot(t_array, P, 'k--', label="P")
    plt.xlabel('time (s)')
    plt.ylabel('C (mol/L)')
    plt.legend()

    # Figures 2-5: one species per figure.
    fig = plt.figure(2)
    plt.plot(t_array, X, 'r--', label="X")
    plt.xlabel('time (s)')
    plt.ylabel('C (mol/L)')
    plt.legend()
    fig = plt.figure(3)
    plt.plot(t_array, Y, 'b--', label="Y")
    plt.xlabel('time (s)')
    plt.ylabel('C (mol/L)')
    plt.legend()
    fig = plt.figure(4)
    plt.plot(t_array, Z, 'g--', label="Z")
    plt.xlabel('time (s)')
    plt.ylabel('C (mol/L)')
    plt.legend()
    fig = plt.figure(5)
    plt.plot(t_array, P, 'k--', label="P")
    plt.xlabel('time (s)')
    plt.ylabel('C (mol/L)')
    plt.legend()

    # Figure 6: X-Y-Z phase-space trajectory.
    fig = plt.figure(6)
    ax = fig.gca(projection='3d')
    ax.plot(X, Y, Z, label='3D plot of X, Y, and Z')
    plt.xlabel('X')
    plt.ylabel('Y')
    # NOTE(review): this second ylabel call overwrites the previous one; a
    # z-axis label was probably intended.
    plt.ylabel('Z')
    ax.legend()
    plt.show()
# Expose BZrxn through ipywidgets sliders: each tuple is (min, max, step).
interact(BZrxn,
         A=(0, 1.2, 0.1),
         B=(0, 0.4, 0.1),
         x_init=(0, 2, 0.5),
         y_init=(0, 2, 0.5),
         z_init=(0, 2, 0.5),
         P_init=(0, 1, 0.5))
# 3
# the concentrations of X, Y, and Z oscillate in a contained range of concentration after an initial spike.
# P cyclically increases over time
# A and B are constant (in excess for the reaction)
# 4
# X, Y, and Z are intermediate products
# therefore they are able to still oscillate while entropy increases.
# Entropy increases as P increases and the reactants (A and B) are used up.
# Because the reactants are in excess, the concentrations of X, Y, and Z are able to oscillate
# in a contained range
# ## Problem statement:
#
# 1. Solve the ODE (*) for the concentrations of intermediaries X, Y, Z as well as the product P.
# 2. Plot your curves
# 3. What can you tell about the behvaiour of each species?
# 4. Generally, the concentration of all products in chemical reactions have to increase (otherwise the entropy of the universe is decreasing and our thermodynamics teachers wouldn't be happy) However, something is clearly oscillating here. How can you explain this?
#
# 5. (Bonus) Create a slider using the template that we used for our Zombie example and play with the input parameters.
# 6. (Bonus) Plot a 3D plot with the concentrations of X, Y, and Z in a 3d diagram
#
# you may use the template below:
# https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html
# ```python
# import matplotlib as mpl
# from mpl_toolkits.mplot3d import Axes3D
# import numpy as np
# import matplotlib.pyplot as plt
#
# mpl.rcParams['legend.fontsize'] = 10
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
# z = np.linspace(-2, 2, 100)
# r = z**2 + 1
# x = r * np.sin(theta)
# y = r * np.cos(theta)
# ax.plot(x, y, z, label='parametric curve')
# ax.legend()
#
# plt.show()
# References:
#
# 1. Garza 2011
# https://pdfs.semanticscholar.org/2876/0e30e84817a29a22966fcde4fd619d6eeabb.pdf
# 2. R. Noyes 1989
# https://pubs.acs.org/doi/pdf/10.1021/ed066p190
| true |
4f8857974fd9caa3744959d869bf866426b44c86 | Python | rachel-dev/minion_raspberry | /main.py | UTF-8 | 1,151 | 2.578125 | 3 | [] | no_license | from flask import Flask
from flask import Flask, render_template, request
import time
import serial
ser = serial.Serial('/dev/ttyS0', 9600, timeout=1)
ser.isOpen()
app = Flask(__name__)
@app.route("/")
def index():
# return "Hello, Rachel"
return render_template("robot.html")
@app.route('/left_side')
def left_side():
    """Send the 'turn left' opcode 'l' over the serial link; echo the reply."""
    data_1 = "LEFT"  # NOTE(review): unused local — kept only for symmetry with siblings
    ser.write("l")
    response = ser.readline()
    print response
    return 'true'
@app.route('/right_side')
def right_side():
    """Send the 'turn right' opcode 'r' over the serial link; echo the reply."""
    data_1 = "RIGHT"  # NOTE(review): unused local — kept only for symmetry with siblings
    ser.write("r")
    response = ser.readline()
    print response
    return 'true'
@app.route('/up_side')
def up_side():
    """Send the 'forward/up' opcode 'u' over the serial link; echo the reply."""
    data_1 = "UP"  # NOTE(review): unused local — kept only for symmetry with siblings
    ser.write("u")
    response = ser.readline()
    print response
    return 'true'
@app.route('/down_side')
def down_side():
    """Send the 'backward/down' opcode 'd' over the serial link; echo the reply."""
    data_1 = "DOWN"  # NOTE(review): unused local — kept only for symmetry with siblings
    ser.write("d")
    response = ser.readline()
    print response
    return 'true'
@app.route('/stop')
def stop():
    """Send the 'stop' opcode 's' over the serial link; echo the reply."""
    data_1 = "STOP"  # NOTE(review): unused local — kept only for symmetry with siblings
    ser.write("s")
    response = ser.readline()
    print response
    return 'true'
if __name__ == '__main__' :
app.run(host='192.168.2.121',port=5000)
| true |
2da44c450b21fb110506eae2d2e1258a8a9f2211 | Python | GerardoRodas/orto2 | /app/informacion/choices.py | UTF-8 | 121 | 2.671875 | 3 | [] | no_license |
genero_choices = (
(1, ("Masculino")),
(2, ("Femenino"))
)
YES_OR_NO = (
(True, 'Yes'),
(False, 'No')
) | true |
1f2cdb1e2b3394c5377c20cb2e9f16ff25691948 | Python | joshimiloni/Client-Server-Polling-System | /client.py | UTF-8 | 1,191 | 2.90625 | 3 | [] | no_license | import socket,pickle
from threading import Thread
import Tkinter as tk
def button_func():
    """Submit handler: ship the form answers to the server, then close the window."""
    sendname()
    top.destroy()
def sendname():
    """Read the three Tk entries, pickle them as a list, and send to the poll server."""
    global E1,E2,E3  # NOTE(review): 'global' is unnecessary for reads — confirm intent
    string1 = E1.get()
    string2 = E2.get()
    string3 = E3.get()
    data_array=[string1,string2,string3]
    # Wire format is a pickled list [name, email, choice]; the server side must
    # never unpickle data from untrusted peers.
    data_string = pickle.dumps(data_array)
    client_socket.send(data_string)
    client_socket.close()
top = tk.Tk()
top.title("Poll")
L1 = tk.Label(top, text="Your Name: ")
L1.grid(row=0, column=0)
E1 = tk.Entry(top, bd = 5)
E1.grid(row=0, column=1)
L2 = tk.Label(top, text="Your Email: ")
L2.grid(row=1, column=0)
E2 = tk.Entry(top, bd = 5)
E2.grid(row=1, column=1)
L4 = tk.Label(top, text="Which is your favourite subject? ")
L4.grid(row=2, column=0)
L5 = tk.Label(top, text="1) NPL 2) MCC 3) DDB")
L5.grid(row=2, column=1)
L3 = tk.Label(top, text="Your Choice: ")
L3.grid(row=3, column=0)
E3 = tk.Entry(top, bd = 5)
E3.grid(row=3, column=1)
MyButton1 = tk.Button(top, text="Submit", width=10, command=button_func)
MyButton1.grid(row=4, column=1)
HOST=socket.gethostname()
PORT=6667
ADDR = (HOST, PORT)
client_socket=socket.socket()
client_socket.connect(ADDR)
tk.mainloop() # Starts GUI execution.
| true |
9d704a76d1f76a4e90b2875fb16bece2bea9e7a5 | Python | Aasthaengg/IBMdataset | /Python_codes/p02835/s408265612.py | UTF-8 | 72 | 2.640625 | 3 | [] | no_license | A = map(int, input().split())
print('bust' if(sum(A) >= 22) else 'win')
| true |
2d61dfbecee12199babb38e0ee068e4306c60c78 | Python | lujinda/pylot | /fileInput.py | UTF-8 | 208 | 2.640625 | 3 | [] | no_license | import fileinput
import re
#for line in fileinput.input(inplace=True):
# line=line.rstrip('\n')
# print '%s # %2d'%(line,fileinput.lineno())
for lines in fileinput.input(inplace=False):
print lines
| true |
6a69b377db4fd47122444bcb400900f23a62e920 | Python | HYUNAHSHIM/algorithmProblem | /BaekJoon18870.py | UTF-8 | 211 | 2.8125 | 3 | [] | no_license | # BaekJoon18870.py
# Coordinate compression: each value is replaced by its 0-based rank among the
# distinct input values (ascending order).
N = int(input())
arr = list(map(int, input().split()))
sorted_arr = sorted(list(set(arr)))
# value -> rank lookup built once, so each query in the print loop is O(1)
dic = {sorted_arr[i] : i for i in range(len(sorted_arr))}
for i in arr:
print(dic[i], end = " ") | true |
762fa4b07a9ba430d371d99b0e41fc69f4ff1094 | Python | ChristianMeyndt/aicoe-osc-demo | /src/components/utils/nq_utils.py | UTF-8 | 647 | 3.1875 | 3 | [] | no_license | import re
def get_text_section(doc_tokens):
    """Return section of text from a whitespace separated document.

    *doc_tokens* is (text, start, end); the slice [start:end] of the
    space-split tokens is re-joined with single spaces.
    """
    text, start, end = doc_tokens[0], doc_tokens[1], doc_tokens[2]
    tokens = text.split(" ")
    return " ".join(tokens[start:end])
def contains_table(text):
    """Returns True if a string contains an HTML table.

    Matches a '<Table>...</Table>' pair on one line.
    """
    # NOTE(review): '.' does not cross newlines, so tables whose markup spans
    # multiple lines are missed — confirm whether re.DOTALL is intended.
    # Idiom fix: return the boolean test directly instead of if/else True/False.
    return re.search(r"<Table>.*</Table>", text) is not None
def remove_html_tags(text):
    """Remove HTML tags (each with one optional trailing whitespace) from a string."""
    tag_pattern = re.compile(r"<[^>]+>\s?")
    return tag_pattern.sub("", text)
def is_not_short(text, n=10):
    """Returns True if string has more than n whitespace separated tokens."""
    # split(" ") produces exactly count-of-spaces + 1 fields (empty ones included),
    # so counting separators avoids building the token list.
    return text.count(" ") + 1 > n
| true |
29592e0a26d94d517e2c71cb0d0d5b41b587226e | Python | PNapi90/Fuzzy-Bayes-Tracking | /perm_list/test_fac.py | UTF-8 | 186 | 2.875 | 3 | [] | no_license | from numpy import *
def fac(a):
    """Return a! (the factorial of a); fac(0) == 1."""
    product = 1
    k = 2
    while k <= a:
        product *= k
        k += 1
    return product
a = 0
for i in range(1,11):
a += fac(i)*i
print(a*4/(1024*1024)) | true |
85b895be148e24629795599152f956043375e9c9 | Python | brghena/appkit-statements | /cover_letters/build_script.py | UTF-8 | 495 | 2.59375 | 3 | [] | no_license | #! /usr/bin/env python3
import glob
import os

print("Building cover letters...")

# Build a PDF for every cover letter except the shared base template.
letters = glob.glob("cover_letters/*.tex")
for letter in letters:
    # Naming convention: cover_<location>.tex — the suffix names the output PDF.
    loc = letter.split('.')[0].split('_')[-1]
    if loc == 'base':
        print("Skipping base file")
        continue
    output = 'pdfs/brghena-cover-{}.pdf'.format(loc)
    print("\nBuilding {}".format(output))
    cmd = './latexrun --latex-args="-shell-escape" -Wall {} -o {}'
    # NOTE(review): os.system with an interpolated string; inputs come from the
    # local tree so risk is low, but subprocess.run([...]) would be safer.
    os.system(cmd.format(letter, output))
print("\nComplete")
| true |
8d002fbce20a3a5d47196a8414a02d9b3dabd62c | Python | timo-stoettner/cryptocompare-client | /cryptocompare_client/core.py | UTF-8 | 13,514 | 2.546875 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
import logging
import time
from requests.exceptions import ConnectionError
from threading import Thread
import pymongo
import requests
import json
import sys
import socketIO_client
from . import masks
from . import customized_methods
from six import iteritems
from six import string_types as basestring
setattr(socketIO_client.transports.XHR_PollingTransport,
'recv_packet', customized_methods.custom_recv_packet)
class CryptocompareClient(object):
def __init__(self, sub_strings=None, websocket_url='https://streamer.cryptocompare.com',
mongo_col=None, namespace=None):
"""CryptocompareClient connects to the Websocket and Rest APIs of Cryptocompare.
Args:
sub_strings (optional): Websocket subscriptions, defaults to None.
The strings must have the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'
sub_strings must either be a list of strings or a single strings
websocket_url (optional): The url used to connect to the websocket.
Defaults to 'https://streamer.cryptocompare.com'
mongo_col (optional): MongoDB (pymongo) collection to insert messages into.
Defaults to None
namespace (optional): socketIO Namespace used to handle events.
Defaults to None.
"""
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
if isinstance(sub_strings, list):
self.sub_strings = sub_strings[:]
else:
self.sub_strings = sub_strings
self.url = websocket_url
self.mongo_col = mongo_col
self.namespace = namespace
self.restart_after = None
self._init_websocket()
def _init_websocket(self):
if self.namespace is None:
self.socket = socketIO_client.SocketIO(self.url)
else:
self.socket = socketIO_client.SocketIO(self.url, Namespace=self.namespace)
self.socket.on('m', self._on_message)
if self.sub_strings is not None:
self.subscribe(sub_strings=self.sub_strings[:])
def restart(self):
"""Restart websocket"""
logging.info("Restarting Cryptocompare Client...")
self.stop()
if hasattr(self, "thread"):
self.thread.join()
self._init_websocket()
self.listen(self.seconds, self.restart_after)
def listen(self, seconds=None, restart_after=None):
"""Start listening to the websocket.
Args:
seconds: Number of seconds to listen. Defaults to None.
If not specified, client will listen forever.
restart_after: Number of seconds to wait until restart,
when no messages are received. If not specified,
client will not restart.
"""
self.seconds = seconds
self.restart_after = restart_after
self.start_time = time.time()
self.received_messages = []
if restart_after is None:
if self.seconds is not None:
self.socket.wait(seconds=seconds)
else:
self.socket.wait()
else:
def _wait_thread():
if self.seconds is not None:
self.socket.wait(seconds=seconds)
else:
self.socket.wait()
self.thread = Thread(target=_wait_thread)
self.thread.start()
try:
if restart_after is not None:
time.sleep(restart_after)
while True:
n_messages = len(filter(lambda message_time:
time.time()-message_time < restart_after,
self.received_messages))
logging.debug("Number of messages in last %s seconds: %s",
restart_after, n_messages)
if restart_after is not None:
if n_messages == 0:
self.restart()
break
time.sleep(1)
except KeyboardInterrupt:
logging.debug("KeyboardInterrupt: Stopping...")
self.stop()
self.thread.join()
def stop(self):
"""Disconnect websocket"""
self.socket.disconnect()
def get_coin_list(self, base_url='https://www.cryptocompare.com/api/data/'):
"""Return coin list, see https://www.cryptocompare.com/api/#-api-data-coinlist-"""
r = requests.get('{}coinlist/'.format(base_url))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_coin_snapshot(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):
"""Return coin snapshot, see https://www.cryptocompare.com/api/#-api-data-coinsnapshot-"""
r = requests.get('{}coinsnapshot/?fsym={}&tsym={}'.format(base_url,fsym,tsym))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_top_pairs(self, fsym, limit=2000, base_url='https://min-api.cryptocompare.com/data/'):
"""Return top currency pairs by volume, see https://www.cryptocompare.com/api/#-api-data-toppairs-"""
r = requests.get('{}top/pairs?fsym={}&limit={}'.format(base_url, fsym, limit))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_all_coins(self, base_url='https://www.cryptocompare.com/api/data/'):
"""Return a list of all coins that are available on CryptoCompare"""
coin_list = self.get_coin_list(base_url=base_url)
return [coin for coin,d in iteritems(coin_list['Data'])]
def get_all_exchanges(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):
"""Return a list of all exchanges that trade a currency pair"""
res = self.get_coin_snapshot(fsym, tsym, base_url=base_url)
try:
exchanges = res['Data']['Exchanges']
markets = [x['MARKET'] for x in exchanges]
return sorted(markets)
except KeyError:
return res
def query_rest_api(self, api_name, base_url='https://min-api.cryptocompare.com/data/', **params):
"""Query the Rest API with specified params"""
query_params = '&'.join(['{}={}'.format(k,v) for k,v in iteritems(params)])
query_string = base_url + api_name + '?' + query_params
r = requests.get(query_string)
if r.status_code == 200:
return r.json()
else:
return r.status_code
def subscribe(self, method=None, exchange=None, currency_pair=None, sub_strings=None):
"""Subscribe to websocket channels
The channels must either be specified by the parameter sub_strings or by a combination
of the parameters method, exchange and currency_pair.
Args:
method (optional): The method must either be 'TRADE', 'CURRENT', 'CURRENTAGG' or
one of the corresponding SubsciptionIDs (0, 2 or 5).
See https://www.cryptocompare.com/api/#-api-web-socket-subscribe- for more
information.
exchange (optional): A valid exchange name that is recognized by the cryptocompare API.
currency_pair (optional): A tuple of currency symbols that are recognized by the
cryptocompare API, such as ('BTC','USD')
sub_strings (optional): Subscription strings in the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'.
sub_strings must either be a list of strings or a single string-
"""
if method is None and exchange is None and currency_pair is None and sub_strings is None:
raise ValueError("Either sub_strings or method, exchange, and currency_pair must be specified.")
elif sub_strings is not None:
if method is not None or exchange is not None or currency_pair is not None:
raise ValueError("If sub_strings is specified, all other keyword arguments must be None.")
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
elif method is None or exchange is None or currency_pair is None:
raise ValueError("If sub_strings is None, all other keyword arguments must be specified.")
else:
method = self._convert_method_to_number(method)
sub_strings = ['{}~{}~{}~{}'.format(method,
exchange,
currency_pair[0],
currency_pair[1])]
if self.sub_strings is None:
self.sub_strings = []
self.sub_strings.extend(sub_strings)
self.sub_strings = list(set(self.sub_strings))
try:
self.socket.emit('SubAdd', { 'subs': sub_strings })
except ConnectionError as e:
logging.info("ConnectionError: %s", e)
self.restart()
def unsubscribe(self, method=None, exchange=None, currency_pair=None, sub_strings=None):
"""Unubscribe from websocket channels
The channels must either be specified by the parameter sub_strings or by a combination
of the parameters method, exchange and currency_pair.
Args:
method (optional): The method must either be 'TRADE', 'CURRENT', 'CURRENTAGG' or
one of the corresponding SubsciptionIDs (0, 2 or 5).
See https://www.cryptocompare.com/api/#-api-web-socket-subscribe- for more
information.
exchange (optional): A valid exchange name that is recognized by the cryptocompare API.
currency_pair (optional): A tuple of currency symbols that are recognized by the
cryptocompare API, such as ('BTC','USD')
sub_strings (optional): Subscription strings in the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'.
sub_strings must either be a list of strings or a single string-
"""
if sub_strings is not None:
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
self.socket.emit('SubRemove', { 'subs': sub_strings })
else:
method = self._convert_method_to_number(method)
sub_strings = ['{}~{}~{}~{}'.format(method,
exchange,
currency_pair[0],
currency_pair[1])]
self.socket.emit('SubRemove', { 'subs': sub_strings })
def unsubscribe_all(self):
"""Unsubscribe from all channels that have been subscribed"""
self.socket.emit('SubRemove', { 'subs': self.sub_strings })
def _convert_method_to_number(self, method):
"""Convert method name to corresponding SubscriptionId"""
if str(method).upper() not in ['0', '2', '5', 'TRADE', 'CURRENT', 'CURRENTAGG']:
raise ValueError('Method has invalid value: {}'.format(method))
if str(method).upper() == 'TRADE' :
method = '0'
elif str(method).upper() == 'CURRENT':
method = '2'
elif str(method).upper() == 'CURRENTAGG':
method = '5'
return method
def _parse_message(self, response):
"""Parse a message received through websocket and return dictionary
Args:
response (str): The raw message
"""
response_list = response.split('~')
sub_id = response_list[0]
try:
if sub_id == '0': # TRADE
keys = ['SubscriptionId','ExchangeName','CurrencySymbol','CurrencySymbol','Flag','TradeId','TimeStamp','Quantity','Price','Total']
res = dict(zip(keys, response_list))
elif sub_id == '2' or sub_id == '5': # CURRENT / CURRENTAGG
unpacked = {}
mask = int(response_list[-1], 16)
i = 0
for key,value in masks.current:
if value == 0 or mask & value:
unpacked[key] = response_list[i]
i += 1
res = unpacked
else:
logging.debug("Unknown sub_id in message: %s", response)
res = None
except:
logging.warning("Parsing failed for: %s", response)
res = None
return res
def _on_message(self, *args):
"""Handle received messages and write to MongoDB if mongo_col was specified"""
parsed_message = self._parse_message(args[0])
if parsed_message is None:
logging.debug(("Could not parse message: %s", args[0]))
return
logging.debug("Received message: %s", parsed_message)
parsed_message = self.process_message(parsed_message)
if self.mongo_col is not None:
self.mongo_col.insert_one(parsed_message)
def process_message(self, msg):
"""Override this method to alter or handle incoming messages"""
if self.mongo_col is None:
print(msg)
return msg
| true |
68b61475a803d43befe2728b7c73c3d734453b6d | Python | bloomberg/phabricator-tools | /py/phl/phlsys_cppcheck.py | UTF-8 | 4,336 | 2.5625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """Run the external tool 'cppcheck' and process results."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_cppcheck
#
# Public Classes:
# Error
#
# Public Functions:
# run
# parse_output
# result_to_str
# summarize_results
#
# Public Assignments:
# Result
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import xml.etree.ElementTree
import phlsys_fs
import phlsys_subprocess
# A single cppcheck finding. 'line_numbers' is a list because one finding may
# reference several <location> elements in the XML report (see parse_output).
Result = collections.namedtuple(
    'phlsys_cppcheck__Result',
    ['severity', 'id', 'path', 'line_numbers', 'message'])
class Error(Exception):
    """Raised when cppcheck XML output lacks an expected attribute (see parse_output)."""
    pass
def run(dir_path):
    """Return errors from running cppcheck in supplied 'dir_path'.

    :dir_path: string of the path to the directory to run in
    :returns: list of Result

    """
    with phlsys_fs.chdir_context(dir_path):
        # XXX: handle the "couldn't find files" exception
        # '-q' suppresses progress chatter; the version-2 XML report arrives on
        # stderr, which parse_output() consumes.
        return parse_output(phlsys_subprocess.run(
            'cppcheck', '-q', '.', '--xml', '--xml-version=2').stderr.strip())
def parse_output(output):
    """Return a list of Result from the supplied 'output'.

    :output: string of the output from cppcheck
    :returns: list of Result

    """
    results = []
    # cppcheck --xml-version=2 wraps findings in <results><errors>...</errors>
    root = xml.etree.ElementTree.fromstring(output)
    error_list = root.find('errors')
    if error_list is not None:
        for error in error_list.iterfind('error'):
            path = None
            line_numbers = []
            message = error.get('verbose')
            severity = error.get('severity')
            identity = error.get('id')
            # collect every referenced line; the last <location>'s file wins
            for line in error.iterfind('location'):
                path = line.get('file')
                line_numbers.append(int(line.get('line')))
            if message is None:
                raise Error('could not find message: {}'.format(error.items()))
            if severity is None:
                raise Error(
                    'could not find severity: {}'.format(error.items()))
            if identity is None:
                raise Error(
                    'could not find identity: {}'.format(error.items()))
            if path is None:
                # oddly this happens with the 'toomanyconfigs' error type
                # we'll continue without adding it in this case
                continue
            results.append(
                Result(severity, identity, path, line_numbers, message))
    return results
def result_to_str(result):
    """Render one Result as 'path (lines): (severity) id - message'.

    :result: a Result, probably from parse_output()
    :returns: a string

    """
    joined_lines = ", ".join(str(number) for number in result.line_numbers)
    template = "{} ({}): ({}) {} - {}"
    return template.format(
        result.path,
        joined_lines,
        result.severity,
        result.id,
        result.message)
def summarize_results(result_list):
    """Join the string form of each supplied Result with newlines.

    :result_list: a list of Result, probably from parse_output()
    :returns: a string

    """
    return "\n".join(map(result_to_str, result_list))
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| true |
fe70584166a4b4bf04447f8a5d5dbf166cb033e0 | Python | RounakChatterjee/New_Assignement | /adder3.py | UTF-8 | 172 | 3.265625 | 3 | [] | no_license | '''
This program depicts how to give default value to arguments
'''
def adder(good=1, bad=2, ugly=3):
    """Return the sum of the three arguments; each defaults when omitted."""
    total = good + bad
    total += ugly
    return total
print(adder(ugly = 1,good = 5)) | true |
27d11967a24dd415b926c4e0727f42ff4bb9632f | Python | MFC-GAN/mfc-gan | /symbols_MFCGAN.py | UTF-8 | 15,601 | 2.875 | 3 | [
"MIT"
] | permissive | import sys
sys.path.insert(0,'../')
from util import *
import numpy as np
#import cv2
import tensorflow as tf
from numpy import *
import matplotlib as mlp
mlp.use('Agg')
from skimage import color
from skimage import io
from collections import Counter
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
'''
N.B to self but you can give it a go
1. spectral normalization helps
2. gradient penalty not helpful or at least same performance
3. different learning rates for G and D helps (not realy sure?)
4. extra training steps for G is a good decision
5. do not use opencv and plt at the same time (rgb/bgr)
6. best results at 25 epochs upwards
7. batch size plays little role in performance
8. can't do effective G training without batchnorm
9. oversampling works but try to balance fewest among minority
'''
def plot10(samples):
    '''
    this is an auxiliary function to plot generated samples using pyplot
    :param samples: an array of generated images
    :return: a matplotlib figure
    '''
    # 10x10 grid — assumes len(samples) <= 100 (call sites pass samples[:100])
    fig = plt.figure(figsize=(10, 10))
    gs = gridspec.GridSpec(10, 10)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        # the next 3 lines normalize the image between 0, 255
        # this is because gan uses -1 and 1 norm pixels
        sample =((sample+1)*(255/2.0)).astype(int)
        sample[sample > 255] = 255
        sample[sample < 0] = 0
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        # samples are generated at 64x64 single-channel; display greyscale
        plt.imshow(sample.reshape(64, 64),cmap='Greys_r')
    return fig
np.set_printoptions(threshold=np.inf)
def get_minority(k, dat, lbl):
    '''
    get the minority class of interest k
    :param k: the class label of interest
    :param dat: the set of Images X (paired element-wise with lbl)
    :param lbl: the class labels y
    :return: the data, label of class k (two lists)
    '''
    # Pair each image with its label via zip instead of the original manual
    # index counter; behavior is identical for equal-length inputs.
    min_d = [d for d, l in zip(dat, lbl) if l == k]
    min_l = [l for l in lbl if l == k]
    return min_d, min_l
def get_symbols(dir="symbols29/"):
'''
reads symbols images from dir
:param dir: location of symbols in os path
:return:
'''
data=[]
labels =[]
labels_names=[]
labels_idx=[]
label_count = 0
for folder in os.listdir(dir):
for image in os.listdir(dir+folder+'/'):
# print dir+folder+'/'+image
data.append(color.rgb2gray(io.imread(dir+folder+'/'+image)))
labels.append(label_count)
labels_idx.append(label_count)
label_count +=1
labels_names.append(folder)
uniq_labels = np.unique(labels)
label_stat = Counter(labels).values()
print sorted(zip(label_stat, uniq_labels))
print zip(labels_idx,labels_names)
return data, labels, label_count, labels_names
xtrain, ytrain, numclass, label_class = get_symbols()
print 'number of classes : ', numclass
## minority symbols are the minority symbols indexes this might be os dependant
## i,e how the images are ordered and read from the directory/database
## these are my minority indexes
minority_symbols = [1,2,4,5,9,11,13,21,24] # please replace with appropriate indexes
min_data = []
min_label = []
count = 0
### select minority classes into a set
for l in ytrain:
if l in minority_symbols:
min_data.append(xtrain[count])
min_label.append(l)
count+=1
####RE-SAMPLING AMONG THE MINORITY####################
## this is done manually for experiemntation and is dependent on the number of samples in the class
for r in range(2):
d,l=get_minority(11,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
for r in range(2):
d,l=get_minority(24,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
for r in range(2):
d,l=get_minority(5,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
d,l=get_minority(4,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
d,l=get_minority(13,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
for r in range(2):
d,l=get_minority(21,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
d,l=get_minority(2,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
for r in range(5):
d,l=get_minority(9,xtrain,ytrain)
xtrain.extend(d)
ytrain.extend(l)
################### END ###########################
uniq_labels =np.unique(ytrain)
## check symbols distribution
label_stat = Counter(ytrain).values()
print sorted(zip(label_stat,uniq_labels))
mb_size = 64 #batch
X_dim = [64, 64, 1] #image size
y_dim = numclass*2 # double label size to accomodate fake classes
z_dim = 100 # size of noise vector
eps = 1e-8 # a value chosen to avoid NaN error in loss
G_lr = 1e-4 # learning rate for G
D_lr = 1e-4 # learning rate for D
local_dir ='GAN_symbols_complete_rerun_verify/' #lcoation of generated images
#preparing images and labels for training#
xtrain= np.array([np.reshape(x, (64,64,1)) for x in xtrain])
xtrain = ((xtrain.astype(np.float32) - 127.5) / 127.5) #normalizing pixels values between -1 and 1
ytrain = [vectorized_result(y, y_dim) for y in ytrain] # my one-hot encoding
#preparing minority data
min_data= np.array([np.reshape(x, (64,64,1)) for x in min_data])
min_data = ((min_data.astype(np.float32) - 127.5) / 127.5)
min_label = np.array([vectorized_result(y, y_dim) for y in min_label])
print 'shape of minority data :', min_data.shape
print 'shape of minority labels :', min_label.shape
X = tf.placeholder(tf.float32, shape=[None, 64, 64, 1]) #input tensor
y = tf.placeholder(tf.float32, shape=[None, y_dim]) #output tensor for real y
fake_y = tf.placeholder(tf.float32, shape=[None, y_dim]) # output tensor for fake y
z = tf.placeholder(tf.float32, shape=[None, z_dim]) #noise vector tensor
condition = tf.placeholder(tf.int32, shape=[], name="condition") # switcher tensor to train with or without labels
#defining G weight and bias sizes for each layer
G_W0 = tf.Variable(xavier_init([z_dim + y_dim, 1024]), name='gw0')
G_b0 = tf.Variable(tf.zeros(shape=[1024]), name='gb0')
G_W1 = tf.Variable(xavier_init([1024, 128 * 8 * 8]), name='gw1')
G_b1 = tf.Variable(tf.zeros(shape=[128 * 8 * 8]), name='gb1')
G_W2 = tf.Variable(xavier_init([5, 5, 256, 128]), name='gw2')
G_b2 = tf.Variable(tf.zeros([256]), name='gb2')
G_W3 = tf.Variable(xavier_init([5, 5, 128, 256]), name='gw3')
G_b3 = tf.Variable(tf.zeros([128]), name='gb3')
G_W4 = tf.Variable(xavier_init([2, 2, 1, 128]), name='gw4')
G_b4 = tf.Variable(tf.zeros(shape=[1]), name='gb4')
def generator(z, c):
    '''
    this is the generator network with leaky relu activation, transpose convolution to increase image size and normal
    matrix multiplication for first two FC neurons
    :param z: noise vector
    :param c: class label (one-hot, concatenated onto z for conditioning)
    :return: generated images
    '''
    inputs = tf.concat(axis=1, values=[z, c])
    # two fully-connected layers, then reshape to an 8x8x128 feature map
    G_h0 = lrelu(tf.matmul(inputs, spectral_norm(G_W0)) + G_b0)
    G_h1 = lrelu(tf.matmul(G_h0, spectral_norm(G_W1))+ G_b1)
    print 'shape of G_h1 before reshape:', G_h1.get_shape()
    G_h1 = tf.reshape(G_h1, [-1, 8, 8, 128])
    G_h1 = tf.contrib.layers.batch_norm(G_h1)
    print 'shape of G_h1 after reshape:', G_h1.get_shape()
    # three stride-2 transposed convolutions upsample 8x8 -> 16x16 -> 32x32 -> 64x64
    G_h2 = lrelu(tf.nn.bias_add( tf.nn.conv2d_transpose(G_h1, spectral_norm(G_W2), output_shape=[mb_size, 16, 16, 256], strides=[1, 2, 2, 1], padding='SAME'), G_b2))
    print 'the shape of G_h2 :', G_h2.get_shape()
    G_h2 = tf.contrib.layers.batch_norm(G_h2)
    G_h3 = lrelu(tf.nn.bias_add(tf.nn.conv2d_transpose(G_h2, spectral_norm(G_W3), output_shape=[mb_size, 32, 32, 128], strides=[1, 2, 2, 1], padding='SAME'), G_b3))
    print 'the shape of G_h3 :', G_h3.get_shape()
    G_h3 = tf.contrib.layers.batch_norm(G_h3)
    G_log_prob = tf.nn.bias_add(tf.nn.conv2d_transpose(G_h3, spectral_norm(G_W4), output_shape=[mb_size, 64, 64, 1], strides=[1, 2, 2, 1], padding='SAME'),G_b4)
    # tanh keeps generated pixels in [-1, 1], matching the input normalization
    G_prob = tf.nn.tanh(G_log_prob)
    return G_prob
## initializing D weights and biases
D_W0 = tf.Variable(xavier_init([5, 5, 1, 16]), name = 'dw0')
D_b0 = tf.Variable(tf.zeros(shape=[16]), name='db0')
D_W1 = tf.Variable(xavier_init([5, 5, 16, 32]), name = 'dw1')
D_b1 = tf.Variable(tf.zeros(shape=[32]), name = 'db1')
D_W2 = tf.Variable(xavier_init([5, 5, 32, 64]), name = 'dw2')
D_b2 = tf.Variable(tf.zeros(shape=[64]), name = 'db2')
## these are the output parameters of the models
## d_w_gan for normal gan output
### d_w_aux for auxiliary classification
D_W1_gan = tf.Variable(xavier_init([4096, 1]), name = 'dwgan')
D_b1_gan = tf.Variable(tf.zeros(shape=[1]), name = 'dbgan')
D_W1_aux = tf.Variable(xavier_init([4096, y_dim]), name = 'dwaux')
D_b1_aux = tf.Variable(tf.zeros(shape=[y_dim]), name ='dbaux')
def discriminator(X):
    '''
    this is the D network model. uses leaky relu activations and convolution
    :param X: samples of real training images
    :return: gan probability and auxiliary classification
    '''
    # three stride-2 conv layers: 64x64x1 -> 32x32x16 -> 16x16x32 -> 8x8x64
    D_h0 = lrelu(tf.nn.conv2d(X, spectral_norm(D_W0), strides=[1, 2, 2, 1], padding='SAME') + D_b0)
    print 'shape of D_h0 :', D_h0.get_shape()
    D_h1 = lrelu(tf.nn.conv2d(D_h0, spectral_norm(D_W1), strides=[1, 2, 2, 1], padding='SAME') + D_b1)
    print 'shape of D_h1 :', D_h1.get_shape()
    D_h2 = lrelu(tf.nn.conv2d(D_h1, spectral_norm(D_W2), strides=[1, 2, 2, 1], padding='SAME') + D_b2)
    print 'shape of D_h2 :', D_h2.get_shape()
    # flatten to (batch, 4096 = 8*8*64) before the two output heads
    D_h3 = tf.reshape(D_h2, [mb_size, -1])
    # real/fake head: sigmoid probability
    out_gan = tf.nn.sigmoid(tf.matmul(D_h3, spectral_norm(D_W1_gan)) + D_b1_gan)
    print 'shape of out_gan :', out_gan.get_shape()
    # auxiliary class head: raw logits over y_dim (= 2 * numclass) labels
    out_aux = tf.matmul(D_h3, spectral_norm(D_W1_aux)) + D_b1_aux
    print 'shape of out_aux :', out_aux.get_shape()
    return out_gan, out_aux
## sets of weights and biases for both D and G. these will be used in training
theta_G = [G_W0, G_W1, G_W2, G_W3, G_W4, G_b0, G_b1, G_b2, G_b3, G_b4]
theta_D = [D_W0, D_W1, D_W2, D_W1_gan, D_W1_aux, D_b0, D_b1, D_b2, D_b1_gan, D_b1_aux]
def sample_z(m, n):
    """Draw an m-by-n matrix of generator noise, uniform on [-1, 1).

    :param m: batch size
    :param n: size of the noise vector
    :return: a set of noise inputs for G
    """
    shape = [m, n]
    return np.random.uniform(low=-1., high=1., size=shape)
def cross_entropy(logit, xy):
    '''
    :param logit: output from D_gan
    :param xy: set of labels for corresponding x inputs
    :return: softmax loss
    '''
    # mean softmax cross-entropy, negated — callers combine these terms with
    # explicit signs when assembling DC_loss / GC_loss
    return -tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=xy))
G_take = generator(z, y) # g iteration to get generated images
G_sample = G_take
print 'shape of generated images ', G_sample.get_shape()
D_real, C_real = discriminator(X) # d iteration over real images d_real is the gan output, c_real is the classification output
D_fake, C_fake = discriminator(G_sample) # d iteration over generated images
# GAN D loss
D_loss = tf.reduce_mean(tf.log(D_real + eps) + tf.log(1. - D_fake + eps))
# the network switcher is used to determine whether to add label loss or not
DC_loss = tf.cond(condition > 0, lambda: -(D_loss +(cross_entropy(C_real, y) + cross_entropy(C_fake, fake_y))), lambda: -D_loss)
# GAN's G loss
G_loss = tf.reduce_mean(tf.log(D_fake + eps))
# network switcher is used to determine whether to add label loss or not
GC_loss = tf.cond(condition > 0, lambda: -(G_loss +(cross_entropy(C_real, y) + cross_entropy(C_fake, y))), lambda:-G_loss)
# Classification accuracy only if interested in labels classification
correct_prediction = tf.equal(tf.argmax(C_real, 1), tf.argmax(y,1))
accuracy= tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
## defining backprop through D
D_solver = (tf.train.AdamOptimizer(learning_rate=D_lr)
.minimize(DC_loss, var_list=theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate=G_lr)
.minimize(GC_loss, var_list=theta_G))
#setting output directory to collect samples
if not os.path.exists(local_dir):
os.makedirs(local_dir)
#training initiated
sess = tf.Session()
sess.run(tf.global_variables_initializer())
i = 0 #simple count for steps and images
training_labels = np.array(ytrain)
training_data = np.array(xtrain)
for it in range(100000): #make your choice in iteration steps. 100k may not be ideal
## creating my own random batching from the number of parameters and batch size
ind = np.random.choice(training_data.shape[0], mb_size)
X_mb = np.array(training_data[ind])
y_mb = np.array(training_labels[ind])#sample_z(mb_size,y_dim)#
z_mb = sample_z(mb_size, z_dim)
fake_mb = generate_fake(y_mb, numclass) # generating fake labels from real once
#trainining step over all samples
_, DC_loss_curr, acc = sess.run([D_solver, DC_loss, accuracy], feed_dict={X: X_mb, y: y_mb, z: z_mb, fake_y:fake_mb, condition:1})
_, GC_loss_curr = sess.run([G_solver, GC_loss], feed_dict={X: X_mb, y: y_mb, z: z_mb, fake_y:fake_mb, condition:1})
# extra step for G. this has shown to improve performance
ind = np.random.choice(training_data.shape[0], mb_size)
X_mb = np.array(training_data[ind])
y_mb = np.array(training_labels[ind])
z_mb = sample_z(mb_size, z_dim)
fake_mb = generate_fake(y_mb,numclass)
_, GC_loss_curr = sess.run([G_solver, GC_loss], feed_dict={X: X_mb, y: y_mb, z: z_mb, fake_y:fake_mb, condition:1})
if it % 1000 == 0:
## some extra training steps on minority classes
for k in range(10):
ind = np.random.choice(min_data.shape[0], mb_size)
X_mb = np.array(min_data[ind])
y_mb = np.array(min_label[ind])
z_mb = sample_z(mb_size, z_dim)
fake_mb = generate_fake(y_mb, numclass)
_, DC_loss_curr, acc = sess.run([D_solver, DC_loss, accuracy], feed_dict={X: X_mb, y: y_mb, z: z_mb, fake_y:fake_mb, condition:1})
_, GC_loss_curr = sess.run([G_solver, GC_loss], feed_dict={X: X_mb, y: y_mb, z: z_mb, fake_y:fake_mb, condition:1})
ind = np.random.choice(min_data.shape[0], mb_size)
X_mb = np.array(min_data[ind])
y_mb = np.array(min_label[ind])
z_mb = sample_z(mb_size, z_dim)
fake_mb = generate_fake(y_mb,numclass)
_, GC_loss_curr = sess.run([G_solver, GC_loss], feed_dict={X: X_mb, y: y_mb, z: z_mb, fake_y:fake_mb, condition:1})
## generate, save and check samples in the save directory
samples = []
for index in minority_symbols:
s_level = np.zeros([mb_size, y_dim])
s_level[range(mb_size), index] = 1
samples.extend(sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim), y: s_level , fake_y:generate_fake(s_level,numclass), condition:1})[:10])
print('Iter: {}; DC_loss: {:0.4}; GC_loss: {:0.4}; accuracy: {:0.4}; '.format(it,DC_loss_curr, GC_loss_curr,acc))
fig = plot10(samples[:100])
plt.savefig(local_dir+'{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
#####save trained samples##############
## this is a post trainin step to generate more symbols for classification
gen_x = []
gen_y = []
for index in minority_symbols:
for w in range(20):
s_level = np.zeros([mb_size, y_dim])
s_level[range(mb_size), index] = 1
gen_y.extend(s_level)
gen_x.extend(sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim), y: s_level , fake_y:generate_fake(s_level,numclass), condition:1})[:50])
samples = np.array(gen_x)
np.savez(local_dir + 'generated_samples.npz', samples)
labels = np.array(gen_y)
np.savez(local_dir+'generated_labels.npz',labels) | true |
13cd62e39489650d94a687cdd34cd8607521180d | Python | wonjun0901/WJ_Develop_Individually | /Lecture_note_ML/2_scikit-learn/1_knn/KNeighborsRegressor_01.py | UTF-8 | 3,096 | 3.203125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np

# Regression analysis using the k-nearest-neighbors algorithm.

# Training data X
# - height and gender information (gender encoded as 1/0)
X_train = np.array([
    [158, 1],
    [170, 1],
    [183, 1],
    [191, 1],
    [155, 0],
    [163, 0],
    [180, 0],
    [158, 0],
    [170, 0]])

# Training data y
# - body-weight values matching each row of X_train
y_train = np.array([64, 86, 84, 81, 49, 59,
                    67, 54, 67])

# KNeighborsRegressor: predicts numeric values with the
# nearest-neighbors algorithm.
from sklearn.neighbors import KNeighborsRegressor

# Number of neighboring samples to search.
# - For regression there is no need to pick an odd value,
#   because the prediction is the neighbors' average.
K = 3

# Create the KNeighborsRegressor object (model) and fit it.
# (Model performance changes with the number of neighbors, n_neighbors.)
model = KNeighborsRegressor(
    n_neighbors=K).fit(X_train, y_train)

# Check the training result
# - using the score method
# - for regression models, score returns the R2 value
#   (coefficient of determination)

# R2 (coefficient of determination) formula:
#   1 - (sum of squared differences between the answers and the
#        model's predictions) /
#       (sum of squared differences between the answers and their mean)

# Interpreting the result:
# R2 generally falls between -1 and 1.
# The closer to 1, the better the model's predictions.
# A value near 0 means the model predicts only about the
# mean of the answer data.
# A value near -1 means the model cannot even predict the mean.
print('학습 결과 : ', model.score(X_train, y_train))

# r2_score: function that returns the R2 (coefficient of
# determination) score directly.
from sklearn.metrics import r2_score

predicted = model.predict(X_train)
print('예측 값 : ', predicted)
print('결정계수(R2) : ', r2_score(y_train, predicted))

# Evaluation function for regression models
# - mean absolute error (MAE)
# - takes the absolute value of (answer - prediction)
#   and returns the mean
# - i.e. averages the sum of |answer - prediction|
# - use: mean_absolute_error(answers, predictions)
# - shows the error magnitude on the same scale as the answers
from sklearn.metrics import mean_absolute_error

# Evaluation function for regression models
# - mean squared error (MSE)
# - squares (answer - prediction) and returns the mean
# - i.e. averages the sum of (answer - prediction)**2
# - use: mean_squared_error(answers, predictions)
# - commonly used as the loss function when training deep-learning models
from sklearn.metrics import mean_squared_error

predicted = model.predict(X_train)
print('예측 값 : ', predicted)
print('평균절대오차(MAE) : ',
      mean_absolute_error(y_train, predicted))
print('평균제곱오차(MSE) : ',
      mean_squared_error(y_train, predicted))
| true |
0ab40eae06f1e58eb58a85be90440e9851d833f1 | Python | mauriciomd/hackerrank-solutions | /hash-tables-ice-cream-parlor.py | UTF-8 | 713 | 2.890625 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the whatFlavors function below.
def whatFlavors(cost, money):
    """Print the 1-based indices (smaller first) of two distinct flavors
    whose prices sum to exactly ``money``.

    Classic two-sum with a hash table: a single O(n) pass, O(n) extra space.

    :param cost: list of flavor prices (output indices are 1-based)
    :param money: the exact amount the two flavors must cost together
    """
    # Maps "price still needed to complete a pair" -> 1-based index needing it.
    complement_index = {}
    # enumerate(..., start=1) replaces the original range(1, len+1) /
    # cost[i-1] index arithmetic with idiomatic direct iteration.
    for i, price in enumerate(cost, start=1):
        needed = money - price
        if needed not in complement_index:
            complement_index[needed] = i
        j = complement_index.get(price)
        # j != i rules out pairing a flavor with itself when price * 2 == money.
        if j is not None and j != i:
            print(f'{j} {i}')
if __name__ == '__main__':
    # Input layout: one line with the trip count, then three lines per trip
    # (pooled money, number of flavors, and the space-separated price list).
    trips = int(input())
    for _ in range(trips):
        money = int(input())
        n = int(input())  # flavor count; read only to consume the line
        cost = list(map(int, input().rstrip().split()))
        whatFlavors(cost, money)
| true |
d14562f5b7d54a833a8be8cc0f02604dc80d783c | Python | nikmalviya/Python | /Practical 1/current_datetime.py | UTF-8 | 96 | 3.171875 | 3 | [] | no_license | import datetime as dt
# Capture the current local date and time once, then display it below.
today = dt.datetime.today()
print('Current Date and Time : ')
print(today) | true |
76ab83da213dcde1fce71a4ecb95aafae07f9577 | Python | robertstephen/PyWeb-06 | /simple-mvc/model.py | UTF-8 | 310 | 3.359375 | 3 | [] | no_license |
class Widget(object):
    """
    Interface specification for the model layer of the simple-MVC example.

    The Widget class shall present the class method:
        get_widget(name)
    which retrieves the widget of the given name.

    Each widget instance shall present the methods:
        get_value()
        set_value(value)
    which get and set the widget's current value, respectively.
    """
    # Intentionally left unimplemented: this file only documents the contract
    # that a concrete model implementation must satisfy.
    pass
| true |
962ec6ae70955f0d5dcfbfa33bf5fdd2217cf062 | Python | avanadia/hackathon_repo | /transformations/moduleParse.py | UTF-8 | 619 | 2.984375 | 3 | [] | no_license | import xml.etree.ElementTree as ET
from transformations.htmlParser import HtmlParse
# For all HTML in node, creates a cumulative list of modules that appear in the HTML.
def moduleParse(node):
    """Collect the modules referenced by every 'bodyText' child of *node*.

    :param node: an iterable XML element (e.g. xml.etree ElementTree node)
                 whose children may include 'bodyText' elements carrying
                 raw HTML as their text payload
    :return: list of modules accumulated across ALL 'bodyText' children
    """
    # Modules gathered from the body-text HTML.
    modules = []
    for child in node:
        # Only 'bodyText' children carry the raw HTML payload.
        if child.tag == 'bodyText':
            # Fetch the raw HTML string (NOTE(review): child.text may be None
            # for an empty element — presumably HtmlParse tolerates it; verify).
            rawHTMLStr = child.text
            htmlParse = HtmlParse(rawHTMLStr)
            # extend, not assign: the header comment promises a CUMULATIVE
            # list, but the original overwrote `modules` on each iteration,
            # keeping only the last bodyText child's modules.
            modules.extend(htmlParse.getModules())
return modules | true |
82524756448c14a9da784ca6f88425f702806597 | Python | vpegasus/dnc | /memory.py | UTF-8 | 9,113 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Prince
@file: memory.py
@time: 2018-1-30 01: 42
@license: Apache License
@contact: pegasus.wenjia@foxmail.com
"""
import tensorflow as tf
class Memory:
    """External memory of a Differentiable Neural Computer and its
    content-based / dynamic addressing operations.

    Shape legend used throughout:
        b = batch_size, N = num_memory (slots), W = word_size,
        R = num_read_heads.
    NOTE(review): written for TensorFlow 1.x graph mode — every method adds
    ops to the current default graph and mutates the tensors held on self.
    """

    def __init__(self, num_memory, word_size, num_read_heads, batch_size):
        """
        memory and its operations
        :param num_memory: N, number of memory slots
        :param word_size: W, width of each memory word
        :param num_read_heads: R, number of read heads
        :param batch_size: b, batch size
        """
        self.batch_size = batch_size
        self.num_memory = num_memory
        self.num_read_heads = num_read_heads
        self.word_size = word_size
        self.initial_paras()

    def initial_paras(self):
        # Initialize the memory matrix and all bookkeeping tensors. Small
        # non-zero fills (1e-8) avoid degenerate all-zero states (e.g. in
        # the cosine-similarity normalization of content addressing).
        self.memory = tf.fill([self.batch_size, self.num_memory, self.word_size], value=1e-8,
                              name='memory')  # b x N x W
        self.usage = tf.zeros([self.batch_size, 1, self.num_memory], name='memory_usage')
        self.precedence_weight = tf.zeros(shape=[self.batch_size, 1, self.num_memory], dtype=tf.float32,
                                          name='precedence_weights')
        self.write_weights = tf.fill([self.batch_size, 1, self.num_memory], value=1e-8, name='write_weight')
        self.linkage_matrix = tf.zeros(shape=[self.batch_size, self.num_memory, self.num_memory], dtype=tf.float32,
                                       name='linkage_matrix')  # b x N x N
        self.read_weights = tf.fill([self.batch_size, self.num_read_heads, self.num_memory], value=1e-8,
                                    name='read_weights')  # b x R x N
        self.read_heads = tf.fill([self.batch_size, self.num_read_heads, self.word_size], value=1e-8,
                                  name='read_heads')  # b x R x W

    def content_address(self, keys, strengths):
        """
        Apply content-based addressing: softmax over the strength-scaled
        cosine similarity between each key and every memory slot.
        :param keys: [batch_size, num_of_keys, word_size]  # num_of_keys is R
            for reads, or 1 for the single write head
        :param strengths: [batch_size, num_of_keys, 1]
        :return: addressing weights [batch_size, num_of_keys, num_memory]
        """
        normalized_memory = tf.nn.l2_normalize(self.memory, 2)  # b x N x W
        normalized_keys = tf.nn.l2_normalize(keys, 2)  # b x r/w x W
        memory_trans = tf.transpose(normalized_memory, perm=[0, 2, 1])
        similarity = tf.matmul(normalized_keys, memory_trans)  # b x r/w x N
        return tf.nn.softmax(similarity * strengths, 2)  # b x r/w x N

    def retention_vector(self, free_gates):
        """
        Get the memory retention vector: how much each slot should be
        preserved after the free gates release read locations.
        :param free_gates: [batch_size, num_read_heads, 1]
        :return: retention vector [batch_size, 1, num_memory]
        """
        return tf.reduce_prod(1 - self.read_weights * free_gates, axis=1, keepdims=True)  # b x 1 x N

    def usage_vector(self, retention_vector):
        """
        Update and return the usage vector (how "occupied" each slot is),
        combining the previous usage, the last write weights and retention.
        :param retention_vector: b x 1 x N
        :return: updated usage [b x 1 x N]
        """
        self.usage = (self.usage + self.write_weights - self.usage * self.write_weights) * retention_vector
        return self.usage

    def usage_sort(self, usage):
        """
        Sort the usage in ascending order (least-used slots first).
        :param usage: b x 1 x N
        :return: sorted_usage, and their original order indices.
        """
        # top_k on the negated usage yields an ascending sort of the original.
        top_k_values, top_k_indices = tf.nn.top_k(-1 * usage, k=self.num_memory)
        return -1 * top_k_values, top_k_indices

    def allocation_address(self, sorted_usage, top_k_indices):
        """
        Get the allocation weights (dynamic addressing for free slots).
        :param sorted_usage: [batch_size, 1, num_memory] ascending usage
        :param top_k_indices: [batch_size, 1, num_memory] original positions
        :return: allocation weights in original slot order [batch_size, 1, num_memory]
        """
        usage_cumprod = tf.cumprod(sorted_usage, axis=2, exclusive=True)
        unordered_allocation_weights = (1 - sorted_usage) * usage_cumprod
        # Trick! the following use a trick to order the allocation weights: as the allocation weights are all
        # no bigger than 1, so its effect could be ignored when sorting with the data(which granularity is 1 or
        # bigger) (the data,I use here is just the top_k_indices x 2(in case of the extreme phenomena occur,
        # i.e allocation_weight =1)
        map_sort = unordered_allocation_weights + tf.cast(top_k_indices, tf.float32) * 2.
        allocation, _ = tf.nn.top_k(-1 * map_sort, k=self.num_memory)
        idx = tf.range(0, self.num_memory, dtype=tf.float32) * 2.
        # Strip the 2*index offsets again, leaving only the reordered weights.
        allocation += idx
        return -1 * allocation

    def _get_write_weights(self, write_gate, allocation_gate, allocation_address, content_address):
        """
        Blend allocation- and content-based addresses into the write weights.
        :param write_gate: b x 1 x 1
        :param allocation_gate: b x 1 x 1 (interpolates allocation vs content)
        :param allocation_address: b x 1 x N
        :param content_address: b x 1 x N
        :return: None (stores the result on self.write_weights, b x 1 x N)
        """
        self.write_weights = write_gate * (
                allocation_gate * allocation_address + (1 - allocation_gate) * content_address)

    def write_to_memory(self, write_vector, erase_vector):
        """
        Erase-then-add update of the memory matrix under the write weights.
        :param write_vector: [batch_size, 1, word_size]
        :param erase_vector: [batch_size, 1, word_size]
        :return: None (self.memory updated in place, [b x N x W])
        """
        weight_write = tf.transpose(self.write_weights, perm=[0, 2, 1])
        self.memory = self.memory * (1 - tf.matmul(weight_write, erase_vector)) + tf.matmul(weight_write, write_vector)

    def write(self, write_key, write_strength, free_gates, write_gate, allocation_gate, erase_vector, write_vector):
        """
        Full write step: content lookup, usage/allocation update, then the
        erase-and-add memory update.
        :param write_key: content key for the write head [b x 1 x W]
        :param write_strength: key sharpness [b x 1 x 1]
        :param free_gates: per-read-head free gates [b x R x 1]
        :param write_gate: overall write intensity [b x 1 x 1]
        :param allocation_gate: allocation-vs-content mix [b x 1 x 1]
        :param erase_vector: what to erase [b x 1 x W]
        :param write_vector: what to write [b x 1 x W]
        :return: None (mutates memory, usage and write weights on self)
        """
        content_write = self.content_address(write_key, write_strength)
        retention = self.retention_vector(free_gates)
        usage = self.usage_vector(retention)
        sorted_usage, top_k_indices = self.usage_sort(usage)
        allocation = self.allocation_address(sorted_usage, top_k_indices)
        self._get_write_weights(write_gate, allocation_gate, allocation, content_write)
        self.write_to_memory(write_vector, erase_vector)

    def precedence_update(self):
        """
        Update the precedence weights (degree to which each slot was the
        most recent write target).
        :return: None (self.precedence_weight updated, b x 1 x N)
        """
        self.precedence_weight = (1 - tf.reduce_sum(self.write_weights, axis=2,
                                                    keepdims=True)) * self.precedence_weight + self.write_weights

    def linkage_matrix_update(self):
        """
        Update the temporal linkage matrix recording write-order between slots.
        :return: None (self.linkage_matrix updated, b x N x N)
        """
        reset_factor = self._linkage_reset_factor()
        p_weight = tf.transpose(self.precedence_weight, perm=[0, 2, 1])
        linkage_matrix = reset_factor * self.linkage_matrix + tf.matmul(p_weight, self.write_weights)
        # Zero the diagonal: a slot is never its own temporal successor.
        I = tf.eye(num_rows=self.num_memory, batch_shape=[self.batch_size])
        self.linkage_matrix = linkage_matrix * (1 - I)

    def _linkage_reset_factor(self):
        """
        Compute (1 - w_i - w_j) for every slot pair, the factor that decays
        old link entries touched by the current write.
        :return: reset factor [b x N x N]
        """
        reshape_weight2 = tf.transpose(self.write_weights, [0, 2, 1])  # b x N x 1
        ones1 = tf.ones(shape=[self.batch_size, 1, self.num_memory])  # b x 1 x N
        ones2 = tf.ones(shape=[self.batch_size, self.num_memory, 1])
        reset_factor = 1 - tf.matmul(reshape_weight2, ones1) - tf.matmul(ones2, self.write_weights)
        return reset_factor  # b x N x N

    def temporal_address(self):
        """
        Forward/backward temporal addressing from the previous read weights
        through the linkage matrix.
        :return: (forward_address, backward_address), each
            [batch_size, num_read_heads, num_memory]
        """
        forward_address = tf.matmul(self.read_weights, self.linkage_matrix)  # b x R x N * b x N x N
        transpose_link = tf.transpose(self.linkage_matrix, perm=[0, 2, 1])
        backward_address = tf.matmul(self.read_weights, transpose_link)  # b x R x N * b x N x N
        return forward_address, backward_address  # b x R x N

    def _get_read_weights(self, forward_address, backward_address, content_address, mode_weights):
        """
        Mix backward, forward and content addresses per read head.
        :param forward_address: b x R x N
        :param backward_address: b x R x N
        :param content_address: b x R x N
        :param mode_weights: b x R x 3 read-mode distribution
        :return: None (self.read_weights updated, b x R x N)
        """
        # Slice out the three read modes along the last axis.
        mode1 = tf.slice(mode_weights, [0, 0, 0], [-1, -1, 1])
        mode2 = tf.slice(mode_weights, [0, 0, 1], [-1, -1, 1])
        mode3 = tf.slice(mode_weights, [0, 0, 2], [-1, -1, 1])
        self.read_weights = mode1 * forward_address + mode2 * backward_address + mode3 * content_address

    def read_memory(self):
        """
        Generate the read heads by applying the read weights to memory.
        :return: read heads [b x R x W]
        """
        self.read_heads = tf.matmul(self.read_weights, self.memory)  # b x R x N * b x N x W
        return self.read_heads  # b x R x W

    def read(self, read_keys, read_strengths, read_modes):
        """
        Full read step: content lookup, linkage update, temporal addressing,
        read-weight mixing and finally the memory read.
        :param read_keys: content keys [b x R x W]
        :param read_strengths: key sharpness [b x R x 1]
        :param read_modes: backward/forward/content mix [b x R x 3]
        :return: read heads [b x R x W]
        """
        content_read = self.content_address(read_keys, read_strengths)
        self.linkage_matrix_update()
        forward_address, backward_address = self.temporal_address()
        self._get_read_weights(forward_address, backward_address, content_read, read_modes)
        self.read_memory()
        self.precedence_update()
        return self.read_heads
if __name__ == '__main__':
    # No demo or self-test; this module is meant to be imported by the DNC model.
    pass
| true |
69186bbc724c33bafdd4b08d19bca356797dd120 | Python | vybhav72954/Amazing-Python-Scripts | /ZIP-Function/transpose.py | UTF-8 | 947 | 4.6875 | 5 | [
"MIT"
] | permissive | #how to get transpose of a matrix.
# Demonstrates getting the transpose of a matrix as a one-liner with zip().
# zip() aggregates items from several iterables — here the rows of the matrix,
# unpacked via the * ("asterisk") operator — so each tuple it yields is one
# column of the original matrix; it works just like a physical zipper, but for
# any number of iterables at once. Python has no dedicated "unzip" function,
# yet zip(*...) plays exactly that role, which is what makes the transpose fit
# on a single line. Converting each yielded tuple back to a list produces the
# transposed matrix.
# main code
matrix = [[1, 2, 3], [1, 2, 3]]  # the inputted matrix
matrix_T = [list(column) for column in zip(*matrix)]  # one-liner transpose
print(matrix_T)#print to validate | true |
6896682b041713dff84ab145274b7858f3f3c7bc | Python | seo0/MLstudy | /adsa_2016/module13_graphs01/part02_basics02/graphs_etc.py | UTF-8 | 1,760 | 3.84375 | 4 | [] | no_license |
import networkx as nx
import matplotlib.pyplot as plt
from module13_graphs01.part01_basics.graphs_basics import print_graph, display_and_save
def modulo_digraph(N,M):
    """
    This function returns a directed graph (DiGraph) with the following properties (see also cheatsheet at: http://screencast.com/t/oF8Nr1TdYDbl):
    - The graph has N nodes, labelled using numbers from 1 to N-1
    - All nodes in the same M-modulo class are on the same path (the path from the node with lower value to highest value)
    - All nodes that are multiple of M-1 (or, in other words, for which node % (M-1) == 0) are on the same path (that goes from lower to higher values)
    Hint:
    - Initialise the DiGraph, for each node you can store two properties (value of % M, value of % (M-1))
    - Scan the created graph to create paths based on similar values of the two properties
    - Create edges in the graph using the values of the lists of nodes that you created at the previous step
    More about DiGraphs at: https://networkx.github.io/documentation/development/reference/classes.digraph.html
    """
    # Exercise stub: intentionally unimplemented (currently returns None).
    pass
def longest_shortest_path(G):
    """
    Given a graph G, this function prints on screen the longest among the shortest paths between two nodes in G.
    Note that you can check whether a path exists between two nodes using nx.has_path(G, node1, node2).
    If there is more than one longest shortest path, it doesn't matter which one is chosen.
    """
    # Exercise stub: intentionally unimplemented (currently returns None).
    pass
if __name__ == '__main__':
    # NOTE(review): modulo_digraph and longest_shortest_path above are
    # unimplemented stubs that return None, so these demo calls will not
    # produce useful output until the exercises are completed.
    G = modulo_digraph(6,3)
    print_graph(G)
    longest_shortest_path(G)
    G = modulo_digraph(7, 2)
    print_graph(G)
    G = modulo_digraph(10, 5)
    print_graph(G)
longest_shortest_path(G) | true |
ca4986842e4243896a7b8e5cf8942465d8949874 | Python | Toruitas/MSc-Coding-2 | /w4/scrape_corona.py | UTF-8 | 7,389 | 3.25 | 3 | [] | no_license | from gensim.summarization import summarize
# Standard library
import ast
import datetime

# Third-party
import pandas
import numpy as np
from bs4 import BeautifulSoup
import requests
def get_link_response(url: str) -> requests.Response:
    """
    Perform an HTTP GET against *url* and hand back the raw Response
    object for downstream parsing.
    """
    response = requests.get(url)
    return response
def choose_next_link(next_link_candidates: list) -> list:
    """
    choose_next_link

    Given a list of URL strings gathered from the scraped website, pick which
    links to go to next: only those pointing at one of the hard-coded news
    outlets below ("untrusted sources" the author is deliberately crawling).

    :param next_link_candidates: candidate URL strings from the scraped page
    :return: the matching URLs in input order, each appearing at most once
    """
    # Fixes vs the original: a stray debug print() was removed, and the nested
    # keyword loop appended a URL once PER matching keyword, so a URL matching
    # several keywords was duplicated; any() tests membership exactly once.
    url_keywords = ["breitbart", "foxnews", "thehill", "dailymail", "drudgereport", "hannity", "onion", "chinadaily",
                    "rt.com", "redstate", "freebeacon", "washingtonexaminer", "politico", "xinhua", "wsj"]
    return [link for link in next_link_candidates
            if any(keyword in link for keyword in url_keywords)]
def parse_page(webpage: requests.Response, dict_list_to_write: list) -> tuple:
    """
    Break apart one fetched HTML page: build a short summary of its text and
    collect the outgoing links worth following next.

    :param webpage: Response object for parsing
    :param dict_list_to_write: rows scraped so far (dicts with a "url" key);
                               a page already recorded there contributes no
                               further link candidates
    :return: (summary, next_link_candidates_cleaned) — the tweet-sized summary
             for this page's data row, and the candidate URLs for the next
             scraping round
    """
    soup = BeautifulSoup(webpage.text, 'html.parser')
    # Unique (href, anchor text) pairs; a.get('href') returns None when the
    # anchor has no href attribute.
    next_link_candidates = {(a.get('href'), a.get_text()) for a in soup.find_all('a')}

    # BUG FIX: the original tested `webpage.url not in dict_list_to_write`,
    # comparing a string against a list of dicts, which is always True.
    # Compare against the urls recorded in those row dicts instead.
    already_scraped = webpage.url in {row.get("url") for row in dict_list_to_write}

    next_link_candidates_cleaned = []
    if not already_scraped:
        for href, text in next_link_candidates:
            # BUG FIX: the original guarded with `if link:` on the whole
            # (href, text) tuple, which is always truthy, so a None href
            # crashed on the '"mailto:" not in link[0]' substring test.
            if not href:
                continue
            if href == webpage.url or "mailto:" in href:
                continue
            # Only chase links whose anchor text looks pandemic-related.
            if not ("corona" in text or "Corona" in text or "COVID-19" in text
                    or "Virus" in text or "virus" in text):
                continue
            if href.startswith("/"):
                # Relative path: anchor it to the page it came from. The
                # original only handled the literal "/" link and emitted every
                # other relative path unchanged (a broken absolute URL).
                next_link_candidates_cleaned.append(webpage.url + href)
            else:
                next_link_candidates_cleaned.append(href)

    paragraphs = [p.get_text() for p in soup.find_all('p')]
    summary = ""
    if paragraphs:
        paragraphs_joined = " ".join(paragraphs)
        try:
            summary = summarize(paragraphs_joined, word_count=140)  # make a twitter summary!
            print(f"{webpage.url} summarizes down to {summary}")
        except ValueError:
            # gensim's summarize raises ValueError when the input text is too
            # short to build a sentence graph (the original used a bare except).
            print(f"Unable to summarize: {paragraphs_joined}. String too short.")
    return summary, next_link_candidates_cleaned
def scrape_page(links: list, dict_list_to_write: list) -> tuple:
    """
    Coordinate one scraping round: fetch every URL in *links*, summarize each
    page, and collect its whitelisted outgoing links.

    :param links: page URLs to process this round
    :param dict_list_to_write: rows already scraped (consulted by parse_page)
    :return: (scraped_data_dict_list, filtered_candidates) — one row dict per
             page (url / summary / related_links), and the pooled links to
             process in the next round
    """
    # Fetch all pages up front, preserving the order of the input links.
    responses = list(map(get_link_response, links))

    scraped_data_dict_list = []
    filtered_candidates = []
    for response in responses:
        # Summarize the page and gather every candidate link it contains,
        # then keep only those pointing at the allow-listed news sites.
        summary, candidates = parse_page(response, dict_list_to_write)
        related_links = choose_next_link(candidates)
        filtered_candidates.extend(related_links)
        scraped_data_dict_list.append({
            "url": response.url,
            "summary": summary,
            "related_links": related_links,
        })
    return scraped_data_dict_list, filtered_candidates
if __name__ == "__main__":
    # TODO: argparse subject to scrape
    starting_time = datetime.datetime.now()
    print(f"Scraping the news for corona. Limes! Starting at {starting_time}.")

    next_links = ["https://drudgereport.com"]  # seed page for the crawl
    depth = 5  # how many layers of links we're going to follow
    current_depth = 0
    save_csv_path = "corona.csv"
    total_scraped_pages = 0

    # Try to load an existing CSV so new rows are appended to earlier runs;
    # if it's not found, we just start with an empty table.
    try:
        # The list column round-trips through its string repr in CSV, so a
        # converter rebuilds it. SECURITY FIX: ast.literal_eval parses Python
        # literals only, whereas the original bare eval executed arbitrary
        # code embedded in the CSV file.
        # See: https://stackoverflow.com/questions/23111990/
        beginning_df = pandas.read_csv(save_csv_path,
                                       converters={'related_links': ast.literal_eval},
                                       index_col=0)
        new_file = False
    except FileNotFoundError:
        print("File not found, we'll make one later")
        new_file = True

    # One dict per scraped page; converted into the dataframe at the end.
    dict_list_to_write = []

    # Run the scraping rounds until we reach the desired depth.
    while current_depth < depth:
        print(f"Scraping at depth {current_depth}. Total scraping: {len(next_links)} links")
        current_depth += 1
        current_amt_links = len(next_links)
        # Each round returns the row dicts for this depth plus the links to
        # chase next; every dict becomes one row in the final table.
        data_dict_list, next_links = scrape_page(next_links, dict_list_to_write)
        dict_list_to_write.extend(data_dict_list)
        next_links = set(next_links)  # unique URLs only; set() removes duplicates
        total_scraped_pages += current_amt_links
        print(f"Scraping depth {current_depth} complete. Next up: {len(next_links)} links")

    # Turn the list of dicts into a dataframe (each dict is a row).
    df_additions = pandas.DataFrame(dict_list_to_write, columns=["url", "summary", "related_links"])

    # Finally, (re)save the CSV, appending to any previously loaded rows.
    if not new_file:
        df_to_save = pandas.concat([beginning_df, df_additions])
        df_to_save.to_csv(save_csv_path, sep=",")
    else:
        df_additions.to_csv(save_csv_path, sep=",")

    # Report how long the whole crawl took. BUG FIX: timedelta.seconds keeps
    # only the seconds-within-a-day component and under-reports runs longer
    # than 24h; total_seconds() gives the real elapsed time.
    end_time = datetime.datetime.now()
    duration = (end_time - starting_time).total_seconds()
    print(f"Scraped a total of {total_scraped_pages} pages in {duration:.0f} seconds")
    # BUG FIX: the original ended with exit(code=1), signalling FAILURE to the
    # shell/CI on a successful run; falling off the end exits with status 0.
9698df740aed7bc1f553cb8d0da1bfb29e268343 | Python | Apocilyptica/Python_notes_school | /Functions/argument_unpacking.py | UTF-8 | 425 | 4.125 | 4 | [] | no_license | # Default unpacking is *args, it get returned as a Tuple
def greeting(*args):
print('Hi ' + ' '.join(args))
print(args)
greeting('Tiffany', 'Hudgens')
greeting('Kristine', 'M', 'Hudgens')
def greeting(time_of_day, *args):
print(f"Hi {'-'.join(args)}, I hope that you are having a good {time_of_day}")
print(args)
greeting('Morning', 'Tiffany', 'Hudgens')
greeting('Afternoon', 'Kristine', 'M', 'Hudgens') | true |
1540108abfa364caa94b20c920b19f1138cbbfbf | Python | shcgay/song-chun | /5day/6-new方法.py | UTF-8 | 210 | 2.9375 | 3 | [] | no_license | class A(object):
def __init__(self):
print(self)
print("这是init方法")
def __new__(cls):
print(cls)
print("这是new方法")
ret = object.__new__(cls)
return ret
a = A()
print(a)
print(id(A))
| true |
9b29f2cd6706ec692718de17cfc32d1bc62802e7 | Python | anotherjoshsmith/nonstick | /nonstick/pamm.py | UTF-8 | 5,913 | 2.8125 | 3 | [
"MIT"
] | permissive | import os.path as op
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from scipy.spatial import distance_matrix
from scipy.stats import multivariate_normal
def main():
np.random.seed(7)
data_dir = op.join(op.dirname(__file__), "../examples")
data_file = op.join(data_dir, "example_data.csv")
# read example data
data = pd.read_csv(data_file, index_col=0)
X = data.values[:, :3] # include class as third dimension
y = data.values[:, 2]
# scale data
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y)
# get sample grid for KDE with farthest point algorithm
N = X.shape[0]
M = np.sqrt(N).round()
Y = farthest_point_grid(X_train, M)
P = density_estimation(X_train, Y)
clust = quick_shift(Y, P)
print("number of clusters: ", len(np.unique(clust)))
plt.scatter(X_train[:, 0], X_train[:, 1], c="k", alpha=0.3, s=10)
plt.scatter(Y[:, 0], Y[:, 1], c=clust, s=(P / 10))
plt.show()
# predict with gmm
gmm = build_gmm(Y, P, clust)
best = gmm.predict(X_test)
plt.scatter(X_train[:, 0], X_train[:, 1], c="k", alpha=0.3, s=10)
plt.scatter(X_test[:, 0], X_test[:, 1], c=best, s=20)
plt.show()
def calc_distances(p0, points):
return ((p0 - points) ** 2).sum(axis=1)
def farthest_point_grid(x, m):
farthest_pts = np.zeros((int(m), x.shape[1]))
# select random point as first grid point
farthest_pts[0] = x[np.random.randint(len(x))]
distances = calc_distances(farthest_pts[0], x)
# iteratively select farthest remaining point
for i in range(1, int(m)):
farthest_pts[i] = x[np.argmax(distances)]
distances = np.minimum(distances, calc_distances(farthest_pts[i], x))
return farthest_pts
def density_estimation(x, y):
D = x.shape[1]
y_dists = distance_matrix(y, y)
y_dists[y_dists == 0] = 1000 # large value to prevent self selection
delta_i = np.amin(y_dists, axis=1)
y_x_dists = distance_matrix(y, x)
min_dists = np.argmin(y_x_dists, axis=0)
sigma_j = np.array([delta_i[idx] for idx in min_dists])
prefactor = np.power(2 * np.pi * np.power(sigma_j, 2.0), (-D / 2.0))
gaussians = prefactor * (
np.exp(-np.power(y_x_dists, 2.0) / (2 * np.power(sigma_j, 2.0)))
)
pdf = np.sum(gaussians, axis=1)
return pdf
def quick_shift(y, P, scaling_factor=2.0):
# get y distances
y_dists = distance_matrix(y, y)
y_dists[y_dists == 0] = 1000
lamb = y_dists.min(axis=0).mean() * scaling_factor
# create cluster id array to assign clusters
clusters = np.zeros_like(P, dtype="int")
def connect_to_neighbor(i):
# find points with greater probability
mask = np.where(P > P[i])[0]
# return if we hit the highest probability point
if len(mask) == 0:
return i
# get the id of the closest higher probability point
min_dist_id = np.argmin(y_dists[mask, i])
j = mask[min_dist_id]
if y_dists[i, j] > lamb:
return i
return connect_to_neighbor(j)
# for each y, climb to highest prob within lambda
for idx in range(0, len(P)):
clusters[idx] = connect_to_neighbor(idx)
# combine single-member clusters w/ nearest neighbor
for idx, clust_id in enumerate(np.unique(clusters)):
members = np.where(clusters == clust_id)[0]
if len(members) == 1:
yo = np.argmin(y_dists[members[0]])
clusters[members[0]] = clusters[yo]
return clusters
class GaussianMixtureModel:
def __init__(self, p, z, sigma):
self.p = p
self.z = z
self.sigma = sigma
def predict(self, X):
"""
Predict the labels for the data samples in X using trained model.
Parameters:
-----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a single data point.
Returns:
--------
labels : array, shape (n_samples,)
Component labels.
"""
probs = self.predict_proba(X)
return np.argmax(probs, axis=1)
def predict_proba(self, X):
"""
Predict posterior probability of each component given the data.
Parameters:
-----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a single
data point.
Returns:
--------
resp : array, shape (n_samples, n_components)
Returns the probability each Gaussian (state) in the model given each sample.
"""
probs = np.zeros([X.shape[0], len(self.p)])
for idx, weight in enumerate(self.p):
gaussian_prob = multivariate_normal(
self.z[idx], self.sigma[idx], allow_singular=True
)
probs[:, idx] = weight * gaussian_prob.pdf(X)
return probs
def score(self, X):
pass
def build_gmm(y, P, clusters):
total_P = P.sum()
# get position of each cluster center
z_k = y[np.unique(clusters)].copy()
# get pk for cluster
p_k = np.array(
[
P[np.where(clusters == clust_id)].sum() / P.sum()
for clust_id in np.unique(clusters)
]
)
# get sigma for each cluster
sigma_k = np.zeros(shape=(len(z_k), y.shape[1], y.shape[1]))
for idx, clust_id in enumerate(np.unique(clusters)):
members = np.where(clusters == clust_id)
distances = y[members] - z_k[idx]
sigma_k[idx] = np.cov(distances.T, aweights=(P[members] / total_P))
return GaussianMixtureModel(p_k, z_k, sigma_k)
if __name__ == "__main__":
main()
| true |
4a37cd673d92c7c37a8f38f605518cde15a86384 | Python | Luckyaxah/leetcode-python | /赎金信.py | UTF-8 | 619 | 3.015625 | 3 | [] | no_license | class Solution:
def canConstruct(self, ransomNote: str, magazine: str) -> bool:
if len(ransomNote) > len(magazine):
return False
d = {}
for i in magazine:
if i in d:
d[i] += 1
else:
d[i] = 1
for i in ransomNote:
if not i in d:
return False
else:
d[i] -= 1
if d[i] < 0:
return False
return True
if __name__ == "__main__":
a = Solution()
print(a.canConstruct('aa','aab'))
print(a.canConstruct('aa','ab')) | true |
1f66fa8ba49593a5875e5e2e4fdb16ff1a157bf0 | Python | aixiu/myPythonWork | /OldBoy/冒泡排序-练习.py | UTF-8 | 281 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
data = [10, 4, 33, 21, 54, 3, 8, 11, 5, 22, 2, 1, 17, 13, 6]
for i in range(len(data)-1):
for j in range(len(data)-1-i):
if data[j] > data[j+1]:
data[j], data[j+1] = data[j+1], data[j]
print(data) | true |
69d5762d050e3c9b78f81e45aaf1aa2df3667df2 | Python | fedorzaytsev06/FirstProject | /9.168.py | UTF-8 | 165 | 3.40625 | 3 | [] | no_license | s = input().split() # [sdfsdf, sdfs, dsfs]
print(s)
r=0
for i in range(len(s)):
# if s[0] == "н":
word = s[i]
if word[0] == "n":
r+=1
print(r)
| true |
89e2d82a812d6114f631725656e8272cac8c5132 | Python | MvdB/Project-Euler-solutions | /python/p046.py | UTF-8 | 535 | 3.25 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #
# Solution to Project Euler problem 46
# by Project Nayuki
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import eulerlib, itertools
def compute():
for n in itertools.count(9, 2):
if not test_goldbach(n):
return str(n)
def test_goldbach(n):
if n % 2 == 0 or eulerlib.is_prime(n):
return True
for i in itertools.count(1):
k = n - 2 * i * i
if k <= 0:
return False
elif eulerlib.is_prime(k):
return True
if __name__ == "__main__":
print(compute())
| true |
3fcba4ea71415ddeed1e5782241c3ed33bbb9385 | Python | LusineKamikyan/Purdue-Deep-Learning-BME595A | /Homework2/test.py | UTF-8 | 836 | 3.609375 | 4 | [] | no_license | from neural_network import NeuralNetwork
import torch
from logic_gates import AND
from logic_gates import OR
from logic_gates import XOR
from logic_gates import NOT
'''
##### PART A #####
model = NeuralNetwork([2,2,5,6,3])
input_tensor= torch.tensor([[1,2,3],[3,4,5]], dtype = torch.double)
output = model.forward(input_tensor)
print(output)
'''
##### PART B #####
print("Results for AND:")
And = AND()
print(And(True, False))
'''print(And(False, True))
print(And(True, True))
print(And(False, False))
print("Results for OR:")
Or = OR()
print(Or(False,True))
print(Or(True,True))
print(Or(False,False))
print(Or(True,False))
print("Results for NOT:")
Not = NOT()
print(Not(False))
print(Not(True))
print("Results for XOR:")
Xor = XOR()
print(Xor(False,True))
print(Xor(False,False))
print(Xor(True,True))
print(Xor(True,False))
''' | true |
132a1ce2d7e65a827ba4bdaf7b50c0158522f345 | Python | KristinaUlicna/CellComp | /Movie_Analysis_Pipeline/Single_Movie_Processing/Server_Movies_Paths.py | UTF-8 | 5,549 | 3.140625 | 3 | [] | no_license | import os
import re
"""
def GetMovieFilesPaths(exp_type = "MDCK_WT_Pure"):
Get the absolute paths of all movies available for analysis.
Folders to iterate: "MDCK_WT_Pure" or "MDCK_Sc_Tet-_Pure"
Args:
exp_type (string, "MDCK_WT_Pure" by default) -> change if needed.
- options: "MDCK_Sc_Tet-_Pure", "MDCK_Sc_Tet+_Pure", "MDCK_90WT_10Sc_NoComp"
Return:
xml_file_list, txt_file_list -> Two lists of absolute paths to all available movies for analysis.
Notes:
Outputs 'cellIDdetails_raw.txt' files, so use txt_file.replace("raw", "sorted" or "filtered") to switch.
# Initialise the lists:
xml_file_list = []
txt_file_list = []
# Specify the directory:
directory = "/Volumes/lowegrp/Data/Kristina/{}/".format(exp_type)
dir_list = [item for item in os.listdir(directory) if item != ".DS_Store" and len(str(item)) == 8] # len('17_07_31') == 8
for folder_date in dir_list:
directory = "/Volumes/lowegrp/Data/Kristina/{}/{}/".format(exp_type, folder_date) # re-initialise the native string
dir_list = [item for item in os.listdir(directory) if item != ".DS_Store" and "pos" in item]
for folder_pos in dir_list:
directory = "/Volumes/lowegrp/Data/Kristina/{}/{}/{}/".format(exp_type, folder_date, folder_pos) # re-initialise the native string
if exp_type == "MDCK_WT_Pure" or (exp_type.startswith("MDCK_Sc_Tet") and exp_type.endswith("_Pure")):
directory_xml = directory + "tracks/tracks_type1.xml"
xml_file_list.append(directory_xml)
directory_txt = directory + "analysis/cellIDdetails_raw.txt"
txt_file_list.append(directory_txt)
if exp_type == "MDCK_90WT_10Sc_NoComp":
for type in [1, 2]:
directory_xml = directory + "tracks/tracks_type{}.xml".format(type)
xml_file_list.append(directory_xml)
for channel in ["GFP", "RFP"]:
directory_txt = directory + "channels/{}/analysis/cellIDdetails_raw.txt".format(channel)
txt_file_list.append(directory_txt)
return xml_file_list, txt_file_list
"""
class GetMovieFilesPaths(object):
    """Collects per-position movie directories and derived file paths
    for one experiment type on the lab server."""

    def __init__(self, exp_type="MDCK_WT_Pure"):
        """ Exp_type Options: ["MDCK_WT_Pure" or "MDCK_Sc_Tet-_Pure" or "MDCK_90WT_10Sc_NoComp"] """
        root = "/Volumes/lowegrp/Data/Kristina/{}".format(exp_type)
        self.dir_list = []
        for date in os.listdir(root):
            # Date folders look like 'YY_MM_DD'.
            if not re.search(r'[0-9][0-9]_[0-9][0-9]_[0-9][0-9]', date):
                continue
            for pos in os.listdir("{}/{}/".format(root, date)):
                if pos.startswith("pos"):
                    self.dir_list.append("{}/{}/{}/".format(root, date, pos))
        # Channel 1 = GFP, channel 2 = RFP; pure experiments use only one.
        if exp_type == "MDCK_WT_Pure":
            self.channel_list = [1]
        elif exp_type == "MDCK_Sc_Tet-_Pure":
            self.channel_list = [2]
        else:
            self.channel_list = [1, 2]

    def GetTracksDirs(self):
        """ Output example: '.../17_07_31/pos0/tracks/' """
        return ["{}tracks/".format(d) for d in self.dir_list]

    def GetChannelXmlFiles(self, nhood=False):
        """ Output example: '.../pos0/tracks/tracks_typeX.xml'
            (with a '_nhood' suffix when nhood is True). """
        suffix = "_nhood" if nhood is True else ""
        return ["{}tracks/tracks_type{}{}.xml".format(d, ch, suffix)
                for d in self.dir_list
                for ch in self.channel_list]

    def GetChannelTxtFiles(self, file_type="raw"):
        """ Output example:
            '.../pos0/analysis/channel_XFP/cellIDdetails_raw.txt'
            Possibly, specify 'filtered' file type if needed. """
        channel_name = {1: 'GFP', 2: 'RFP'}
        return ["{}analysis/channel_{}/cellIDdetails_{}.txt".format(
                    d, channel_name.get(ch), file_type)
                for d in self.dir_list
                for ch in self.channel_list]
def Get_MDCK_Movies_Paths():
    """Return the absolute paths of every MDCK movie position folder
    found under the server's Cells_MDCK directory."""
    directory = "/Volumes/lowegrp/Data/Kristina/Cells_MDCK/"
    positions_list = []
    for folder in sorted(os.listdir(directory)):
        # Experiment folders are named with an 'AB' or 'GV' prefix.
        if not folder.startswith(("AB", "GV")):
            continue
        base = directory + folder
        for pos in sorted(os.listdir(base)):
            if pos.startswith("pos"):
                positions_list.append(base + "/" + pos + "/")
    return positions_list
def Get_HeLa_Movies_Paths():
    """Return the absolute paths of every HeLa movie position folder
    found under the server's Cells_HeLa directory."""
    directory = "/Volumes/lowegrp/Data/Kristina/Cells_HeLa/"
    positions_list = []
    for folder in sorted(os.listdir(directory)):
        # Experiment folders are named with a 'KU' prefix.
        if not folder.startswith("KU"):
            continue
        base = directory + folder
        for pos in sorted(os.listdir(base)):
            if pos.startswith("Pos"):
                positions_list.append(base + "/" + pos + "/")
    return positions_list
| true |
48ad5a49aeeadada2e793a72d767cd1e75d9656f | Python | Muhos-wandia/TempCCalc | /Temperature Converter.py | UTF-8 | 449 | 3.796875 | 4 | [] | no_license | temp =input("Input the temperature to be converted:")
# Split the "<number><scale>" input, e.g. "45C" -> 45 and "C".
degree = int(temp[:-1])
i_convention = temp[-1]
if i_convention.upper() == "C":
    # Celsius -> Fahrenheit: F = 9C/5 + 32, rounded to the nearest degree.
    result = int(round((9 * degree) / 5 + 32))
    o_convention = "Fahrenheit"  # bug fix: was misspelled "Fanhereit"
elif i_convention.upper() == "F":
    # Fahrenheit -> Celsius: C = (F - 32) * 5/9, rounded to the nearest degree.
    result = int(round((degree - 32) * 5 / 9))
    o_convention = "Celsius"
else:
    # Anything other than C/F is rejected and the script exits.
    print("Input proper convention .")
    quit()
print("The temperature in", o_convention,"is", result,"degrees. ") | true |
1cf10e6d036c2d87e81f80b7a9c35a9d8d65483d | Python | gladiopeace/ADIM_thesis | /Interest Management/Activity-Driven/lstm_adapter.py | UTF-8 | 1,483 | 2.90625 | 3 | [] | no_license | from rnn.config import Config
from rnn.load_data import load_X, load_Y
from rnn.lstm import init_lstm, classify
import numpy as np
class LSTM():
def __init__(self, player_ids):
self.player_ids = player_ids
self.X_samples = load_X(player_ids)
self.Y_samples = load_Y(player_ids)
self.nn_vars = init_lstm(self.X_samples)
self.idxs = [0 for i in range(len(player_ids))]
self.correct = [0 for i in range(len(player_ids))]
self.labelmap = {0:"Low", 1:"Mid", 2:"High"}
def max_rows(self):
mx = 0
for player_samples in self.Y_samples:
if len(player_samples) > mx:
mx = player_samples.size
return mx
def classify(self, player_id):
# if we haven't exhausted this player's samples
if self.idxs[player_id] < len(self.X_samples[player_id]):
prediction = classify(self.nn_vars, self.X_samples[player_id][self.idxs[player_id]])
#print("Correct: {} Actual: {}".format(self.labelmap[prediction], self.labelmap[np.argmax(self.Y_samples[player_id][self.idxs[player_id]])]))
if self.labelmap[prediction] == self.labelmap[np.argmax(self.Y_samples[player_id][self.idxs[player_id]])]:
self.correct[player_id] += 1
self.idxs[player_id] += 1
return self.labelmap[prediction]
else:
return "Finished"
# close the tf session
def close(self):
self.nn_vars[1].close()
    def show_accuracy(self):
        """Print each player's classification accuracy over the samples
        consumed so far (fraction of correct predictions)."""
        for player_id in self.player_ids:
            print("Accuracy for Player "+str(player_id)+": " + str(self.correct[player_id]/len(self.X_samples[player_id]))) | true |
bc7d064173742916b1fbc95646a1422851e016bf | Python | qybing/LeetCode | /22. 括号生成/generate_parenthesis.py | UTF-8 | 953 | 3.328125 | 3 | [] | no_license | #! python3
# _*_ coding: utf-8 _*_
# @Time : 2020/5/29 16:51
# @Author : Jovan
# @File : generate_parenthesis.py
# @desc :
def parenthess(sublist, reslut, leftnum, rightnum):
    """Depth-first builder of balanced parenthesis strings.

    `sublist` is the prefix built so far; `leftnum`/`rightnum` count the
    '(' and ')' characters still available. Completed strings are appended
    to `reslut` in place.
    """
    if leftnum == 0 and rightnum == 0:
        reslut.append(sublist)
        return
    # A ')' may only be placed while more ')' than '(' remain unspent.
    if rightnum > leftnum:
        parenthess(sublist + ')', reslut, leftnum, rightnum - 1)
    if leftnum > 0:
        parenthess(sublist + '(', reslut, leftnum - 1, rightnum)
def generateParenthesis_1(n):
    """Print every balanced combination of n parenthesis pairs,
    one per line, using the recursive parenthess() helper."""
    reslut = []
    parenthess('', reslut, n, n)
    for combo in reslut:
        print(combo)
def generateParenthesis(n):
    """Return a list of all strings of n balanced parenthesis pairs."""
    results = []

    def build(prefix, opened, closed):
        # prefix holds `opened` '(' and `closed` ')' so far.
        if opened == n and closed == n:
            results.append(prefix)
            return
        if opened < n:
            build(prefix + '(', opened + 1, closed)
        if closed < opened:
            build(prefix + ')', opened, closed + 1)

    build('', 0, 0)
    return results
# Smoke test: builds (and discards) all balanced combinations for n = 3.
generateParenthesis(n=3)
| true |
5ce5528a08ec2fcb96f9d47a65c69555f2b5dbdf | Python | WarrenWeckesser/eyediagram | /demo/mpl_demo.py | UTF-8 | 448 | 2.734375 | 3 | [
"BSD-2-Clause"
] | permissive | # Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
from eyediagram.demo_data import demo_data
from eyediagram.mpl import eyediagram
import matplotlib.pyplot as plt
# Build demonstration data, then render its eye diagram.
num_symbols = 5000
samples_per_symbol = 24
signal = demo_data(num_symbols, samples_per_symbol)
# Window spans two symbol periods; offset centres the eye in the plot.
eyediagram(signal, 2 * samples_per_symbol, offset=16, cmap=plt.cm.coolwarm)
plt.show()
| true |
445fea7fd66ba82d218e3e410830428ab2eea35c | Python | johan--/seleniumTests | /tests/sketch/test_sketch.py | UTF-8 | 4,875 | 2.546875 | 3 | [] | no_license | import time
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
import pytest
from codebender_testing.config import TEST_PROJECT_NAME
from codebender_testing.utils import SeleniumTestCase
# How long to wait before we give up on trying to assess the result of commands
VERIFY_TIMEOUT = 10  # seconds: wait for the compile/verify result
FLASH_TIMEOUT = 2  # seconds: wait for the flash/upload error message
# Board to test for the dropdown selector.
TEST_BOARD = "Arduino Fio"
class TestSketch(SeleniumTestCase):
    """Tests various functions of the /sketch view.

    NOTE(review): several tests here are order-dependent --
    test_add_projectfile_direct creates 'test_file.txt', which
    test_delete_file and test_verify_deletion later remove and check.
    """
    @pytest.fixture(scope="class", autouse=True)
    def open_test_project(self, tester_login):
        """Makes sure we are logged in and have a project open before
        performing any of these tests."""
        self.open_project()
        # I get a StaleElementReferenceException without
        # this wait. TODO: figure out how to get around this.
        time.sleep(3)
    def test_verify_code(self):
        """Ensures that we can compile code and see the success message."""
        compile_button = self.driver.find_element_by_id("compile")
        compile_button.click()
        # test progress bar is visible
        progress_bar = self.get_element(By.ID, 'progress')
        assert progress_bar.is_displayed()
        # Block until the editor reports a successful verification.
        WebDriverWait(self.driver, VERIFY_TIMEOUT).until(
            expected_conditions.text_to_be_present_in_element(
                (By.ID, "operation_output"), "Verification Successful!")
        )
    def test_boards_dropdown(self):
        """Tests that the boards dropdown is present, and that we can change
        the board successfully."""
        boards_dropdown = Select(self.get_element(By.ID, "boards"))
        # Click something other than the first option
        boards_dropdown.select_by_visible_text(TEST_BOARD)
        assert boards_dropdown.first_selected_option.text == TEST_BOARD
    def test_ports_dropdown(self):
        """Tests that the ports dropdown exists."""
        self.get_element(By.ID, "ports")
    def test_run_with_no_port(self):
        """Makes sure that there is an error when we attempt to run with no
        port selected."""
        flash_button = self.get_element(By.ID, "uploadusb")
        flash_button.click()
        # With no port selected, flashing must surface an error message.
        WebDriverWait(self.driver, FLASH_TIMEOUT).until(
            expected_conditions.text_to_be_present_in_element(
                (By.ID, "operation_output"), "Please select a valid port or enable the plugin!!"))
    def test_speeds_dropdown(self):
        """Tests that the speeds dropdown exists."""
        self.get_element(By.ID, "baudrates")
    def test_clone_project(self):
        """Tests that clicking the 'Clone Project' link brings us to a new
        sketch with the title 'test_project clone'."""
        clone_link = self.get_element(By.LINK_TEXT, 'Clone Project')
        clone_link.click()
        project_name = self.get_element(By.ID, 'editor_heading_project_name')
        assert project_name.text.startswith("%s copy" % TEST_PROJECT_NAME)
    def test_add_projectfile_direct(self):
        """ Tests that new file can be added to project using create-new-file field """
        add_button = self.get_element(By.CLASS_NAME, 'icon-plus')
        add_button.click()
        create_field = self.get_element(By.ID, 'createfield')
        create_field.send_keys('test_file.txt')
        # NOTE(review): 'btn' is a very generic class name -- presumably it
        # matches the create button here, but verify against the page markup.
        create_button = self.get_element(By.CLASS_NAME, 'btn')
        create_button.click()
        self.driver.refresh()
        assert 'test_file.txt' in self.driver.page_source
    # NOTE(review): the upload test below is disabled -- it is wrapped in a
    # triple-quoted string literal rather than deleted.
    '''
    def test_add_projectfile_upload(self):
        """ Tests that new file can be added to project using upload dialog """
        add_button = self.get_element(By.CLASS_NAME, 'icon-plus')
        add_button.click()
        drop_zone = self.get_element(By.CLASS_NAME, 'dz-clickable')
        drop_zone.click()
        self.driver.get("http://localhost/js/dropzone/min.js")
        self.driver.execute_script("self.get_element(By.NAME,'uploadType').value = '/test.h'")
        #file_input_element = self.get_element(By.NAME, 'uploadType')'''
    def test_delete_file(self):
        """Tests file delete modal """
        # Assumes 'test_file.txt' exists (created by test_add_projectfile_direct).
        delete_file_button = self.get_element(By.CLASS_NAME, 'icon-remove')
        delete_file_button.click()
        delete_modal = self.get_element(By.ID, 'filedeleteModal')
        assert delete_modal.is_displayed()
    def test_verify_deletion(self):
        """ Verifies that file has been deleted """
        confirm_delete_button = self.get_element(By.ID, 'filedeleteButton')
        confirm_delete_button.click()
        self.driver.refresh()
        assert 'test_file.txt' not in self.driver.page_source
| true |