blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7a5db1942a33fd0923d49fa8eb9f3c38f927e76b | Python | lockbro/Python-Crash-Course | /6-2.py | UTF-8 | 291 | 4.3125 | 4 | [] | no_license | """6-2
用一个字典存5个人的名字以及他们喜欢的数字,最后打印成一句话。
"""
# Map five people's names to their favorite numbers, then print one line each.
favorite_number = {
    "Tom": 5,
    "Jay": 34,
    "Mike": 13,
    "Halen": 54,
    "Paul": 40,
}
for person, number in favorite_number.items():
    # Bug fix: the original message said "favorite color", but these are numbers.
    print(person + "'s favorite number is " + str(number))
| true |
fbadff420eaca955ac3bc8f078d2a5ed14058734 | Python | mariaserena/EasyPark-Polito | /central_server/db.py | UTF-8 | 4,748 | 2.9375 | 3 | [
"MIT"
] | permissive | '''
Created on May 18, 2015
@author: mariaserena
'''
import sqlite3
import json
# 0 libero 1 occupato
def prepare_all(cur, conn):
    """Create and seed the demo database, then close the connection.

    Builds a USERS table plus three parking-lot tables (PARKING1-3).
    AVAILABILITY convention: 0 = free, 1 = occupied.
    """
    # Users table: the username is the primary key.
    cur.execute("CREATE TABLE USERS (NAME text NOT NULL, SURNAME text NOT NULL, USERNAME text PRIMARY KEY NOT NULL, PASSWORD text NOT NULL, NPLATE text NOT NULL)")
    # One identical table per parking lot.
    for lot in ('PARKING1', 'PARKING2', 'PARKING3'):
        cur.execute('CREATE TABLE ' + lot + ' (ID int PRIMARY KEY NOT NULL,AVAILABILITY int NOT NULL,DIRECTIONS text NOT NULL)')
    # Five demo accounts.
    cur.executemany("INSERT INTO USERS VALUES(?,?,?,?,?)", [
        ('barbara', 'munoz', 'barbara_m', 'hola', 'XB5827'),
        ('serena', 'ciaburri', 'm_serena', 'cerreto', '2DRT32'),
        ('lorenzo', 'chianura', 'lorenzo_c', 'metal', '22TY33'),
        ('cristina', 'donato', 'cristina_d', 'stracciatella', '34GT62'),
        ('raffaele', 'gemiti', 'raffa_gem', 'juventus', '75TZ54'),
    ])
    conn.commit()
    # Parking 1: four free spaces.
    cur.executemany("INSERT INTO PARKING1 VALUES(?,?,?)", [
        (1, 0, 'immediately on the right'),
        (2, 0, 'immediately on the left'),
        (3, 0, 'ahead on the left'),
        (4, 0, 'ahead on the right'),
    ])
    conn.commit()
    # Parking 2: two free, two occupied.
    cur.executemany("INSERT INTO PARKING2 VALUES(?,?,?)", [
        (1, 0, 'immediately on the right'),
        (2, 0, 'immediately on the left'),
        (3, 1, 'ahead on the left'),
        (4, 1, 'ahead on the right'),
    ])
    conn.commit()
    # Parking 3: one occupied, one free.
    cur.executemany("INSERT INTO PARKING3 VALUES(?,?,?)", [
        (1, 1, 'on the left'),
        (2, 0, 'on the right'),
    ])
    conn.commit()
    conn.close()
###################################DATABASE FUNCTIONS################################################################
#insert a new user in the db
def add_new_user(cur, conn, name, surname, username, password, nplate):
    """Insert one row into USERS (values are bound, not interpolated) and
    commit; returns the JSON string '"Inserted"' on success."""
    record = (name, surname, username, password, nplate)
    cur.execute("INSERT INTO USERS VALUES(?,?,?,?,?)", record)
    conn.commit()
    return json.dumps('Inserted')
#show all the users
def all_users(cur, conn):
    """Return every row of USERS serialised as a JSON array of arrays."""
    rows = cur.execute("SELECT * FROM USERS").fetchall()
    return json.dumps(rows)
#check a user exists
def check_user(cur, conn, username, password):
    """Return JSON '"Found"' when a USERS row matches both credentials,
    otherwise '"NotFound"'."""
    cur.execute('SELECT * FROM USERS WHERE USERNAME=? AND PASSWORD=?', [username, password])
    matched = cur.fetchone() is not None
    return json.dumps('Found' if matched else 'NotFound')
#show all the spaces in a parking lot
def all_spaces(cur, conn, park_id):
    """Return all rows of table PARKING<park_id> as a JSON array.

    ``park_id`` is spliced into the table name (a table name cannot be a
    bound parameter), so it must come from a trusted source.
    """
    string_query = 'SELECT * FROM PARKING' + park_id
    # Bug fix: the original Python-2 "print string_query" statement is a
    # SyntaxError on Python 3.
    print(string_query)
    cur.execute(string_query)
    rows = cur.fetchall()
    conn.commit()  # no-op after a SELECT; kept for parity with the other helpers
    return json.dumps(rows)
#find the user corresponding to a number plate
def user_from_nplate(cur, conn, nplate):
    """Look up the owner of a number plate.

    Returns a JSON array with the matching NAME, or the JSON string
    '"None"' when no row matches.
    """
    row = cur.execute('SELECT NAME FROM USERS WHERE NPLATE=?', [nplate]).fetchone()
    if row is None:
        return json.dumps('None')
    return json.dumps(row)
#find the closest available spot in a parking (id) and return directions
def directions(cur, conn, park_id):
    """Return JSON directions to the first free spot in PARKING<park_id>.

    Returns JSON ``null`` when the lot has no free space.  ``park_id`` is
    spliced into the table name and must be trusted.
    """
    string_query = 'SELECT DIRECTIONS FROM PARKING' + park_id + ' WHERE AVAILABILITY=0 LIMIT 1 '
    cur.execute(string_query)
    data = cur.fetchone()
    # Bug fix: the original Python-2 "print x" statements are SyntaxErrors
    # on Python 3.
    print('data')
    print(data)
    return json.dumps(data)
#count the free spaces in a specific parking lot (id)
def count_free_spaces(cur, conn, id):
    """Return a 1-tuple with the number of free (AVAILABILITY=0) spaces in
    parking lot ``id`` (table name PARKING<id>)."""
    table = ('parking' + id).upper()
    cur.execute('SELECT COUNT(*) FROM ' + table + ' WHERE AVAILABILITY=0')
    return cur.fetchone()
#update status of a parking -given a parking and an id
def update_status(cur, conn, park_id, space_id, space_status):
    """Set AVAILABILITY of one space in PARKING<park_id> (0 = free, 1 = occupied).

    Bug fix: the original statement read ``UPDATE FROM <table> SET ...``,
    which is invalid SQL and raised sqlite3.OperationalError on every call.
    The table name still has to be interpolated (it cannot be bound), but
    the values are now passed as bound parameters.
    """
    table = ('parking' + park_id).upper()
    cur.execute('UPDATE ' + table + ' SET AVAILABILITY=? WHERE ID=?', (space_status, space_id))
    conn.commit()
#return the status of all the spaces in a parking lot -debug
def all_park_status(cur, conn, park_id):
    """Debug helper: return every (ID, AVAILABILITY, DIRECTIONS) row of
    parking lot ``park_id`` as a list of tuples."""
    table = ('parking' + park_id).upper()
    cur.execute('SELECT * FROM ' + table)
    return cur.fetchall()
| true |
a95f381c30ca70a0ae54dcee1520f7778eb71433 | Python | elliterate/capybara.py | /capybara/tests/session/test_unselect.py | UTF-8 | 3,680 | 2.515625 | 3 | [
"MIT"
] | permissive | import pytest
from capybara.exceptions import ElementNotFound, UnselectNotAllowed
from capybara.tests.helpers import extract_results
class TestUnselect:
    """Behaviour of Session.unselect on the /form fixture page."""

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        session.visit("/form")

    def _submitted_underwear(self, session):
        # Submit the form and return the list of posted underwear values.
        session.click_button("awesome")
        return extract_results(session).getlist("form[underwear][]")

    def test_raises_an_error_with_single_select(self, session):
        # A single-choice select has no notion of "unselecting" an option.
        with pytest.raises(UnselectNotAllowed):
            session.unselect("English", field="form_locale")

    def test_raises_an_error_with_a_locator_that_does_not_exist(self, session):
        with pytest.raises(ElementNotFound):
            session.unselect("foo", field="does not exist")

    def test_raises_an_error_with_an_option_that_does_not_exist(self, session):
        with pytest.raises(ElementNotFound):
            session.unselect("Does not Exist", field="form_underwear")

    def test_approximately_matches_select_box(self, session):
        session.unselect("Boxerbriefs", field="Under")
        assert "Boxerbriefs" not in self._submitted_underwear(session)

    def test_approximately_matches_option(self, session):
        session.unselect("Boxerbr", field="Underwear")
        assert "Boxerbriefs" not in self._submitted_underwear(session)

    def test_approximately_matches_when_field_not_given(self, session):
        session.unselect("Boxerbr")
        assert "Boxerbriefs" not in self._submitted_underwear(session)

    def test_does_not_approximately_match_select_box(self, session):
        # With exact=True a partial field label must not match.
        with pytest.raises(ElementNotFound):
            session.unselect("Boxerbriefs", field="Under", exact=True)

    def test_does_not_approximately_match_option(self, session):
        with pytest.raises(ElementNotFound):
            session.unselect("Boxerbr", field="Underwear", exact=True)

    def test_does_not_approximately_match_when_field_not_given(self, session):
        with pytest.raises(ElementNotFound):
            session.unselect("Boxerbr", exact=True)
class TestMultipleUnselect:
    """Behaviour of Session.unselect on a multi-select field."""

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        session.visit("/form")

    def _submitted_underwear(self, session):
        # Submit the form and return the list of posted underwear values.
        session.click_button("awesome")
        return extract_results(session).getlist("form[underwear][]")

    def _assert_commando_removed(self, underwear):
        # The other two defaults stay selected; "Commando" is gone.
        assert "Briefs" in underwear
        assert "Boxerbriefs" in underwear
        assert "Command" not in underwear

    def test_unselects_an_option_from_a_select_box_by_id(self, session):
        session.unselect("Commando", field="form_underwear")
        self._assert_commando_removed(self._submitted_underwear(session))

    def test_unselects_an_option_without_a_select_box(self, session):
        session.unselect("Commando")
        self._assert_commando_removed(self._submitted_underwear(session))

    def test_unselects_an_option_from_a_select_box_by_label(self, session):
        session.unselect("Commando", field="Underwear")
        self._assert_commando_removed(self._submitted_underwear(session))

    def test_escapes_quotes(self, session):
        session.unselect("Frenchman's Pantalons", field="Underwear")
        assert "Frenchman's Pantalons" not in self._submitted_underwear(session)
| true |
242ddf80719932b97404b0e481ba8df70705378b | Python | QuantumMisaka/GLUE | /scglue/graph.py | UTF-8 | 2,637 | 2.921875 | 3 | [
"MIT"
] | permissive | r"""
Graph-related functions
"""
from itertools import chain
from typing import Any, Callable, Iterable, Mapping, Optional, Set
import networkx as nx
from .utils import smart_tqdm
def compose_multigraph(*graphs: nx.Graph) -> nx.MultiGraph:
    r"""
    Merge several graphs into one multi-graph, keeping parallel edges

    Parameters
    ----------
    graphs
        Graphs to merge (any number)

    Returns
    -------
    composed
        Multi-graph containing every edge (with attributes) of every input

    Note
    ----
    If any input graph is directed, all inputs are converted to directed
    form and the result is a directed multi-graph.
    """
    if any(nx.is_directed(g) for g in graphs):
        graphs = [g.to_directed() for g in graphs]
        composed = nx.MultiDiGraph()
    else:
        composed = nx.MultiGraph()
    for g in graphs:
        # Copy each edge together with its attribute dict; parallel edges
        # from different inputs are preserved as distinct multi-edges.
        composed.add_edges_from((u, v, g.edges[u, v]) for u, v in g.edges)
    return composed
def collapse_multigraph(
    graph: nx.MultiGraph, merge_fns: Optional[Mapping[str, Callable]] = None
) -> nx.Graph:
    r"""
    Collapse multi-edges into simple-edges

    Parameters
    ----------
    graph
        Input multi-graph
    merge_fns
        Attribute-specific merge functions, indexed by attribute name.
        Each merge function should accept a list of values and return
        a single value.

    Returns
    -------
    collapsed
        Collapsed graph

    Note
    ----
    The collapsed graph would be directed if the input graph is directed.
    Edges causing ValueError in ``merge_fns`` will be discarded.
    """
    if nx.is_directed(graph):  # MultiDiGraph
        collapsed = nx.DiGraph(graph)
    else:  # MultiGraph
        collapsed = nx.Graph(graph)
    if not merge_fns:
        return collapsed
    for e in smart_tqdm(list(collapsed.edges)):
        attrs = graph.get_edge_data(*e).values()
        for k, fn in merge_fns.items():
            try:
                merged = fn([attr[k] for attr in attrs])
            except ValueError:
                collapsed.remove_edge(*e)
                # Bug fix: the original kept iterating the remaining merge
                # functions after removing the edge, so the next attribute
                # assignment raised an uncaught KeyError whenever merge_fns
                # had more than one entry.
                break
            collapsed.edges[e][k] = merged
    return collapsed
def reachable_vertices(graph: nx.Graph, source: Iterable[Any]) -> Set[Any]:
    r"""
    Identify vertices reachable from source vertices
    (the source vertices themselves are always included)

    Parameters
    ----------
    graph
        Input graph
    source
        Source vertices (vertices absent from the graph contribute no
        descendants but are still returned)

    Returns
    -------
    reachable_vertices
        Reachable vertices
    """
    sources = set(source)
    reachable = set(sources)
    for vertex in sources:
        if graph.has_node(vertex):
            reachable.update(nx.descendants(graph, vertex))
    return reachable
| true |
b468ef4016e2c7d11f4c8324412d1295cbfebd3a | Python | EkantBajaj/leetcode-Questions | /int-2-roman.py | UTF-8 | 829 | 4.0625 | 4 | [] | no_license | #Given an integer, convert it to a roman numeral.
#Input is guaranteed to be within the range from 1 to 3999.
class Solution(object):
    """LeetCode 12: convert an integer in [1, 3999] to a Roman numeral."""

    # Value/symbol pairs in descending order, including the subtractive
    # forms (CM, CD, XC, XL, IX, IV) so one greedy pass suffices.  This
    # replaces the original's thirteen hand-written while/if chains with
    # identical output for every input in range.
    _ROMAN = [
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    ]

    def intToRoman(self, int):
        """Return the Roman-numeral string for ``int`` (guaranteed 1..3999).

        The parameter keeps its original (builtin-shadowing) name for call
        compatibility; it is aliased internally.
        """
        remaining = int
        parts = []
        # Greedy: repeatedly take the largest value that still fits.
        for value, symbol in self._ROMAN:
            count, remaining = divmod(remaining, value)
            parts.append(symbol * count)
        return ''.join(parts)
| true |
a5676a3fc685cdbdb954dbb2d6c6fe82645c97c0 | Python | siddharthcurious/Pythonic3-Feel | /LeetCode/202.py | UTF-8 | 583 | 3.25 | 3 | [] | no_license | class Solution(object):
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
if n == 1:
return True
num_set = set()
while n > 1:
num = 0
for d in str(n):
num += (int(d) * int(d))
if num in num_set:
return False
else:
num_set.add(num)
n = num
if n == 1:
return True
return False
if __name__ == "__main__":
s = Solution()
n = 20
r = s.isHappy(n)
print(r) | true |
ac002e885abb8b21b395a7c57040a0961599ca06 | Python | 0Blanck0/ShooterGame | /Shooter/ShooterGame/player.py | UTF-8 | 4,091 | 3.453125 | 3 | [] | no_license | import pygame
import constant
# Player: the user-controlled sprite (health, movement, shooting, score).
class Player(pygame.sprite.Sprite):
    """Player sprite.

    Holds combat stats (health/attack), movement speed, score and the
    group of bullets currently in flight.  Position and collision are
    handled through pygame's sprite machinery via ``self.rect``.
    """

    def __init__(self, game):
        super().__init__()
        # Back-reference to the owning game (collision checks, game over).
        self.game = game
        # Basic and max health point
        self.health = 100
        self.max_health = 100
        # Basic attack point (damage dealt per bullet hit)
        self.attack = 10
        # Basic player speed (pixels moved per move_left/move_right call)
        self.speed = 8
        # Default player score
        self.score = 0
        # Bullets currently in flight, managed as a sprite group
        self.all_bullet = pygame.sprite.Group()
        # Image for player sprite
        self.image = constant.player_image_sprite
        self.rect = self.image.get_rect()
        # Set default position on screen: horizontally centred, near the bottom
        self.rect.x = (constant.screen_width / 2) - self.rect.width / 2
        self.rect.y = constant.screen_height - (265 - (self.rect.width / 4))
        self.default_rect = self.rect
        # Definition of life bar colors (rgb): green foreground on black
        self.bar_color = (111, 210, 46)
        self.default_bar_color = (111, 210, 46)
        self.background_bar_color = (0, 0, 0)

    def update_health_bar(self, surface):
        """Draw the health bar just above the sprite onto ``surface``."""
        # Bar width equals the raw HP value (foreground) / max HP (background)
        bar_position = [self.rect.x + 45, self.rect.y - 3, self.health, 5]
        background_bar_position = [self.rect.x + 45, self.rect.y - 3, self.max_health, 5]
        # Draw life bar: black background first, coloured foreground on top
        pygame.draw.rect(surface, self.background_bar_color, background_bar_position)
        pygame.draw.rect(surface, self.bar_color, bar_position)

    def damage(self, amount):
        """Apply ``amount`` damage; trigger game over once health drops below 1.

        NOTE(review): health is only reduced while still positive, so it can
        end up negative after the final hit, and the bar only turns black on
        a *subsequent* call — confirm this staging is intentional.
        """
        if self.health > 0:
            self.health -= amount
        else:
            self.bar_color = (0, 0, 0)
        if self.health < 1:
            self.game.game_over()

    def fire(self, direction):
        """Spawn a bullet travelling in ``direction`` ("Left" or "Right")."""
        # Create new bullet in the game
        bullet = Bullet(self, direction)
        if direction == "Left":
            # Flip the sprite so the bullet faces its travel direction
            bullet.rotate()
        # Add new bullet in bullet group
        self.all_bullet.add(bullet)

    def nothing_move(self):
        """No-op movement; keeps the movement API uniform for the caller."""
        # Don't move player
        self.rect.x = self.rect.x

    def move_right(self):
        """Move right by ``speed`` pixels unless blocked by a mummy."""
        # Check mummy collision
        if not self.game.check_collision(self, self.game.all_mummy):
            # Move player in right
            self.rect.x += self.speed

    def move_left(self):
        """Move left by ``speed`` pixels (no collision check on this side)."""
        # Move player in left
        self.rect.x -= self.speed
class Bullet(pygame.sprite.Sprite):
    """A projectile fired by the player.

    Moves horizontally each frame until it leaves the screen or collides
    with a mummy, in which case it damages the monster and despawns.
    """

    def __init__(self, player, direction):
        super().__init__()
        # Owning player (also gives access to the game for collisions)
        self.player = player
        # Speed of bullet: positive moves right, negative moves left.
        # (Callers only ever pass "Right" or "Left" — see Player.fire.)
        if direction == "Right":
            self.speed = 10
        elif direction == "Left":
            self.speed = -10
        # Definition default angle of image (updated by rotate())
        self.angle = 0
        # Bullet image for sprite
        self.image = constant.bullet_image_sprite
        # Scale the bullet down to a tenth of the source image size
        self.rect = self.image.get_rect()
        self.width = int(self.rect.width / 10)
        self.height = int(self.rect.height / 10)
        self.image = pygame.transform.scale(self.image, (self.width, self.height))
        # Spawn position: offset from the player's current position
        self.rect.x = player.rect.x + (player.rect.width / 3)
        self.rect.y = player.rect.y + (player.rect.width / 3)
        # Save image of sprite so rotations start from the unrotated original
        self.origin_image = self.image
        # Knockback distance applied to a monster on hit ("knowback" sic)
        self.knowback = 20

    def remove(self):
        """Remove this bullet from the owner's in-flight bullet group."""
        # Remove actual select bullet
        self.player.all_bullet.remove(self)

    def rotate(self):
        """Rotate the sprite for leftward travel (angle from constants)."""
        # Rotate object function
        self.angle = constant.rotate_image_angle_bullet
        self.image = pygame.transform.rotate(self.origin_image, self.angle)

    def move(self):
        """Advance the bullet; despawn off-screen and damage any mummy hit."""
        # Move bullet
        self.rect.x += self.speed
        # It's outside the screen: destroy it
        if self.rect.x > constant.screen_width or self.rect.x < -50:
            self.remove()
        # If bullet enters in collision with a monster, remove self and
        # apply the player's attack plus knockback to the monster
        for monster in self.player.game.check_collision(self, self.player.game.all_mummy):
            self.remove()
            monster.damage(self.player.attack, self.player, self.knowback)
| true |
06721d4aa983f280ba59ff524e72e72d716200e6 | Python | nhespe/leetcode_practice | /boomerang.py | UTF-8 | 1,085 | 3.90625 | 4 | [] | no_license | """ 1037. Valid Boomerang
A boomerang is a set of 3 points that are all distinct and not in a straight line.
Given a list of three points in the plane, return whether these points are a boomerang.
Example 1:
Input: [[1,1],[2,3],[3,2]]
Output: true
Example 2:
Input: [[1,1],[2,2],[3,3]]
Output: false
Note:
points.length == 3
points[i].length == 2
0 <= points[i][j] <= 100
https://leetcode.com/problems/valid-boomerang/
"""
from typing import List
import math
class Solution:
    def isBoomerang(self, points: List[List[int]]) -> bool:
        """True iff the three points are pairwise distinct and non-collinear.

        Uses the cross product of the vectors P0->P1 and P0->P2, which is
        zero exactly when the points are collinear (and duplicates are a
        special case of collinearity).  Equivalent to the shoelace-area
        test, without the division by 2.
        """
        (x0, y0), (x1, y1), (x2, y2) = points
        cross = (x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0)
        return cross != 0
# Five hand-picked cases mixing valid boomerangs with degenerate inputs.
test1 = [[0, 1], [0, 2], [1, 2]]  # True
test2 = [[1, 1], [2, 1], [2, 2]]  # True
test3 = [[1, 1], [0, 1], [1, 1]]  # False: point repeated
test4 = [[0, 0], [1, 0], [2, 2]]  # True
test5 = [[0, 1], [1, 1], [2, 1]]  # False: collinear
x = Solution()
for case in (test1, test2, test3, test4, test5):
    print(x.isBoomerang(case))
349cd3b77f9d00f627880d395ccc5fb67725fbcf | Python | mp5maker/library | /python/tutorial/20. download_image.py | UTF-8 | 289 | 3.125 | 3 | [
"MIT"
] | permissive | import urllib.request
def dl_jpg(url, file_path, file_name):
    """Download ``url`` to ``file_path + file_name + '.jpg'`` and return that path.

    ``file_path`` must already include a trailing separator (e.g. ``'images/'``):
    the three pieces are concatenated verbatim, not joined.  Returning the
    destination path is a backward-compatible addition so callers can locate
    the saved file.
    """
    full_path = file_path + file_name + '.jpg'
    urllib.request.urlretrieve(url, full_path)
    return full_path
# Interactive entry point: prompt for a URL and a base file name, then save
# the download as images/<name>.jpg under the current working directory.
url = input('Enter img URL to download: ')
file_name = input('Enter the file name to save as: ')
dl_jpg(url, 'images/', file_name)
| true |
f0a241b153fb70d6e103f9533c5170c2ef6474ec | Python | gauravk268/Competitive_Coding | /Python Competitive Program/remove duplicate charcter in string.py | UTF-8 | 281 | 3.890625 | 4 | [] | no_license | def duplicate(string):
duplicatestring = ""
for i in string:
if i not in duplicatestring:
duplicatestring += i
return duplicatestring
string = input("Enter the string : ")
print("After removing the duplicates , the string is : ", duplicate(string))
| true |
a2813f03eb400dd8d37738620b806eb35ec5e31b | Python | daydreamer2023/vampyre | /test/test_trans/test_wavelet.py | UTF-8 | 2,045 | 3 | 3 | [
"MIT"
] | permissive | """
test_wavelet.py: Test suite for the wavelet module
"""
from __future__ import print_function, division
import unittest
import numpy as np
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
def wavelet2d_test(nrow=256,ncol=256,verbose=False,tol=1e-8):
    """
    Unit test for the Wavelet2DLT class.

    Creates random Gaussian data and verifies two properties of the
    transform: reconstruction (applying ``dot`` then ``dotH`` returns the
    input) and the adjoint/inner-product identity <Wz0,u1> == <z0,W^H u1>.

    :param nrow: number of rows in the image
    :param ncol: number of columns in the image
    :param tol: error tolerance above which the test fails
    :param Boolean verbose: flag indicating if the results are to be printed
    """
    # Create a wavelet transform class
    wv = vp.trans.Wavelet2DLT(nrow=nrow,ncol=ncol)

    # Create a random input and output
    z0 = np.random.normal(0,1,(nrow,ncol))
    u1 = np.random.normal(0,1,(nrow,ncol))

    # Validate reconstruction: forward transform, then adjoint, then
    # compare against the original input
    z1 = wv.dot(z0)
    z2 = wv.dotH(z1)
    recon_err = np.sum((z2-z0)**2)
    fail = (recon_err > tol)
    if verbose or fail:
        print("Reconstruction error {0:12.4e}".format(recon_err))
    if fail:
        raise vp.common.TestException("Reconstruction error exceeded tolerance")

    # Inner product (adjoint) test: the two inner products must agree
    u0 = wv.dotH(u1)
    ip0 = np.sum(z0*u0)
    ip1 = np.sum(z1*u1)
    ip_err = np.abs(ip0-ip1)
    fail = (ip_err > tol)
    if verbose or fail:
        print("Inner product error {0:12.4e}".format(ip_err))
    if fail:
        raise vp.common.TestException("Inner products do not match within tolerance")
class TestCases(unittest.TestCase):
    """unittest wrapper so the wavelet check runs under a test runner."""

    def test_wavelet2d(self):
        """Run the conv2d test.

        Note that on Windows 10 with tensorflow 1.0, there is a long warning
        that can be ignored. This will be fixed in the next TF release.
        """
        wavelet2d_test(verbose=False)
if __name__ == '__main__':
    # Allow the suite to be run directly: python test_wavelet.py
    unittest.main()
| true |
aabd1816957fe4fd05d0a24290328bc4be94ff79 | Python | RusonWong/wimg | /scripts/monitor.py | UTF-8 | 324 | 2.765625 | 3 | [] | no_license | import time
import threading
def getLocalFile(fileName):
    """Return the entire contents of the text file ``fileName``.

    Uses a context manager so the file handle is closed even if the read
    raises (the original leaked the handle on error).
    """
    with open(fileName, "r") as f:
        return f.read()
last_proccess = ""
while True:
new_proccess = getLocalFile("proccess.txt")
if new_proccess != last_proccess:
print new_proccess
last_proccess = new_proccess
time.sleep(1)
| true |
ceefd6cc5551cfce0ba12bb3f3ab2dfae991d17f | Python | aaron0215/Projects | /Python/HW4/primes.py | UTF-8 | 970 | 4.09375 | 4 | [] | no_license | #Aaron Zhang
#CS021 Green group
#This is a program to check whether input number is prime
#Lead user to input a number is more than 1
#Use modulus symbol to defind whether a number is prime
#When finding the number is not prime, the calculation will be terminated
#If there is no number can be divided, output prime result
# Interactive trial-division primality checker.
print('Welcome to my prime number detector.')
print('Provide an integer and I will determine if it is prime.')
print()
keep_going='y'
while keep_going=='y'or keep_going=='Y':
    d=2          # current trial divisor
    p = True     # "still possibly prime" flag; also gates the loops below
    num=int(input('Enter an integer > 1: '))
    # Re-prompt until the input is at least 2
    while num<d and p:
        num=int(input('Input must be > 1, try again: '))
    # Exactly 2 is prime by definition
    if num==d:
        print(num,'is prime')
        p=False
    # Try every divisor from 2 up to num-1; stop once one divides num
    while num>d and p:
        if num%d == 0:
            print(num,'is not prime')
            p=False
        d+=1
    # No divisor found: the number is prime
    if p:
        print(num,'is prime')
        p=False
    keep_going=input('Do you want to try another number? (Y/N) : ')
| true |
c169232153343e2a7009cb8f8c3b5ec5575cfb0b | Python | waltercoan/ALPCBES2016 | /calcsalprof.py | ISO-8859-1 | 806 | 3.96875 | 4 | [] | no_license | __author__ = 'Walter'
'''Construir um programa que efetue o clculo do
salrio lquido de um professor. Para fazer este
programa, voc dever possuir alguns dados, tais
como: valor da hora aula, nmero de horas trabalhadas
no ms e percentual de desconto do INSS. Em primeiro
lugar, deve-se estabelecer qual ser o seu salrio
bruto para efetuar o desconto e ter o valor do salrio
lquido.'''
# English summary of the (accent-mangled) Portuguese brief above: compute a
# teacher's net salary from the hourly rate, the hours worked in the month
# and the INSS (social security) deduction percentage.  The prompts below
# are runtime output and are deliberately left in Portuguese.
print("Digite o valor da hora aula")  # hourly rate
valhora = float(input())
print("Digite o numero de horas trabalhadas")  # hours worked this month
numhoras = float(input())
print("Digite o percentual de desconto do INSS")  # INSS percentage, e.g. 11 for 11%
percdesc = float(input())
# Gross salary = rate x hours
salbruto = valhora * numhoras
print("O valor do salario bruto e: ", salbruto)
# Net salary = gross minus the INSS percentage of gross
valdesc = (salbruto * percdesc) / 100
salliq = salbruto - valdesc
print("O valor do salario liquido e: ", salliq)
| true |
80627b09d8868a48015eb322e898d0fa5da5ddf1 | Python | wangzb-001/nnga_jxf | /GAs/strategy/Select.py | UTF-8 | 276 | 2.515625 | 3 | [] | no_license | import numpy as np
def Sl_pop_by_roulette(self, pop):
probility = self.fitness / self.fitness.sum()
idx = np.random.choice(np.arange(self.test_func.pop_size), size=self.test_func.pop_size, replace=True,
p=probility)
return pop[:, idx]
| true |
6895c58620bf5b95b21ea208cefafa79a1b77d6f | Python | Flashweb14/PyGameRPG | /RPG/scripts/gui/inventory/inventory.py | UTF-8 | 5,604 | 2.984375 | 3 | [
"MIT"
] | permissive | import pygame
from scripts.consts import INVENTORY_IMAGE
from scripts.game_objects.game_object import GameObject
from scripts.gui.inventory.cell import Cell
from scripts.gui.button import Button
from scripts.game_objects.armor import Armor
from scripts.game_objects.weapon import Weapon
from scripts.gui.error import Error
class Inventory(GameObject):
    """The player's inventory panel.

    Consists of a 3x4 grid of generic storage cells, four equipment slots
    (sword, bow, armor, ring) and two buttons (drop / use).  At most one
    cell or slot is selected at a time (``self.selected_cell``).
    """

    def __init__(self, game):
        super().__init__(game, INVENTORY_IMAGE, 0, 0, game.gui_group)
        # Fixed panel position on screen
        self.rect.x = 1350
        self.rect.y = 200
        # Build the storage grid: 3 rows (y = 45..235) x 4 columns
        # (x = 45..330), 95 px apart -> 12 cells
        self.cells = []
        for i in range(45, 281, 95):
            for j in range(45, 376, 95):
                self.cells.append(Cell(self.game, self, 'cell', j, i))
        # Equipment slots along the bottom row of the panel
        self.sword_slot = Cell(self.game, self, 'sword', 45, 645)
        self.bow_slot = Cell(self.game, self, 'bow', 140, 645)
        self.armor_slot = Cell(self.game, self, 'armor', 235, 645)
        self.ring_slot = Cell(self.game, self, 'ring', 330, 645)
        self.slots = [self.sword_slot, self.bow_slot, self.armor_slot, self.ring_slot]
        # Action buttons, positioned relative to the panel
        self.drop_btn = Button(game, self.rect.x + 310, self.rect.y + 465, 'drop', game.gui_group)
        self.use_btn = Button(game, self.rect.x + 310, self.rect.y + 395, 'use', game.gui_group)
        # Currently highlighted cell/slot (None when nothing is selected)
        self.selected_cell = None
        self.game.inventory_cell_group.update()

    def update(self):
        """Redraw all cells and slots onto the inventory panel image."""
        self.game.inventory_cell_group.draw(self.image)

    def handle_event(self, event):
        """Handle left mouse-button releases on the panel.

        Clicking an occupied cell/slot makes it the sole selection;
        clicking the drop/use buttons acts on the current selection.
        """
        if event.type == pygame.MOUSEBUTTONUP:
            if event.button == pygame.BUTTON_LEFT:
                # Select a grid cell: clear every other selection first
                for cell in self.cells:
                    if cell.rect.collidepoint(event.pos):
                        if cell.item:
                            cell.on_click()
                            for i in range(len(self.cells)):
                                if self.cells[i].selected:
                                    self.cells[i].selected = False
                            for i in range(len(self.slots)):
                                if self.slots[i].selected:
                                    self.slots[i].selected = False
                            cell.selected = True
                            self.selected_cell = cell
                # Same selection logic for the equipment slots
                for slot in self.slots:
                    if slot.rect.collidepoint(event.pos):
                        if slot.item:
                            slot.on_click()
                            for i in range(len(self.cells)):
                                if self.cells[i].selected:
                                    self.cells[i].selected = False
                            for i in range(len(self.slots)):
                                if self.slots[i].selected:
                                    self.slots[i].selected = False
                            slot.selected = True
                            self.selected_cell = slot
                self.game.inventory_cell_group.update()
                # Drop button: drop_item() is a no-op when nothing is selected
                if self.drop_btn.rect.collidepoint(event.pos):
                    self.drop_btn.on_click()
                    self.drop_item()
                # Use button: consume the selected item.
                # NOTE(review): only grid cells are cleared below; a selected
                # equipment slot would have its item used but never emptied —
                # confirm whether slots are ever "usable".
                if self.use_btn.rect.collidepoint(event.pos):
                    self.use_btn.on_click()
                    if self.selected_cell:
                        self.selected_cell.item.use()
                        for i in range(len(self.cells)):
                            if self.cells[i] == self.selected_cell:
                                self.cells[i].item = None
                                self.cells[i].selected = False
                        self.selected_cell = None
                        self.game.inventory_cell_group.update()

    def add_item(self, obj):
        """Place ``obj`` in the first empty grid cell.

        Returns True on success; when the grid is full, shows an
        'overweight' error and returns False.
        """
        has_empty = False
        for cell in self.cells:
            if not cell.item:
                cell.item = obj
                has_empty = True
                break
        self.game.inventory_cell_group.update()
        if not has_empty:
            # Error presumably registers itself with the game on
            # construction (the local is otherwise unused) — verify.
            error = Error(self.game, 'overweight')
            self.game.has_error = True
            return False
        return True

    def drop_item(self):
        """Drop the selected item onto the ground next to the player.

        For equipment slots the item's stat bonus/effect is reversed
        before the item is placed back into the world.
        """
        # Case 1: the selection is a grid cell — just eject the item
        for i in range(len(self.cells)):
            if self.cells[i] == self.selected_cell:
                item = self.cells[i].item
                self.cells[i].item = None
                self.cells[i].selected = False
                self.selected_cell = None
                self.game.all_sprites.add(item)
                self.game.pickable_objects.add(item)
                # Spawn the dropped item just to the left of the player
                item.x = self.game.player.x - 75
                item.y = self.game.player.y
                item.rect.x = self.game.player.rect.x - 75
                item.rect.y = self.game.player.rect.y
        # Case 2: the selection is an equipment slot — unequip first
        for i in range(len(self.slots)):
            if self.slots[i] == self.selected_cell:
                item = self.slots[i].item
                if isinstance(item, Armor):
                    self.game.player.armor -= item.armor
                elif isinstance(item, Weapon):
                    # Swords boost melee damage, anything else the bow
                    if item.type == 'iron_sword':
                        self.game.player.damage -= item.damage
                    else:
                        self.game.player.bow_damage -= item.damage
                else:
                    # Non-armor, non-weapon items (e.g. rings) carry effects
                    item.remove_effect()
                self.slots[i].item = None
                self.slots[i].selected = False
                self.selected_cell = None
                self.game.all_sprites.add(item)
                self.game.pickable_objects.add(item)
                item.x = self.game.player.x - 75
                item.y = self.game.player.y
                item.rect.x = self.game.player.rect.x - 75
                item.rect.y = self.game.player.rect.y
        self.game.inventory_cell_group.update()
| true |
25189dac669a3b7e7f5d3a21a02dfef3acaa4fc3 | Python | ItsSwixel/Week5PasswordAnalyser | /app/policy_checker.py | UTF-8 | 1,564 | 3.984375 | 4 | [] | no_license | """
Policy guidelines:
- Password must be over 8 characters long
- Password must contain at least 1 uppercase letter
- Password must contain at least 1 number
- Password must contain at least 1 special character
- Password must not have 3 consecutive duplicate values
"""
import string
"""
Takes a string as a parameter
Returns a list containing all of the issues found, each as an entry
"""
def policy_check(password):
    """Check ``password`` against the policy and return a list of issue strings.

    An empty list means the password satisfies every rule: length of at
    least 8 characters, at least one ASCII uppercase letter, one digit,
    one punctuation character, and no run of three identical consecutive
    characters.
    """
    issues = []
    if len(password) < 8:
        issues.append("Password is not long enough")

    has_upper = has_digit = has_special = False
    run_length = 0  # consecutive duplicates seen so far (0 = fresh char)
    previous = None
    for ch in password:
        has_upper = has_upper or ch in string.ascii_uppercase
        has_digit = has_digit or ch in string.digits
        has_special = has_special or ch in string.punctuation
        if previous is not None and ch == previous:
            run_length += 1
            # Two repeats after the first occurrence = three in a row
            if run_length >= 2 and "Password contains 3 or more consecutive duplicate values" not in issues:
                issues.append("Password contains 3 or more consecutive duplicate values")
        else:
            run_length = 0
        previous = ch

    if not has_upper:
        issues.append("Password does not contain an uppercase letter")
    if not has_digit:
        issues.append("Password does not contain a number")
    if not has_special:
        issues.append("Password does not contain a special character")
    return issues
| true |
8dd77afd99a7b78defd414693fbe0d28a9c5ec8d | Python | ddempsey/ENGSCI263_2019 | /wairakei/wk263.py | UTF-8 | 21,521 | 2.53125 | 3 | [
"MIT"
] | permissive | ##########################################################################
##########################################################################
##
## What are you doing looking at this file?
##
##########################################################################
##########################################################################
#
# Just kidding. There is some useful stuff in here that will help you complete
# some of the labs and your project. Feel free to adapt it.
#
# (Sorry about the awful commenting though. Do as I say, not as I do, etc...)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from ipywidgets import interact, fixed, interactive_output, HBox, Button, VBox, Output, IntSlider, Checkbox, FloatSlider, FloatLogSlider, Dropdown
TEXTSIZE = 16
from IPython.display import clear_output
import time
from scipy.optimize import curve_fit
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm as colmap
from copy import copy
from scipy.stats import multivariate_normal
# commenting in here is pretty shocking tbh
# wairakei model
def wairakei_data():
    """Load the Wairakei production and pressure history CSVs and plot them
    on twin y-axes (production rate on the left, pressure on the right),
    restricted to times before 1980."""
    # load some data
    tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
    tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
    # plot some data
    f,ax1 = plt.subplots(1,1,figsize=(12,6))
    ax1.plot(tq,q,'b-',label='production')
    # empty series so "pressure" still appears in this axis' legend even
    # though the markers are drawn on the twin axis below
    ax1.plot([],[],'ro',label='pressure')
    ax1.set_xlabel('time [yr]',size=TEXTSIZE)
    ax1.set_ylabel('production rate [kg/s]',size=TEXTSIZE)
    ax2 = ax1.twinx()
    ax2.plot(tp,p,'ro')
    # draw a +/- 2 bar vertical error bar on each pressure observation
    v = 2.
    for tpi,pi in zip(tp,p):
        ax2.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
    ax2.set_ylabel('pressure [bar]',size=TEXTSIZE);
    for ax in [ax1,ax2]:
        ax.tick_params(axis='both',labelsize=TEXTSIZE)
        ax.set_xlim([None,1980])
    ax1.legend(prop={'size':TEXTSIZE})
    plt.show()
def lpm_plot(i=1):
    """Draw the lumped-parameter reservoir cartoon at build-up stage ``i``.

    Stages (stepped through by the slider in lpm_demo):
      0 - reservoir at equilibrium (the annotation reads Pdot = 0)
      1 - extraction at rate q (adds the -aq term)
      2 - recharge from surrounding rock (adds the -bP term)
      3 - slow drainage (adds the -c*qdot term)
    """
    f,ax = plt.subplots(1,1, figsize=(12,6))
    ax.axis('off')
    ax.set_xlim([0,1])
    ax.set_ylim([0,1])
    # geometry of the reservoir sketch: radius, centre, height, level
    # drops (dh, dh2), ellipse flattening (e)
    r = 0.3
    cx,cy = [0.5,0.35]
    h = 0.3
    dh = -0.13
    dh2 = 0.05
    e = 4.
    th = np.linspace(0,np.pi,101)
    col = 'r'
    # light blue background, then the reservoir outline: solid base and
    # top ellipses plus dashed "original level"; the solid upper surface
    # is lowered by dh (and dh2) once extraction/recharge are shown
    ax.fill_between([0,1],[0,0],[1,1],color='b',alpha=0.1, zorder = 0)
    ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e, color = col, ls = '-')
    ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e, color = col, ls = '-')
    ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e+h, color = col, ls = '--')
    ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e+h, color = col, ls = '--')
    ax.plot([cx+r,cx+r],[cy,cy+h],color=col,ls='--')
    ax.plot([cx-r,cx-r],[cy,cy+h],color=col,ls='--')
    ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e+h+(i>0)*dh+(i>1)*dh2, color = col, ls = '-')
    ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e+h+(i>0)*dh+(i>1)*dh2, color = col, ls = '-')
    ax.plot([cx+r,cx+r],[cy,cy+h+(i>0)*dh+(i>1)*dh2],color=col,ls='-')
    ax.plot([cx-r,cx-r],[cy,cy+h+(i>0)*dh+(i>1)*dh2],color=col,ls='-')
    ax.fill_between(cx + r*np.cos(th),cy - r*np.sin(th)/e,cy + r*np.sin(th)/e+h+(i>0)*dh+(i>1)*dh2, color='r', alpha = 0.1)
    if i > 0:
        # stage 1: extraction — red cube with an outflow arrow
        cube(ax, 0.90, 0.8, 0.025, 'r')
        ax.arrow(cx+1.05*r,cy+1.2*(h+dh)+0.05, 0.05, 0.14, color = 'r', head_width=0.02, head_length=0.04, length_includes_head=True)
    if i > 1:
        # stage 2: recharge — blue cubes in the surrounding rock with
        # arrows pointing into the reservoir
        cube(ax, 0.85, 0.5, 0.015, 'b')
        cube(ax, 0.15, 0.5, 0.015, 'b')
        cube(ax, 0.85, 0.35, 0.015, 'b')
        cube(ax, 0.15, 0.35, 0.015, 'b')
        cube(ax, 0.25, 0.23, 0.015, 'b')
        cube(ax, 0.50, 0.18, 0.015, 'b')
        cube(ax, 0.75, 0.23, 0.015, 'b')
        ax.arrow(0.17,0.5,0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
        ax.arrow(0.83,0.5,-0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
        ax.arrow(0.17,0.35,0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
        ax.arrow(0.83,0.35,-0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
        ax.arrow(0.50,0.21,0.,0.04, color = 'b', head_width=0.01, head_length=0.02, length_includes_head=True)
        ax.arrow(0.26,0.25,0.015,0.025, color = 'b', head_width=0.015, head_length=0.01, length_includes_head=True)
        ax.arrow(0.74,0.25,-0.015,0.025, color = 'b', head_width=0.015, head_length=0.01, length_includes_head=True)
    if i > 2:
        # stage 3: intermediate surfaces showing the level lowering over time
        for fr in [0.35,0.70,0.90]:
            ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e+h+fr*(dh+dh2), color = 'k', ls = '--')
            ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e+h+fr*(dh+dh2), color = 'k', ls = '--')
            ax.fill_between(cx + r*np.cos(th), cy - r*np.sin(th)/e+h+fr*(dh+dh2), cy + r*np.sin(th)/e+h+fr*(dh+dh2), color = 'k', alpha = 0.1)
        ax.arrow(0.18, cy+h, 0, dh+dh2, color = 'k', head_width=0.01, head_length=0.02, length_includes_head=True)
        ax.text(0.17, cy+h+0.5*(dh+dh2), 'lowers\nover time', color='k', ha = 'right', va='center', size=TEXTSIZE-1, fontstyle = 'italic')
    # assemble the ODE annotation term by term as stages are revealed
    xt1,xt2,xt3,xt4 = [0.2,0.06,0.07,0.07]
    yt = 0.85
    yt2 = 0.05
    ax.text(xt1,yt,r'$\dot{P}$ =', color = 'k', size = TEXTSIZE+4)
    if i == 0:
        ax.text(xt1+xt2,yt,r'$0$', color = 'k', size = TEXTSIZE+4)
    if i > 0:
        ax.text(xt1+xt2,yt,r'$-aq$', color = 'r', size = TEXTSIZE+4)
    if i > 1:
        ax.text(xt1+xt2+xt3,yt,r'$-bP$', color = 'b', size = TEXTSIZE+4)
    if i > 2:
        ax.text(xt1+xt2+xt3+xt4,yt,r'$-c\dot{q}$', color = 'k', size = TEXTSIZE+4)
    # caption describing the current stage
    if i == 0:
        ax.text(0.5, yt2, 'reservoir initially at pressure equilibrium', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
    elif i == 1:
        ax.text(0.5, yt2, 'extraction from reservoir at rate, $q$', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
    elif i == 2:
        ax.text(0.5, yt2, 'recharge from surrounding rock, proportional to $P$', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
    elif i == 3:
        ax.text(0.5, yt2, 'response to extraction not instantaneous: "slow drainage", $\dot{q}$', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
    plt.show()
def cube(ax,x0,y0,dx,col):
    """Draw a small isometric cube on `ax` (wireframe plus one shaded face).

    (x0, y0) anchors the cube, dx sets its horizontal half-extent and `col`
    the line colour; the vertical extent is fixed at twice dx.
    """
    dy = dx*2.
    # s2 controls the depth foreshortening of the isometric projection.
    s2 = 2
    ax.plot([x0+dx/s2,x0, x0-dx,x0-dx,x0,x0],[y0+dy/s2,y0,y0,y0-dy,y0-dy,y0],color=col,ls='-')
    ax.plot([x0-dx,x0-dx+dx/s2,x0+dx/s2,x0+dx/s2,x0],[y0,y0+dy/s2,y0+dy/s2,y0+dy/s2-dy,y0-dy],color=col,ls='-')
    ax.fill_between([x0-dx,x0-dx+dx/s2,x0,x0+dx/s2],[y0-dy,y0-dy,y0-dy,y0-dy+dy/s2],[y0,y0+dy/s2,y0+dy/s2,y0+dy/s2],color=col,alpha=0.1)
def lpm_demo():
    """Interactive widget: step through the terms of the LPM pressure ODE.

    Returns a VBox with a 0-3 slider wired to lpm_plot, revealing one
    right-hand-side term of dP/dt per step.
    """
    sldr = IntSlider(value=0, description='slide me!', min = 0, max = 3, step = 1, continuous_update = False, readout=False)
    return VBox([sldr, interactive_output(lpm_plot, {'i':sldr})])
def plot_lpm_models(a,b,c):
    """Solve the lumped-parameter model dP/dt = -a*q - b*P - c*dq/dt for the
    given coefficients and plot it against the observed pressure history
    (first 28 rows of the 'wk_' CSVs).
    """
    # load some data
    tq,q = np.genfromtxt('wk_production_history.csv', delimiter = ',')[:28,:].T
    tp,p = np.genfromtxt('wk_pressure_history.csv', delimiter = ',')[:28,:].T
    dqdt = 0.*q # allocate derivative vector
    dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
    dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
    dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
    # plot the data with error bars
    f,ax = plt.subplots(1,1,figsize=(12,6))
    ax.set_xlabel('time [yr]',size=TEXTSIZE)
    ax.plot(tp,p,'ro', label = 'observations')
    v = 2.
    for tpi,pi in zip(tp,p):
        ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
    # define derivative function
    def lpm(pi,t,a,b,c): # order of variables important
        qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
        dqdti = np.interp(t,tq,dqdt) # interpolate derivative
        return -a*qi - b*pi - c*dqdti # compute derivative
    # implement an improved Euler step to solve the ODE
    # (note: lpm is fed pressure CHANGE relative to the initial value p[0])
    def solve_lpm(t,a,b,c):
        pm = [p[0],] # initial value
        for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
            dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
            pp = pm[-1] + dpdt1*(t1-t0) # predictor step
            dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
            pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
        return np.interp(t, tp, pm) # interp onto requested times
    # solve and plot model
    pm = solve_lpm(tp,a,b,c)
    ax.plot(tp, pm, 'k-', label='model')
    # axes upkeep
    ax.set_ylabel('pressure [bar]',size=TEXTSIZE);
    ax.tick_params(axis='both',labelsize=TEXTSIZE)
    ax.legend(prop={'size':TEXTSIZE})
    plt.show()
def lpm_model():
    """Calibrate the LPM dP/dt = -a*q - b*P - c*dq/dt to the first 28 rows of
    the pressure history with scipy's curve_fit, then plot best model vs data.
    """
    # load flow rate data and compute derivative
    tq,q = np.genfromtxt('wk_production_history.csv', delimiter = ',')[:28,:].T
    tp,p = np.genfromtxt('wk_pressure_history.csv', delimiter = ',')[:28,:].T
    dqdt = 0.*q # allocate derivative vector
    dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
    dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
    dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
    # define derivative function
    def lpm(pi,t,a,b,c): # order of variables important
        qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
        dqdti = np.interp(t,tq,dqdt) # interpolate derivative
        return -a*qi - b*pi - c*dqdti # compute derivative
    # implement an improved Euler step to solve the ODE
    def solve_lpm(t,a,b,c):
        pm = [p[0],] # initial value
        for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
            dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
            pp = pm[-1] + dpdt1*(t1-t0) # predictor step
            dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
            pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
        return np.interp(t, tp, pm) # interp onto requested times
    # use CURVE_FIT to find "best" model
    from scipy.optimize import curve_fit
    pars = curve_fit(solve_lpm, tp, p, [1,1,1])[0]
    # plot the best solution
    pm = solve_lpm(tp,*pars)
    f,ax = plt.subplots(1,1,figsize=(12,6))
    ax.plot(tp, p, 'ro', label = 'observations')
    ax.plot(tp, pm, 'k-', label='model')
    ax.set_ylabel("pressure [bar]",size=14); ax.set_xlabel("time",size=14)
    ax.legend(prop={'size':14})
    ax.set_ylim([25,60])
    ax.set_title('a={:2.1e}, b={:2.1e}, c={:2.1e}'.format(*pars),size=14);
def lpm_models():
    """Interactive widget: log-sliders for the LPM coefficients a, b, c
    around their calibrated values, wired to plot_lpm_models.
    """
    a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
    # dlog is the +/- range of the slider in log10 space.
    dlog = 0.1
    a = FloatLogSlider(value=a0, base=10, description=r'$a$', min = np.log10(a0)-dlog, max = np.log10(a0)+dlog, step = dlog/10, continuous_update = False)
    b = FloatLogSlider(value=b0, base=10, description=r'$b$', min = np.log10(b0)-dlog, max = np.log10(b0)+dlog, step = dlog/10, continuous_update = False)
    # give the c slider a wider range
    dlog*=5
    c = FloatLogSlider(value=c0, base=10, description=r'$c$', min = np.log10(c0)-dlog, max = np.log10(c0)+dlog, step = dlog/10, continuous_update = False)
    io = interactive_output(plot_lpm_models, {'a':a,'b':b,'c':c})
    return VBox([HBox([a,b,c]),io])
def plot_lpm_posterior(sa,sb,sc,Nmods):
    """Sample `Nmods` LPM parameter sets from normal distributions centred on
    the calibrated (a0, b0, c0) with standard deviations (sa, sb, sc), and
    plot the resulting model ensemble over the data, with marginal
    parameter histograms on the right.
    """
    # load some data
    tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
    tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
    dqdt = 0.*q # allocate derivative vector
    dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
    dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
    dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
    a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
    dlog = 0.1
    Nmods = int(Nmods)
    # draw parameter samples: N(m0, sm) for each coefficient
    a = np.random.randn(Nmods)*sa+a0
    b = np.random.randn(Nmods)*sb+b0
    c = np.random.randn(Nmods)*sc+c0
    # plot the data with error bars
    f = plt.figure(figsize=(12,6))
    ax = plt.axes([0.15,0.15,0.5,0.7])
    ax1 = plt.axes([0.70,0.69,0.2,0.15])
    ax2 = plt.axes([0.70,0.42,0.2,0.15])
    ax3 = plt.axes([0.70,0.15,0.2,0.15])
    # marginal panels: analytic normal pdf (red) over the sample histogram
    for m0,sm,axi,mv in zip([a0,b0,c0],[sa,sb,sc],[ax1,ax2,ax3],[a,b,c]):
        axi.set_yticks([])
        if sm < 1.e-6:
            # effectively zero spread: draw a spike instead of a pdf
            axi.plot([m0-3*dlog*m0, m0,m0,m0,m0+3*dlog*m0],[0,0,1,0,0],'r-',zorder=2)
        else:
            x = np.linspace(m0-3*dlog*m0, m0+3*dlog*m0, 101)
            y = np.exp(-(x-m0)**2/(2*sm**2))/np.sqrt(2*np.pi*sm**2)
            axi.plot(x,y,'r-',zorder=2)
        bins = np.linspace(m0-3*dlog*m0, m0+3*dlog*m0, int(4*np.sqrt(Nmods))+1)
        h,e = np.histogram(mv, bins)
        h = h/(np.sum(h)*(e[1]-e[0]))
        axi.bar(e[:-1],h,e[1]-e[0], color = [0.5,0.5,0.5])
        if axi is ax2: dlog*=5
    ax1.set_xlabel('$a$',size=TEXTSIZE)
    ax2.set_xlabel('$b$',size=TEXTSIZE)
    ax3.set_xlabel('$c$',size=TEXTSIZE)
    ax.set_xlabel('time [yr]',size=TEXTSIZE)
    ax.plot(tp,p,'ro', label = 'observations')
    v = 2.
    for tpi,pi in zip(tp,p):
        ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
    # define derivative function
    def lpm(pi,t,a,b,c): # order of variables important
        qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
        dqdti = np.interp(t,tq,dqdt) # interpolate derivative
        return -a*qi - b*pi - c*dqdti # compute derivative
    # implement an improved Euler step to solve the ODE
    def solve_lpm(t,a,b,c):
        pm = [p[0],] # initial value
        for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
            dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
            pp = pm[-1] + dpdt1*(t1-t0) # predictor step
            dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
            pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
        return np.interp(t, tp, pm) # interp onto requested times
    # solve and plot model
    alpha = np.min([0.5,10./Nmods])
    lw = 0.5
    for ai,bi,ci in zip(a,b,c):
        pm = solve_lpm(tp,ai,bi,ci)
        ax.plot(tp, pm, 'k-', alpha = alpha, lw = lw)
    ax.plot([],[],'k-',alpha=alpha,lw=lw,label='possible models')
    # axes upkeep
    pm = solve_lpm(tp,a0,b0,c0)
    ax.plot(tp, pm, 'k-', lw = 2, label = 'best model')
    ax.set_ylabel('pressure [bar]',size=TEXTSIZE);
    ax.tick_params(axis='both',labelsize=TEXTSIZE)
    ax.legend(prop={'size':TEXTSIZE})
    ax.set_xlim([None,1980])
    ax.set_title(r'$\sigma_a='+'{:2.1e}'.format(sa)+r'$, $\sigma_b='+'{:2.1e}'.format(sb)+r'$, $\sigma_c='+'{:2.1e}'.format(sc)+'$',size=TEXTSIZE);
    plt.show()
def lpm_posterior():
    """Interactive widget: sliders for the parameter standard deviations
    (sigma_a, sigma_b, sigma_c) and the sample count, wired to
    plot_lpm_posterior.
    """
    a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
    # slider ranges scale with the calibrated parameter values
    dlog = 0.1
    sa = FloatSlider(value=dlog*a0/2, description=r'$\sigma_a$', min = 0., max = dlog*a0, step = dlog*a0/10., continuous_update = False)
    sb = FloatSlider(value=dlog*b0/2, description=r'$\sigma_b$', min = 0., max = dlog*b0, step = dlog*b0/10., continuous_update = False)
    # wider range for sigma_c
    dlog*=5
    sc = FloatSlider(value=dlog*c0/2, description=r'$\sigma_c$', min = 0., max = dlog*c0, step = dlog*c0/10., continuous_update = False)
    Nmods = FloatLogSlider(value = 4, base=2, description='samples', min = 0, max = 8, step = 1, continuous_update=False)
    io = interactive_output(plot_lpm_posterior, {'sa':sa,'sb':sb,'sc':sc,'Nmods':Nmods})
    return VBox([HBox([sa,sb,sc,Nmods]),io])
def plot_lpm_prediction(Nmods, reveal, sa, sb, sc):
    """Ensemble forecast demo: calibrate-era data (up to 1981) is shown,
    `Nmods` sampled models are run over the full record, and the histogram
    of final-time pressures is shown on the right. With `reveal` True the
    post-1981 observations and the "true" outcome are overlaid.
    """
    # load some data
    tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
    tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
    dqdt = 0.*q # allocate derivative vector
    dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
    dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
    dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
    # index of the last data point to DISPLAY (1981 unless revealing all)
    if not reveal:
        iq = np.argmin(abs(tq-1981))
        ip = np.argmin(abs(tp-1981))
    else:
        iq = len(tq)
        ip = len(tp)
    a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
    dlog = 0.1
    Nmods = int(Nmods)
    # fixed seed so the ensemble is reproducible across widget updates
    np.random.seed(13)
    a = np.random.randn(Nmods)*sa+a0
    b = np.random.randn(Nmods)*sb+b0
    c = np.random.randn(Nmods)*sc+c0
    # plot the data with error bars
    f = plt.figure(figsize=(15,5))
    ax = plt.axes([0.15,0.15,0.5,0.7])
    ax2 = plt.axes([0.75,0.15,0.20,0.7])
    ax.set_xlabel('time [yr]',size=TEXTSIZE)
    ax.plot(tp[:ip],p[:ip],'ro', label = 'observations')
    v = 2.
    for tpi,pi in zip(tp[:ip],p[:ip]):
        ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
    # define derivative function
    def lpm(pi,t,a,b,c): # order of variables important
        qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
        dqdti = np.interp(t,tq,dqdt) # interpolate derivative
        return -a*qi - b*pi - c*dqdti # compute derivative
    # implement an improved Euler step to solve the ODE
    def solve_lpm(t,a,b,c):
        pm = [p[0],] # initial value
        for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
            dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
            pp = pm[-1] + dpdt1*(t1-t0) # predictor step
            dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
            pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
        return np.interp(t, tp, pm) # interp onto requested times
    # solve and plot model; pmf collects each model's final-time pressure
    alpha = np.min([0.5,10./Nmods])
    lw = 0.5
    pmf = []
    for ai,bi,ci in zip(a,b,c):
        pm = solve_lpm(tp,ai,bi,ci)
        ax.plot(tp, pm, 'k-', alpha = alpha, lw = lw)
        pmf.append(pm[-1])
    ax.plot([],[],'k-',alpha=0.5,lw=lw,label='possible models')
    pm = solve_lpm(tp,a0,b0,c0)
    ax.plot(tp, pm, 'k-', lw = 2, label = 'best model')
    ax.axvline(tp[-1], color = 'k', linestyle = ':', label='predict future')
    # histogram of the predicted final pressures
    bins = np.linspace(np.min(pmf)*0.999, np.max(pmf)*1.001, int(np.sqrt(Nmods))+1)
    h,e = np.histogram(pmf, bins)
    h = h/(np.sum(h)*(e[1]-e[0]))
    ax2.bar(e[:-1],h,e[1]-e[0], color = [0.5,0.5,0.5])
    ax2.set_xlim([30,45])
    ax2.set_ylim([0,1])
    if Nmods>10:
        ax2.axvline(pm[-1], label='best model',color = 'k', linestyle = '-')
    if reveal:
        ax2.axvline(p[-1], label='true process',color = 'r', linestyle = '-')
        ax2.fill_between([p[-1]-v, p[-1]+v], [0,0], [1,1], color='r', alpha=0.5)
    yf5,yf95 = np.percentile(pmf, [5,95])
    ax2.axvline(yf5, label='90% interval',color = 'k', linestyle = '--')
    ax2.axvline(yf95, color = 'k', linestyle = '--')
    # axes upkeep
    ax.set_ylabel('pressure [bar]',size=TEXTSIZE);
    ax2.set_xlabel('pressure [bar]',size=TEXTSIZE);
    ax2.set_ylabel('probability',size=TEXTSIZE)
    for axi in [ax,ax2]: axi.tick_params(axis='both',labelsize=TEXTSIZE)
    ax.legend(prop={'size':TEXTSIZE})
    plt.show()
def lpm_prediction(sa,sb,sc):
    """Interactive widget: sample-count slider and a 'reveal future'
    checkbox wired to plot_lpm_prediction; the parameter standard
    deviations (sa, sb, sc) are fixed by the caller.
    """
    Nmods = FloatLogSlider(value = 64, base=4, description='samples', min = 0, max = 5, step = 1, continuous_update=False)
    reveal = Checkbox(value = False, description='reveal future!')
    io = interactive_output(plot_lpm_prediction, {'Nmods':Nmods, 'reveal':reveal, 'sa':fixed(sa), 'sb':fixed(sb), 'sc':fixed(sc)})
    return VBox([HBox([Nmods, reveal]),io])
def plot_lpm_structural(c,reveal):
    """Structural-error demo: hold the slow-drainage coefficient `c` fixed,
    re-fit only a and b to the pre-1981 data, and plot the calibrated model
    (over the full record if `reveal`).

    NOTE(review): `curve_fit` is referenced without a local import here
    (lpm_model imports it locally) -- it must come from a module-level
    import outside this view; confirm.
    """
    # load some data
    tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
    tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
    dqdt = 0.*q # allocate derivative vector
    dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
    dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
    dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
    if not reveal:
        iq = np.argmin(abs(tq-1981))
        ip = np.argmin(abs(tp-1981))
    else:
        iq = len(tq)
        ip = len(tp)
    # define derivative function
    def lpm(pi,t,a,b,c): # order of variables important
        qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
        dqdti = np.interp(t,tq,dqdt) # interpolate derivative
        return -a*qi - b*pi - c*dqdti # compute derivative
    # implement an improved Euler step to solve the ODE
    def solve_lpm(t,a,b,c):
        pm = [p[0],] # initial value
        for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
            dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
            pp = pm[-1] + dpdt1*(t1-t0) # predictor step
            dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
            pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
        return np.interp(t, tp, pm) # interp onto requested times
    # fit only a and b, with c frozen at the slider value
    solve_lpm_c0 = lambda t,a,b: solve_lpm(t,a,b,c)
    a,b = curve_fit(solve_lpm_c0, tp[:28], p[:28], [1,1])[0]
    #a0,b0 = [4.72e-3,2.64e-1]
    #dlog = 0.1
    #Nmods = 64
    #np.random.seed(13)
    #a = np.random.randn(Nmods)*sa+a0
    #b = np.random.randn(Nmods)*sb+b0
    # plot the data with error bars
    f = plt.figure(figsize=(15,5))
    ax = plt.axes([0.1,0.15,0.8,0.7])
    ax.set_xlabel('time [yr]',size=TEXTSIZE)
    ax.plot(tp[:ip],p[:ip],'ro', label = 'observations')
    v = 2.
    for tpi,pi in zip(tp[:ip],p[:ip]):
        ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
    # solve and plot model
    #alpha = np.min([0.5,10./Nmods])
    #lw = 0.5
    #for ai,bi in zip(a,b):
    #    pm = solve_lpm(tp[:ip],ai,bi,c)
    #    ax.plot(tp[:ip], pm, 'k-', alpha = alpha, lw = lw)
    #ax.plot([],[],'k-',alpha=alpha,lw=lw,label='possible models')
    # axes upkeep
    pm = solve_lpm(tp[:ip],a,b,c)
    ax.plot(tp[:ip], pm, 'k-', lw = 2, label = 'best model')
    ax.set_ylabel('pressure [bar]',size=TEXTSIZE);
    ax.tick_params(axis='both',labelsize=TEXTSIZE)
    ax.legend(prop={'size':TEXTSIZE})
    ax.set_xlim([1952,2012])
    ax.set_ylim([25,60])
    ax.set_title(r'$a='+'{:2.1e}'.format(a)+r'$, $b='+'{:2.1e}'.format(b)+r'$, $c='+'{:2.1e}'.format(c)+'$',size=TEXTSIZE);
    plt.show()
def lpm_structural():
    """Interactive widget: slider for the frozen coefficient `c` and a
    'reveal future' checkbox wired to plot_lpm_structural.
    """
    c = FloatSlider(value=0, description=r'$c$', min = 0., max = 1.2e-2, step = 1.e-3, continuous_update = False)
    #dlog*=5
    reveal = Checkbox(value = False, description='reveal future!')
    io = interactive_output(plot_lpm_structural, {'c':c,'reveal':reveal})
    return VBox([HBox([c,reveal]),io])
| true |
b0243d3f1110f9d8cd5b3268f8b72cf6d7cfb84e | Python | tjcuddihy/AdventOfCode | /2020/05/five.py | UTF-8 | 1,506 | 3.25 | 3 | [] | no_license | from math import ceil, floor
# Module-level input: one 10-character boarding pass (e.g. "BFFFBBFRRR")
# per line of five.txt.
with open("five.txt", "r") as f:
    passes = [row.strip() for row in f.read().splitlines()]
def parse(code, dir_setter, start=0, end=127):
    """Binary-space-partition `code` down to a single number in [start, end].

    Each character halves the interval: characters equal to `dir_setter`
    keep the upper half, anything else keeps the lower half.
    """
    if len(code) == 1:
        # Last character decides between the two remaining candidates.
        return end if code == dir_setter else start
    midpoint = (start + end) / 2
    head, tail = code[0], code[1:]
    if head == dir_setter:
        return parse(tail, dir_setter, start=ceil(midpoint), end=end)
    return parse(tail, dir_setter, start=start, end=floor(midpoint))
def calculate_part1(board):
    """Return the seat ID for a 10-character boarding pass.

    B/F select the row's upper/lower half and R/L the column's.  Reading
    B and R as 1-bits, the whole pass is exactly the seat ID in binary
    (row * 8 + seat), so the recursive partition search is unnecessary.
    Assumes the pass contains only the characters F, B, L, R.
    """
    return int(board.translate(str.maketrans("FBLR", "0101")), 2)
def calculate_part2(board):
    """Return (row, seat, seat_id) for a 10-character boarding pass.

    The pass read as binary (B/R -> 1, F/L -> 0) is the seat ID; the row
    is its high 7 bits and the seat column its low 3 bits.
    """
    seat_id = int(board.translate(str.maketrans("FBLR", "0101")), 2)
    return (seat_id >> 3, seat_id & 7, seat_id)
# Part 1: the highest seat ID on any scanned boarding pass.
part_1 = [calculate_part1(x) for x in passes]
print(f"Part 1: {max(part_1)}")
# Part 2: find our seat -- an unoccupied (row, seat) whose row neighbours
# (same seat column, row +/- 1) are both occupied.
part_2 = [calculate_part2(x) for x in passes]
d = dict()
for board in part_2:
    # Key is str(seat) + str(row); seat is a single digit (0-7), so the
    # concatenation is unambiguous.
    position = str(board[1]) + str(board[0])
    d[position] = board[2]
taken_seats = set(d.keys())
for row in range(128):
    for seat in range(8):
        position = str(seat) + str(row)
        if position in taken_seats:
            continue
        else:
            plus = str(seat) + str(row + 1)
            minus = str(seat) + str(row - 1)
            if plus not in taken_seats or minus not in taken_seats:
                continue
            else:
                print(f"Part 2: {row*8+seat}")
| true |
1af79fd8ceddd05610ab685dbe81df7a5a2b9d71 | Python | ofhasirci/swe-573 | /backend/app/wikiData.py | UTF-8 | 2,183 | 2.78125 | 3 | [] | no_license | import requests
class WikiData:
    """Thin wrapper around the Wikidata `wbgetentities` API for one entity."""

    # Claim properties whose targets contribute words to getSentence():
    # P31 instance-of, P279 subclass-of, P2579 studied-by, P361 part-of.
    # NOTE(review): property meanings inferred from Wikidata ids -- confirm.
    _SENTENCE_PROPERTIES = ("P31", "P279", "P2579", "P361")

    def __init__(self, id):
        # One HTTP request per instance; self.wikiData holds the raw
        # entity dict for `id` (English labels/descriptions only).
        wiki = requests.get('https://www.wikidata.org/w/api.php?action=wbgetentities&ids=' + id + '&languages=en&format=json')
        self.wikiData = wiki.json().get('entities').get(id)

    def getDescription(self):
        """English description of the entity, or '' if none."""
        if self.wikiData.get('descriptions'):
            return self.wikiData.get('descriptions').get('en').get('value')
        else:
            return ""

    def getLabel(self):
        """English label of the entity, or '' if none."""
        if self.wikiData.get('labels'):
            return self.wikiData.get('labels').get('en').get('value')
        else:
            return ""

    def getSentence(self):
        """Concatenate descriptions and labels of related entities.

        Follows each claim in _SENTENCE_PROPERTIES and fetches its target
        entity (one HTTP request per claim target).  The original repeated
        an identical block for each property; the loop keeps the exact
        same traversal order.
        """
        sentence = " "
        claims = self.wikiData.get('claims')
        if claims:
            for prop in self._SENTENCE_PROPERTIES:
                for item in claims.get(prop) or []:
                    id = item.get('mainsnak').get('datavalue').get('value').get('id')
                    data = WikiData(id)
                    sentence = sentence + " " + data.getDescription() + " " + data.getLabel()
        return sentence
d8f0430251f356571be8846133aad5eff20e8d75 | Python | PegasusWang/collection_python | /z42/z42/lib/jsob.py | UTF-8 | 1,379 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python
#coding:utf-8
from yajl import dumps
class JsOb(object):
    """JavaScript-style object: attribute and item access over one dict.

    Missing names read as '' instead of raising.  (Python 2 module:
    __iter__ uses dict.iteritems and __str__ uses yajl.dumps.)
    """

    def __init__(self, *args, **kwds):
        for i in args:
            # BUGFIX: was self.__dict__.update(args) -- updating with the
            # whole args tuple (which raises) instead of each mapping.
            self.__dict__.update(i)
        self.__dict__.update(kwds)

    def __getattr__(self, name):
        # Only invoked when normal lookup fails: unknown names read as ''.
        return self.__dict__.get(name, '')

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    def __delattr__(self, name):
        if name in self.__dict__:
            # BUGFIX: was `del self.__dict__['name']` (the literal key
            # 'name'), which raised KeyError instead of deleting `name`.
            del self.__dict__[name]

    def __repr__(self):
        return self.__dict__.__repr__()

    # Item access shares the attribute-access implementations.
    __getitem__ = __getattr__
    __delitem__ = __delattr__
    __setitem__ = __setattr__

    def __len__(self):
        return self.__dict__.__len__()

    def __iter__(self):
        return self.__dict__.iteritems()

    def __contains__(self, name):
        return self.__dict__.__contains__(name)

    def __str__(self):
        return dumps(self.__dict__)
class StripJsOb(JsOb):
    """JsOb that strips surrounding whitespace from single-line string
    values at construction time; multi-line strings are left untouched.

    NOTE(review): uses `basestring`, so this class is Python 2 only.
    """
    def __init__(self, *args, **kwds):
        super(StripJsOb,self).__init__(*args, **kwds)
        d = self.__dict__
        for k,v in d.items():
            if isinstance(v, basestring):
                if "\n" not in v:
                    _v = v.strip()
                    if _v != v:
                        d[k] = _v
if __name__ == '__main__':
    # Smoke test (Python 2 print statements; iterating a JsOb yields
    # key/value pairs via dict.iteritems).
    o = JsOb(a='张沈鹏')
    print o
    for k, v in o:
        print k, v
    print str(o)
| true |
18ba404cf29708596816d86e122b475eea535bed | Python | spritezl/Learning | /PythonLearn/src/Perf/log_analyse.py | UTF-8 | 3,084 | 2.75 | 3 | [] | no_license | '''
Created on Sep 14, 2016
This is a little utility to analyze LogWriterLog for user activities by minute.
@author: fzhang
'''
from datetime import datetime
def calcActivity(logentities):
    """Aggregate log entries into per-minute distinct-user counts.

    :param logentities: iterable of (datetime, username) tuples
    :return: list of (minute_start_datetime, distinct_user_count) tuples
        in chronological order
    """
    import sqlite3

    # Stage the entries in a throwaway in-memory table so SQL can do the
    # per-minute GROUP BY / COUNT(DISTINCT ...) aggregation for us.
    conn = sqlite3.connect(':memory:')
    try:
        cur = conn.cursor()
        cur.execute('''CREATE TABLE IF NOT EXISTS LogEntities(
                    actiondate DATETIME,
                    username TEXT)''')
        cur.executemany('insert into LogEntities values(?,?)', logentities)
        conn.commit()

        userActivities = []
        for (actionmin, user_count) in cur.execute("""
                select strftime('%Y-%m-%d %H:%M',actiondate),count(distinct username)
                from LogEntities
                group by strftime('%Y-%m-%d %H:%M',actiondate)
                order by 1"""):
            # strftime drops the seconds, so pad ':00' back on for strptime.
            userActivities.append((datetime.strptime(actionmin+':00',
                                                     '%Y-%m-%d %H:%M:%S'),
                                   user_count))
        cur.close()
        return userActivities
    finally:
        # Always release the connection, even if an insert/query fails
        # (the original leaked it on any exception).
        conn.close()
def activityChart(activitydata):
    """Line-plot per-minute concurrent user counts.

    :param activitydata: list of (minute_datetime, user_count) tuples,
        e.g. the output of calcActivity(); shows the plot interactively.
    """
    minute, usercount = list(zip(*activitydata))
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(minute, usercount, 'g-',
            label='User Activity by minute')
    fig.autofmt_xdate()
    fig.suptitle('User Activity by minute')
    ax.set_xlabel('minute')
    ax.set_ylabel('concurrent user#')
    _, labels = plt.xticks()
    plt.setp(labels, rotation=90)
    # Fixed y-range; NOTE(review): counts above 30 would be clipped.
    plt.ylim(0, 30)
    plt.show()
def extractLog(logWriterFile, starttime, endtime):
    """Parse a LogWriter file into (timestamp, username) tuples.

    Only lines of the form '[<date> CST <year>] user <name>, ...' whose
    timestamp falls inside [starttime, endtime] are returned.
    """
    import re

    entry_re = re.compile(r'\[(.*)\]+\s(user\s\w+),+\s.*$')
    entries = []
    with open(logWriterFile, "r", encoding='latin1') as handle:
        for raw_line in handle:
            hit = re.match(entry_re, raw_line)
            if not hit:
                continue
            # Drop the 'CST ' timezone token so strptime can parse it.
            stamp = datetime.strptime(hit.group(1).replace('CST ', ''),
                                      "%a %b %d %H:%M:%S %Y")
            who = hit.group(2)
            if starttime <= stamp <= endtime:
                entries.append((stamp, who))
    return entries
# test with actual time range
# NOTE(review): this runs at import time against a hard-coded local path
# and opens a plot window; consider guarding it with
# `if __name__ == '__main__':` so the helpers can be imported safely.
activityChart(calcActivity(extractLog(
    r'D:\Project\Allocation\Load\20160918\LogWriterLog204.txt',
    datetime.strptime('2016-09-13 00:00:00', '%Y-%m-%d %H:%M:%S'),
    datetime.strptime('2016-09-14 00:00:00', '%Y-%m-%d %H:%M:%S'))))
| true |
7e4ce5d957b362c211b8a2af02d1bd5a5e64b7cb | Python | weddy3/TPP | /07_gashlycrumb/phonebook.py | UTF-8 | 307 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from pprint import pprint as pp
def main():
    """Load phonebook.txt ('name, number' per line) and pretty-print the
    resulting name -> number dict."""
    my_phonebook = {}
    with open('phonebook.txt') as fh:
        for line in fh:
            # Split on the first ', ' only and drop the trailing newline;
            # the original split()[1] kept '\n' on every number.
            name, _, number = line.rstrip('\n').partition(', ')
            my_phonebook[name] = number
    pp(my_phonebook)


if __name__ == '__main__':
    main()
be9cee05dc3b30e86b191d7e320ebb58c20ccb70 | Python | JuanMPerezM/AlgoritmosyProgramacion_Talleres | /taller de estructuras de control de repeticion/ejercicio2.py | UTF-8 | 106 | 3.328125 | 3 | [
"MIT"
] | permissive | """
Entradas
Salidas
Impares-->int-->a
"""
# Print every odd number below 100 that is not a multiple of 7.
for value in range(100):
    if value % 2 != 0 and value % 7 != 0:
        print(value)
8bcf01dcfb0184bf27b38e05c3122d7aa06b0e93 | Python | tjdrb63/Python | /BaekJoon/Problem1302.py | UTF-8 | 301 | 3.390625 | 3 | [] | no_license | Str = {}
# Baekjoon 1302: read N book titles from stdin and print the best-selling
# title; ties are broken by the lexicographically smallest title.
sales = {}
for _ in range(int(input())):
    title = input()
    sales[title] = sales.get(title, 0) + 1
top_count = max(sales.values())
best_sellers = sorted(t for t, c in sales.items() if c == top_count)
print(best_sellers[0])
ac6c63e145729052806522cb49ace58c5ca0b479 | Python | kizombaciao/Python_Snippets | /__call__.py | UTF-8 | 281 | 3.34375 | 3 | [] | no_license | class Pay:
    def __init__(self, hourly_wage):
        # Rate (currency units per hour) used when the instance is called.
        self.hourly_wage = hourly_wage
def __call__(self, hours_worked):
#print(hours_worked)
return hours_worked
pay_test = Pay(15)
print(pay_test(8))
# Calling the instance invokes Pay.__call__, so the object can be used
# like a function.
ca94a87bdf1123cb21096347328fcc774d55c6ed | Python | jwang151/CS115 | /lab4.py | ISO-8859-1 | 1,049 | 3.75 | 4 | [] | no_license | '''
Created on Sep 28, 2017
@author: jwang151
Pledge: I pledge my honor that I have abided by the Stevens Honor System.
'''
def knapsack(capacity, itemList):
    """Solve the 0/1 knapsack problem recursively.

    capacity -- remaining weight budget
    itemList -- list of [weight, value] pairs
    Returns [best_total_value, chosen_items].
    """
    if capacity == 0 or itemList == []:
        # No room left or nothing left to consider.
        return [0, []]
    first, rest = itemList[0], itemList[1:]
    if first[0] > capacity:
        # Too heavy to ever take; discard it.
        return knapsack(capacity, rest)
    # Branch 1: take the item (pay its weight, bank its value).
    taken = knapsack(capacity - first[0], rest)
    taken = [first[1] + taken[0], [first] + taken[1]]
    # Branch 2: skip the item.
    skipped = knapsack(capacity, rest)
    return taken if taken[0] > skipped[0] else skipped
# Demo run on the assignment's sample instance (capacity 76).
print(knapsack(76, [[36, 35], [10, 28], [39, 47], [8, 1], [7, 24]]))
'''The pascal_row function takes a single integer as input, which represents the row number, and
it returns that row of the pascal triangle.'''
'''The pascal_triangle function takes in a single integer n and returns a list of lists containing the values of all the rows up to and including row n.'''
45ba085d53daa8d9d5f01553dce026b2a65cb2e7 | Python | adamswater/xfuse | /xfuse/utility/core.py | UTF-8 | 3,453 | 2.953125 | 3 | [] | no_license | import itertools as it
from typing import (
Any,
ContextManager,
Iterable,
List,
Protocol,
Tuple,
TypeVar,
Sequence,
Union,
)
import warnings
import numpy as np
from PIL import Image
# Names re-exported as the public API of this utility module.
__all__ = [
    "center_crop",
    "chunks_of",
    "rescale",
    "resize",
    "temp_attr",
]
# Generic type variable bound to ArrayLike: functions annotated with
# ArrayType return the same array type they were given.
ArrayType = TypeVar("ArrayType", bound="ArrayLike")


class ArrayLike(Protocol):
    r"""
    A protocol for sliceable objects (e.g., numpy arrays or pytorch tensors)
    """

    @property
    def shape(self) -> Tuple[int, ...]:
        # pylint: disable=missing-docstring
        ...

    def __getitem__(
        self: ArrayType, idx: Union[slice, Tuple[slice, ...]]
    ) -> ArrayType:
        # Indexing with (tuples of) slices must return the same array type.
        ...

    def __setitem__(
        self: ArrayType, idx: Union[slice, Tuple[slice, ...]], value: Any
    ) -> None:
        # In-place assignment through (tuples of) slices.
        ...
def center_crop(x: ArrayType, target_shape: Tuple[int, ...]) -> ArrayType:
    r"""Crops `x` to the given `target_shape` from the center.

    A ``None`` entry in `target_shape` leaves that dimension untouched;
    trailing dimensions beyond ``len(target_shape)`` are also kept whole.
    """
    slices = []
    for full, wanted in zip(x.shape, target_shape):
        if wanted is None:
            slices.append(slice(None))
        else:
            offset = round((full - wanted) / 2)
            slices.append(slice(offset, offset + wanted))
    return x[tuple(slices)]
def rescale(
    image: np.ndarray, scaling_factor: float, resample: int = Image.NEAREST
) -> np.ndarray:
    r"""
    Rescales image by a given `scaling_factor`

    :param image: Image array
    :param scaling_factor: Scaling factor
    :param resample: Resampling filter

    :returns: The rescaled image
    """
    pil_image = Image.fromarray(image)
    scaled_size = [round(dim * scaling_factor) for dim in pil_image.size]
    return np.array(pil_image.resize(scaled_size, resample=resample))
def resize(
    image: np.ndarray,
    target_shape: Sequence[int],
    resample: int = Image.NEAREST,
) -> np.ndarray:
    r"""
    Resizes image to a given `target_shape`

    :param image: Image array
    :param target_shape: Target shape (row-major, i.e. (height, width))
    :param resample: Resampling filter

    :returns: The resized image
    """
    # PIL sizes are (width, height) while shapes are (height, width),
    # hence the reversal.
    flipped = target_shape[::-1]
    return np.array(Image.fromarray(image).resize(flipped, resample=resample))
def temp_attr(obj: object, attr: str, value: Any) -> ContextManager:
    r"""
    Context manager that sets ``obj.attr = value`` on entry and restores
    the previous value on exit.  If the attribute was re-assigned inside
    the context, the new value is kept and a warning is emitted.

    >>> from types import SimpleNamespace
    >>> obj = SimpleNamespace(x=1)
    >>> with temp_attr(obj, 'x', 2):
    ...     print(obj.x)
    2
    >>> print(obj.x)
    1
    """

    class _Restorer:
        def __init__(self):
            self.__saved = None

        def __enter__(self):
            self.__saved = getattr(obj, attr)
            setattr(obj, attr, value)

        def __exit__(self, *_):
            if getattr(obj, attr) != value:
                # Someone re-assigned the attribute inside the context;
                # keep their value rather than clobbering it.
                warnings.warn(
                    f'Attribute "{attr}" changed while in context.'
                    " The new value will be kept.",
                )
            else:
                setattr(obj, attr, self.__saved)

    return _Restorer()
T = TypeVar("T")


def chunks_of(xs: Iterable[T], size: int) -> Iterable[List[T]]:
    r"""
    Yields successive chunks of `xs`, each of length `size` (the final
    chunk may be shorter).

    >>> list(chunks_of([1, 2, 3, 4], 2))
    [[1, 2], [3, 4]]
    """

    class _StopMarker:
        # Private sentinel: distinguishes real items from zip padding.
        pass

    padded = it.zip_longest(*([iter(xs)] * size), fillvalue=_StopMarker)
    for group in padded:
        yield [item for item in group if item is not _StopMarker]
| true |
d545de99aff9641674588058049f1d37812182fd | Python | paddydoyle/exercism-python | /largest-series-product/largest_series_product.py | UTF-8 | 1,177 | 3.796875 | 4 | [] | no_license | from functools import reduce
def largest_product(series, size):
    """Return the largest product of any `size` adjacent digits in `series`."""
    if size == 0:
        # Convention: an empty product is 1 (checked before the other
        # validations, so ("", 0) is legal).
        return 1

    # Validate inputs before doing any work.
    if not series:
        raise ValueError("The input string cannot be empty")
    if len(series) < size:
        raise ValueError("The input string '{}' ".format(series) +
                         "is shorter than the slice size {}".format(size))
    if size < 0:
        raise ValueError("The slice size must be non-negative")

    # Slide a window of `size` digits along the string, multiplying the
    # digits in place; there is always at least one window.
    best = None
    for start in range(len(series) - size + 1):
        product = 1
        for digit in series[start:start + size]:
            product *= int(digit)
        if best is None or product > best:
            best = product
    return best
def _string_to_product(str_of_digits):
"""
Input is a string of digits. Convert each to an int
and calculate the product.
"""
return reduce(lambda x, y: x*y, [int(i) for i in str_of_digits])
| true |
7ee6076630a157f7f21e217625cdf92b827fd298 | Python | parky83/python0209 | /st01.Python기초/py08반복문/py08_13_3단구구단.py | UTF-8 | 81 | 3.0625 | 3 | [] | no_license | i=0
# Print the 3-times table: "3 * 1 = 3" through "3 * 9 = 27".
for step in range(1, 10):
    line = ("%s * %s = %s" % (3, step, 3 * step))
    print(line)
88b8ee8300b7d2d7937e6f4af3667ec471ce6460 | Python | fossabot/leetcode-2 | /4. Median of Two Sorted Arrays.py | UTF-8 | 286 | 2.75 | 3 | [
"MIT"
] | permissive | class Solution:
def findMedianSortedArrays(self, nums1, nums2):
num=nums1+nums2
num=sorted(num)
n=len(num)
if n%2==0:
mid=n//2
return (num[mid-1]+num[mid])/2
else:
mid=(n+1)//2
return num[mid-1] | true |
dfdbbbdf80ff3a131f9a789153624a55f21f9c20 | Python | ratularora/python_code | /python/list/list_max.py | UTF-8 | 145 | 3.078125 | 3 | [] | no_license | list1, list2 = [123, 565654, 'A','Z','gdgf'], [456, 700, 200]
# Python 2 print statements (this module is not Python 3 compatible).
# NOTE(review): max(list1) compares ints with strings -- Python 2 orders
# mixed types (numbers sort before strings), but the same call raises
# TypeError on Python 3.
print "Max value element : ", max(list1)
print "Max value element : ", max(list2)
| true |
bdf31f45a28f15d8aa618e3e8364e7018d591957 | Python | Infinidrix/competitive-programming | /Day 19/search.py | UTF-8 | 665 | 3.640625 | 4 | [] | no_license |
# https://leetcode.com/problems/binary-search/submissions/
class Solution:
    """Classic binary search over a sorted list (LeetCode 704)."""

    def bin_search(self, nums, index_min, index_max, target):
        """Recursively search nums in the half-open range
        [index_min, index_max); return the index of target or -1."""
        if index_min == index_max:
            # Empty interval: target is not present.
            return -1
        index_mid = (index_min + index_max) // 2
        pivot = nums[index_mid]
        if pivot == target:
            return index_mid
        if pivot > target:
            # Target can only be in the lower half.
            return self.bin_search(nums, index_min, index_mid, target)
        # Target can only be in the upper half.
        return self.bin_search(nums, index_mid + 1, index_max, target)

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of target in sorted nums, or -1 if absent."""
        return self.bin_search(nums, 0, len(nums), target)
e2e2e71f605c25a3247753f91b79392c2f9ad9e7 | Python | dgod1028/hazard_model | /Utils/topwords.py | UTF-8 | 3,361 | 2.515625 | 3 | [] | no_license |
from tqdm import tqdm
import pickle as pk
from random import seed
import numpy as np
import pandas as pd
from statsmodels.distributions.empirical_distribution import ECDF
import copy
from gensim.models.ldamulticore import LdaMulticore
def ecdf(data):
    """Empirical CDF of a 1-D numpy array.

    :param data: 1-D numpy array of samples
    :return: (x_values, y_values, ind) where x_values is an evenly spaced
        grid spanning [min(data), max(data)] with len(data) points,
        y_values[i] is the fraction of samples <= x_values[i] (a list),
        and ind is np.argsort(data).
    """
    sorted_data = np.sort(data)
    ind = np.argsort(data)
    # x-grid for the ECDF: evenly spaced points across the sample range.
    x_values = np.linspace(start=sorted_data[0],
                           stop=sorted_data[-1], num=len(sorted_data))
    # Fraction of samples <= x for every grid point, computed with one
    # vectorised searchsorted instead of the original O(n^2) Python loop
    # (side='right' counts elements <= x).
    y_values = list(np.searchsorted(sorted_data, x_values, side='right')
                    / data.size)
    # return both x and y values, plus the sort order of the raw data
    return x_values, y_values, ind
def frex(phi, mu, w):
    """FREX score: weighted harmonic mean of the frequency rank `phi` and
    exclusivity rank `mu`, with weight `w` on frequency."""
    weighted = w/phi+(1-w)/mu
    return 1 / weighted
def topwords(beta, mode="freq", list=None):
    """Rank vocabulary items for every topic.

    :param beta: topic-by-vocabulary count matrix
    :param mode: "freq" ranks by smoothed probability, "frex" by FREX score
    :param list: unused; kept for backward compatibility with old callers
        (note: it shadows the builtin `list`)
    :return: [toplist, topprob] where toplist[k] holds word ids for topic k
        in descending score order and topprob[k] the matching scores

    NOTE(review): relies on the module-global `model` (created in the
    __main__ block) for its smoothing hyper-parameters and counts, and
    the normalisation sums over axis=0 -- confirm the intended axis.
    """
    toplist = []
    topprob = []
    print(beta)
    # Smoothed word distribution per topic.
    phi = (beta + model.beta) / (beta.sum(axis=0, keepdims=True) + (model.beta * model._V))
    print(pd.DataFrame(phi))
    if mode == "freq":
        toplist = np.argsort(phi)[:,::-1].tolist() # Reverse
        topprob = np.sort(phi)[:,::-1]
        topprob = topprob.tolist()
    elif mode == "frex":
        nkv = copy.deepcopy(model.nkv)
        print("Mode:Frex")
        # Convert each topic's word probabilities to empirical CDF ranks.
        for i in tqdm(range(phi.shape[0])):
            ephi = ECDF(phi[i,:])
            phi[i, :] = ephi(phi[i, :])
        # Convert each word's per-topic counts to exclusivity ranks.
        nnkv = copy.deepcopy(model.nkv)
        for i in tqdm(range(model.nkv.shape[1])):
            enkv = ECDF(nnkv[:,i])
            nkv[:,i] = enkv(nnkv[:,i])
        fe = frex(phi,nkv,1)
        print(fe)
        toplist = np.argsort(fe)[:,::-1].tolist() # Reverse
        topprob = np.sort(fe)[:,::-1]
        topprob = topprob.tolist()
    return [toplist, topprob]
def top_words_table(topwords, jlist=None, type="category", prob=False):
    """
    Render per-topic top words as a pandas DataFrame.

    :param topwords: [toplist, topprob] pair as returned by topwords()
    :param jlist: optional id lookup {id: [Jan, JICFS, Jan Name, JICFS Name]}
    :param type: "category" -> label with the name at index 3,
        "jan" -> label with the code at index 2
    :param prob: if True, leave the raw ids untouched
    :return: DataFrame with one column per topic and one row per rank
    """
    # Deep-copy the per-topic rows: `topwords[0].copy()` (the original
    # code) is a shallow copy, so the relabelling below mutated the
    # caller's topword lists in place.
    words = [row[:] for row in topwords[0]]
    print("Start make table....")
    if prob:
        pass
    else:
        for wk in tqdm(range(len(words))):
            for w in range(len(words[wk])):
                word_id = topwords[0][wk][w]
                if jlist is not None:
                    if type == "category":
                        tmp = jlist[word_id][3]
                    elif type == "jan":
                        tmp = jlist[word_id][2]
                    words[wk][w] = '[%i] %s' % (word_id, tmp)
                else:
                    words[wk][w] = word_id
    words = pd.DataFrame(words).T
    print(words)
    return words
if __name__ == "__main__":
    # Load a saved gensim LDA model and export the top 20 FREX-ranked
    # words per topic to CSV.  T selects which saved model file to load
    # (filename suffix).
    T = 5
    model = LdaMulticore.load('../data/LDA/his_LDA_%i.lda' % T)
    # id2jan -> {1,[*
    #id2jan = pk.load(open('id2jan.p','rb'))
    top = topwords(model.nkv,"frex")
    table = top_words_table(top)
    table.iloc[:20,:].to_csv("topwords(1).csv",encoding="utf_8_sig")
| true |
a175a8f219e97e564ec80870a6ab2a8bd69a5d06 | Python | jacquev6/DrawTurksHead | /DrawTurksHead/color.py | UTF-8 | 1,425 | 2.546875 | 3 | [
"MIT"
] | permissive | # coding: utf8
# Copyright 2015-2018 Vincent Jacques <vincent@vincent-jacques.net>
import unittest
from ._turkshead import hsv_to_rgb
class HsvToRgbTestCase(unittest.TestCase):
    """Checks hsv_to_rgb against the canonical HSV colour-wheel anchors."""

    def test_red(self):
        rgb = hsv_to_rgb(0., 1., 1.)
        self.assertEqual(rgb, (1, 0, 0))

    def test_yellow(self):
        rgb = hsv_to_rgb(60., 1., 1.)
        self.assertEqual(rgb, (1, 1, 0))

    def test_yellow_after_360(self):
        # The hue wheel wraps: 420 degrees is the same hue as 60.
        rgb = hsv_to_rgb(420., 1., 1.)
        self.assertEqual(rgb, (1, 1, 0))

    def test_green(self):
        rgb = hsv_to_rgb(120., 1., 1.)
        self.assertEqual(rgb, (0, 1, 0))

    def test_cyan(self):
        rgb = hsv_to_rgb(180., 1., 1.)
        self.assertEqual(rgb, (0, 1, 1))

    def test_blue(self):
        rgb = hsv_to_rgb(240., 1., 1.)
        self.assertEqual(rgb, (0, 0, 1))

    def test_magenta(self):
        rgb = hsv_to_rgb(300., 1., 1.)
        self.assertEqual(rgb, (1, 0, 1))

    def test_blacks(self):
        # Zero value is black, whatever the hue.
        for hue in (30., 210.):
            self.assertEqual(hsv_to_rgb(hue, 1., 0.), (0, 0, 0))

    def test_whites(self):
        # Zero saturation at full value is white, whatever the hue.
        for hue in (30., 210.):
            self.assertEqual(hsv_to_rgb(hue, 0., 1.), (1, 1, 1))

    def test_greys(self):
        # Zero saturation at half value is mid-grey, whatever the hue.
        for hue in (30., 210.):
            self.assertEqual(hsv_to_rgb(hue, 0., 0.5), (0.5, 0.5, 0.5))

    def test_grey_ishes(self):
        # Half saturation pulls the pure hue halfway towards grey; the two
        # opposite hues produce mirrored channel values.
        self.assertEqual(hsv_to_rgb(30., 0.5, 0.5), (0.5, 0.375, 0.25))
        self.assertEqual(hsv_to_rgb(210., 0.5, 0.5), (0.25, 0.375, 0.5))
| true |
fd0427eab34ae3506201488402fa61b65de59285 | Python | leoisl/pandora_paper_roc | /evaluate/vcf_filters.py | UTF-8 | 972 | 2.59375 | 3 | [
"MIT"
] | permissive | from .vcf import VCF
from collections import UserList
from .coverage_filter import CoverageFilter
from .strand_bias_filter import StrandBiasFilter
from .gaps_filter import GapsFilter
class VCF_Filters(UserList):
    """A list of VCF record filters applied as a single disjunction."""

    def record_should_be_filtered_out(self, vcf_record: VCF) -> bool:
        """Return True as soon as any contained filter rejects the record."""
        for vcf_filter in self.data:
            if vcf_filter.record_should_be_filtered_out(vcf_record):
                return True
        return False

    @staticmethod
    def get_all_VCF_Filters(
        coverage_threshold: str, strand_bias_threshold: str, gaps_threshold: str
    ) -> "VCF_Filters":
        """Build the filter list; a threshold of "Not_App" disables that filter."""
        threshold_filter_pairs = (
            (coverage_threshold, CoverageFilter),
            (strand_bias_threshold, StrandBiasFilter),
            (gaps_threshold, GapsFilter),
        )
        vcf_filters = VCF_Filters()
        for threshold, filter_class in threshold_filter_pairs:
            if threshold != "Not_App":
                vcf_filters.append(filter_class(float(threshold)))
        return vcf_filters
| true |
9acc49d90f1b3792ed8e1cf8396ba065faa30ddd | Python | 18501955449/hexin_Week4Homework_Mnist | /week4_lbp_mlp_mnist.py | UTF-8 | 3,719 | 2.703125 | 3 | [] | no_license | #coding:utf-8
import torch
from torchvision import datasets,transforms
from skimage.feature import local_binary_pattern
import torch.utils.data as Data
import numpy as np
import torch.nn as nn
def get_feature(x):
    """Extract an LBP (local binary pattern) feature vector.

    x: a greyscale image whose data reshapes to 28x28
       (anything np.array can consume).
    Returns a float tensor of shape (1, 784), LBP codes scaled by 1/255.
    """
    radius = 1              # LBP neighbourhood radius
    n_points = 8 * radius   # sampling points around each pixel
    image = torch.from_numpy(np.array(x).reshape(28, 28))
    # Per-pixel LBP codes, scaled into [0, 1] and flattened to one row.
    lbp_codes = torch.tensor(local_binary_pattern(image, n_points, radius))
    lbp_codes = lbp_codes / 255.0
    feature = lbp_codes.view(1, 28 * 28)
    return feature.float()
def model(feature, weights0, weights1):
    """Forward pass of a two-layer MLP with a tanh hidden layer.

    feature  : (1, D) input row; a constant bias input 1.0 is appended,
               so weights0 must have D+1 rows.
    weights0 : (D+1, H) first-layer weights (bias folded in).
    weights1 : (H, C) output-layer weights.
    Returns the (1, C) softmax class distribution.
    """
    # Append the bias input so the first layer computes an affine map.
    feature = torch.cat((feature, torch.tensor(1.0).view(1, 1)), 1)
    hidden = feature.mm(weights0)
    logits = torch.tanh(hidden).mm(weights1)
    return torch.softmax(logits, 1)
# def one_hot(gt):
# gt_vector = torch.ones(1,10)
# gt_vector *= 0*0.1
# gt_vector[0,gt] = 1.0*0.9
# return gt_vector
def get_acc(image_data, W0, W1):
    """Classification accuracy of the MLP over a dataset.

    image_data : sized iterable of (image, label) pairs
                 (e.g. a DataLoader with batch_size=1).
    W0, W1     : network weights, as consumed by model().
    Returns the fraction of correctly classified samples in [0, 1].
    """
    total = len(image_data)
    if total == 0:
        # Guard against ZeroDivisionError on an empty loader.
        return 0.0
    correct = 0
    for image, label in image_data:
        feature = get_feature(image)
        y = model(feature, W0, W1)
        # Predicted class: the index whose probability is closest to 1
        # (equivalent to argmax, since softmax outputs lie in (0, 1)).
        pred = torch.argmin(torch.abs(y - 1)).item()
        if label == pred:
            correct += 1
    return correct / total
def train_model(train_image_data,test_image_data, weights0, weights1,lr):
    """Train the two-layer MLP with per-sample SGD for 100 epochs.

    train_image_data / test_image_data : iterables of (image, label) pairs
        (DataLoaders with batch_size=1).
    weights0, weights1 : parameter tensors created with requires_grad=True;
        updated in place through their .data / .grad buffers.
    lr : learning rate.
    Returns the trained (weights0, weights1).
    """
    # NOTE(review): model() already applies softmax, yet CrossEntropyLoss
    # applies log-softmax internally as well -- probabilities get
    # double-softmaxed here. Confirm whether model() should return raw
    # logits instead.
    criterion = nn.CrossEntropyLoss()
    for epoch in range(0, 100):
        loss_value = 0
        for image_data,image_label in train_image_data:
            feature = get_feature(image_data)
            y = model(feature, weights0, weights1)
            gt = image_label
            loss = criterion(y, gt)
            loss_value += loss.data.item()
            loss.backward()
            # Manual SGD step: w <- w - lr * grad, then clear the gradient
            # so the next sample's backward() does not accumulate.
            weights0.data.sub_(weights0.grad.data * lr)
            weights0.grad.data.zero_()
            weights1.data.sub_(weights1.grad.data * lr)
            weights1.grad.data.zero_()
            # weights2.data.sub_(weights2.grad.data * lr)
            # weights2.grad.data.zero_()
        # Mean per-sample loss over the epoch, then train/test accuracy.
        loss_value = loss_value/len(train_image_data)
        train_acc = get_acc(train_image_data,weights0,weights1)
        test_acc = get_acc(test_image_data,weights0,weights1)
        print("epoch=%s,loss=%s,train/test_acc:%s/%s" % (epoch, loss_value, train_acc, test_acc))
    return weights0, weights1
if __name__ == "__main__":
    # Initialise the weights (784 pixels + 1 bias -> 35 hidden -> 10 classes).
    weights0 = torch.randn(785, 35, requires_grad=True)
    weights1 = torch.randn(35, 10, requires_grad=True)
    #weights2 = torch.randn(35, 10, requires_grad=True)
    # Load the MNIST data, normalised with the stated mean/std.
    batch_size = 1
    mnist_transforms = transforms.Compose([transforms.ToTensor(),transforms.Normalize([0.131], [0.308])])
    # Training set.
    train_dataset = datasets.MNIST(root='./data/', train=True, transform=mnist_transforms, download=False)
    train_loader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    # Test set (train=False).
    data_test = datasets.MNIST('./data',train=False,transform=mnist_transforms,download=False)
    test_loader = Data.DataLoader(data_test,batch_size=batch_size,shuffle=False)
    train_model(train_loader,test_loader, weights0, weights1,0.01)
| true |
41d311e307bfbeb324e5120f9333ee846e940651 | Python | dennis1219/baekjoon_code | /math1/2775.py | UTF-8 | 127 | 3.046875 | 3 | [] | no_license | import math
# Baekjoon 2775: presumably k is the floor and n the apartment number;
# the resident counts follow Pascal's rule, which closes to the binomial
# coefficient C(n + k, n - 1).
t = int(input())  # number of test cases
for i in range(t):
    k = int(input())  # floor
    n = int(input())  # apartment number
    c = math.comb(n+k,n-1)  # residents = C(n+k, n-1)
print(c) | true |
7b3fb3d613c6cff886fdedb8f148fadcd3028bfa | Python | lfdebrux/n_bodies | /util/coroutines.py | UTF-8 | 2,148 | 3.078125 | 3 | [] | no_license | def coroutine(func):
"""coroutine decorator"""
def start(*args,**kwargs):
cr = func(*args,**kwargs)
cr.next()
return cr
return start
@coroutine
def printer():
import sys
try:
while 1:
p = []
while 1:
buf = (yield)
if buf == None: break
p.extend(buf)
sys.stderr.write('\r')
sys.stderr.write(' '.join(p))
sys.stderr.flush()
except GeneratorExit:
sys.stderr.write('\r')
sys.stderr.write('\n')
sys.stderr.flush()
@coroutine
def progress_bar():
import sys
def print_progress(p):
sys.stderr.write('\r')
sys.stderr.write("[%-50s] %d%%" % ('='*(p/2), p))
sys.stderr.flush()
try:
old_p = None
while True:
p = (yield)
if p == old_p:
continue
print_progress(p)
old_p = p
except GeneratorExit:
sys.stderr.write('\r')
sys.stderr.write('\n')
sys.stderr.flush()
@coroutine
def two_lines(target1,target2):
import sys
try:
while 1:
target1.send((yield))
sys.stderr.write("\n")
sys.stderr.flush()
while 1:
new = (yield)
target2.send(new)
if new == None: break
sys.stderr.write("\033[F")
except GeneratorExit:
target1.close()
target2.close()
@coroutine
def tee(target,f):
"""
Save output to a file
"""
with open(f) as f:
while 1:
p = []
while 1:
buf = (yield)
target.send(buf)
if buf == None: break
p.extend(buf)
f.write(' '.join(p)+'\n')
@coroutine
def data_buffer(target,bufsize):
"""
Caches values in a fixed length buffer
until it is full or None is sent,
whereupon it sends on
"""
try:
buf = bufsize*[None]
while 1:
for i in xrange(bufsize):
buf[i] = (yield)
if buf[i] == None:
target.send(buf[:i])
break
else:
target.send(buf)
except GeneratorExit:
target.close()
@coroutine
def dedup(target,**tol):
"""
Skip values in a sequence
that are too similar. Values
can be tuples, in which case
all elements of the tuple have
to be similar, a la numpy.allclose().
"""
import numpy
old = numpy.NaN
while 1:
new = (yield)
if new == None:
target.send(None)
continue
if numpy.allclose(new,old,**tol):
continue
target.send(new)
old = new | true |
319e257c3f7631dcec4d9ed5ffead275899d5016 | Python | sam78640/DodgeThat | /images/new_bars/fix_png.py | UTF-8 | 490 | 2.65625 | 3 | [] | no_license | import os
import subprocess
def system_call(args, cwd=""):
print("Running"+'{}' "in" +'{}'+format(str(args), cwd))
subprocess.call(args, cwd=cwd)
print ("Fixing")
pass
def fix_image_files(root=os.curdir):
for path, dirs, files in os.walk(os.path.abspath(root)):
# sys.stdout.write('.')
for dir in dirs:
system_call(mogrify .png, {}.format(os.path.join(path, dir)))
print ("Fixed")
fix_image_files(os.curdir)
| true |
e07925111fbd7be0e03ab5f1baacc1a2a780d1c1 | Python | malshaCSE14/IEEEXtreme10.0-InvalidSyntax | /unreliable.py | UTF-8 | 269 | 2.578125 | 3 | [] | no_license | T = input()
for t in range(T):
questions, lies = map(int,raw_input())
rawOutput = ["rgb"] * 10
rawOutput = ["rgb"]*10
for q in range(questions):
questions = raw_input().split()
answer = raw_input()
for l in range(lies):
| true |
5a681d4fd0377b412ab50cecf12f23d9d06471a4 | Python | shen777/SSPBB | /lb.py | UTF-8 | 6,697 | 2.953125 | 3 | [] | no_license | import instance
import KTNS
import DynamicHungarianAlgorithm
import copy
import pickle
inf = 1000
class Edge :
def __init__(self, arg_src, arg_dst, arg_weight) :
self.src = arg_src
self.dst = arg_dst
self.weight = arg_weight
class Graph :
def __init__(self, arg_num_nodes, arg_edgelist,i) :
self.num_nodes = arg_num_nodes
self.edgelist = arg_edgelist
self.parent = []
self.rank = []
# mst stores edges of the minimum spanning tree
self.mst = []
self.i=i
def FindParent (self, node) :
# With path-compression.
if node != self.parent[node] :
self.parent[node] = self.FindParent(self.parent[node])
return self.parent[node]
# Without path compression
# if node == self.parent[node] :
# return node
# return self.FindParent(self.parent[node])
def KruskalMST (self) :
# Sort objects of an Edge class based on attribute (weight)
self.edgelist.sort(key = lambda Edge : Edge.weight)
self.parent = [None] * self.num_nodes
self.rank = [None] * self.num_nodes
for n in range(self.num_nodes) :
self.parent[n] = n # Every node is the parent of itself at the beginning
self.rank[n] = 0 # Rank of every node is 0 at the beginning
for edge in self.edgelist :
root1 = self.FindParent(edge.src)
root2 = self.FindParent(edge.dst)
# Parents of the source and destination nodes are not in the same subset
# Add the edge to the spanning tree
if root1 != root2 :
self.mst.append(edge)
if self.rank[root1] < self.rank[root2] :
self.parent[root1] = root2
self.rank[root2] += 1
else :
self.parent[root2] = root1
self.rank[root1] += 1
cost = 0
for edge in self.mst :
cost += edge.weight
return cost
def lb2(L,i,solution) :
# start from solution i+1's spanning tree
#O(n^2 log(n))
# Edge(source, destination, weight)
#assert i<len(solution)-1
if i>=len(solution)-1:
return 0
num_nodes = len(solution)-i-1
l=[]
for j in range(i+1,len(solution)):
for k in range(i+1,len(solution)):
if j!=k:
e=Edge(j-i-1, k-i-1, L[solution[j]][solution[k]])
l.append(e)
g1 = Graph(num_nodes,l,i)
cost=g1.KruskalMST()
if i<0:
return cost
closest=L[solution[i]][solution[i+1]]
for j in range(i+1,len(solution)):
closest=min(closest,L[solution[i]][j])
#print("closest",closest)
cost+=closest
return cost
def lb1(i,solution,matrix,capacity):
# start from solution i=jp
T=set()
for j in range(i,len(solution)):
if j<0:
continue
for k in range(len(matrix[0])):
if matrix[solution[j]][k]==1:
T.add(k)
return len(T)-capacity
def lb3(i,solution,DH,L):
if i==-1:
La=copy.deepcopy(L)
La=augment_matrix(La,solution)
DH=DynamicHungarianAlgorithm.Dynamic_Hungarian_Algorithm(La)
val=DH.H.minWeightMatching()
return val,DH
if i==0:
return 0,DH
if i>=len(solution)-1:
return 0,None
new_DH=copy.deepcopy(DH)
#new_DH=DH.copy_Hungarian()
new_row=[inf]*len(solution)
new_row[solution[i]]=0
new_col=[inf]*len(solution)
new_col[solution[i-1]]=0
new_DH.modify_col(solution[i],new_col)
new_DH.modify_row(solution[i-1],new_row)
val=new_DH.min_dynamic_cal()
return val,new_DH
def lb3_m(i,solution,DH,L):
if i==-1:
#La=copy.deepcopy(L)
La=pickle.loads(pickle.dumps(L, -1))
La=augment_matrix(La,solution)
DH=DynamicHungarianAlgorithm.Dynamic_Hungarian_Algorithm(La)
val=DH.H.minWeightMatching()
return val,DH
if i==0:
return 0,DH
if i>=len(solution)-1:
return 0,None
#new_DH=copy.deepcopy(DH)
new_DH=DH.copy_Hungarian()
new_row=[inf]*len(solution)
new_row[solution[i]]=0
new_col=[inf]*len(solution)
new_col[solution[i-1]]=0
new_DH.modify_col(solution[i],new_col)
new_DH.modify_row(solution[i-1],new_row)
val=new_DH.min_dynamic_cal()
return val,new_DH
def Greedy_lb3(i,solution,DH,L):
if i==0:
#La=copy.deepcopy(L)
La=pickle.loads(pickle.dumps(L, -1))
La=augment_matrix(La,solution)
DH=DynamicHungarianAlgorithm.Dynamic_Hungarian_Algorithm(La)
val=DH.H.minWeightMatching()
return val,DH
if i>=len(solution)-1:
return 0,None
#new_DH=copy.deepcopy(DH)
new_DH=DH.copy_Hungarian()
new_row=[inf]*len(solution)
new_row[solution[i]]=0
new_col=[inf]*len(solution)
new_col[solution[i-1]]=0
new_DH.modify_col(solution[i],new_col)
new_DH.modify_row(solution[i-1],new_row)
val=new_DH.min_dynamic_cal()
return val,new_DH
def lb4(i,solution):
if 1 in solution[:i] and 0 in solution[i:]:
return True
return False
def lij(matrix,capacity):
L=[]
for i in range(len(matrix)):
l=[0]*len(matrix)
for j in range(len(matrix)):
count=0
for k in range(len(matrix[0])):
if matrix[i][k]==1 or matrix[j][k]:
count+=1
l[j]=max(0,count-capacity)
l[i]=inf
L.append(l)
#print(L)
return L
def augment_matrix(matrix,solution):
for i in range(len(matrix)):
matrix[i].append(0)
matrix.append([0]*(len(matrix)+1))
matrix[len(matrix)-1][len(matrix)-1]=inf
for i in range(len(matrix)-1):
matrix[i][solution[0]]=inf
return matrix
if __name__ == '__main__':
inf = 1000
k=instance.load_data("dat1")
solution=list(range(len(k.matrix)))
W=KTNS.ktns(solution,k.matrix,k.capacity)
L=lij(k.matrix,k.capacity)
print("solution=",solution)
L=augment_matrix(L,solution)
solution.append(len(solution))
DH=DynamicHungarianAlgorithm.Dynamic_Hungarian_Algorithm(L)
print("######start",DH.H.minWeightMatching())
print(DH.H.Mu)
index=0
for j in range(index,len(solution)-1):
solution[index],solution[j]=solution[j],solution[index]
print(solution)
val,DH=lb3(index-1,solution,DH,L)
print(DH.H.Mu)
print(val)
#self.branch(index+1,DH)
solution[index],solution[j]=solution[j],solution[index]
# dummy node need to define better
| true |
5f32256695bf13b14e2aee18c670dd88b68e1233 | Python | abirmoy/Python-Practice | /More Practice/6.String Lists-plaindrome.py | UTF-8 | 323 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri May 24 00:27:16 2019
@author: Abirmoy
"""
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 20:38:01 2019
@author: Abirmoy
"""
string = 'heeh'
if string[:]==string[::-1]:
print("Plaindrome")
else:
print("Normal")
| true |
d888fb6d40715c2d43c193f0e5e9e16ad593eda2 | Python | martin-majlis/Wikipedia-API | /tests/langlinks_test.py | UTF-8 | 1,962 | 2.609375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import unittest
from tests.mock_data import user_agent
from tests.mock_data import wikipedia_api_request
import wikipediaapi
class TestLangLinks(unittest.TestCase):
def setUp(self):
self.wiki = wikipediaapi.Wikipedia(user_agent, "en")
self.wiki._query = wikipedia_api_request
def test_langlinks_count(self):
page = self.wiki.page("Test_1")
self.assertEqual(len(page.langlinks), 3)
def test_langlinks_titles(self):
page = self.wiki.page("Test_1")
self.assertEqual(
list(sorted(map(lambda s: s.title, page.langlinks.values()))),
["Test 1 - " + str(i + 1) for i in range(3)],
)
def test_langlinks_lang_values(self):
page = self.wiki.page("Test_1")
self.assertEqual(
list(sorted(map(lambda s: s.language, page.langlinks.values()))),
["l" + str(i + 1) for i in range(3)],
)
def test_langlinks_lang_keys(self):
page = self.wiki.page("Test_1")
self.assertEqual(
list(sorted(page.langlinks.keys())), ["l" + str(i + 1) for i in range(3)]
)
def test_langlinks_urls(self):
page = self.wiki.page("Test_1")
self.assertEqual(
list(sorted(map(lambda s: s.fullurl, page.langlinks.values()))),
[
(
"https://l"
+ str(i + 1)
+ ".wikipedia.org/wiki/Test_1_-_"
+ str(i + 1)
)
for i in range(3)
],
)
def test_jump_between_languages(self):
page = self.wiki.page("Test_1")
langlinks = page.langlinks
p1 = langlinks["l1"]
self.assertEqual(p1.language, "l1")
self.assertEqual(p1.pageid, 10)
def test_langlinks_no_langlink_count(self):
page = self.wiki.page("No_LangLinks")
self.assertEqual(len(page.langlinks), 0)
| true |
d8c6d615f1c4514bf960a3700f0203dd3cc4edff | Python | aleman844/Fama-French-3-Factor-Model-Implementation | /FF3factor.py | UTF-8 | 20,264 | 3.53125 | 4 | [] | no_license | """
Fama-French-Three-Factor-Model Implementation
----------------------------------------------------------------------------------------------------------------
Here is the workflow of the model realization in this file:
1) Get data (ticker pool, S&P500, risk free rate, close price, market cap and book-to-market ratio) from SQL database.
2) Organize data to the form we want: {trading_day:df_data}
3) Calculate factor in two different ways:
i. Split mc to 'Big'(50%) and 'Small'(50%), then use market cap weighted average return ('Small'-'Big') to get 'SMB'
Split bm to 'High'(30%), 'Medium'(40%) and 'L'(30%), then use market cap weighted average return ('High'-'Low') to
get 'HML'.
ii. The difference is after initial separation, we do one more step. Mark tickers based on intersection, e.g. if a
ticker is marked as 'Big' and 'High' in the same time, then we mark it as 'B/H'. Therefore, we'll have total 6
different groups: B/H, B/M, B/L, S/H, S/M and S/L. Finally, use market cap weighted average return
((S/H + S/M + S/L) / 3 - (B/H + B/M + B/L) / 3) to get 'SMB' and use market cap weighted average return
((B/H + S/H) / 2 - (B/L + S/l) / 2) go get 'HML'.
4) Save all factor data in a df with columns ['Rm', 'SMB', 'HML'].
where 'Rm' is the log return of S&P500 minus corresponding daily risk free rate.
5) Regress all tickers' log return on factor data, get interception as 'alpha' and its p-value. Save these two data to
a dict called 'alpha' with form: {trading_day:df_data([alpha, p-value])}.
6) Input all necessary data to a Class to get an 'Alpha' object.
7) Run backtest() method in Alpha object. When backtesting is done, program will plot portfolio cumulative return vs.
market cumulative return and print the portfolio result in a table form with columns:
[Portfolio Return | Sharpe | Volatility | IR | Max Drawdown].
e.g Top 25 alpha: Portfolio results
+------------------+--------+------------+------+--------------+
| Portfolio Return | Sharpe | Volatility | IR | Max Drawdown |
+------------------+--------+------------+------+--------------+
| 38.95% | 0.68 | 0.17 | 0.19 | 21.4% |
+------------------+--------+------------+------+--------------+
The detailed info about trading rules and Alpha Class could be found in portfolio.py
----------------------------------------------------------------------------------------------------------------
"""
__author__ = 'Han (Aaron) Xiao'
import pandas as pd
import numpy as np
import statsmodels.api as sm
import portfolio as pf
import pymysql
# ---------------------------------------------------------------------
# Set necessary environment
db = pymysql.connect(host='localhost', user='root', password="",
database='ff_3factor', port=3308,
charset='utf8')
# ---------------------------------------------------------------------
# Functions: Prepare data for factor calculation
def get_ticker(connection=None):
"""
Get a list of all tickers we have from sql sever.
"""
sql = 'SELECT `Ticker` FROM `ticker_list`'
df = pd.read_sql(sql, connection)
df.sort_values(by=['Ticker'], inplace=True)
name_list = list(df['Ticker'])
return name_list
def get_market(connection=None):
"""
Get time benchmark, market data (S&P500) and risk free rate.
"""
sql = 'SELECT `date`, `S&P500`, `risk_free` FROM `marketdata`'
df = pd.read_sql(sql, connection)
df.sort_values(by=['date'])
return df
def get_close(tickerlist, daterange, connection=None):
"""
Get daily close price based on input ticker list from sql server. Save them into a dict.
"""
output_dict = {}
for ticker in tickerlist:
sql = "SELECT `date`, `close` FROM `stockdata` WHERE ticker = '{}'".format(ticker)
df = pd.read_sql(sql, connection)
df['date'] = df['date'].astype('datetime64')
df.set_index(keys=['date'], inplace=True)
df.sort_index(axis=0, inplace=True)
df.fillna(method='ffill', inplace=True)
output_dict.update({ticker:df})
output_df = pd.concat(output_dict, axis=1)
output_df = __position_check(output_df, daterange)
return output_df
def get_ret(input_df, daterange=None, column=None, log=False):
"""
Calculate daily ret/log_ret for each stock/index. Save them in input dict and update the input df.
"""
if column == 'close':
output_df = input_df.pct_change()
if log is True:
output_df = np.log(1 + output_df)
output_df.dropna(inplace=True)
output_df = __position_check(output_df, daterange)
return output_df
elif column == 'S&P500':
if isinstance(input_df,pd.DataFrame):
input_df['SP500_ret'] = input_df[column].pct_change()
input_df['SP500_log_ret'] = np.log(1 + input_df['SP500_ret'])
input_df.dropna(inplace=True)
input_df['daily_rf'] = input_df['risk_free'] / 100 / 252
input_df['Rm'] = input_df['SP500_ret'] - input_df['daily_rf']
input_df['log_Rm'] = input_df['SP500_log_ret'] - input_df['daily_rf']
input_df['date'] = input_df['date'].astype('datetime64')
input_df.set_index(keys=['date'], inplace=True)
input_df.sort_index(axis=0, inplace=True)
output_dict = position_split(input_df, daterange)
return output_dict
def get_marketcap(tickerlist, daterange, connection=None,):
"""
Get daily market capital based on input ticker list from sql server. Save them into a dict.
"""
output_dict = {}
for ticker in tickerlist:
sql = "SELECT `date`, `market_value` FROM `stockdata` WHERE ticker = '{}'".format(ticker)
df = pd.read_sql(sql, connection)
df['date'] = df['date'].astype('datetime64')
df.set_index(keys=['date'], inplace=True)
df.sort_index(axis=0, inplace=True)
output_dict.update({ticker: df})
output_df = pd.concat(output_dict, axis=1)
output_df = __position_check(output_df, daterange)
return output_df
def get_bm(tickerlist, daterange, connection=None):
"""
Get daily book-to-market ration based on input ticker list from sql server. Save them into a dict.
"""
output_dict = {}
for ticker in tickerlist:
sql = "SELECT `date`, `book_to_market` FROM `stockdata` WHERE ticker = '{}'".format(ticker)
df = pd.read_sql(sql, connection)
df['date'] = df['date'].astype('datetime64')
df.set_index(keys=['date'], inplace=True)
df.sort_index(axis=0, inplace=True)
output_dict.update({ticker: df})
output_df = pd.concat(output_dict, axis=1)
output_df = __position_check(output_df, daterange)
return output_df
def position_split(data_df, date_range, sample_length=252, trading_gap=21):
"""
Split a df (could be market data or stock data) to different keys in a dict, based on trading days.
:param data_df: df with 1006 index about days and with 498 columns about tickers.
:param date_range: a ndarray that consist of total 1006 datetime.date objects, will be used to create trading days.
:param sample_length: sample length for regression to get alpha
:param trading_gap: gap between each trading days
:return: return a dict consist of different trading days (key) and their corresponding past 252 days data records.
"""
output_dict = {}
date_position = date_range[sample_length::trading_gap]
for i in range(len(date_position)):
date_title = str(date_position[i])
right = __date_position(date_range, date_title)
date_left = date_range[right-sample_length]
date_right = date_range[right-1]
df = data_df[date_left:date_right]
output_dict.update({date_title:df})
return output_dict
def get_factor_mark(position_dict, catog=None):
"""
Return an exact same size dict like input position_dict, with exact same size df in each keys.
Based on tickers' performance in each day, mark them accordingly.
----------------------------------------------------------------------------------------------------------------
Rules:
1) In mc_dict, mark 'B' to tickers whose market capital is in the top 50%, mark 'S' to the rest of tickers.
2) In bm_dict, mark 'H' to tickers whose book-to-market ratio is in the top 30%, mark 'L' to the bottom 30%, mark
'M' to the rest of them (40%).
----------------------------------------------------------------------------------------------------------------
"""
output_dict = {}
if catog == 'mc':
for trading_day in position_dict:
mc_mark = pd.DataFrame().reindex_like(position_dict[trading_day])
position_df = position_dict[trading_day].copy().T
# Total number of days used for collecting data before each trading days
amount_days = len(position_df.columns)
# Total number of tickers
amount_tickers = len(position_df.index)
for day in range(amount_days):
mc_daily = position_df.iloc[:,day].copy()
mc_daily.sort_values(inplace=True)
big = mc_daily[int(amount_tickers/2):].index.values
small = mc_daily[:int(amount_tickers/2)].index.values
mark_day = mc_mark.index[day]
for mark in big:
if mark in mc_mark.columns:
mc_mark.loc[mark_day, mark] = 'B'
for mark in small:
if mark in mc_mark.columns:
mc_mark.loc[mark_day, mark] = 'S'
output_dict.update({trading_day:mc_mark})
return output_dict
elif catog == 'bm':
for trading_day in position_dict:
bm_mark = pd.DataFrame().reindex_like(position_dict[trading_day])
position_df = position_dict[trading_day].copy().T
# Total number of days used for collecting data before each trading days
amount_days = len(position_df.columns)
# Total number of tickers
amount_tickers = len(position_df.index)
for day in range(amount_days):
bm_daily = position_df.iloc[:,day].copy()
bm_daily.sort_values(inplace=True)
high = bm_daily[-int(amount_tickers*0.3):].index.values
low = bm_daily[:int(amount_tickers*0.3)].index.values
medium = bm_daily[int(amount_tickers*0.3):-int(amount_tickers*0.3)].index.values
mark_day = bm_mark.index[day]
for mark in high:
if mark in bm_mark.columns:
bm_mark.loc[mark_day, mark] = 'H'
for mark in low:
if mark in bm_mark.columns:
bm_mark.loc[mark_day, mark] = 'L'
for mark in medium:
if mark in bm_mark.columns:
bm_mark.loc[mark_day, mark] = 'M'
output_dict.update({trading_day:bm_mark})
return output_dict
def get_factor(ret_dict, market_dict, mc_dict, bm_dict, intersection=False):
"""
Calculate factor in each trading period, based on each trading days.
----------------------------------------------------------------------------------------------------------------
Compared with traditional Fama-French method, the method I used in this function is a simplified one.
Instead of getting Intersection of ('B','S') and ('H', 'M', 'L'), I simply split market capital to 'Big' and
'Small', then use mc weighted average return with 'Small' tag 'minus' mc weighted average return with tag 'Big'
to get SMB. Similar procedure to get HML.
----------------------------------------------------------------------------------------------------------------
:return: Save different df(Rm, SMB, HML) in a dict, based on trading days (key).
"""
if intersection is True:
return get_factor_intersect(ret_dict, market_dict, mc_dict, bm_dict)
output_dict = {}
mc_mark = get_factor_mark(mc_dict, 'mc')
bm_mark = get_factor_mark(bm_dict, 'bm')
for traday in ret_dict: # Data records among 252 days before trading day
days = len(ret_dict[traday].index)
tickers = len(ret_dict[traday].columns)
SMB = []
HML = []
for day in range(days): # Each day
# Small Minus Big
mark_small = [ticker for ticker in range(tickers) if mc_mark[traday].iloc[day,ticker] == 'S']
S = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, mark_small)
mark_big = [ticker for ticker in range(tickers) if mc_mark[traday].iloc[day,ticker] == 'B']
B = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, mark_big)
SMB.append(S - B)
# High Minus Low
mark_high = [ticker for ticker in range(tickers) if bm_mark[traday].iloc[day,ticker] == 'H']
H = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, mark_high)
mark_low = [ticker for ticker in range(tickers) if bm_mark[traday].iloc[day, ticker] == 'L']
L = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, mark_low)
HML.append(H - L)
# Rm = rm - rf
Rm = market_dict[traday]['log_Rm']
factor = pd.DataFrame({'log_Rm':Rm, 'SMB':SMB, 'HML':HML})
output_dict.update({traday:factor})
return output_dict
def get_factor_intersect(ret_dict, market_dict, mc_dict, bm_dict):
"""
Calculate factor in each trading period, based on each trading days.
"""
output_dict = {}
mc_mark = get_factor_mark(mc_dict, 'mc')
bm_mark = get_factor_mark(bm_dict, 'bm')
for traday in ret_dict:
days = len(ret_dict[traday].index)
tickers = len(ret_dict[traday].columns)
SMB = []
HML = []
for day in range(days): # Each day
SH_mark = [
ticker for ticker in range(tickers)
if (mc_mark[traday].iloc[day,ticker] == 'S') and (bm_mark[traday].iloc[day,ticker] == 'H')
]
SM_mark = [
ticker for ticker in range(tickers)
if (mc_mark[traday].iloc[day,ticker] == 'S') and (bm_mark[traday].iloc[day,ticker] == 'M')
]
SL_mark = [
ticker for ticker in range(tickers)
if (mc_mark[traday].iloc[day,ticker] == 'S') and (bm_mark[traday].iloc[day,ticker] == 'L')
]
SH = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, SH_mark)
SM = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, SM_mark)
SL = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, SL_mark)
BH_mark = [
ticker for ticker in range(tickers)
if (mc_mark[traday].iloc[day,ticker] == 'B') and (bm_mark[traday].iloc[day,ticker] == 'H')
]
BM_mark = [
ticker for ticker in range(tickers)
if (mc_mark[traday].iloc[day,ticker] == 'B') and (bm_mark[traday].iloc[day,ticker] == 'M')
]
BL_mark = [
ticker for ticker in range(tickers)
if (mc_mark[traday].iloc[day,ticker] == 'B') and (bm_mark[traday].iloc[day,ticker] == 'L')
]
BH = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, BH_mark)
BM = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, BM_mark)
BL = _factor_weight_average(ret_dict[traday], mc_dict[traday], day, BL_mark)
# Small Minus Big
SMB.append((SH + SM + SL)/3 - (BH + BM + BL)/3)
# High Minus Low
HML.append((BH + SH)/2 - (BL + SL)/2)
# Rm = rm - rf
Rm = market_dict[traday]['log_Rm']
factor = pd.DataFrame({'log_Rm': Rm, 'SMB': SMB, 'HML': HML})
output_dict.update({traday: factor})
return output_dict
def get_alpha(ret_dict, factor_dict, market_dict):
"""
Calculate and save alpha and its corresponding p-value into a dict, based on trading days.
"""
output_dict = {}
for traday in ret_dict:
tickers = len(ret_dict[traday].columns)
alpha = []
p_value = []
for ticker in range(tickers):
ticker_ret = ret_dict[traday].iloc[:, ticker] - market_dict[traday]['daily_rf']
ticker_ret.name = ret_dict[traday].iloc[:, ticker].name[0]
df = pd.concat([ticker_ret, factor_dict[traday]], axis=1)
formula = ticker_ret.name + ' ~ log_Rm + SMB + HML'
ols = sm.OLS.from_formula(formula, df).fit()
alpha.append(ols.params[0])
p_value.append(ols.pvalues[0])
score = pd.DataFrame({'alpha': alpha, 'p_value': p_value}, index=ret_dict[traday].columns.get_level_values(0))
score.sort_values(by=['alpha'], inplace=True)
output_dict.update({traday:score})
return output_dict
def _factor_weight_average(ret_df, mc_df=None, day=None, mark_index=None):
if len(mark_index) == 0:
return 0
else:
ret = ret_df.iloc[day, mark_index]
mc = mc_df.iloc[day, mark_index]
weight = mc / mc.sum()
res = np.average(ret, weights=weight)
return res
def __position_check(df, daterange):
for i in df.index.values:
if i not in daterange.astype('datetime64[ns]'):
df.drop([i], inplace=True)
return df
def __date_position(input_date, target):
for pos, value in enumerate(input_date):
if str(value) == target:
return pos
# ---------------------------------------------------------------------
def main(top: int = 5):
    """Run the three-factor alpha backtest and return the strategy object.

    Parameters
    ----------
    top : int
        Number of top-alpha tickers the strategy holds each period.
    """
    # Market index data (S&P500) plus the annualized risk-free rate.
    market = get_market(db)
    # Full trading calendar taken from the market data.
    trading_days = market['date'].values.copy()
    # Daily market return and daily (decimal) risk-free rate.
    market_by_period = get_ret(market, trading_days[1:], 'S&P500')
    # Universe of tickers.
    tickers = get_ticker(db)
    # Close prices for the whole universe over the calendar.
    close_prices = get_close(tickers, trading_days, db)
    # Daily returns, then split into per-trading-period frames.
    returns = get_ret(close_prices, trading_days, 'close', True)
    returns_by_period = position_split(returns, trading_days[1:])
    # Daily market capitalisation, split the same way.
    caps = get_marketcap(tickers, trading_days, db)
    caps_by_period = position_split(caps, trading_days[1:])
    # Daily book-to-market ratio, split the same way.
    book_to_market = get_bm(tickers, trading_days, db)
    bm_by_period = position_split(book_to_market, trading_days[1:])
    # Fama-French style factors (Rm, SMB, HML) per trading period.
    factors_by_period = get_factor(returns_by_period, market_by_period,
                                   caps_by_period, bm_by_period,
                                   intersection=True)
    # Jensen's alpha for every ticker in every trading period.
    alphas = get_alpha(returns_by_period, factors_by_period,
                       market_by_period)
    # Feed everything into the backtesting strategy and run it.
    strategy = pf.Alpha(principle=100000, pool=tickers, close=close_prices,
                        alpha=alphas, market=market,
                        date_range=trading_days[1:], top=top)
    strategy.backtest()
    return strategy
if __name__ == '__main__':
    # Run the backtest with the default portfolio size (top 5 alphas).
    alpha_5 = main()
| true |
8b5db5fc50254cc86496d8565f84e4b1b156718a | Python | cicihou/LearningProject | /sql/sqlzoo_09_quiz.py | UTF-8 | 1,029 | 3.140625 | 3 | [] | no_license |
'''
QUIZ
Self join Quiz
SELF JOIN quiz
https://sqlzoo.net/wiki/Self_join_Quiz
'''
'''
1. Select the code that would show it is possible to get from Craiglockhart to Haymarket
SELECT DISTINCT a.name, b.name
FROM stops a JOIN route z ON a.id=z.stop
JOIN route y ON y.num = z.num
JOIN stops b ON y.stop=b.id
WHERE a.name='Craiglockhart' AND b.name ='Haymarket'
'''
'''
2. Select the code that shows the stops that are on route.num '2A' which can be reached with one bus from Haymarket?
SELECT S2.id, S2.name, R2.company, R2.num
FROM stops S1, stops S2, route R1, route R2
WHERE S1.name='Haymarket' AND S1.id=R1.stop
AND R1.company=R2.company AND R1.num=R2.num
AND R2.stop=S2.id AND R2.num='2A'
'''
'''
3. Select the code that shows the services available from Tollcross?
SELECT a.company, a.num, stopa.name, stopb.name
FROM route a JOIN route b ON (a.company=b.company AND a.num=b.num)
JOIN stops stopa ON (a.stop=stopa.id)
JOIN stops stopb ON (b.stop=stopb.id)
WHERE stopa.name='Tollcross'
'''
| true |
2aac70f3c6d5f55fc2099f2fe80db59f45407b0f | Python | becerratello/mido | /mido/backends/_common.py | UTF-8 | 4,362 | 2.859375 | 3 | [
"MIT"
] | permissive | """
These classes will be made publicly available once their API is settled. For now they should only be used
inside this package.
"""
import time
from .. import ports
from ..parser import Parser
from ..py2 import PY2
if PY2:
import Queue as queue
else:
import queue
class ParserQueue(object):
    """
    Thread safe message queue with built in MIDI parser.

    This should be avaiable to other backend implementations and perhaps
    also in the public API, but the API needs a bit of review. (Ideally This
    would replace the parser.)

    q = ParserQueue()

    q.feed([0xf8, 0, 0])
    q.put(msg)
    msg = q.get()
    msg = q.poll()
    for msg in q:
        ...

    Inherits ``object`` explicitly so it is a new-style class on Python 2,
    consistent with the other classes in this module.
    """

    def __init__(self):
        self._queue = queue.Queue()
        self._parser = Parser()

    def put(self, msg):
        """Add an already-parsed message to the queue."""
        self._queue.put(msg)

    def feed(self, msg_bytes):
        """Feed raw MIDI bytes to the parser and queue complete messages."""
        # Todo: should this be protected somehow?
        # No, it's better to put a lock around reading AND parsing.
        self._parser.feed(msg_bytes)
        for msg in self._parser:
            self.put(msg)

    def _get_py2(self):
        # In Python 2 queue.get() doesn't respond to CTRL-C. A workaround is
        # to call queue.get(timeout=100) (very high timeout) in a loop, but all
        # that does is poll with a timeout of 50 milliseconds. This results in
        # much too high latency.
        #
        # It's better to do our own polling with a shorter sleep time.
        #
        # See Issue #49 and https://bugs.python.org/issue8844
        sleep_time = ports.get_sleep_time()
        while True:
            try:
                return self._queue.get_nowait()
            except queue.Empty:
                time.sleep(sleep_time)
                continue

    # Todo: add timeout?
    def get(self):
        """Remove and return the next message, blocking until one arrives."""
        if PY2:
            return self._get_py2()
        else:
            return self._queue.get()

    def poll(self):
        """Remove and return the next message, or None if the queue is empty."""
        try:
            return self._queue.get_nowait()
        except queue.Empty:
            return None

    def iterpoll(self):
        """Yield queued messages without blocking; stop when the queue is empty."""
        while True:
            msg = self.poll()
            # poll() uses None as its "empty" sentinel; compare with `is`
            # so a legitimate falsy message would still be yielded.
            if msg is None:
                return
            yield msg
class PortMethods(object):
    """Mixin with behaviour shared by every port type (context management,
    cleanup on garbage collection, and a descriptive repr)."""

    is_input = False
    is_output = False

    def __del__(self):
        # Release the underlying device when the object is collected.
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
        return False

    def __repr__(self):
        state = 'closed' if self.closed else 'open'

        # Describe the port from its capability flags.
        if self.is_input and self.is_output:
            port_type = 'I/O port'
        elif self.is_input:
            port_type = 'input'
        elif self.is_output:
            port_type = 'output'
        else:
            port_type = 'mute port'

        name = self.name or ''

        # Backends may set a friendlier _device_type; fall back to the
        # concrete class name when they do not.
        try:
            device_type = self._device_type
        except AttributeError:
            device_type = self.__class__.__name__

        return '<{} {} {!r} ({})>'.format(
            state, port_type, name, device_type)
class InputMethods(object):
    """Mixin with behaviour shared by every input port."""

    is_input = True

    def iter_pending(self):
        """Iterate through pending messages without blocking."""
        msg = self.poll()
        while msg is not None:
            yield msg
            msg = self.poll()

    def __iter__(self):
        """Iterate through messages until the port closes."""
        while True:
            try:
                yield self.receive()
            except IOError:
                if self.closed:
                    # The port closed before or inside receive().
                    # (This makes the assumption that this is the reason,
                    # at the risk of masking other errors.)
                    return
                raise
class OutputMethods(object):
    """Mixin with behaviour shared by every output port."""

    is_output = True

    def reset(self):
        """Send "All Notes Off" and "Reset All Controllers" on all channels"""
        for message in ports.reset_messages():
            self.send(message)

    def panic(self):
        """Send "All Sounds Off" on all channels.

        This will mute all sounding notes regardless of
        envelopes. Useful when notes are hanging and nothing else
        helps.
        """
        for message in ports.panic_messages():
            self.send(message)
| true |
eed60ec257f6cfbe0640d2a5ca2a40ba5204cacb | Python | pierrotpetitpot/COMP472_A2 | /main.py | UTF-8 | 375 | 3.25 | 3 | [] | no_license |
from depthFirst import depthFirstAlgorithm
from iterativeDeepening import iterativeDeepeningAlgorithm
from aStar import aStarBoth
# Initial state of the 3x3 sliding puzzle, listed row by row.
# It differs from the ordered 1..9 layout by swapping tiles 3 and 7.
initial_state = [1, 2, 7, 4, 5, 6, 3, 8, 9]
# Solve the same start state with each search strategy so their
# behaviour can be compared.
depthFirstAlgorithm(initial_state)
iterativeDeepeningAlgorithm(initial_state)
aStarBoth(initial_state)
| true |
302938e4c7246f056f16eca13b5e8c9049a46eaf | Python | aaronLinLu/GTFS_navigationTool | /CreateNetworkDataset - HoMing.py | UTF-8 | 10,966 | 2.578125 | 3 | [] | no_license | # Name: Generate Transit Lines and Stops
# Author: Lin
# Description: Create a feature dataset
# Import system modules
import arcpy
from arcpy import env
from operator import itemgetter
import sqlize_csv
import hms
import sqlite3, os, operator, itertools, csv, re
from sets import Set
# enable file overwrite
arcpy.env.overwriteOutput = True
# GTFS directories
inGTFSdir = r'E:\Final Project\SFMTA_GTFS\google_transit'
outFD = r'E:\Final Project\Lins Workspace\Network Dataset\GTFSAnalysis.gdb'
# create dBASE tables into the workspace
#arcpy.CreateTable_management(workspace, "stops.dbf")
# Creating a spatial reference object
sr = arcpy.SpatialReference(4326)
# GTFS stop lat/lon are written in WGS1984 coordinates
WGSCoords = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', \
SPHEROID['WGS_1984',6378137.0,298.257223563]], \
PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; \
-400 -400 1000000000;-100000 10000;-100000 10000; \
8.98315284119522E-09;0.001;0.001;IsHighPrecision"
FD = arcpy.management.CreateFeatureDataset(outFD, "nd", sr)
# (1) Generate Transit Stops Point Feature Class
#outStopFCName = "Stops"
#outStopFC = os.path.join(outFD,outStopFCName)
outStopsFC = arcpy.CreateFeatureclass_management(FD,"stops","POINT",spatial_reference=sr)

# read-in stops.txt; os.path.join avoids backslash escape-sequence
# surprises (a literal like "\t..." embeds a TAB) and stays portable.
StopTable = os.path.join(inGTFSdir, "stops.txt")
# Extra attribute fields carried over from stops.txt.
stopFields = [("stop_id","TEXT"),("stop_name","TEXT"),("stop_desc","TEXT"),("zone_id","TEXT"),("stop_url","TEXT")]

# adding fields to stops feature class
for field in stopFields:
    arcpy.management.AddField(outStopsFC, field[0], field[1])

# Initialize a dictionary of stop lat/lon (filled below)
# {stop_id: <stop geometry object>} in the output coordinate system
stopGeomDict = {}

arcpy.AddMessage("Generating stops feature class.")

# feed data from stops.txt to the stops feature class
with arcpy.da.InsertCursor(outStopsFC, ["SHAPE@"] + [field[0] for field in stopFields]) as ICursor:
    with open(StopTable,'rb') as fStops:
        # Skip the header row.
        next(fStops)
        reader = csv.reader(fStops)
        # Put everything in utf-8 to handle BOMs and weird characters.
        # Eliminate blank rows (extra newlines) while we're at it.
        reader = ([x.decode('utf-8-sig').strip() for x in r] for r in reader if len(r) > 0)
        for stop in reader:
            # construct point geometry; stops.txt column order here is
            # lon = col 4, lat = col 3 (Point takes X=lon, Y=lat)
            ptGeometry = arcpy.PointGeometry(arcpy.Point(float(stop[4]), float(stop[3])))
            stopGeomDict[int(stop[0])] = ptGeometry
            # insert our objects
            ICursor.insertRow((ptGeometry,stop[0],stop[1],stop[2],stop[5],stop[6]))

arcpy.AddMessage("Stops feature class created!\n\n")
arcpy.AddMessage("Generating transit line feature class.")
# (2) Obtain schedule info from the stop_times.txt file
# and convert it to a line-based model
stop_times_dict = {} # {trip_id: [stop_id, stop_sequence, arrival_time, departure_time]}
# One entry per transit line connecting a unique pair of stops (with duplicate entries for different
# route_type values connecting the same pair of stops). Size shouldn't be terribly much larger than the
# number of stops for a normal network. Only central stations and transit hubs have large numbers of
# connections.
routeDict = {}   # {trip_id (str): route_id (str)}
tripsDict = {}   # {trip_id (int): [(stop_id, stop_sequence), ...]}

# BUGFIX: the old `inGTFSdir + "\trips.txt"` embedded a TAB character,
# because "\t" is an escape sequence in a non-raw string literal.
# os.path.join builds the path safely and portably.
TripsTable = os.path.join(inGTFSdir, "trips.txt")
with open(TripsTable, 'rb') as fTrips:
    # Skip the header row.
    next(fTrips)
    reader = csv.reader(fTrips)
    # Put everything in utf-8 to handle BOMs and weird characters.
    # Eliminate blank rows (extra newlines) while we're at it.
    reader = ([x.decode('utf-8-sig').strip() for x in r] for r in reader if len(r) > 0)
    for row in reader:
        # trips.txt columns: route_id (0), service_id (1), trip_id (2)
        routeDict[row[2]] = row[0]

outLineFC = arcpy.CreateFeatureclass_management(FD,"transit","POLYLINE",spatial_reference=sr)

# read-in stop_times.txt (os.path.join for the same reason as above)
StopTimesTable = os.path.join(inGTFSdir, "stop_times.txt")
with open(StopTimesTable, 'rb') as fStopTimes:
    next(fStopTimes)
    reader = csv.reader(fStopTimes)
    # Put everything in utf-8 to handle BOMs and weird characters.
    # Eliminate blank rows (extra newlines) while we're at it.
    reader = ([x.decode('utf-8-sig').strip() for x in r] for r in reader if len(r) > 0)
    for row in reader:
        # stop_times.txt columns used: trip_id (0), stop_id (3), stop_sequence (4)
        tripsDict.setdefault(int(row[0]), []).append((int(row[3]), int(row[4])))
## # First row is column names:
## columns = [name.strip() for name in reader.next()]
##
## # locate each field in each rows
## idx_trip_id = columns.index("trip_id")
## idx_stop_id = columns.index("stop_id")
## idx_stop_sequence = columns.index("stop_sequence")
## idx_arrival_time = columns.index("arrival_time")
## idx_departure_time = columns.index("departure_time")
# Build one point pair per unique (from stop, to stop, route) combination.
# BUGFIXES vs. the original:
#   * geometry type must be "POINT" ("POINTS" is not a valid arcpy value)
#   * lineID is a string key, so LINEID must be a TEXT field, and the
#     InsertCursor must reference "LINEID" (the field actually added),
#     not the non-existent "ROUTEID"
#   * routeDict is keyed by trip_id, so look it up with str(tripID);
#     indexing with the unhashable `trip` list raised TypeError
#   * the second point of each pair gets SEQ 2 (matching the PointsToLine
#     template below), so the pair can be connected in order
#   * the unused `arr = arcpy.Array()` was dropped
# NOTE(review): str(tripID) assumes trip ids in this feed are plain
# integers with no leading zeros -- confirm for other feeds.
outPairStops = arcpy.CreateFeatureclass_management(FD,"pairStops","POINT",spatial_reference=sr)
arcpy.management.AddField(outPairStops, "LINEID", "TEXT")
arcpy.management.AddField(outPairStops, "SEQ", "SHORT")
stopPairSet = []
with arcpy.da.InsertCursor(outPairStops, ["SHAPE@", "LINEID", "SEQ"]) as ICursor:
    for tripID in tripsDict.keys():
        # Put the trip's stops into travel order before pairing them.
        trip = sorted(tripsDict[tripID], key=itemgetter(1))
        for x in range(0, len(trip)-1):
            lineID = "%s , %s , %s" % (trip[x][0], trip[x+1][0], str(routeDict[str(tripID)]))
            if lineID not in stopPairSet:
                stopPairSet.append(lineID)
    for i in stopPairSet:
        # lineID is "from , to , route"; int() tolerates the extra spaces.
        data = i.split(',')
        ICursor.insertRow([stopGeomDict[int(data[0])], i, 1])
        ICursor.insertRow([stopGeomDict[int(data[1])], i, 2])
## trip_id = row[idx_trip_id]
## stop_id = row[idx_stop_id]
## stop_sequence = int(row[idx_stop_sequence])
## arrival_time = hms.str2sec(row[idx_arrival_time])
## departure_time = hms.str2sec(row[idx_departure_time])
## datarow = [stop_id,stop_sequence,arrival_time,departure_time]
## stop_times_dict.setdefault(trip_id,[]).append(datarow)
##
## # for each trip, select stops in the trip, put them in order and get pairs
## # of directly-connected stops
## for trip in stop_times_dict.keys():
## selectedstops = stop_times_dict[trip]
## selectedstops.sort(key=operator.itemgetter(1))
## for x in range(0,len(selectedstops)-1):
## start_stop = selectedstops[x][0]
## end_stop = selectedstops[x+1][0]
##
## SourceOIDkey = "%s, %s" % (start_stop, end_stop)
##
## # SourceOIDkey = "%s, %s, %s" % (start_stop, end_stop, str(trip_routetype_dict[trip]))
## # this stop paris needs a line feature
## linefeature_dict[SourceOIDkey] = True
##
##
## # ----- Write pairs to a points feature class
## # (this is intermediate and will NOT go into the final ND) ----
##
## # create a points feature class for the point pairs
##outStopPairs = arcpy.management.CreateFeatureclass(outFD, "StopPairs", "POINT", "", "", "")
##arcpy.management.AddField(outStopPairs, "stop_id", "TEXT")
##arcpy.management.AddField(outStopPairs, "pair_id", "TEXT")
##arcpy.management.AddField(outStopPairs, "sequence", "SHORT")
##
## # add pairs of stops to the feature calss in preparation for generating line features
##
##badStops, badkeys = [],[] # ??? this line seems redundant
##
##with arcpy.da.InsertCursor(outStopPairs, ["SHAPE@", "stop_id", "pair_id", "sequence"]) as cursor2:
### linefeature_dict = {"start_stop , end_stop , route_type": True}
## for SourceOIDkey in linefeature_dict:
## stopPair = SourceOIDkey.split(" , ")
## # {stop_id: [stop_lat, stop_lon]}
## stop1 = stopPair[0]
## stop1_geom = stoplatlon_dict[stop1] ##### CHECK THIS BUG NEXT TIME!
## stop2 = stopPair[1]
## stop2_geom = stoplatlon_dict[stop2]
##
## cursor2.insertRow((stop1_geom, stop1, SourceOIDkey, 1))
## cursor2.insertRow((stop2_geom, stop2, SourceOIDkey, 2))
##
##
## # ----- Generate lines between all stops (for the final ND) -----
##
### defining workspace here
##arcpy.env.workspace = outFD
##
##
##outLines = arcpy.management.PointsToLine(outStopPairs, "Routes","pair_id", "sequence")
##arcpy.management.AddField(ourLines, "route_type", "SHORT")
##
##arcpy.management.AddField(outLines, "route_type_text", "TEXT") # ??? is this redundant?
##
### We don't need the points for anything anymore, so delete them.
##arcpy.Delete_management(outStopPairs)
##
### Clean up lines with 0 length. They will just produce build errors and
### are not valuable for the network dataset in any other way.
##expression = """"Shape_Length" = 0"""
##with arcpy.da.UpdateCursor(outLines, ["pair_id"], expression) as cursor3:
## for row in cursor3:
## del linefeature_dict[row[0]]
## cursor3.deleteRow()
##
### insert the route type to the output lines
##
##
##
##print "done!\n"
##
##"""
##try 1st (this prints out everything):
##StopTable = inGTFSdir + "\stops.txt"
##with open(StopTable,'rb') as f:
## reader = csv.reader(f)
## for row in reader:
## print row
##
##"""
##
##"""
##try 2nd:
##
##arcpy.management.AddField(outStopsFC, "stop_id", "TEXT")
##arcpy.management.AddField(outStopsFC, "stop_name", "TEXT")
##arcpy.management.AddField(outStopsFC, "stop_desc", "TEXT")
##arcpy.management.AddField(outStopsFC, "stop_lat", "FLOAT")
##arcpy.management.AddField(outStopsFC, "stop_lon", "FLOAT")
##arcpy.management.AddField(outStopsFC, "zone_id", "TEXT")
##arcpy.management.AddField(outStopsFC, "stop_url", "TEXT")
##arcpy.management.AddField(outStopsFC, "location_type", "TEXT")
##arcpy.management.AddField(outStopsFC, "parent_station", "TEXT")
##arcpy.management.AddField(outStopsFC, "stop_timezone", "TEXT")
##arcpy.management.AddField(outStopsFC, "wheelchair_boarding", "TEXT")
##
##
##with arcpy.da.InsertCursor(outStopsFC, ["SHAPE@", "stop_id",
## "stop_code", "stop_name", "stop_desc",
## "zone_id", "stop_url", "location_type",
## "parent_station", "wheelchair_boarding"]) as cur1:
## for stop in StopTable:
## stop_id = stop[0]
##
##
##
##
##
##"""
##
##"""
##try 3rd:
##rownum = 0
##for row in reader:
## if rownum == 0:
## header = row
## for field in header:
## #print type(field)
## rownum +=1
##
##print "\nheader row is:",header
##"""
##
##
##
##
| true |
c8e64ef5914cb3c0386abf5e9e81ba15bf83988a | Python | rbarbioni/python-flask-api | /tests/unit/dao/test_user.py | UTF-8 | 2,971 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | from alchemy_mock.mocking import AlchemyMagicMock
from unittest import TestCase
from unittest.mock import MagicMock, patch
from app.dao import user
class TestDaoUser(TestCase):
    """Unit tests for the user DAO; the db helper layer is patched with mocks."""

    # Canonical user record used as a fixture by every test.
    user_mock = {
        'id': 1,
        'name': 'User 1',
        'email': 'user1@email.com'
    }

    def setUp(self):
        # Fresh mocked SQLAlchemy session for each test.
        self.session = AlchemyMagicMock()

    @patch('app.dao.db.all', MagicMock(return_value=[]))
    def test_find_all(self):
        """find_all returns an empty list when the table is empty."""
        users = user.find_all(self.session)
        self.assertEqual(len(users), 0)

    @patch('app.dao.db.query_first', MagicMock(return_value=user_mock))
    def test_find_by_id(self):
        """find_by_id returns the record produced by the query layer."""
        user_id = 1  # renamed from `id` to avoid shadowing the builtin
        user_find = user.find_by_id(self.session, user_id)
        self.assertEqual(user_find, self.user_mock)

    @patch('app.dao.db.query_first', MagicMock(return_value=user_mock))
    def test_find_by_email(self):
        """find_by_email returns the record produced by the query layer."""
        email = 'user1@email.com'
        user_find = user.find_by_email(self.session, email)
        self.assertEqual(user_find, self.user_mock)

    @patch('app.dao.db.query_first', MagicMock(side_effect=[None, user_mock]))
    @patch('app.dao.db.insert', MagicMock(return_value=user_mock))
    def test_create(self):
        """create inserts the user when no record with that email exists yet."""
        user_created = user.create(self.session, self.user_mock)
        self.assertEqual(user_created, self.user_mock)

    @patch('app.dao.db.query_first', MagicMock(return_value=user_mock))
    def test_create_already_exists_error(self):
        """create must fail with 400 when the email is already taken.

        Fixed: the original used ``side_effect=user_mock`` (which iterates
        the dict's keys) and a bare try/except that passed silently when
        no exception was raised at all.
        """
        with self.assertRaises(Exception) as ctx:
            user.create(self.session, self.user_mock)
        self.assertEqual(ctx.exception.args[0], 400)
        self.assertEqual(ctx.exception.args[1]['msg'],
                         'user email:user1@email.com already exists')

    @patch('app.dao.db.update', MagicMock(return_value=user_mock))
    def test_update(self):
        """update persists the change and returns the refreshed record."""
        user_create_update = self.user_mock.copy()
        user_create_update['price'] = 88.88
        # First lookup finds the row; second returns the updated row.
        with patch('app.dao.db.query_first',
                   MagicMock(side_effect=[self.user_mock, user_create_update])):
            user_create_update = user.update(self.session, 1, user_create_update)
            self.assertEqual(user_create_update['price'], 88.88)

    @patch('app.dao.db.query_first', MagicMock(return_value=None))
    def test_update_not_found(self):
        """update raises 404 when the user does not exist."""
        with self.assertRaises(Exception) as ctx:
            user.update(self.session, 1, self.user_mock)
        self.assertEqual(ctx.exception.args[0], 404)
        self.assertEqual(ctx.exception.args[1]['msg'], 'user id:1 not found')

    @patch('app.dao.db.query_first', MagicMock(return_value=user_mock))
    @patch('app.dao.db.delete', MagicMock(return_value=True))
    def test_delete(self):
        """delete returns True when the user exists."""
        user_deleted = user.delete(self.session, 1)
        self.assertTrue(user_deleted)

    @patch('app.dao.db.query_first', MagicMock(return_value=None))
    def test_delete_not_found(self):
        """delete raises 404 when the user does not exist."""
        with self.assertRaises(Exception) as ctx:
            user.delete(self.session, 1)
        self.assertEqual(ctx.exception.args[0], 404)
        self.assertEqual(ctx.exception.args[1]['msg'], 'user id:1 not found')
| true |
359f51f47e6720d93f5113d676824d362f655da7 | Python | hyteer/work | /Python/Test/Process/Test/first.py | UTF-8 | 150 | 2.59375 | 3 | [] | no_license | from multiprocessing import Pool
# Inputs 1, 4, 7, ..., 19. Renamed from ``list`` so the builtin is not
# shadowed.
numbers = range(1, 20, 3)

def f(x):
    """Return the square of x."""
    return x * x

if __name__ == '__main__':
    # Square every input in parallel; the with-block makes sure the
    # pool's worker processes are cleaned up even if map() raises.
    with Pool(5) as p:
        print(p.map(f, numbers))
3ff7a7f40e5b600fb50bc23908a2912203962fb3 | Python | manikos/EDX | /6.00x Files/W2_L4_P5_function_without_if.py | UTF-8 | 377 | 2.84375 | 3 | [] | no_license | x=1
lo=2
hi=3
# Clamp x into [lo, max(lo, hi)] without using if-statements:
# max(x, lo) lifts x to at least lo, min(..., max(lo, hi)) caps it.
z=min(max(x,lo),max(lo,hi))
print 'z=', z
##x=5
##lo=2
##hi=3
##
##z=min(max(x,lo),max(lo,hi))
##print 'z=', z
##
##x=2
##lo=2
##hi=3
##
##z=min(max(x,lo),max(lo,hi))
##print 'z=', z
##
##x=3
##lo=2
##hi=3
##
##z=min(max(x,lo),max(lo,hi))
##print 'z=', z
##
##x=4
##lo=3
##hi=5
##
##z=min(max(x,lo),max(lo,hi))
##print 'z=', z
| true |
e1c33e0c84899a295cf8809ba5176412cad74f5d | Python | imagilex/tereapps | /app_reports/templatetags/app_reports_tags.py | UTF-8 | 2,242 | 2.53125 | 3 | [] | no_license | from django import template
from app_reports.models import Esfera
register = template.Library()
@register.inclusion_tag('app_reports/esfera/card.html')
def esfera_card(user, context, include_title="no"):
    """
    Inclusion tag: {% esfera_card user %}

    Renders cards for every esfera the given user may access.
    """
    # Keep only the esferas this user has permission to see.
    visible = [esfera for esfera in Esfera.objects.all()
               if esfera.accesible_by(user)]
    return {
        'esferas': visible,
        'context': context,
        'include_title': include_title,
    }
@register.filter
def esfera_accesible_by(esfera, user):
    """
    Simple Tag: {% if esfera|esfera_accesible_by:user %}

    Returns True if the user has permission to access the esfera.

    Parameters
    ----------
    esfera : Esfera object
    user : User object

    Returns
    -------
    boolean
        True if the user can access the esfera, False otherwise
    """
    return esfera.accesible_by(user)
@register.filter
def dimension_accesible_by(dimension, user):
    """
    Simple Tag: {% if dimension|dimension_accesible_by:user %}

    Returns True if the user has permission to access the report
    dimension.

    Parameters
    ----------
    dimension : DimensionReporte object
    user : User object

    Returns
    -------
    boolean
        True if the user can access the report dimension,
        False otherwise
    """
    return dimension.accesible_by(user)
@register.filter
def reporte_accesible_by(reporte, user):
    """
    Simple Tag: {% if reporte|reporte_accesible_by:user %}

    Returns True if the user has permission to access the report.

    Parameters
    ----------
    reporte : Reporte object
    user : User object

    Returns
    -------
    boolean
        True if the user can access the report, False otherwise
    """
    return reporte.accesible_by(user)
@register.inclusion_tag('app_reports/esfera/menu_opc.html')
def dimension_as_menu(esfera, dimension, user, nivel=0):
    """
    Inclusion tag: {% dimension_as_menu esfera dimension user nivel %}

    Renders one menu entry; the nesting depth grows by one per level.
    """
    # int() accepts string depths coming from the template.
    return {
        'esfera': esfera,
        'dimension': dimension,
        'user': user,
        'nivel': int(nivel) + 1,
    }
| true |
c192c233ba68e230ac7fa33329bb07488c62aa59 | Python | gistable/gistable | /dockerized-gists/3787790/snippet.py | UTF-8 | 4,877 | 2.609375 | 3 | [
"MIT"
] | permissive | #Retrive old website from Google Cache. Optimized with sleep time, and avoid 504 error (Google block Ip send many request).
#Programmer: Kien Nguyen - QTPros http://qtpros.info/kiennguyen
#change search_site and search_term to match your requirement
#Original: http://www.guyrutenberg.com/2008/10/02/retrieving-googles-cache-for-a-whole-website/
#!/usr/bin/python
import urllib, urllib2
import re
import socket
import os, errno, os.path
import time
import random, math
#import MySQLdb
import imp;
# Abort any network operation that stalls for more than 30 seconds.
socket.setdefaulttimeout(30)
#adjust the site here
search_site="qtpros.info"
# Google query restricting results to the target site.
search_term="site:" + search_site

#mysql = imp.load_source("MySQLConnector", "mysql.py").MySQLConnector()
#mysql.connect('localhost','root','','webscrape',True)
def mkdir_p(path):
    """Create *path* and any missing parents; no error if it already exists.

    Equivalent to the shell command ``mkdir -p path``.
    """
    try:
        os.makedirs(path)
    except OSError as exc: # Python >2.5
        # Only swallow "already exists" when the path really is a
        # directory; re-raise for an existing *file* of the same name
        # and for every other failure.
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else: raise
def main():
    """Crawl Google's cache of `search_site` and save each cached page.

    Walks the Google result pages for "site:<search_site>", follows every
    "Cached" link and writes the cached HTML to <search_site>/<url>.html,
    skipping files that already exist and sleeping between requests so
    Google does not block the IP.
    """
    # Pretend to be a desktop Firefox so Google serves normal HTML.
    headers = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.4) Gecko/20070515 Firefox/2.0.0.4'}
    url = "http://www.google.com/search?q="+search_term
    # Patterns for the "Cached" links, the "Next" page link, the original
    # URL inside a cache link, and the numbered pagination links.
    regex_cache = re.compile(r'<a href="([^"]*)"[^>]*>Cached</a>')
    regex_next = re.compile('<a href="([^"]*)"[^>]*><span[^>]*>[^<]*</span><span[^>]*>Next</span></a>')
    regex_url = re.compile(r'search\?q=cache:[\d\w-]+:([^%]*)')
    # regex_title = re.compile('<title>([\w\W]+)</title>')
    # regex_time = re.compile('page as it appeared on ([\d\w\s:]+)')
    regex_pagenum = re.compile('<a href="([^"]*)"[^>]*><span[^>]*>[^<]*</span>([\d]+)')
    #this is the directory we will save files to
    mkdir_p(search_site)
    path = os.path.dirname(os.path.abspath(__file__)) + '\\' + search_site
#    path = os.path.dirname(os.path.abspath(__file__))
    counter = 0
    # Resume support: roughly 10 files are saved per result page, so the
    # page to restart from is estimated from the files already on disk.
    pagenum = int(math.floor(len([name for name in os.listdir(path)]) / 10) + 1)
    max_goto = 0;
    more = True
    if (pagenum > 1):
        # Step through the pagination links until the resume page is reached.
        while (max_goto < pagenum):
            req = urllib2.Request(url, None, headers)
            page = urllib2.urlopen(req).read()
            goto = regex_pagenum.findall(page)
#            print goto
            for goto_url, goto_pagenum in goto:
                goto_pagenum = int(goto_pagenum)
                if (goto_pagenum == pagenum):
                    url = "http://www.google.com" + goto_url.replace('&amp;', '&')
                    max_goto = pagenum
                    break
                elif (goto_pagenum < pagenum and max_goto < goto_pagenum):
                    max_goto = goto_pagenum
                    url = "http://www.google.com" + goto_url.replace('&amp;', '&')
            random_interval = random.randrange(5, 20, 1)
            print "sleeping for: " + str(random_interval) + " seconds"
            print "going to page: " + str(max_goto)
            print url
            time.sleep(random_interval)
    while(more):
        #Send search request to google with pre-defined headers
        req = urllib2.Request(url, None, headers)
        #open the response page
        page = urllib2.urlopen(req).read()
        #find all cache in the page
        matches = regex_cache.findall(page)
        #loop through the matches
        for match in matches:
            counter+=1
            #find the url of the page cached by google
            the_url = regex_url.findall(match)
            the_url = the_url[0]
            the_url = the_url.replace('http://', '')
            the_url = the_url.strip('/')
            the_url = the_url.replace('/', '-')
            #if href doesn't start with http insert http before
            if not match.startswith("http"):
                match = "http:" + match
            if (not the_url.endswith('html')):
                the_url = the_url + ".html"
            #if filename "$url"[.html] does not exists
            if not os.path.exists(search_site + "/" + the_url):
                tmp_req = urllib2.Request(match.replace('&amp;', '&'), None, headers)
                try:
                    tmp_page = urllib2.urlopen(tmp_req).read()
                    f = open(search_site + "/" + the_url, 'w')
                    f.write(tmp_page)
                    f.close()
                    print counter, ": " + the_url
                    #comment out the code below if you expect to crawl less than 50 pages
                    random_interval = random.randrange(15, 20, 1)
                    print "sleeping for: " + str(random_interval) + " seconds"
                    time.sleep(random_interval)
                except urllib2.HTTPError, e:
                    print 'Error code: ', e.code
                    pass
            #now check if there is more pages
            # NOTE(review): this runs once per cached link and rebinds the
            # loop variable `match`; moving it after the for-loop would
            # check pagination once per result page -- confirm intent.
            match = regex_next.search(page)
            if match == None:
                more = False
            else:
                url = "http://www.google.com"+match.group(1).replace('&amp;', '&')
if __name__=="__main__":
    # Entry point: scrape the configured site's Google cache.
    main()
e1899d183cee46bd15c7f65b64140d01321246e6 | Python | Ellissquires/k-color-image | /kconvert.py | UTF-8 | 784 | 2.703125 | 3 | [] | no_license | import os,sys
from PIL import Image
from sklearn.cluster import KMeans
import numpy as np
filename = sys.argv[1:][0]
img = Image.open(filename).convert('RGB')
width, height = img.size
pixel_colors = []
for x in range(width):
for y in range(height):
r,g,b = img.getpixel((x,y))
color = [r,g,b]
pixel_colors.append(color)
data = np.array(pixel_colors)
kmeans = KMeans(n_clusters=10)
kmeans.fit(data)
centroids = np.rint(kmeans.cluster_centers_)
labels = kmeans.labels_
print(centroids)
for x in range(width):
for y in range(height):
r,g,b = img.getpixel((x,y))
color = [r,g,b]
color = tuple(centroids[labels[x * height + y]].astype(int))
img.putpixel((x,y), color)
img.show()
img.save(filename + "_edited", "JPEG")
| true |
014d8f94cc9bbcba3ffc551892c50426102b21a5 | Python | jfidelia/python_master | /exercises/12-Last_two_digits/app.py | UTF-8 | 216 | 4.15625 | 4 | [] | no_license | #Complete the function to print the last two digits of an interger greater than 9.
def last_two_digits(num):
    """Print the last two digits of *num*, an integer greater than 9.

    Also returns the two-digit value so callers can use it directly
    (backward compatible: the function still prints).
    """
    # Modulo 100 keeps only the final two decimal digits.
    result = num % 100
    print(result)
    return result

# Invoke the function with an integer greater than 9.
last_two_digits(32344224789)
bc714b0a9309061cc13e3fb35be4af31367da720 | Python | TalRodin/leetcode_design | /ZigZagIterator.py | UTF-8 | 768 | 3.484375 | 3 | [] | no_license | class ZigZagIterator():
    def __init__(self, v1, v2):
        """Alternate between v1 and v2; start with a non-empty vector."""
        # If v1 is empty, begin with v2 so next() has something to return.
        if len(v1)==0:
            v1,v2=v2,v1
        # currVec/currIdx: the vector and cursor the next value is read from.
        # nextVec/nextIdx: the other vector and its cursor; next() swaps the
        # pairs after each read while the other vector still has elements.
        self.currVec=v1
        self.nextVec=v2
        self.currIdx=0
        self.nextIdx=0
    def next(self):
        """Return the next value in zigzag (alternating) order."""
        ret = self.currVec[self.currIdx]
        self.currIdx+=1
        # Switch to the other vector only while it still has unread
        # elements; otherwise keep draining the current one.
        if self.nextIdx<len(self.nextVec):
            self.currVec, self.nextVec = self.nextVec, self.currVec
            self.currIdx, self.nextIdx = self.nextIdx, self.currIdx
        return ret
    def hasNext(self):
        """True while any unread element remains (the swap in next()
        guarantees currVec is the vector with elements left, if any)."""
        return self.currIdx<len(self.currVec)
v1 = [1, 2]
v2 = [3, 4, 5, 6]
obj=ZigZagIterator(v1, v2)
# Inspect the iterator's initial internal state.
print(obj.currVec)
print(obj.nextVec)
print(obj.currIdx)
print(obj.nextIdx)
# Drain a fresh iterator and collect the zigzag order:
# expected [1, 3, 2, 4, 5, 6].
i, v = ZigZagIterator(v1, v2), []
while i.hasNext():
    v.append(i.next())
print(v)
97d1ab750b6d0a97195ef19b9c3298d293ce211a | Python | best-doctor/flake8-adjustable-complexity | /tests/test_config.py | UTF-8 | 2,278 | 2.515625 | 3 | [
"MIT"
] | permissive | import pytest
from flake8.exceptions import ExecutionError
from flake8_adjustable_complexity.config import DEFAULT_CONFIG
@pytest.mark.parametrize(
    ('args', 'max_mccabe_complexity'),
    [
        (['--max-mccabe-complexity=5'], 5),
        (['--max-adjustable-complexity=10'], 10),
        ([], DEFAULT_CONFIG.max_mccabe_complexity),
    ],
)
def test_parse_max_mccabe_complexity(parse_options, args, max_mccabe_complexity):
    """Both CLI spellings set the limit; absence keeps the default."""
    config = parse_options(args)
    assert config.max_mccabe_complexity == max_mccabe_complexity
@pytest.mark.parametrize(
    ('args', 'max_complexity_per_path'),
    [
        (
            [
                '--per-path-max-adjustable-complexity',
                'foo.py:10,bar.py:20',
            ],
            {
                'foo.py': 10,
                'bar.py': 20,
            },
        ),
        ([], DEFAULT_CONFIG.max_complexity_per_path),
    ],
)
def test_parse_max_complexity_per_path(parse_options, args, max_complexity_per_path):
    """The comma-separated path:limit list parses into a {path: limit} map."""
    config = parse_options(args)
    assert config.max_complexity_per_path == max_complexity_per_path
def test_parse_max_complexity_per_path_error(parse_options):
    """A non-integer per-path limit aborts parsing with an ExecutionError."""
    args = [
        '--per-path-max-adjustable-complexity',
        'foo.py:invalid-complexity',
    ]
    with pytest.raises(ExecutionError) as excinfo:
        parse_options(args)
    assert "Couldn\'t parse --per-path-adjustable-max-complexity" in str(excinfo.value)
@pytest.mark.parametrize(
    ('args', 'var_names_blacklist'),
    [
        (
            ['--var-names-extra-blacklist=my_obj,my_var'],
            DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'},
        ),
        (
            ['--var-names-whitelist=var,result'],
            DEFAULT_CONFIG.var_names_blacklist - {'var', 'result'},
        ),
        (
            [
                '--var-names-extra-blacklist=my_obj,my_var',
                '--var-names-whitelist=var,result',
            ],
            (DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'}) - {'var', 'result'},
        ),
        ([], DEFAULT_CONFIG.var_names_blacklist),
    ],
)
def test_parse_var_names_blacklist(parse_options, args, var_names_blacklist):
    """Extra-blacklist adds names, whitelist removes them, and they compose."""
    config = parse_options(args)
    assert config.var_names_blacklist == var_names_blacklist
| true |
d429b7dee4658726a5b4c21a17339e8366166ca0 | Python | jdeepee/biglegal-model-loading-prototyping | /test_model.py | UTF-8 | 1,503 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
from model import Model
import numpy as np
import pickle as pkl
import tensorflow as tf
import sys
def to_list(prediction, length):
    """Bucket 1-based line numbers by their predicted class (0-8).

    Parameters
    ----------
    prediction : array, shape (batch, seq, classes)
        Per-class scores; collapsed to class ids with argmax over axis 2.
    length : array of int, shape (batch,)
        Number of valid positions in each sequence.

    Returns
    -------
    list of 9 lists
        ``result[c]`` holds the line numbers predicted as class ``c``.
    """
    list_location = [[] for _ in range(9)]
    current_line = 0
    # Collapse one-hot / score vectors to class ids.
    prediction = np.argmax(prediction, 2)
    print(prediction.shape)
    print(length.shape)
    for i in range(len(prediction)):
        for j in range(length[i]):
            current_line += 1
            label = prediction[i, j]
            # Direct indexing replaces the original 9-branch if/elif chain;
            # anything outside 0-8 is skipped, exactly as before.
            if 0 <= label <= 8:
                list_location[label].append(current_line)
        # Preserve the original behaviour: one extra line is counted
        # between consecutive sequences.
        current_line += 1
    return list_location
# Build the model graph.
model = Model()
# NOTE(review): sys.argv[0] is the script's own filename -- the intended
# input file is almost certainly sys.argv[1]; confirm and fix.  (`file`
# also shadows the Python 2 builtin of the same name.)
file = sys.argv[0]
# NOTE(review): pickle.load expects an open file object, not a path
# string; this likely needs pkl.load(open(file, 'rb')).
inp = pkl.load(file)
# NOTE(review): `sess` is never defined in this script -- a TensorFlow
# session must be created (and the trained weights restored) first.
pred, length = sess.run([model.prediction, model.length], {model.input_data: inp})
print to_list(pred, length)
bd078d74505a660456db749deff1eddbf29992f5 | Python | NatGr/annotate_audio | /split.py | UTF-8 | 7,096 | 2.6875 | 3 | [] | no_license | import argparse
import os
import pandas as pd
from tqdm import tqdm
from pydub import AudioSegment
import subprocess
import re
if __name__ == "__main__":
    parser = argparse.ArgumentParser("""Extracts audio, and splits it into smaller files""")
    parser.add_argument("--input", help="big audio file", required=True)
    parser.add_argument("--audio_folder",
                        help="folder that will contain smaller the audio files", required=True)
    parser.add_argument("--out_csv", help="name of the output csv file, that will contain the name of each smaller file and that is destined to be filled with their transcript", required=True)
    parser.add_argument("--wav_args", help="list of arguments of the wav created files as string",
                        default="-acodec pcm_s16le -ac 1 -ar 16000")
    parser.add_argument("--max_duration", help="maximum duration (in seconds) a clip can last", default=7, type=int)
    # FIX: help text said "maximum duration" (copy-paste from the option above).
    parser.add_argument("--min_duration", help="minimum duration (in seconds) a clip must last", default=2, type=int)
    parser.add_argument("--remove_bad_segments", action="store_true", help="set this argument if you want to automatically remove the sentences that do not seem to be spoken by the speaker of interest (which need to be specified using the 'speaker_segment' argument")
    parser.add_argument("--speaker_segment", nargs=2, type=float,
                        help="start and end time of a sample spoken by a speaker (seconds)")
    args = parser.parse_args()

    if not os.path.exists(args.audio_folder):
        os.makedirs(args.audio_folder)

    # "-acodec pcm_s16le -ac 1" -> ["-acodec", "pcm_s16le", "-ac", "1"] for ffmpeg
    params_list = [item for param in args.wav_args.split("-")[1:] for item in f"-{param}".split(" ")[:2]]
    file_extension = os.path.splitext(args.input)[1][1:]
    full_audio = AudioSegment.from_file(args.input, file_extension)

    # find out long, medium and small silences
    # (type, noise_tol, noise_dur) for long, medium and small silences
    silence_params = [(2, -50, .5), (1, -35, .3), (0, -25, .15)]
    silences = []
    for sil_type, noise_tol, noise_dur in silence_params:
        # ffmpeg's silencedetect filter logs the detected silences on stderr.
        process = subprocess.run(['ffmpeg', '-i', args.input, '-af', f'silencedetect=noise={noise_tol}dB:d={noise_dur}', '-f', 'null', '-'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        curr_silences = re.findall("\[silencedetect .{17} silence_start: (\d+(?:.\d*))\\n\[silencedetect .{17} silence_end: (\d+(?:.\d*))", process.stderr)
        silences.extend([(float(s[0]), float(s[1]), sil_type) for s in curr_silences])
    silences.sort(key=lambda x: x[0])

    # loads necessary data if we want to remove bad speakers
    if args.remove_bad_segments:
        from resemblyzer import normalize_volume, VoiceEncoder
        from resemblyzer.hparams import sampling_rate, audio_norm_target_dBFS
        from pydub.playback import play
        import matplotlib.pyplot as plt
        import librosa
        import numpy as np
        wav, source_sr = librosa.load(args.input, sr=None)
        wav = librosa.resample(wav, source_sr, sampling_rate)
        wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)
        speaker_wav = wav[int(args.speaker_segment[0] * sampling_rate):int(args.speaker_segment[1] * sampling_rate)]
        print("Playing the selected audio segment at given offsets to check it is alright")
        audio = AudioSegment.from_wav(args.input)
        play(audio[int(args.speaker_segment[0]*1000):int(args.speaker_segment[1]*1000)])
        if input("Is this correct? (y/n)\n") != "y":
            exit(0)
        encoder = VoiceEncoder("cpu")
        speaker_embed = encoder.embed_utterance(speaker_wav)
        similarities = []

    # we will loop through the silences and try to find silences smaller than args.max_duration seconds and bigger than one second greedily by trying to cut on the biggest silences. Thus we will skip the first and last audio sample but we don't care
    sent_index, i, lost_seconds = 0, 0, 0
    to_save = []  # (audio, file_name)
    prog_bar = tqdm(total=len(silences))
    while i < len(silences):
        start_period = silences[i][0] + args.min_duration
        end_period = silences[i][0] + args.max_duration
        j, last_med_silence, last_short_silence, last_long_silence = 1, None, None, None
        # Skip silences that would make the clip shorter than min_duration.
        while i + j < len(silences) and silences[i+j][0] < start_period:
            j += 1
        # Inside the allowed window, remember the last cut point per quality;
        # a long silence (type 2) is taken immediately.
        while i + j < len(silences) and silences[i+j][0] < end_period:
            if silences[i+j][2] == 0:
                last_short_silence = j
            elif silences[i+j][2] == 1:
                last_med_silence = j
            else:
                last_long_silence = j
                break
            j += 1
        if last_long_silence is None:
            if last_med_silence is not None:
                j = last_med_silence
            elif last_short_silence is not None:
                j = last_short_silence
            else:
                # No usable cut point: drop this span and move on.
                if i+1 < len(silences):
                    lost_seconds += (silences[i+1][0]+silences[i+1][1])/2 - (silences[i][0]+silences[i][1])/2
                i += 1
                # FIX: tqdm.update() takes an *increment*; passing the absolute
                # index `i` made the progress bar overshoot its total.
                prog_bar.update(1)
                continue
        sent_start = (silences[i][0] + silences[i][1]) / 2  # 50% of silence duration as a margin for safety, sec to ms
        sent_end = (silences[i+j][0] + silences[i+j][1]) / 2
        if args.remove_bad_segments:
            sent_wav = wav[int(sent_start * sampling_rate):int(sent_end * sampling_rate)]
            sent_embed = encoder.embed_utterance(sent_wav, rate=16)
            similarities.append(sent_embed @ speaker_embed)
        to_save.append((full_audio[sent_start*1000:sent_end*1000], f"sentence_{sent_index}.wav"))
        i += j
        sent_index += 1
        prog_bar.update(j)
    prog_bar.close()
    print(f"{lost_seconds : .2f} seconds of audio were cutted")

    # selects the similarity threshold at which we will remove audio
    if args.remove_bad_segments:
        print("Find a separation threshold on the histogram between speeches spoken by your speaker (closer to 1) and others (closer to 0). Then close the figure")
        plt.hist(similarities, bins=50)
        plt.title("histogram of the similarities (the higher the better)")
        plt.show()
        thr = -1
        while thr < 0 or thr > 1:
            str_thr = input("Please enter a valid threshold\n")
            try:
                thr = float(str_thr)
            except ValueError as e:
                print("Value provided was not a float!")

    # saves the files
    csv_file = {"file": []}
    for i, (audio, file_name) in enumerate(to_save):
        if args.remove_bad_segments and similarities[i] < thr:
            continue
        csv_file["file"].append(file_name)
        audio.export(os.path.join(args.audio_folder, file_name), format="wav", parameters=params_list)
    csv_file["sentence"] = [""] * len(csv_file["file"])  # adding an empty column
    pd.DataFrame(csv_file).to_csv(args.out_csv, sep=";", index=False)
| true |
cc7635164037a9e33ea465c3e93841aa7033d488 | Python | LukaszMajkut/codewars-katas | /[5kyu]Prime number decompositions.py | UTF-8 | 1,735 | 3.515625 | 4 | [] | no_license | '''
https://www.codewars.com/kata/53c93982689f84e321000d62
'''
def getAllPrimeFactors(n):
    """Return the prime factorization of n as a list with multiplicity.

    Non-int or non-positive input -> []; 1 -> [1] (kata convention).
    """
    if not isinstance(n, int) or n <= 0:
        return []
    if n == 1:
        return [1]
    result = []
    divider = 2
    # Trial division: each divider that divides n here is necessarily prime.
    while n != 1:
        if n % divider == 0:
            result.append(divider)
            # FIX: use integer division; `n = n / divider` turned n into a
            # float, which loses precision for large inputs.
            n //= divider
        else:
            divider += 1
    return result
def getUniquePrimeFactorsWithCount(n):
    """Return [[unique prime factors], [their exponents]] for n.

    Non-int or non-positive input -> [[], []]; 1 -> [[1], [1]].
    """
    if not isinstance(n, int) or n <= 0:
        return [[], []]
    if n == 1:
        return [[1], [1]]
    keys = []
    values = []
    divider = 2
    # Single pass: factors come out in ascending order, so a repeated prime
    # is always the last key seen (no intermediate dict needed).
    while n != 1:
        if n % divider == 0:
            if keys and keys[-1] == divider:
                values[-1] += 1
            else:
                keys.append(divider)
                values.append(1)
            # FIX: integer division (was `n = n / divider`, drifting to float).
            n //= divider
        else:
            divider += 1
    return [keys, values]
def getUniquePrimeFactorsWithProducts(n):
    """Return [p1**k1, p2**k2, ...] for the factorization of n.

    Non-int or non-positive input -> []; 1 -> [1].
    """
    if not isinstance(n, int) or n <= 0:
        return []
    if n == 1:
        return [1]
    products = []
    divider = 2
    # Factors emerge in ascending order, so the last entry is always a power
    # of the current prime; multiply into it while the same prime repeats.
    while n != 1:
        if n % divider == 0:
            if products and products[-1] % divider == 0:
                products[-1] *= divider
            else:
                products.append(divider)
            # FIX: integer division (was `n = n / divider`, drifting to float).
            n //= divider
        else:
            divider += 1
    return products
| true |
c601d04fa1260a1864dae1e6535d661a6614954a | Python | googoles/pytorch_Practice | /Tutorials/neural_network.py | UTF-8 | 1,843 | 3.328125 | 3 | [] | no_license | # 숫자이미지 분류
'''
신경망의 일반적인 학습 과정은 다음과 같습니다:
학습 가능한 매개변수(또는 가중치(weight))를 갖는 신경망을 정의합니다.
데이터셋(dataset) 입력을 반복합니다.
입력을 신경망에서 전파(process)합니다.
손실(loss; 출력이 정답으로부터 얼마나 떨어져있는지)을 계산합니다.
변화도(gradient)를 신경망의 매개변수들에 역으로 전파합니다.
신경망의 가중치를 갱신합니다. 일반적으로 다음과 같은 간단한 규칙을 사용합니다: 새로운 가중치(weight) = 가중치(weight) - 학습률(learning rate) * 변화도(gradient)
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style CNN for 32x32 single-channel images, 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv stages: 1 -> 6 -> 16 channels, 3x3 kernels.
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        # Dense head; the conv/pool stack leaves 16 maps of 6x6.
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Conv + ReLU + 2x2 max-pool twice, then the dense classifier."""
        pooled1 = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        pooled2 = F.max_pool2d(F.relu(self.conv2(pooled1)), 2)
        flat = pooled2.view(-1, self.num_flat_features(pooled2))
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Number of elements per sample, i.e. all dims except the batch one."""
        num_features = 1
        for dim in x.size()[1:]:
            num_features *= dim
        return num_features
# Instantiate the network; printing an nn.Module lists its layer structure.
net = Net()
print(net)
| true |
34cb4fb2f8726095ca508893b5e896e59df5f479 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_199/1622.py | UTF-8 | 954 | 3.328125 | 3 | [] | no_license | # input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
import sys
def runtest(case, k):
    """Greedy pancake-flipper: count the k-wide flips needed to make `case`
    all '+', or return -1 if impossible.

    Flipping the leftmost '-' at each position is optimal because no later
    flip can change a position to its left.
    """
    s = list(case)
    count = 0
    for i in range(0, len(s) - k + 1):
        if s[i] == '-':
            count += 1
            for m in range(k):
                s[i + m] = '+' if s[i + m] == '-' else '-'
    # FIX: the original scanned s[i+1:], which raised NameError whenever the
    # loop body never ran (len(case) < k).  Checking the whole string is
    # equivalent: every position before the last window start is already '+'.
    if '-' in s:
        return -1
    return count
in_path = sys.argv[1]
out_path = sys.argv[2]

# Context managers guarantee both files are closed even if a case fails.
with open(in_path, 'r') as fin, open(out_path, 'w') as fout:
    num_cases = int(fin.readline())  # first line: number of test cases
    for case_no in range(1, num_cases + 1):
        # Each case line is "<pancake string> <flipper size>".
        # (FIX: the original reused `t` for both the case count and the
        # per-case tokens, shadowing the loop bound mid-loop.)
        tokens = fin.readline()[:-1].split(' ')
        ans = runtest(tokens[0], int(tokens[1]))
        fout.write("Case #"+str(case_no)+': '+(str(ans) if ans != -1 else 'IMPOSSIBLE')+'\n')
d74050823f7d88b9d49143cf4e91a592d5b3104f | Python | edu-athensoft/ceit4101python | /stem1400_modules/module_10_gui/s07_event/s072_keyboard/kbd_focus_2.py | UTF-8 | 1,119 | 3.859375 | 4 | [] | no_license | """
Event handling
Keyboard event
<FocusIn>
<FocusOut>
"""
from tkinter import *
def handle_focusin(widget):
    """Report that `widget` gained focus and show the type of the focused widget."""
    message = "FocusIn event came from {}".format(widget)
    print(message)
    print(type(widget.focus_get()))
def handle_focusout(widget):
    """Report that `widget` lost focus and show the type of the focused widget."""
    message = "FocusOut event came from {}".format(widget)
    print(message)
    print(type(widget.focus_get()))
# Root window: fixed 360x180 size placed at (300, 300), pale blue background.
root = Tk()
root.title("Python GUI - Event | Keyboard")
root.geometry("360x180+300+300")
root.config(bg="#ddddff")
# labels and entries
e1 = Entry(root)
e2 = Entry(root)
label1 = Label(root, text='Entry1')
label2 = Label(root, text='Entry2')
# Two-row grid: label column on the left, entry column on the right.
label1.grid(row=0,column=0, padx=(50,5), pady=(30,0))
label2.grid(row=1,column=0, padx=(50,5))
e1.grid(row=0,column=1, pady=(30,0))
e2.grid(row=1,column=1)
# keyboard event
# NOTE(review): the bindings are on the entries but the handlers receive the
# matching *labels*, so the printed widget is the label -- confirm intended.
e1.bind("<FocusIn>", lambda x: handle_focusin(label1))
e1.bind("<FocusOut>", lambda x: handle_focusout(label1))
e2.bind("<FocusIn>", lambda x: handle_focusin(label2))
e2.bind("<FocusOut>", lambda x: handle_focusout(label2))
root.mainloop()
| true |
e8df8c330ec533f1c1f6f25d1641ae9522a96361 | Python | Sujitha03/python-programming | /palindrome.py | UTF-8 | 78 | 3.28125 | 3 | [] | no_license | n=int(input())
# n is read from stdin above; a number is a palindrome when its decimal
# digits read the same forwards and backwards.
reversed_digits = str(n)[::-1]
if reversed_digits == str(n):
    print("yes")
else:
    print("no")
| true |
93858290ad1f8cca989c3cf99f83d1d3b8441a36 | Python | imxyu/PyTorch-Fashion-MNIST | /Fashion-MNIST_conv.py | UTF-8 | 4,726 | 2.71875 | 3 | [] | no_license | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
# Hyper-parameters and train/test split size.
NUM_TRAINING_SAMPLES = 50000
EPOCHS = 100
learning_rate = 0.001
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
dataset = torchvision.datasets.FashionMNIST(root='.')
# generate a random permutation of the sample indices
order = np.argsort(np.random.random(dataset.train_labels.shape))
# shuffle the dataset samples with that permutation
data = dataset.train_data[order].float()
data = data.float()  # NOTE(review): redundant -- already cast to float above
data = torch.unsqueeze(data, 1)  # add a channel dimension: (N, 1, H, W)
target = dataset.train_labels[order]
data = data.to(device)
target = target.to(device)
# Model dimensions: 1 input channel, 10 output classes.
D_IN = 1
D_OUT = 10
# split into training and test partitions
data_tr = data[0:NUM_TRAINING_SAMPLES]
data_ts = data[NUM_TRAINING_SAMPLES:]
target_tr = target[0:NUM_TRAINING_SAMPLES]
target_ts = target[NUM_TRAINING_SAMPLES:]
class FMNIST_data(torch.utils.data.Dataset):
    """Thin Dataset wrapper over pre-loaded feature/label tensors."""
    def __init__(self, x, y):
        # x: input samples, y: labels (parallel indexable sequences).
        self.x = x
        self.y = y
    def __getitem__(self, index):
        # One (features, label) pair, as DataLoader expects.
        return self.x[index], self.y[index]
    def __len__(self):
        return len(self.y)
    def getfeatures(self, index):
        return self.x[index]
    def getprice(self, index):
        # NOTE(review): name looks copied from a house-price project; this
        # returns the label, not a price.
        return self.y[index]
# Wrap the pre-split tensors and serve them in large shuffled batches.
FMNIST_tr = FMNIST_data(data_tr, target_tr)
FMNIST_ts = FMNIST_data(data_ts, target_ts)
loader_tr = torch.utils.data.DataLoader(dataset=FMNIST_tr, batch_size=4096, shuffle=True)
loader_ts = torch.utils.data.DataLoader(dataset=FMNIST_ts, batch_size=4096, shuffle=True)
# an example of building a CNN model on PyTorch
# https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
class FMNIST_conv(nn.Module):
    """Three unpadded 3x3 conv layers followed by a two-layer dense head.

    With (N, D_IN, 28, 28) input each unpadded conv shrinks the map by 2 px:
    28 -> 26 -> 24 -> 22, hence the 22*22*32 flatten size below.
    """
    def __init__(self, D_IN, D_OUT):
        super(FMNIST_conv, self).__init__()
        self.conv1 = nn.Conv2d(D_IN, 32, kernel_size=3, stride=1)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
        self.dense1 = nn.Linear(22*22*32, 64)
        # FIX: the output layer hard-coded 10 classes and ignored D_OUT.
        self.dense2 = nn.Linear(64, D_OUT)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.relu(self.conv3(out))
        out = out.view(-1, 22*22*32)
        out = F.relu(self.dense1(out))
        out = self.dense2(out)  # raw logits; CrossEntropyLoss applies softmax
        return out
class FMNIST_conv_(nn.Module):
    """Two padded 3x3 conv + 2x2 max-pool stages: spatial size 28 -> 14 -> 7,
    followed by a two-layer dense classifier (7*7*32 flatten size)."""
    def __init__(self, D_IN, D_OUT):
        super(FMNIST_conv_, self).__init__()
        self.conv1 = nn.Conv2d(D_IN, 32, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.dense1 = nn.Linear(7*7*32, 64)
        # FIX: the output layer hard-coded 10 classes and ignored D_OUT.
        self.dense2 = nn.Linear(64, D_OUT)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = self.pool1(out)
        out = F.relu(self.conv2(out))
        out = self.pool2(out)
        out = out.view(-1, 7*7*32)
        out = F.relu(self.dense1(out))
        out = self.dense2(out)  # raw logits; CrossEntropyLoss applies softmax
        return out
# phase -> dataset / loader lookup tables, plus per-epoch metric buffers.
dataset = {'train': FMNIST_tr, 'val': FMNIST_ts}
dataloader = {'train': loader_tr, 'val': loader_ts}
phase = ['train', 'val']
LOSS = {'train': torch.empty(EPOCHS), 'val': torch.empty(EPOCHS)}
ACCURACY = {'train': torch.empty(EPOCHS), 'val': torch.empty(EPOCHS)}

model = FMNIST_conv_(D_IN, D_OUT)
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
# Decay the learning rate by 5x every 20 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.2)

for epoch in range(EPOCHS):  # renamed from `iter`, which shadowed the builtin
    print('iter:', epoch)
    scheduler.step()
    for ph in phase:
        loss_total = 0
        correct_total = 0
        if ph == 'train':
            model.train()
        elif ph == 'val':  # FIX: was 'test', which never matched, so eval()
            model.eval()   # mode (dropout/batchnorm freeze) was never enabled
        for inputs, targets in dataloader[ph]:
            targets_pred = model(inputs)
            loss = criterion(targets_pred, targets)
            # loss.item() is the batch mean; scale by batch size for the epoch sum.
            loss_total += loss.item() * len(inputs)
            _, targets_pred = torch.max(targets_pred, 1)
            correct_total += torch.sum(targets_pred == targets)
            if ph == 'train':
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        LOSS[ph][epoch] = loss_total / dataset[ph].__len__()
        ACCURACY[ph][epoch] = correct_total.float() / dataset[ph].__len__()
        print('{} loss: {:.6f}'.format(ph, LOSS[ph][epoch]))
        print('{} acc: {:.6f} %'.format(ph, ACCURACY[ph][epoch] * 100))

# Plot train/val accuracy curves over epochs.
plt.figure()
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.plot(np.linspace(1,EPOCHS,EPOCHS), ACCURACY['train'].detach().numpy())
plt.plot(np.linspace(1,EPOCHS,EPOCHS), ACCURACY['val'].detach().numpy())
plt.show()
bbeb4abf9e9256fbcaac333390d65ea2ebd4fad7 | Python | JJ-learning/Introduccion-a-los-Modelos-Computacionales | /Practicas/P3/rbf2.py | UTF-8 | 10,640 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 12:37:04 2016
@author: pagutierrez
"""
# TODO Incluir todos los import necesarios
import click
import numpy as np
import pandas as pd
import sklearn
@click.command()
@click.option('--train_file', '-t', default=None, required=True,
              help=u'Fichero con los datos de entrenamiento.')
# TODO: add the remaining click options (test_file, classification, ratio_rbf,
# l2, eta, outputs) -- without them click cannot supply every parameter below.
def entrenar_rbf_total(train_file, test_file, classification, ratio_rbf, l2, eta, outputs):
    """ Supervised learning with an RBF neural network.
        Runs the experiment with 5 seeds (100..500) and reports the mean +-
        std of the train/test MSE (and CCR for classification problems).
    """
    train_mses = np.empty(5)
    train_ccrs = np.empty(5)
    test_mses = np.empty(5)
    test_ccrs = np.empty(5)
    # One full train/evaluate cycle per seed; s//100-1 maps seeds to slots 0..4.
    for s in range(100,600,100):
        print("-----------")
        print("Semilla: %d" % s)
        print("-----------")
        np.random.seed(s)
        train_mses[s//100-1], test_mses[s//100-1], train_ccrs[s//100-1], test_ccrs[s//100-1] = \
            entrenar_rbf(train_file, test_file, classification, ratio_rbf, l2, eta, outputs)
        print("MSE de entrenamiento: %f" % train_mses[s//100-1])
        print("MSE de test: %f" % test_mses[s//100-1])
        if classification:
            print("CCR de entrenamiento: %.2f%%" % train_ccrs[s//100-1])
            print("CCR de test: %.2f%%" % test_ccrs[s//100-1])
    print("*********************")
    print("Resumen de resultados")
    print("*********************")
    print("MSE de entrenamiento: %f +- %f" % (np.mean(train_mses), np.std(train_mses)))
    print("MSE de test: %f +- %f" % (np.mean(test_mses), np.std(test_mses)))
    if classification:
        print("CCR de entrenamiento: %.2f%% +- %.2f%%" % (np.mean(train_ccrs), np.std(train_ccrs)))
        print("CCR de test: %.2f%% +- %.2f%%" % (np.mean(test_ccrs), np.std(test_ccrs)))
def entrenar_rbf(train_file, test_file, classification, ratio_rbf, l2, eta, outputs):
    """ One run of the RBF-network supervised learning model.

        Parameters:
            - train_file / test_file: data file names.
            - classification: True for classification problems.
            - ratio_rbf: ratio (0..1) of RBF neurons w.r.t. training patterns.
            - l2: True -> L2 penalty for logistic regression, False -> L1.
            - eta: regularisation parameter for logistic regression.
            - outputs: number of output variables (last columns of the data).
        Returns (train_mse, test_mse, train_ccr, test_ccr); the CCRs are 0
        for regression.  For classification the MSE compares predicted class
        probabilities against one-hot targets.
    """
    train_inputs, train_outputs, test_inputs, test_outputs = lectura_datos(train_file, 
                                                                           test_file,
                                                                           outputs)

    # Number of RBF neurons: a fraction ratio_rbf of the training patterns.
    num_rbf = max(1, int(round(ratio_rbf * train_inputs.shape[0])))
    print("Número de RBFs utilizadas: %d" %(num_rbf))

    kmedias, distancias, centros = clustering(classification, train_inputs,
                                              train_outputs, num_rbf)
    
    radios = calcular_radios(centros, num_rbf)
    
    matriz_r = calcular_matriz_r(distancias, radios)
    
    if not classification:
        coeficientes = invertir_matriz_regresion(matriz_r, train_outputs)
    else:
        logreg = logreg_clasificacion(matriz_r, train_outputs, eta, l2)

    # Distances from the centroids to the test patterns and the test R matrix.
    distancias_test = kmedias.transform(test_inputs)
    matriz_r_test = calcular_matriz_r(distancias_test, radios)

    # FIX: this branch used the undefined name `clasificacion` (NameError);
    # the parameter is called `classification`.
    if not classification:
        # Linear read-out: predictions are R @ beta.
        train_pred = matriz_r.dot(coeficientes)
        test_pred = matriz_r_test.dot(coeficientes)
        train_mse = np.mean((train_outputs - train_pred) ** 2)
        test_mse = np.mean((test_outputs - test_pred) ** 2)
        train_ccr = 0
        test_ccr = 0
    else:
        # NOTE(review): assumes a single output column of class labels for
        # classification -- confirm against the datasets used.
        train_pred = logreg.predict(matriz_r)
        test_pred = logreg.predict(matriz_r_test)
        train_ccr = 100.0 * np.mean(train_pred == train_outputs.ravel())
        test_ccr = 100.0 * np.mean(test_pred == test_outputs.ravel())
        # MSE between predicted probabilities and one-hot encoded targets.
        train_probs = logreg.predict_proba(matriz_r)
        test_probs = logreg.predict_proba(matriz_r_test)
        train_onehot = (train_outputs.reshape(-1, 1) == logreg.classes_).astype(float)
        test_onehot = (test_outputs.reshape(-1, 1) == logreg.classes_).astype(float)
        train_mse = np.mean((train_onehot - train_probs) ** 2)
        test_mse = np.mean((test_onehot - test_probs) ** 2)

    return train_mse, test_mse, train_ccr, test_ccr
def lectura_datos(fichero_train, fichero_test, outputs):
    """ Read the train and test data files.

        Parameters:
            - fichero_train / fichero_test: CSV file names.
            - outputs: number of output variables (they are the last columns
              of the matrix).
        Returns (train_inputs, train_outputs, test_inputs, test_outputs) as
        numpy arrays.
    """
    # NOTE(review): assumes comma-separated files without a header row --
    # confirm for the datasets actually used in the course.
    train = pd.read_csv(fichero_train, header=None).values
    test = pd.read_csv(fichero_test, header=None).values
    train_inputs = train[:, :-outputs]
    train_outputs = train[:, -outputs:]
    test_inputs = test[:, :-outputs]
    test_outputs = test[:, -outputs:]
    return train_inputs, train_outputs, test_inputs, test_outputs
def inicializar_centroides_clas(train_inputs, train_outputs, num_rbf):
    """ Initialise the centroids for the classification case.

        Patterns must be chosen in a stratified way, preserving the per-class
        proportion of patterns.

        Parameters:
            - train_inputs: matrix with the training input variables.
            - train_outputs: matrix with the training output variables.
            - num_rbf: number of RBF neurons.
        Returns:
            - centroides: matrix with all the initial centroids
              (num_rbf x num_inputs).
    """

    # TODO: complete the function code
    # NOTE(review): `centroides` is undefined until the TODO above is done,
    # so calling this function currently raises NameError.

    return centroides
def clustering(clasificacion, train_inputs, train_outputs, num_rbf):
    """ Perform the clustering step.  For classification the initial
        centroids must come from inicializar_centroides_clas(); for
        regression they are chosen at random.

        Parameters:
            - clasificacion: True if the problem is a classification one.
            - train_inputs: matrix with the training input variables.
            - train_outputs: matrix with the training output variables.
            - num_rbf: number of RBF neurons.
        Returns:
            - kmedias: fitted sklearn.cluster.KMeans object.
            - distancias: (num_patterns x num_rbf) matrix with the distance
              from each pattern to each RBF.
            - centros: (num_rbf x num_inputs) matrix with the centroids
              obtained after clustering.
    """

    # TODO: complete the function code
    # NOTE(review): `kmedias`, `distancias` and `centros` are undefined until
    # the TODO above is implemented (NameError at call time).

    return kmedias, distancias, centros
def calcular_radios(centros, num_rbf):
    """ Compute the value of the radii after clustering.

        radius_j = (sum of distances from centroid j to all the other
        centroids) / (2 * (num_rbf - 1)), i.e. half the mean distance to the
        other centroids.

        Parameters:
            - centros: set of centroids, shape (num_rbf x num_inputs).
            - num_rbf: number of RBF neurons.
        Returns:
            - radios: vector (num_rbf) with the radius of each RBF.
    """
    if num_rbf < 2:
        # A single RBF has no neighbours; fall back to a unit radius.
        return np.ones(num_rbf)
    # Pairwise Euclidean distance matrix between centroids via broadcasting.
    diffs = centros[:, np.newaxis, :] - centros[np.newaxis, :, :]
    dist = np.sqrt((diffs ** 2).sum(axis=2))
    radios = dist.sum(axis=1) / (2.0 * (num_rbf - 1))
    return radios
def calcular_matriz_r(distancias, radios):
    """ Return the activation of every RBF for every pattern (matrix R).

        A Gaussian activation exp(-d^2 / (2 r^2)) is used, and a final
        column of ones is appended to act as the bias term.

        Parameters:
            - distancias: (num_patterns x num_rbf) matrix with the distance
              from each pattern to each RBF.
            - radios: array (num_rbf) with the radius of each RBF.
        Returns:
            - matriz_r: matrix (num_patterns x (num_rbf+1)) with the
              activation of each RBF per pattern plus the bias column.
    """
    # radios broadcasts across the pattern dimension of distancias.
    activaciones = np.exp(-(distancias ** 2) / (2.0 * (radios ** 2)))
    bias = np.ones((distancias.shape[0], 1))
    matriz_r = np.hstack((activaciones, bias))
    return matriz_r
def invertir_matriz_regresion(matriz_r, train_outputs):
    """ Return the coefficient vector for the regression case (matrix beta).

        Uses the Moore-Penrose pseudo-inverse, which yields the
        least-squares solution of R @ beta = y.

        Parameters:
            - matriz_r: matrix (num_patterns x (num_rbf+1)) of RBF
              activations with the trailing bias column.
            - train_outputs: matrix with the training output variables.
        Returns:
            - coeficientes: vector (num_rbf+1) with the bias and output
              coefficient of each RBF.
    """
    coeficientes = np.linalg.pinv(matriz_r).dot(train_outputs)
    return coeficientes
def logreg_clasificacion(matriz_r, train_outputs, eta, l2):
    """ Return a logistic-regression model fitted on matrix R.

        Parameters:
            - matriz_r: matrix (num_patterns x (num_rbf+1)) of RBF
              activations with the trailing bias column.
            - train_outputs: matrix with the training output variables.
            - eta: regularisation parameter for logistic regression.
            - l2: True -> L2 penalty, False -> L1 penalty.
        Returns:
            - logreg: a fitted sklearn.linear_model.LogisticRegression.
    """
    # Local import keeps sklearn a dependency of this function only.
    from sklearn.linear_model import LogisticRegression
    penalty = 'l2' if l2 else 'l1'
    # sklearn's C is the *inverse* regularisation strength, hence C = 1/eta.
    # liblinear is used because it supports both l1 and l2 penalties.
    logreg = LogisticRegression(penalty=penalty, C=1.0 / eta,
                                solver='liblinear')
    logreg.fit(matriz_r, np.ravel(train_outputs))
    return logreg
# Entry point: click parses the CLI options and invokes the command.
if __name__ == "__main__":
    entrenar_rbf_total()
| true |
cd8316afdc91eb0ef1c74ac0ac3bf0283ee6ff2f | Python | joeriking/project-euler | /p002.py | UTF-8 | 338 | 3.28125 | 3 | [] | no_license | #
# Solution to problem 2 of Project Euler
# Copyright (c) 2021 Joeri King. All rights reserved.
# https://github.com/joeriking/project-euler
#
# Generate every Fibonacci number below four million, then sum the even ones.
fib_terms = [1]
nxt = 2
while nxt < 4000000:
    fib_terms.append(nxt)
    nxt += fib_terms[-2]
even_total = sum(term for term in fib_terms if term % 2 == 0)
print(even_total)
| true |
07d66f3f5d97352f2478c654965669cee9a5be65 | Python | sivaneshl/python_data_analysis | /stack/62414149/search-values-from-a-list-in-dataframe-cell-list-and-add-another-column-with-res.py | UTF-8 | 242 | 2.921875 | 3 | [] | no_license | import pandas as pd
# Single-row frame: column A holds a list of installed KB ids, B an unrelated list.
df = pd.DataFrame({'A': [['KB4525236', 'KB4485447', 'KB4520724', 'KB3192137', 'KB4509091']], 'B': [['a', 'b']]})
findKBs = ['KB4525236','KB4525202']
# Column C receives (as a one-element column) the KBs from findKBs that are
# missing from the list stored in A's first row.
df['C'] = [[x for x in findKBs if x not in df['A'][0]]]
print(df)
8f3ef54bc0f79b3dd89555626c3e9a33d26f831a | Python | shiraWeiss/NYC-MLProject | /Data/BuildingAge/BuildingAge.py | UTF-8 | 598 | 2.859375 | 3 | [] | no_license | from Data.Apartments.Apartments import *
class BuildingAge:
    """Derives a BUILDING_AGE column (years since construction) per address."""
    def __init__(self):
        # Apartments is a singleton data source from the project's Data package.
        self.apts = Apartments.getInstance()
        self.addAgeToApts()
    def calcAgeOfApartment(self, apt_row_from_table):
        # Rows come from the ['ADDRESS', 'YEAR BUILT'] projection below,
        # so index 1 is the YEAR BUILT value.
        year_built = apt_row_from_table[1]
        # NOTE(review): 2017 is a hard-coded snapshot year -- confirm this is
        # intentional rather than the current year.
        return 2017 - year_built
    def addAgeToApts(self):
        addresses = self.apts.getData()[['ADDRESS', 'YEAR BUILT']]
        addresses['BUILDING_AGE'] = addresses.apply(self.calcAgeOfApartment, axis=1)
        # Keep one row per address (first occurrence wins).
        self.data = addresses.drop_duplicates(subset='ADDRESS', keep='first')
    def getData(self):
        return self.data
| true |
3d979026af51d1b207568df4aaf68e03b4b6c368 | Python | DestinyofYeet/antonstechbot | /cogs/earth2.py | UTF-8 | 997 | 2.75 | 3 | [
"MIT"
] | permissive | from discord.ext import commands
import discord
import requests
class Earth2(commands.Cog):
    """Discord cog that reports Earth2 statistics for country id 199."""
    def __init__(self, client):
        self.client = client
    @commands.command(name="earth2")
    async def earth2_command(self, ctx):
        """Fetch current stats from earth2stats.net and post them as an embed."""
        url = "https://earth2stats.net/api/get_countries/199"
        # NOTE(review): no timeout or HTTP error handling -- a failed request
        # raises inside the command handler.  Consider requests.get(url, timeout=...).
        response = requests.get(url).json()
        land = response["name"]
        wert = response["marketplace_tile_value"]
        verkauft = response["total_sold_tiles"]
        embed = discord.Embed(title="Earth2 Statistiken für " + land, url="https://earth2stats.net/country/" + land)
        embed.set_thumbnail(
            url="https://static-cdn.jtvnw.net/jtv_user_pictures/99783da2-3f60-4aeb-92bd-83e953c03627-profile_image-70x70.png")
        embed.add_field(name="Wert eines Tiles", value=f"{wert}E$")
        embed.add_field(name="Insgesamt verkauft", value=f"{verkauft} Tiles")
        await ctx.send(embed=embed)
        return
def setup(client):
    """discord.py extension entry point: register the cog on load."""
    client.add_cog(Earth2(client))
| true |
926f9885ce86ad903da8d0aa57ce0ab05493d8e3 | Python | albertoseabra/project_github | /visual.py | UTF-8 | 2,634 | 3.234375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import folium
import os
import re
os.chdir('c:\\precourse\project\project_github\data')
data_file = pd.read_csv('new_comparison.csv', encoding='latin1')
columns = ['average_area', 'average_rent', 'average_rent_per_m2', 'number_contracts']
legends = ['AVERAGE AREA', 'AVERAGE RENT', 'AVERAGE RENT PER M2', 'NUMBER OF NEW CONTRACTS']
colors = ['RdPu', 'YlGn', 'OrRd', 'YlOrRd']
def search_barrio(string):
    """Return (match_number, name) pairs for every Barris entry whose name
    matches `string` (case-insensitive regular-expression search)."""
    matches = []
    pattern = string.lower()
    hits = 0
    for name in data_file.Barris:
        if re.search(pattern, str(name).lower()):
            hits += 1
            matches.append((hits, name))
    return matches
def draw_folium(type, color='OrRd'):
    """Render a choropleth of Barcelona neighbourhoods for metric index `type`.

    `type` indexes the module-level `columns`/`legends` lists (0-3); the
    resulting map is saved as "<column>.html".
    NOTE(review): `type` and `map` shadow builtins.
    """
    map = folium.Map(location=[41.41, 2.15], zoom_start=13)
    map.choropleth(geo_data='barris_geo.json', name='choropleth', data=data_file, columns=['Barris', columns[type]],
                   key_on='feature.properties.N_Barri', fill_color=color, fill_opacity=0.7, line_opacity=0.4,
                   legend_name=legends[type], highlight= True)
    map.save(str(columns[type])+'.html')
def draw_bars(type, title='', y_label=''):
    """Draw a descending bar chart of metric `type` (a column name) per Barris.

    NOTE(review): `type` shadows the builtin; str() on the defaults can
    hardly fail, so the bare excepts below are effectively dead code.
    """
    try:
        title = str(title)
    except:
        title = ''
    try:
        y_label = str(y_label)
    except:
        y_label = ''
    # Bars sorted descending by the chosen metric.
    compare = sns.barplot(x='Barris', y=type, palette='gist_rainbow',
                          data=data_file.sort_values(by=type, ascending=False))
    compare.axes.set_title(title, size=25, color='r')
    compare.set_ylabel(y_label, size=15)
    # Rotate x labels so the long neighbourhood names fit.
    for item in compare.get_xticklabels():
        item.set_rotation(90)
    plt.subplots_adjust(top=0.95, bottom=0.2, left=0.04, right=0.98)
    plt.show()
def choose_bar_map():
    """Interactive menu: ask which metric to plot and delegate to draw_bars.

    Any input other than '1'-'4' simply returns without plotting.
    """
    print('There are 4 comparison maps to choose from, they compare the current values of:')
    print('Option 1: Average Rent')
    print('Option 2: Average rent per M^2')
    print('Option 3: Number of contracts')
    print('Option 4: Average area of the apartments')
    choice = input('Which one do you want?(any other key to quit):')
    if choice == '1':
        draw_bars('average_rent', title='Average Monthly Rent - 1st Trimester of 2017', y_label='Rent(€)')
    elif choice == '2':
        draw_bars('average_rent_per_m2', title='Average Rent per M^2 - 1st Trimester of 2017', y_label='Rent per M^2(€)')
    elif choice == '3':
        draw_bars('number_contracts', title='Number of new contracts - 1st Trimester of 2017', y_label='New Contracts')
    elif choice == '4':
        # FIX: the y_label string was missing its closing parenthesis ('Area(M^2').
        draw_bars('average_area', title='Average area of apartments - 1st Trimester of 2017', y_label='Area(M^2)')
| true |
a1f051b03c4a3d164bc46d9b3f7bfef1c80af95e | Python | McGeeForest/grpc_w2m_framework_m | /test/t.py | UTF-8 | 142 | 2.640625 | 3 | [] | no_license | import threading
def run():
    """Print every currently-alive thread followed by its name."""
    for worker in threading.enumerate():
        print(worker)
        print(worker.name)
# Run the thread-listing demo only when executed directly.
if __name__ == '__main__':
    run()
fa5a26b030ac63fc67ddd1837600f259e08b94ff | Python | zzzchangezzz/dnd_help | /data/spells.py | UTF-8 | 1,158 | 2.65625 | 3 | [] | no_license | import sqlalchemy
from sqlalchemy import orm
from .db_session import SqlAlchemyBase
class Magic(SqlAlchemyBase):
    """ORM model for one D&D spell, stored in the ``magic`` table."""
    __tablename__ = 'magic'
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True, autoincrement=True)
    # All descriptive spell fields are required free-text strings.
    title = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    level = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    distance = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    components = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    durability = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    classes = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    content = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    # Relationship to "Connection", declared elsewhere in the project.
    connect = orm.relation("Connection", back_populates='magic')

    def __repr__(self):
        # Multi-line, user-facing spell card; the Russian field labels are
        # runtime output and deliberately left untouched.
        return self.title + "\nУровень: " + self.level + "\nДистанция: " + \
               self.distance + "\nКомпоненты: " + self.components +\
               "\nДлительность:" + self.durability + "\nКлассы: " +\
               self.classes + "\nОписание: \n" + self.content
| true |
cbaea7f4e033b3c75eea1a4bf89485bb70a75bb4 | Python | yangyang-li/6.854-project | /src/test.py | UTF-8 | 3,603 | 3.25 | 3 | [] | no_license | import networkx as nx
from networkx import min_cost_flow_cost as min_cost_flow
from max_concurrent_flow import *
''' Test suite for max concurrent flow algorithms
Types of test cases we want to build:
-Easy test case with one commodity, solvable by hand
-Easy test case with more than one commodity, solvable by hand
-Easy test case with >1 commodity with weird betas
This should be enough to assure functionality.
Now we can build larger test cases to do experiments and profile on
(we might do this in another file)
'''
##########Test cases###############
#Basic test case: Simple flow graph
#This graph right here: http://en.wikipedia.org/wiki/File:Max-flow_min-cut_example.svg
#max flow of this graph is 7, so we can instantiate the graph and call an mcf method
#on the graph by run_max_concurrent_flow(demand) with whatever demand we want
class TestCase1():
    """Single-commodity flow graph (the Wikipedia max-flow example linked
    above; its max flow is 7).  Edge/Commodity/maximum_concurrent_flow come
    from the max_concurrent_flow module imported at the top of the file."""
    def __init__(self):
        # Edge(tail, head, capacity)
        self.edgeList = []
        self.edgeList.append(Edge("S","1",4))
        self.edgeList.append(Edge("S","2",3))
        self.edgeList.append(Edge("1","2",3))
        self.edgeList.append(Edge("1","T",4))
        self.edgeList.append(Edge("2","T",5))
    def run_max_concurrent_flow(self,demand):
        """Route a single S->T commodity of the given demand."""
        commodity1 = Commodity("S","T",demand)
        return maximum_concurrent_flow(self.edgeList,[commodity1],karakosta=True,scale_beta=True)
tc1 = TestCase1()
# Manual smoke test for TestCase1 (Python 2 script code).
# Disabled via `if False` — flip to True to run it interactively.
if False:
    for demand in [7,.7,70]:
        result = tc1.run_max_concurrent_flow(demand) #Beta = 3/7
        #print (result['1']['T']['_flow']+result['2']['T']['_flow'])/demand
        print result
        raw_input()  # pause between demands
#Less basic test case: multiple commodities
class TestCase2():
    """Two-commodity case (S->3 and S->6) on a slightly larger hand-checkable
    graph; exposes one runner per algorithm variant so they can be compared."""
    def __init__(self):
        # Edge(tail, head, capacity).
        self.edgeList = []
        self.edgeList.append(Edge("S","1",4))
        self.edgeList.append(Edge("S","4",5))
        self.edgeList.append(Edge("4","1",1))
        self.edgeList.append(Edge("1","2",5))
        self.edgeList.append(Edge("4","5",3))
        self.edgeList.append(Edge("2","5",2))
        self.edgeList.append(Edge("2","3",4))
        self.edgeList.append(Edge("5","6",5))
    def run_max_concurrent_flow(self,demand1,demand2):
        # Baseline (non-Karakostas) maximum concurrent flow.
        peas = Commodity("S","3",demand1)
        carrots = Commodity("S","6",demand2)
        return maximum_concurrent_flow(self.edgeList,[peas,carrots],karakosta=False)
    def run_two_approx(self,demand1,demand2):
        # 2-approximation variant.
        commodities = [Commodity("S","3",demand1),Commodity("S","6",demand2)]
        return two_approx(self.edgeList,commodities)
    def run_karakostas(self, demand1,demand2):
        # Karakostas speed-up of the exact algorithm.
        commodities = [Commodity("S","3",demand1),Commodity("S","6",demand2)]
        return maximum_concurrent_flow(self.edgeList,commodities,karakosta=True)
    def run_both(self, demand1,demand2):
        # 2-approximation combined with the Karakostas speed-up.
        commodities = [Commodity("S","3",demand1),Commodity("S","6",demand2)]
        return two_approx(self.edgeList,commodities,karakosta=True)
tc2 = TestCase2()
# Compare all four algorithm variants over a few demand pairs (Python 2 script).
if True:
    for demand in [(1,.5),(10,10),(4,4)]:
        result = tc2.run_max_concurrent_flow(demand[0],demand[1])
        # print min(result['2']['3']['_flow']/demand[0],result['5']['6']['_flow']/demand[1])
        print result
        print("\n Two approx \n")
        result = tc2.run_two_approx(demand[0],demand[1])
        # print min(result['2']['3']['_flow']/demand[0],result['5']['6']['_flow']/demand[1])
        print result
        print ("\n Karakostas \n")
        result = tc2.run_karakostas(demand[0],demand[1])
        print result
        print ("\n Both \n")
        # NOTE(review): this last result is computed but never printed —
        # the source appears truncated here; confirm against the repository.
        result = tc2.run_both(demand[0],demand[1])
bf47fea292e85b014fcb9aaaee8012a8bcdec76a | Python | sometimescasey/csc2515-411 | /hw1/work/verbose/hw1_code_verbose.py | UTF-8 | 4,099 | 2.671875 | 3 | [] | no_license | import warnings
# conda installed sklearn 0.19.2
# https://github.com/scikit-learn/scikit-learn/pull/11431/files
# Suppress warning for now, manually upgrade to 0.21 later
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
from collections import Mapping, defaultdict
import time # kill this
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
# Constants: file names
CLEAN_REAL = "clean_real.txt"
CLEAN_FAKE = "clean_fake.txt"
# Constants: valid sklearn.DecisionTreeClassifier 'criterion' values
CRITERIA = ['gini', 'entropy']
def load_data():
    """Load real/fake headline files, TF-IDF vectorize them, and return a
    70/21/9 train/validation/test split.

    Returns:
        X_train, X_val, X_test: sparse TF-IDF feature matrices.
        y_train, y_val, y_test: (n, 2) string arrays; column 0 is the raw
            headline text, column 1 is the label ('1' = real, '0' = fake).
            Note the labels become *strings* because text and labels are
            stacked into one numpy array.
    """
    f = open(CLEAN_REAL, "r")
    real_headlines = f.read().splitlines()
    f.close()
    print("real_headlines[0]: {}".format(real_headlines[0]))
    f = open(CLEAN_FAKE, "r")
    fake_headlines = f.read().splitlines()
    f.close()
    print("fake_headlines[0]: {}".format(fake_headlines[0]))
    count_real = len(real_headlines)
    count_fake = len(fake_headlines)
    count_total = count_real + count_fake
    print("count_real: {} | count_fake: {}".format(count_real, count_fake))
    all_headlines_temp = real_headlines + fake_headlines
    all_headlines = np.asarray(all_headlines_temp)
    # vectorizer = CountVectorizer() # Tfidf seems better
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(all_headlines)
    # print("X.shape: {}".format(X.shape))
    # Make labels
    real_y = np.full((count_real, 1), 1) # real headlines get label of 1
    fake_y = np.full((count_fake, 1), 0) # fake headlines get label of 0
    all_y = np.append(real_y, fake_y)
    # Append original headline text so we can refer to it later
    print(all_headlines.shape)
    print(all_y.shape)
    a = all_headlines.reshape(1, count_total)
    b = all_y.reshape(1, count_total)
    y = np.concatenate((a, b), axis=0).T
    print("y.shape: {}".format(y.shape))
    # print(y[: ,0]) # text
    # print(y[: ,1]) # labels
    # check we're correct
    print("{} | {}".format(X[count_real-1].toarray(), y[count_real-1]))
    print("{} | {}".format(X[count_real].toarray(), y[count_real]))
    # 70 / 30 split
    X_train, X_temp, y_train, y_temp = train_test_split(X, y, test_size=0.3, random_state=1)
    # NOTE(review): test_size=0.3 of the remaining 30% yields ~21% val / ~9%
    # test overall, not the 15/15 the original comment claimed — confirm intent.
    X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.3, random_state=1)
    # take a look at the shape of each of these
    print(X_train.shape)
    print(y_train.shape)
    print(X_test.shape)
    print(y_test.shape)
    return X_train, X_val, X_test, y_train, y_val, y_test
def select_model(X_train, y_train, X_val, y_val, max_depth=5):
    """Grid-search decision-tree hyperparameters and report the best model.

    Trains one DecisionTreeClassifier per (max_depth, criterion) combination
    from generate_settings() and scores each on the validation set via
    test_settings().

    :param X_train, y_train: training features and (text, label) rows.
    :param X_val, y_val: validation features and (text, label) rows.
    :param max_depth: largest tree depth to try (depths 1..max_depth).
    :return: (best_setting_dict, best_score). The original returned None;
        returning the winner is backward-compatible for callers that ignore it.
    """
    best_index = -1
    best_score = -1
    settings = generate_settings(max_depth)
    # enumerate() replaces the original settings.index(setting) calls, which
    # were O(n) linear scans performed twice per iteration.
    for index, setting in enumerate(settings):
        print("index: {}".format(index))
        score = test_settings(setting, X_train, y_train, X_val, y_val)
        if score > best_score:
            best_score = score
            best_index = index
    print("Best model is: max_depth = {}, criterion = {}, score = {}".format(
        settings[best_index]["max_depth"],
        settings[best_index]["criterion"],
        best_score))
    return settings[best_index], best_score
def generate_settings(max_depth):
    """Enumerate every hyperparameter combination to try.

    Produces one dict per (depth, criterion) pair, depths 1..max_depth with
    each criterion from the module-level CRITERIA list, in that nesting order.
    """
    return [
        {"max_depth": depth, "criterion": criterion}
        for depth in range(1, max_depth + 1)
        for criterion in CRITERIA
    ]
def test_settings(setting, X_train, y_train, X_val, y_val):
    """Train one decision tree with the given settings and return its
    validation accuracy.

    :param setting: dict with "max_depth" and "criterion" keys.
    :param X_train, X_val: feature matrices.
    :param y_train, y_val: (n, 2) arrays; column 1 holds the labels
        (string '0'/'1' — see load_data()).
    :return: fraction of validation samples predicted correctly.
    """
    clf = DecisionTreeClassifier(
        max_depth=setting["max_depth"],
        criterion=setting["criterion"])
    clf.fit(X=X_train, y=y_train[:, 1]) # train on labels only
    # test on validation set
    y_pred = clf.predict(X=X_val)
    # Element-wise comparison of predicted vs. true labels.
    correct = sum(i == j for i, j in zip(y_pred, y_val[:, 1]))
    score = correct / y_val.shape[0]
    print("max_depth: {} | criterion: {} | score: {}".format(
        setting["max_depth"],
        setting["criterion"].ljust(7),
        score))
    # Same score behaviour as clf.score
    # print(clf.score(X=X_val, y=y_val[:, 1]))
    return score
def compute_information_gain():
    # Stub: intended to compute the information gain of a split for the
    # assignment; currently unimplemented and always returns 0.
    return 0
def main():
    # End-to-end run: load & split the data, then grid-search tree settings
    # for depths up to 20.
    X_train, X_val, X_test, y_train, y_val, y_test = load_data()
    select_model(X_train, y_train, X_val, y_val, 20)
if __name__ == "__main__":
    main()
| true |
b99df2e1f5a09fab099c126ac9e8fa4cd3ef7f5c | Python | jmocay/solving_problems | /binary_tree_find_nodes_with_sum_k.py | UTF-8 | 1,660 | 4.34375 | 4 | [] | no_license | """
Given the root of a binary search tree, and a target k,
return two nodes in the tree whose sum equals k.
For example, given the following tree and k of 20
10
/ \
5 15
/ \
11 15
Return the nodes 5 and 15
"""
from collections import deque
"""
Runs in O(n log(n)) time.
"""
def find_nodes_with_sum_k(root, k):
    """Return the values of two distinct BST nodes summing to k, or None.

    Visits nodes in breadth-first order; for each visited node, searches the
    whole tree (excluding that node itself) for a partner holding k - value.
    O(n log n) overall on a balanced tree.
    """
    pending = deque([root])
    while pending:
        candidate = pending.popleft()
        partner = find_helper(root, candidate, k - candidate.val)
        if partner is not None:
            return (candidate.val, partner.val)
        # Enqueue children left-to-right to keep BFS order.
        pending.extend(
            child for child in (candidate.left, candidate.right)
            if child is not None
        )
    return None
def find_helper(root, node, val):
    """BST lookup for a node holding `val`, skipping the given `node` object.

    Iterative descent: on an equal value at `node` itself, continue to the
    right subtree (where a duplicate would live). Returns None if absent.
    """
    current = root
    while current is not None:
        if current.val == val and current != node:
            return current
        current = current.left if val < current.val else current.right
    return None
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, val):
        self.val = val
        # Children start empty; callers wire them up explicitly.
        self.left = self.right = None
def create_tree():
    """Build and return the sample BST from the module docstring:

            10
           /  \
          5    15
              /  \
            11    15
    """
    root = Node(10)
    root.left = Node(5)
    right = Node(15)
    right.left = Node(11)
    right.right = Node(15)
    root.right = right
    return root
if __name__ == '__main__':
    # Demo: probe several target sums against the sample tree.
    root = create_tree()
    for summ in [10, 15, 16, 20, 21, 25, 26, 30, 31]:
        print('sum:', summ, find_nodes_with_sum_k(root, summ))
| true |
089060a274d1b09a32d26d9eecdf96cf39f14bb6 | Python | ArbelRivitz/Four-in-a-row-game | /runner.py | UTF-8 | 4,114 | 3.1875 | 3 | [] | no_license | #############################################################
# FILE : runner.py
# WRITER : arbelr, noamiel,207904632, 314734302,Arbel Rivitz, Noa Amiel
# EXERCISE : intro2cs ex12 2017-2018
# DESCRIPTION:
# In this exercise we implemented the game Four in a Row. The game is divided
# into several modules, each responsible for one part of the program:
# There is the Game class that has all the game rules and methods.
# there is the Gui class that includes all the graphic parts of the game.
# there is the runner class, that has all the functions for the game to run
# there is the communicator class, that communicates between the server and the client in this game.
# there is the ai class that has the way of how to play
# and there is the four in a row file that that runs the whole game.
#############################################################
import game
import communicator
import gui
from tkinter import messagebox as mb
class Runner():
    """
    This is the main class that runs all the different functions:
    it wires together the Game rules, the Gui, and the Communicator,
    and drives one player's side of a networked Four-in-a-Row match.
    """
    def __init__(self, root, port, ai=None, ip=None):
        """
        :param root: the tkinter root window
        :param port: an endpoint of communication in the computer of the player
        :param ip: the ip address of the server, or None when we *are* the server
        :param ai: an AI player object (with find_legal_move), or None for a human
        """
        self.ai = ai
        self.root = root
        self.port = port
        self.game = game.Game()
        self.communicator = communicator.Communicator(self.root, port, ip)
        self.communicator.connect()
        # Incoming network messages are routed to msg_received().
        self.communicator.bind_action_to_message(self.msg_received)
        if ip is not None: # that means that there is a server
            self.player = self.game.PLAYER_TWO
        else:
            self.player = self.game.PLAYER_ONE
        self.gui = gui.Gui(self.root, self, self.game,self.player)
    def run(self):
        """
        Start the match: let the AI (if any) make its opening move when
        appropriate, then enter the tkinter main loop.
        :return:
        """
        if self.ai is not None and self.game.get_current_player() != self.player:
            self.ai.find_legal_move(self.game, self.do_a_move)
        self.root.mainloop()
    def do_a_move(self, column):
        """
        The whole process of one local move: apply it to the game, update the
        board, notify the opponent, and check for a winner.
        :param column: the column that we want to put the disc in
        :return:
        """
        if self.player != self.game.get_current_player() and column is not None:
            try:
                coords = self.game.make_move(column)
            except Exception as e:
                # Illegal move (e.g. full column): show the error and abort.
                mb.showerror("Error", e)
                return
            if coords is not None:
                new_row = coords[0]
                new_col = coords[1]
                self.gui.update_board(new_row, new_col) # updates the board
                self.communicator.send_message(column) # sends msg to the other
                # player
            self.gui.what_player_is_it(self.player)
            winner, lst = self.game.get_winner()
            self.gui.declare_winner(winner, lst, self.player)
    def msg_received(self, message):
        """
        React to the opponent's move received over the network: replay it on
        our board, check for a winner, and (for an AI player) answer with the
        next move.
        :param message: the column (as a string) where the opponent dropped a disc
        :return:
        """
        column = int(message)
        # NOTE(review): int() never yields None, so this guard is always true;
        # it likely meant to guard against an empty/invalid message — confirm.
        if column is not None:
            new_row, new_col = self.game.make_move(column)
            self.gui.update_board(new_row, new_col)
            winner, lst = self.game.get_winner()
            self.gui.declare_winner(winner, lst, self.player)
            self.gui.what_player_is_it(self.player)
        if self.ai is not None and self.game.get_current_player() != self.player\
                and self.game.get_is_game_on():
            try:
                self.ai.find_legal_move(self.game, self.do_a_move)
            except Exception as e:
                mb.showerror("Error", e)
                return
| true |
dcc23b0f2ba1f769483ac2591b2b7d94b415a98f | Python | daniel-reich/turbo-robot | /87YxyfFJ4cw4DsrvB_15.py | UTF-8 | 1,417 | 3.78125 | 4 | [] | no_license | """
Create a function that takes in parameter `n` and generates an `n x n` (where
`n` is odd) **concentric rug**.
The center of a concentric rug is `0`, and the rug "fans-out", as show in the
examples below.
### Examples
generate_rug(1) ➞ [
[0]
]
generate_rug(3) ➞ [
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
]
generate_rug(5) ➞ [
[2, 2, 2, 2, 2],
[2, 1, 1, 1, 2],
[2, 1, 0, 1, 2],
[2, 1, 1, 1, 2],
[2, 2, 2, 2, 2]
]
generate_rug(7) ➞ [
[3, 3, 3, 3, 3, 3, 3],
[3, 2, 2, 2, 2, 2, 3],
[3, 2, 1, 1, 1, 2, 3],
[3, 2, 1, 0, 1, 2, 3],
[3, 2, 1, 1, 1, 2, 3],
[3, 2, 2, 2, 2, 2, 3],
[3, 3, 3, 3, 3, 3, 3]
]
### Notes
* `n >= 0`.
* Always increment by 1 each "layer" outwards you travel.
"""
import random
import math
def generate_rug(n):
    """Return an n x n "concentric rug" (n odd, n >= 0).

    Cell (row, col) holds its Chebyshev distance max(|row-c|, |col-c|) from
    the center c = n // 2, so the center is 0 and each ring outward
    increments by 1. Returns [] for n == 0.

    This replaces the original mirror-and-copy construction, whose mirrored
    rows were *aliased* (the same list object appended twice), so mutating
    one row silently mutated its mirror. Each row here is an independent list.
    """
    center = n // 2
    return [
        [max(abs(row - center), abs(col - center)) for col in range(n)]
        for row in range(n)
    ]
| true |
e4cd6674f7f91a93af64e7a394ab384e256a3087 | Python | gistable/gistable | /all-gists/3522314/snippet.py | UTF-8 | 2,230 | 3.125 | 3 | [
"MIT"
] | permissive | class Accessor(object):
def __init__(self, wrapper, d):
self.wrapper = wrapper
self.d = d
def __repr__(self):
return repr(self.d)
def _get_failback(self, k):
chained = self.wrapper.chained
if chained:
return chained.data[k]
def __getitem__(self, k):
if self.d is None:
return None
return self.d.get(k) or self._get_failback(k)
def getall(self, k):
r = []
this = self.wrapper
while this:
v = this.data.d.get(k)
if v:
r.append(v)
this = this.chained
return r
def access(self, target):
return target
class Traverser(object):
    """Walks a nested object graph (MoreSpecific -> Specific -> Item),
    building one chained Traverser per level whose Accessor resolves keys
    with fallthrough to deeper levels.

    Dispatch is by class name: traverse() calls traverse_<ClassName>(target),
    so unsupported classes raise AttributeError.
    """
    def __init__(self, accessor_impl=Accessor):
        self.chained = None          # next (deeper) Traverser in the chain
        self.target = None           # the object handled by this level
        self._configured = False     # guard: traverse only runs once
        self._accessor_impl = accessor_impl
    def traverse(self, target):
        if not self._configured:
            # Dispatch on the runtime class name of the target.
            getattr(self, "traverse_"+(target.__class__.__name__))(target)
            self._configured = True
        return self
    def _set_data(self, data):
        # Keep the raw dict and wrap it in this level's Accessor.
        self._data = data
        self.data = self._accessor_impl(self, data)
    def traverse_MoreSpecific(self, s):
        # Top level: record extra_info, then chain into the nested Specific.
        specific = s.specific
        self.target = s
        self._set_data(s.extra_info)
        self.chained = self.__class__(accessor_impl=self._accessor_impl)
        self.chained.traverse(specific)
    def traverse_Specific(self, s):
        # Middle level: record extra_info, then chain into the nested Item.
        item = s.item
        self.target = s
        self._set_data(s.extra_info)
        self.chained = self.__class__(accessor_impl=self._accessor_impl)
        self.chained.traverse(item)
    def traverse_Item(self, item):
        # Leaf level: no further chaining.
        self.target = item
        self._set_data(item.info)
# Fixture classes and a small demo (Python 2 print at the end).
class Item:
    info = {"data": "something", "name": "item"}
class Specific:
    extra_info = {"data": "anything"}
    item = Item()
class MoreSpecific:
    extra_info = {"description": "this-is-long-text"}
    specific = Specific()
# "data" resolves at the Specific level, "name" falls through to Item,
# "description" is answered by MoreSpecific itself.
trv = Traverser().traverse(MoreSpecific())
trv.data["data"] # => anything
trv.data["name"] # => item
trv.data["description"] # => this-is-long-text
print trv.data.getall("data") # => ['anything', 'something']
| true |
3d8344fca13cd573741ad09a1b96fa6a55f28dcb | Python | sanskrit-lexicon/PWK | /pwkvn/step0/page299/addnum.py | UTF-8 | 715 | 2.734375 | 3 | [] | no_license | #-*- coding:utf-8 -*-
"""addnum.py
"""
import sys,re,codecs
def addnum(lines):
    """Insert a zero-padded sequential n="NNNNNNN" attribute into every
    '<p pc=' tag, numbering from 0172001; lines without the marker pass
    through unchanged."""
    base = 172000
    numbered = []
    for offset, text in enumerate(lines, start=1):
        ident = '%07d' % (base + offset)
        numbered.append(re.sub('<p pc=', '<p n="%s" pc=' % ident, text))
    return numbered
def write(fileout,lines):
    """Write LINES to FILEOUT as UTF-8, one record per line, and report the count."""
    with codecs.open(fileout,"w","utf-8") as f:
        for line in lines:
            f.write(line+'\n')
    print(len(lines),"records written to",fileout)
if __name__=="__main__":
filein = sys.argv[1] # pwkvn
fileout = sys.argv[2] #
with codecs.open(filein,"r","utf-8") as f:
lines = [x.rstrip('\r\n') for x in f]
print(len(lines),"lines read from",filein)
newlines = addnum(lines)
write(fileout,newlines)
| true |
5e44dcecd772d92c9769e299ecbd101cc08ee6e8 | Python | IvanaH/PyTest | /Py_ChatDemo/ChatClient.py | UTF-8 | 335 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2017/10/19
@author:Ivana
'''
# Echo client program
import socket,time
# Python 2 one-shot chat client: connect to a hard-coded server, send a
# greeting, print the reply, and close.
HOST = '10.1.80.209'  # server address (LAN IP, hard-coded)
PORT = 43502          # server port
a = (HOST,PORT)
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)  # TCP socket
s.connect(a)
s.sendall("Hello World")  # Py2 str doubles as bytes here
data = s.recv(1024)       # block until up to 1 KiB arrives
s.close()
print"Recieve msg:",data  # (typo "Recieve" is runtime output — kept as-is)
b65f86fe3ecdba9c95fee59426c58447372d15b7 | Python | kbharathala/ProjectEuler | /Question 24.py | UTF-8 | 513 | 3.515625 | 4 | [] | no_license | def fact(num):
ans = 1
for n in range(num,1,-1):
ans = ans * n
return ans
def perm(number):
list = [0,1,2,3,4,5,6,7,8,9]
ans = []
hold = number
for n in range(9,-1,-1):
residue = int(hold/fact(n))
#print(residue)
ans.append(list[residue])
list.pop(residue)
hold = hold - (fact(n) * residue)
return ans
#for n in range(1,10):
# print(fact(n))
string = ''
for each in perm(999999):
string = string + str(each)
print(string)
| true |
2d87d3933cf381a5acfd2f32a6a57639754b228b | Python | idow09/follow_up | /data_structures.py | UTF-8 | 3,478 | 2.90625 | 3 | [] | no_license | import math
from utils.utils import *
@auto_str
class SampleData:
    """
    A class to bundle all data for a single image (image_path, labels, predictions, stats, etc.)
    """
    def __init__(self, path, labels, preds, time, scale, algo_id):
        self.path = path        # image path
        self.labels = labels    # ground-truth annotations (presumably Label objects — confirm)
        self.preds = preds      # detections (presumably Prediction objects — confirm)
        self.stats = {}         # per-image statistics, filled in later
        self.time = time        # processing time (units not shown here — TODO confirm)
        self.scale = scale      # image/detection scale factor — TODO confirm semantics
        self.algo_id = algo_id  # identifier of the producing algorithm
@auto_str
class Prediction:
    """
    Contains coordinates as well as score, a (calculated) matched label, and some stats.

    A prediction is a circle: center (x, y), radius r, plus a detector
    confidence score. match_label() pairs it with the best ground-truth label.
    Note: `np` is expected to come from the wildcard utils.utils import — confirm.
    """
    def __init__(self, x, y, r, score):
        self.x = x
        self.y = y
        self.r = r
        self.score = score
        # Filled in later by match_label():
        self.matched_label = None
        self.iou = None
        self.center_dist = None
    def calc_rect_iou(self, label):
        """
        Prefer :calc_circle_iou
        IoU of the axis-aligned bounding boxes of the two circles; the "+ 1"
        terms treat coordinates as inclusive pixel indices.
        """
        box_a = [self.x - self.r, self.y - self.r, self.x + self.r, self.y + self.r]
        box_b = [label.x - label.r, label.y - label.r, label.x + label.r, label.y + label.r]
        # determine the (x, y)-coordinates of the intersection rectangle
        x_a = max(box_a[0], box_b[0])
        y_a = max(box_a[1], box_b[1])
        x_b = min(box_a[2], box_b[2])
        y_b = min(box_a[3], box_b[3])
        # compute the area of intersection rectangle
        inter_area = max(0, x_b - x_a + 1) * max(0, y_b - y_a + 1)
        # compute the area of both the prediction and ground-truth
        # rectangles
        box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
        box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
        return inter_area / float(box_a_area + box_b_area - inter_area)
    def calc_circle_iou(self, label):
        """Exact intersection-over-union of this circle and the label's circle."""
        r1 = self.r
        r2 = label.r
        # d: distance between the two centers.
        d = np.linalg.norm(np.array([self.x, self.y]) - np.array([label.x, label.y]))
        if d > (r1 + r2):  # No congruent (the circles are disjoint)
            return 0
        if d <= abs(r1 - r2):  # One inside another
            if (r1 * r2) == 0:
                # A zero-radius circle: IoU is 1 only if both are degenerate.
                return (r1 == r2) * 1.0
            iou = r1 ** 2 / r2 ** 2
            return iou if r1 < r2 else 1 / iou
        # Partial overlap: `a` is the lens (circle-circle intersection) area.
        a = r1 ** 2 * np.arccos((d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1)) + \
            r2 ** 2 * np.arccos((d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2)) - \
            0.5 * math.sqrt((-d + r1 + r2) * (d + r1 - r2) * (d - r1 + r2) * (d + r1 + r2))
        return a / (np.pi * r1 ** 2 + np.pi * r2 ** 2 - a)
    def calc_center_dist(self):
        # Euclidean distance between this prediction's center and its matched
        # label's center; no-op when no label is matched yet.
        if self.matched_label is None:
            return
        a = np.array([self.x, self.y])
        b = np.array([self.matched_label.x, self.matched_label.y])
        self.center_dist = np.linalg.norm(a - b)
    def match_label(self, labels):
        """
        Match the prediction with the most probable (highest IoU) label from the given list.
        If None found, no matched_label will be stored.
        :param labels: The pool of labels to match with.
        """
        match_iou = 0
        match = None
        for label in labels:
            iou = self.calc_circle_iou(label)
            if iou > match_iou:
                match_iou = iou
                match = label
        self.matched_label = match
        self.iou = match_iou
        self.calc_center_dist()
@auto_str
class Label:
    """
    A class to bundle the coordinates for a label (x, y, r):
    a ground-truth circle with center (x, y) and radius r.
    """
    def __init__(self, x, y, r):
        self.x = x
        self.y = y
        self.r = r
| true |
5d926632a1e63f82ef08795a7d5a1ca707e6f9ca | Python | kentaro7214/programs | /for_chords.py | UTF-8 | 6,026 | 2.84375 | 3 | [] | no_license | #%% 引数1にBPM,引数2にループ回数をとってる。
import random
import os
import numpy
from scipy.io import wavfile
import pyaudio
import wave
import sys
import fcntl
import time
import pandas as pd
import math
import threading
#%% Chord/degree/frequency definitions; key_frequency maps key name -> frequency (Hz).
tone = ["Ab","A","A#","Bb","B","C","C#","Db","D","D#","Eb","E","F","F#","Gb","G","G#"]
chord = ["M7","m7","7","m7b5"]
tone_deg = {"Ab":-1,"A":0,"A#":1,"Bb":1,"B":2,"C":-9,"C#":-8,"Db":-8,"D":-7,"D#":-6,"Eb":-6,"E":-5,"F":-4,"F#":-3,"Gb":-3,"G":-2,"G#":-1}
chord_deg = ["M7","m7","7","m7b5"]
key_frequency = {}
degree = -9
for k,v in tone_deg.items():
key_frequency[k] = 440*2**(int(v)/12)
#%% Helper that generates a sine wave.
def sine(frequency, length, rate):
    """Return `length` seconds of a sine wave at `frequency` Hz, sampled at
    `rate` Hz, as a 1-D numpy array of amplitudes in [-1, 1]."""
    n_samples = int(length * rate)
    phase_step = float(frequency) * (math.pi * 2) / rate  # radians per sample
    return numpy.sin(numpy.arange(n_samples) * phase_step)
#%% Build each chord type in equal temperament (powers of 2^(semitones/12)
#   above the root). Just intonation would beat/shimmer differently.
def Mm7(frequency, length, rate):
    """Dominant-seventh ("7") chord waveform on the given root frequency.

    Semitones above the root: 0, 4, 7, 10 (root, major 3rd, perfect 5th,
    minor 7th). Used by play_7().
    """
    # Generate one sine wave per chord tone.
    src = []
    src.append(sine(frequency,length,rate))
    src.append(sine(frequency * math.pow(2,(4/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(7/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(10/12.0)),length,rate))
    res = numpy.array([0] * len(src[0])) # dummy zero array to accumulate into
    # Sum the partials, then scale down to limit clipping.
    for s in src:
        res = res + s
    res *= 0.5
    return res
def play_7(stream, frequency, length=240/int(sys.argv[1]), rate=44100):
    """Render a dominant-7th chord on `frequency` and write it to the stream.

    Note: the default `length` (4 beats at the BPM from argv[1]) is evaluated
    once at import time, and importing without a CLI argument would raise.
    """
    chunks = []
    chunks.append(Mm7(frequency, length, rate))
    chunk = numpy.concatenate(chunks) * 0.25  # master volume scaling
    stream.write(chunk.astype(numpy.float32).tobytes())
def M7(frequency, length, rate):
    """Major-seventh chord waveform: semitones 0, 4, 7, 11 above the root."""
    # Generate one sine wave per chord tone.
    src = []
    src.append(sine(frequency,length,rate))
    src.append(sine(frequency * math.pow(2,(4/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(7/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(11/12.0)),length,rate))
    res = numpy.array([0] * len(src[0])) # dummy zero array to accumulate into
    # Sum the partials, then scale down to limit clipping.
    for s in src:
        res = res + s
    res *= 0.5
    return res
def play_M7(stream, frequency, length=240/int(sys.argv[1]), rate=44100):
    """Render a major-7th chord on `frequency` and write it to the stream.
    (Default `length` is 4 beats at argv[1] BPM, bound at import time.)"""
    chunks = []
    chunks.append(M7(frequency, length, rate))
    chunk = numpy.concatenate(chunks) * 0.25  # master volume scaling
    stream.write(chunk.astype(numpy.float32).tobytes())
def m7(frequency, length, rate):
    """Minor-seventh chord waveform: semitones 0, 3, 7, 10 above the root."""
    # Generate one sine wave per chord tone.
    src = []
    src.append(sine(frequency,length,rate))
    src.append(sine(frequency * math.pow(2,(3/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(7/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(10/12.0)),length,rate))
    res = numpy.array([0] * len(src[0])) # dummy zero array to accumulate into
    # Sum the partials, then scale down to limit clipping.
    for s in src:
        res = res + s
    res *= 0.5
    return res
def play_m7(stream, frequency, length=240/int(sys.argv[1]), rate=44100):
    """Render a minor-7th chord on `frequency` and write it to the stream.
    (Default `length` is 4 beats at argv[1] BPM, bound at import time.)"""
    chunks = []
    chunks.append(m7(frequency, length, rate))
    chunk = numpy.concatenate(chunks) * 0.25  # master volume scaling
    stream.write(chunk.astype(numpy.float32).tobytes())
def m7b5(frequency, length, rate):
    """Half-diminished (m7b5) chord waveform.

    Semitones above the root: 0, 3, 6, 10 (root, minor 3rd, *diminished* 5th,
    minor 7th). BUG FIX: the original used 3, 7, 9 — a perfect fifth and a
    major sixth — which is a minor-*sixth* chord, not m7b5.
    """
    # Generate one sine wave per chord tone.
    src = []
    src.append(sine(frequency,length,rate))
    src.append(sine(frequency * math.pow(2,(3/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(6/12.0)),length,rate))
    src.append(sine(frequency * math.pow(2,(10/12.0)),length,rate))
    res = numpy.array([0] * len(src[0])) # dummy zero array to accumulate into
    # Sum the partials, then scale down to limit clipping.
    for s in src:
        res = res + s
    res *= 0.5
    return res
def play_m7b5(stream, frequency, length=240/int(sys.argv[1]), rate=44100):
    """Render an m7b5 chord on `frequency` and write it to the stream.
    (Default `length` is 4 beats at argv[1] BPM, bound at import time.)"""
    chunks = []
    chunks.append(m7b5(frequency, length, rate))
    chunk = numpy.concatenate(chunks) * 0.25  # master volume scaling
    stream.write(chunk.astype(numpy.float32).tobytes())
#%% Play one chord: dispatch on the chord-type string that was chosen.
def play_chord(chord,key):
    """Open a fresh PyAudio output stream, play one chord, and close it.

    :param chord: chord-type string ("M7", "7", "m7" or "m7b5")
    :param key: root key name; looked up in the module-level key_frequency map
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paFloat32,
                    channels=1, rate=44100, output=1)
    # Dispatch by exact string match; unknown chord names play nothing.
    if chord == "M7":
        play_M7(stream,frequency=key_frequency[key])
        #print(key_frequency[key])
        #print("M7")
    if chord == "7":
        play_7(stream,frequency=key_frequency[key])
        #print(key_frequency[key])
        #print("7")
    if chord == "m7":
        play_m7(stream,frequency=key_frequency[key])
        #print(key_frequency[key])
        #print("m7")
    if chord == "m7b5":
        play_m7b5(stream,frequency=key_frequency[key])
        #print(key_frequency[key])
        #print("m7b5")
    stream.close()
    p.terminate()
def metro(BPM):
    """Four-beat count-in at the given tempo: one 'Tink' then three 'Pop's.

    Uses macOS-only `afplay` and the system sound files; each beat is
    60/BPM seconds apart.
    """
    os.system("afplay /System/Library/Sounds/Tink.aiff")
    time.sleep(60/BPM)
    os.system("afplay /System/Library/Sounds/Pop.aiff")
    time.sleep(60/BPM)
    os.system("afplay /System/Library/Sounds/Pop.aiff")
    time.sleep(60/BPM)
    os.system("afplay /System/Library/Sounds/Pop.aiff")
    time.sleep(60/BPM)
#%%
#sys.argv[2] = 2
# Main practice loop: argv[1] = BPM, argv[2] = number of 4-chord rounds.
# Each round picks four random (key, chord-type) pairs, prints the progression,
# counts in with the metronome, then plays the chords one bar apart.
count = 1
while count <= int(sys.argv[2]):
    print("loop" + str(count) + ": 3. 2. 1." )
    k1 = random.choice(tone)
    c1 = random.choice(chord)
    k2 = random.choice(tone)
    c2 = random.choice(chord)
    k3 = random.choice(tone)
    c3 = random.choice(chord)
    k4 = random.choice(tone)
    c4 = random.choice(chord)
    C1 = k1 + c1
    C2 = k2 + c2
    C3 = k3 + c3
    C4 = k4 + c4
    # One padded column per chord so the progression lines up visually.
    print(C1.ljust(16," ")+C2.ljust(16," ")+C3.ljust(16," ")+C4.ljust(16," "))
    metro(BPM=int(sys.argv[1]))
    print(" |")
    #metro(BPM=int(sys.argv[1]))
    play_chord(chord=c1,key=k1)
    print(" |")
    #metro(BPM=int(sys.argv[1]))
    play_chord(chord=c2,key=k2)
    print(" |")
    #metro(BPM=int(sys.argv[1]))
    play_chord(chord=c3,key=k3)
    print(" |")
    #metro(BPM=int(sys.argv[1]))
    play_chord(chord=c4,key=k4)
    count +=1
    time.sleep(60/int(sys.argv[1]))
print("Finish")
os.system("afplay /System/Library/Sounds/Bottle.aiff")  # end-of-session chime
| true |
88ec8cff2274a35a0679a87a5f961fae65ce4974 | Python | LnC-Study/Acmicpc-net | /Dynamic Programming/2011 암호코드/sdk_python.py | UTF-8 | 804 | 3.109375 | 3 | [] | no_license | MOD = 1000000
def data_in():
    """Read the encoded digit string from stdin."""
    return input()
def isAlpha(prev, ch2):
    """Return True when the previous character and ch2 form a valid
    two-digit letter code (10..26, i.e. 'J'..'Z'); False when there is
    no previous character."""
    if prev['ch'] is None:
        return False
    return 10 <= int(prev['ch'] + ch2) <= 26
def solution( code):
    """Count the decodings of a digit string (A=1 .. Z=26), modulo MOD.

    Rolling DP over the characters: prev[-1] holds {'ch': last char,
    'count': #decodings up to it}; prev[-2] is the same one position earlier.
    Returns 0 for any invalid encoding (non-digit, or an impossible '0').
    """
    prev = [{'ch': None, 'count': 1} for _ in range(2)]
    for ch in code:
        # Reject non-digit characters outright.
        try: int(ch)
        except: return 0
        if ch == '0':
            # '0' can only be the second digit of "10" or "20".
            if prev[-1]['ch'] not in ['1','2']: return 0
            else:
                count = prev[-2]['count']
        else:
            count = prev[-1]['count']
            # If the previous digit and ch form 10..26, the pair may also
            # decode as one letter, adding the count from two positions back.
            if isAlpha( prev[-1], ch):
                count += prev[-2]['count']
        # Shift the rolling window forward by one character.
        prev[-2], prev[-1] = prev[-1], {'ch': ch, 'count': count % MOD}
    return prev[-1]['count']
if __name__ == '__main__':
code = data_in()
ans = solution( code)
print( ans) | true |
01b6dd6d7c334187a3daed7210f51cff2b65ad37 | Python | bak-minsu/geditdone | /geditdone/stories/US23.py | UTF-8 | 917 | 3.0625 | 3 | [] | no_license | from geditdone.error_objects import GedcomError, ErrorType
def unique_names_and_birth_date(parser):
"""Returns errors for individuals sharing a name and a birth date"""
individuals = parser.individuals
errors = []
indivs_by_name_birthdate = {}
for indi in individuals.values():
# If name or birth are not given for the individual, these keys will be None
# This means two people with the same name and no birthday will give a warning
name_birthdate = (indi.name, indi.birth)
if name_birthdate in indivs_by_name_birthdate:
other_indi = indivs_by_name_birthdate[name_birthdate]
errorMessage = f'Individual {indi} has the same name and birth date as {other_indi}'
errors.append(GedcomError(ErrorType.anomaly, 'US23', indi, errorMessage))
else:
indivs_by_name_birthdate[name_birthdate] = indi
return errors | true |
e64e774b10694402fad2d039ba7b759dc6eea992 | Python | alphasaft/werewolf_bot | /bot/devtools.py | UTF-8 | 15,885 | 2.75 | 3 | [] | no_license | import discord
from discord.ext.commands import has_permissions
import asyncio
import datetime
import time
import re
import random
from assets.constants import PREFIX, TIMEZONE
# Exceptions
class DevCommandNotFound(discord.DiscordException):
def __init__(self, command):
self._msg = "The dev command %s couldn't be found" % command
def __str__(self):
return self._msg
class TransformationError(discord.DiscordException):
pass
# Assets
class _AsyncIterator:
"""Asynchronous iterator on an iterable"""
def __init__(self, iterable):
self._iterable = iterable
self._i = 0
def __aiter__(self):
return self
def __iter__(self):
return self
async def __anext__(self):
try:
result = next(self)
except StopIteration:
raise StopAsyncIteration() from None
else:
return result
def __next__(self):
if self._i >= len(self._iterable):
raise StopIteration()
result = self._iterable[self._i]
self._i += 1
return result
def flatten(self):
return list(self._iterable)
# core
class _MockedUser(discord.User):
"""
Mocks a discord user, allowing the DevTool class instances to fool discord.
If a method doc starts with, or contains, "Mocked.", this means that this method won't do what it should initially
do, but just _inform() the mocked user's owner, or something very similar to it. Read its whole doc for further
info.
If a method doc starts with, or contains, "Delegated.", this means that this method will call self.parent.theMethod
with the given args, and return the result. Read its whole doc for further info.
Notice that discord's commands checks such as has_permissions are applied on self.parent.
"""
__slots__ = (
'id', 'name', 'discriminator', 'avatar', 'bot', 'system', 'parent', '_created_at', '_history', '_relationship'
)
    def __init__(self, _id: int, name: str, parent: discord.User):
        """
        We copy the data of the discord.User PARENT, except for the name and the id : they are manually chosen.
        Warning, even if a _MockedUser is created by a bot, its attribute self.bot is always False.
        We initialize a few private attributes too, needed for mocking.

        :param _id: fake user id (not a real discord snowflake)
        :param name: display name of the mocked user
        :param parent: the real discord.User who owns this mock
        """
        self.id = _id
        self.name = name
        self.bot = False
        # Copying
        self.parent = parent # We keep the parent too
        self.discriminator = parent.discriminator
        self.avatar = parent.avatar
        self.system = parent.system
        # Mocking-needed attributes
        self._created_at = datetime.datetime.utcfromtimestamp(time.time())  # "account creation" = mock creation
        self._relationship = None  # see the relationship property for codes
        self._history = []         # messages sent via self.send()
@property
def default_avatar(self):
"""Delegated. Returns self.parent.default_avatar"""
return self.parent.default_avatar
@property
def default_avatar_url(self):
"""Delegated. Returns self.parent.default_avatar_url"""
return self.parent.default_avatar_url
@property
def display_name(self):
"""Returns self.name"""
return self.name
@property
def mention(self):
"""Returns @{self.name}. This isn't a legal discord mention, but it looks like one of them."""
return "@%s" % self.name
@property
def dm_channel(self):
"""
Delegated. As _MockedUsers doesn't own their own dm_channel, we just return the parent's dm_channel.
Notice that it is better to do myMockedUser.send(...) that myMockedUser.dm_channel.send(...), because the first
solution informs self.parent that the message was originally destined to his mocked user.
"""
return self.parent.dm_channel
@property
def color(self):
"""
Delegated. Because the mocked users have the same permissions as their owner, they should have the same color
too. We also returns self.parent.color
"""
return self.parent.color
@property
def colour(self):
"""Alias for self.color"""
return self.color
@property
def relationship(self):
"""
Returns the current state of the relationships between this user and the bot.
None -> No relationship
1 -> friend
2 -> blocked
3 -> incoming_request
4 -> outgoing_request
If you just want to know if this mocked user is blocked / friend with you, use self.is_blocked() or
self.is_friend()
Practically, this could never return 1 or 4, because mocked user cannot accept or send friend requests.
"""
return self._relationship
@property
def created_at(self):
"""Returns a datetime.datetime object corresponding at the initialization with __init__() of self"""
return self._created_at
async def _inform(self, message: str):
"""Calls self.parent.send(message % self.name)"""
await self.parent.send(message % self.name)
    def _push_history(self, message):
        """Deletes the header of the message's content (ex. "<To aUser>") and appends the message to the history.

        Note: mutates the passed message's content in place.
        """
        message.content = re.sub(r"<To .+> +", "", message.content)
        self._history.append(message)
    async def send(self, content=None, *, tts=False, embed=None, file=None, files=None, delete_after=None, nonce=None):
        """
        Mocked. Sends a message. This actually sends <To {self.name}> {content} to self.parent if content is provided,
        else just calls self.parent.send() with the given args.

        When only an embed is given, its title is temporarily prefixed with
        "<To name>" for delivery and restored afterwards.
        :raises ValueError: if neither content nor embed is provided.
        :return: the sent message (also recorded in the mock's history).
        """
        if content:
            content = "<To %s> %s" % (self.name, content)
        elif embed:
            # Remember the original title so it can be restored after sending.
            title = embed.title
            embed.title = "<To %s> %s" % (self.name, title)
        else:
            raise ValueError("One of CONTENT or EMBED should be provided")
        request = dict(
            content=content,
            tts=tts,
            embed=embed,
            file=file,
            files=files,
            delete_after=delete_after,
            nonce=nonce
        )
        ret = await self.parent.send(**request)
        # Record the delivered message (with the "<To ...>" header stripped).
        self._push_history(ret)
        if embed and not content:
            # Undo the temporary title mutation on the caller's embed.
            embed.title = title
        return ret
def avatar_url_as(self, *, format=None, static_format='webp', size=1024):
"""Delegated. Returns self.parent.avatar_url_as(...) with the given args"""
return self.parent.avatar_url_as(
format=format,
static_format=static_format,
size=size
)
def is_avatar_animated(self):
"""Delegated. Returns self.parent.is_avatar_animated"""
return self.parent.is_avatar_animated()
async def send_friend_request(self):
"""
Warns the user self.parent that someone tried to send a friend request to his mocked user, and set
self.relationship to 3
"""
self._relationship = 3
await self._inform("Someone tried to send a friend request to your mocked user %s")
async def remove_friend(self):
"""
Warns the user that someone removed from his friends his mocked user. It only removes friend invitations if some
were sent, because a mocked user cannot accept friends invitation.
"""
if self.relationship == 3:
self._relationship = None
await self._inform("Someone removed your mocked user %s from his friends.")
    async def block(self):
        """
        Warns self.parent that someone blocked his mocked user.
        In fact, self can always send messages, blocked or not. However, it sets self.relationship to 2
        (the "blocked" state checked by is_blocked()).
        """
        self._relationship = 2
        await self._inform("Someone blocked you mocked user %s")
    def is_blocked(self):
        """Returns True if self was blocked using self.block() (relationship == 2), False otherwise."""
        return self.relationship == 2
    def is_friend(self):
        """Returns True if self is friend with the client. Always False in practice: nothing sets relationship to 1."""
        return self.relationship == 1
    def typing(self):
        """Delegated. Given self is not a real user and doesn't own a DMChannel, returns self.parent.typing() instead."""
        return self.parent.typing()
    async def trigger_typing(self):
        """
        Delegated. Given self is not a real user and doesn't own a DMChannel, this triggers self.parent's typing
        instead.
        """
        await self.parent.trigger_typing()
async def create_dm(self):
"""Delegated. This create a DM channel with self.parent if it doesn't already exist, and returns it."""
if not self.parent.dm_channel:
return self.parent.create_dm()
else:
return self.parent.dm_channel
async def fetch_message(self, id):
"""Search a message sent through self.send that have the corresponding id."""
for message in self._history:
if message.id == id:
return message
raise ValueError("No message with id %i was sent to this user." % id)
    def mentioned_in(self, message):
        """
        Returns True if self was mentioned in message. Discord won't recognize mentions of self (because self.id
        isn't an official id), so we just return True if self.mention is in message.content, False otherwise.
        """
        return self.mention in message.content
def history(self, *, limit=100, before=None, after=None, around=None, oldest_first=None):
"""
Returns an AsyncIterator object that contains the LIMIT last messages sent trough self.send.
Because tests using the DevTool and so _MockedUsers shouldn't take long, I haven't implemented the BEFORE, AFTER
and AROUND features. It won't raise anything if you try to use them, but it won't work.
"""
if oldest_first:
return _AsyncIterator(self._history[:limit])
else:
return _AsyncIterator(self._history[-limit:])
async def pins(self):
"""
Returns a list of messages that are both pinned on the DMChannel of self.parent and contained in self.history
(so that were sent trough self.send())
"""
return [message for message in await self.parent.pins() if message in self._history]
    def permissions_in(self, channel):
        """
        Delegated. Returns self.parent.permissions_in(channel), and is basically equivalent to
        channel.permissions_for(self.parent)
        """
        return self.parent.permissions_in(channel)
    async def profile(self):
        """Delegated. Gets the profile of self.parent (so his flags etc), swaps its user for self and returns it."""
        profile = await self.parent.profile()
        # Present the parent's profile as if it belonged to this mocked user.
        profile.user = self
        return profile
class DevTool:
    """Development helper that manages a set of _MockedUsers and re-attributes messages to them.

    One real discord user (``master``) acts as the parent of every mocked user;
    ``logged_in`` is the mocked user whose identity is currently impersonated.
    """
    # System-message templates sent back to the channel by process_commands().
    USERS_CREATED = """<SYSTEM> The users %s were successfully created."""
    LOGGED_AS = """<SYSTEM> Logged in as %s"""
    INFO = """<SYSTEM> Users : %s | Logged in as : %s"""
    def __init__(self, master=None):
        """Initialise with an optional real discord user as MASTER; no mocked users yet."""
        self.master = master
        self.users = {}
        self.logged_in = None
    def reset(self):
        """Calls self.__init__() — NOTE(review): this also resets master to None, so
        set_master() must be called again before create_users()."""
        self.__init__()
    def set_master(self, master):
        """Set the real discord user that will parent all mocked users."""
        self.master = master
    def create_users(self, users, master=None):
        """
        Create a _MockedUser for each user in USERS.
        Their parent is either MASTER if provided or self.master, and their id are randomly chosen between 10**20 and
        10**30. Notice that even if there's few chances that it happens, it can pick already existing ids.
        """
        if not (master or self.master):
            raise NameError("Got no master")
        # Shortcut: a single numeric argument N (<= 26) creates users named A, B, C, ...
        if len(users) == 1 and users[0].isnumeric() and int(users[0]) <= 26:
            user_count = int(users[0])
            users = []
            for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[:user_count]:
                users.append(letter)
        for user in users:
            _id = random.randint(10**20, 10**30)
            name = user
            parent = master or self.master
            self.users[user] = _MockedUser(_id, name, parent)
        # Auto-log the first created user if nobody is impersonated yet.
        if not self.logged_in:
            self.log(users[0])
        return users
    def log(self, user):
        """Log the discord user self.master in as USER (must have been created first)."""
        if user not in self.users:
            raise NameError("User %s doesn't exist." % user)
        self.logged_in = self.users[user]
    async def process_commands(self, msg):
        """
        Try to execute the command contained in the discord.Message msg.
        It could raise exceptions, either from commands, or DevCommandNotFound if the command wasn't found
        """
        msg.content = msg.content.strip()
        def _is_command(cmd):
            # PREFIX is a module-level constant defined elsewhere in this file.
            return msg.content.startswith(PREFIX + cmd)
        if _is_command("create"):
            users = msg.content.split()[1:]
            users = self.create_users(users)
            await msg.channel.send(self.USERS_CREATED % ", ".join(users))
        elif _is_command("log"):
            user = msg.content.split()[1]
            self.log(user)
            await msg.channel.send(self.LOGGED_AS % user)
        elif _is_command("info"):
            await msg.channel.send(self.INFO % (", ".join(self.users), self.logged_in or "Nobody"))
        else:
            raise DevCommandNotFound(msg.content.split()[0])
    def transform_ctx(self, ctx):
        """Transform the discord Context CTX so that for discord, its author is self.logged_in, and returns it"""
        ctx.message = self.transform_msg(ctx.message)
        return ctx
    def transform_msg(self, msg):
        """Transform the discord Message MSG so that for discord, its author is self.logged_in, and returns it"""
        if not self.logged_in:
            raise TransformationError("No user was logged in")
        msg.author = self.logged_in
        return msg
def __implement_dev_commands__(bot):
    """
    Implements additional development commands, such as !devmode or !forall.
    You must have the permission to manage the server to use them.
    These commands are auto-implemented in the ExtendedBot class.
    """
    @bot.command(help="[dev] Active le mode developpeur.")
    @has_permissions(administrator=True)
    async def devmode(ctx):
        # Toggle dev mode; entering it binds the invoking admin as the devtool master.
        bot.devmode = not bot.devmode
        await ctx.channel.send("Devmode set to %s" % str(bot.devmode).lower())
        if bot.devmode:
            bot.devtool.set_master(ctx.author)
        else:
            bot.devtool.reset()
    @bot.command(help="[dev] Execute la commande qui suit pour tous les utilisateurs simulés")
    @has_permissions(administrator=True)
    async def forall(ctx, *cmd):
        # Replays CMD once per mocked user, then restores the previously logged user.
        # NOTE(review): near-duplicate of _as below; could share a helper.
        if not bot.devmode:
            await ctx.channel.send("Error : The devmode isn't on.")
            return
        # Author was truncated
        ctx.message.author = ctx.message.author.parent
        ctx.message.content = " ".join(cmd)
        old_logged = bot.devtool.logged_in
        for user in bot.devtool.users.values():
            bot.devtool.log(user.name)
            await bot.process_commands(ctx.message)
        # NOTE(review): assumes old_logged is not None — true once any user was created.
        bot.devtool.log(old_logged.name)
    @bot.command(name='as', help="[dev] Execute une commande en tant qu'utilisateur simulé")
    @has_permissions(administrator=True)
    async def _as(ctx, user, *cmd):
        # Runs CMD once while impersonating USER, then restores the previous login.
        if not bot.devmode:
            await ctx.channel.send("Error : The devmode isn't on.")
            return
        # Author was truncated
        ctx.message.author = ctx.message.author.parent
        ctx.message.content = " ".join(cmd)
        old_logged = bot.devtool.logged_in
        bot.devtool.log(user)
        await bot.process_commands(ctx.message)
        bot.devtool.log(old_logged.name)
| true |
43db991ba3f3d5618254787c4ed9e2acc670ee43 | Python | akashsengupta1997/continuous_optimisation | /evolution_strategy.py | UTF-8 | 16,534 | 3.3125 | 3 | [] | no_license | import numpy as np
import math
import time
class EvolutionStrategy:
    """
    Contains all methods for evolutionary strategy optimisation.

    Implements a (mu, lambda) / (mu + lambda) ES with self-adaptive step sizes and
    correlated mutations (rotation angles), minimising ``objective_func`` over a
    box-constrained domain [-500, 500]^num_control_vars.
    """
    def __init__(self, objective_func, num_control_vars, elitist=False,
                 full_discrete_recombination=False, global_recombination=False):
        """
        :param objective_func: function to minimise
        :param num_control_vars: number of control variables
        :param elitist: bool, use elitist selection if true
        :param full_discrete_recombination: bool, use local-discrete recombination on both
        control variables and strategy parameters if true.
        :param global_recombination: bool, use global-discrete recombination on both control
        variables and strategy parameters if true.
        """
        self.objective_func = objective_func
        self.num_control_vars = num_control_vars
        self.allowed_evals = 2000  # Don't need all 10000 allowed evaluations
        self.elitist = elitist
        self.full_discrete_recombination = full_discrete_recombination
        self.global_recombination = global_recombination
        # (mu, lambda) = (55, 385): 7 children per parent.
        self.num_parents = 55
        self.num_children = self.num_parents * 7
        self.recombination_weight = 0.5  # weight for intermediate recombination
    # NOTE(review): "intial" is a typo but the name is part of the public interface.
    def generate_intial_population(self):
        """
        Generate an initial population with num_children solutions.
        Solutions are represented as a tuple with 3 elements. The first element is a
        5-dimensional vector representing the control variables. The second element is
        a 5-dimensional vector representing variances/step sizes. The third element,
        representing 10 rotation angles, is a 5-by-5 skew-symmetric matrix
        with all diagonal elements equal to 0.
        The population is then a list of tuples.
        :return: initial population.
        """
        population = []
        for i in range(self.num_children):
            control_vars = 500*np.random.uniform(-1.0, 1.0, self.num_control_vars)
            # Choose small initial values to get PSD covariance matrix post-mutation
            stds = 0.01*np.ones(self.num_control_vars)
            rot_angles = np.zeros((self.num_control_vars, self.num_control_vars))
            population.append([control_vars, stds, rot_angles])
        return population
    def select_parents(self, children, previous_parents):
        """
        Deterministically select parents from children or from children + previous parents,
        depending on whether self.elitist is true or not.
        :param children: list of solutions (tuples)
        :param previous_parents: list of solutions (tuples)
        :return: parents
        """
        if self.elitist and previous_parents is not None:
            population = children + previous_parents
        else:
            population = children
        # Check that all control variables within bounds - store the population indices of
        # invalid solutions, for later removal from population
        invalid_indices = []
        for i in range(len(population)):
            control_vars = population[i][0]
            if np.any(control_vars > 500) or np.any(control_vars < -500):
                invalid_indices.append(i)
        # Assess population
        # Using map is a little bit faster than appending in for loop
        # NOTE(review): invalid solutions are still evaluated here (and counted in
        # num_func_evals) even though they can never be selected below.
        fvals = list(map(self.objective_func, [solution[0] for solution in population]))
        sorted_indices = np.argsort(fvals)
        # Remove invalid indices from sorted_indices list
        for index in invalid_indices:
            sorted_indices = list(sorted_indices)
            sorted_indices.remove(index)
        # Select top num_parents solutions as new parents
        parents = []
        for i in range(self.num_parents):
            parents.append(population[sorted_indices[i]])
        num_func_evals = len(fvals)
        children_fvals = fvals[:self.num_children]
        return parents, num_func_evals, children_fvals
    def construct_cov_matrix(self, rotation_angles, stds):
        """
        Construct a PSD covariance matrix from rotation angles and standard deviations (for a
        single solution).
        :param rotation_angles
        :param stds
        :return: covariance matrix
        """
        cov_matrix = np.zeros((self.num_control_vars, self.num_control_vars))
        for i in range(self.num_control_vars):
            for j in range(self.num_control_vars):
                if i == j:
                    cov_matrix[i, j] = stds[i] ** 2
                else:
                    cov_matrix[i, j] = 0.5 * (stds[i] ** 2 - stds[j] ** 2) * np.tan(
                        2 * rotation_angles[i, j])
        # Ensure that covariance matrix is positive definite by adding eI till all
        # eigenvalues > 0
        # After 30 small steps the increment is escalated to 1 to guarantee termination.
        i = 0
        epsilon = 0.1
        while not np.all(np.linalg.eigvals(cov_matrix) > 0):
            cov_matrix = cov_matrix + epsilon*np.identity(self.num_control_vars)
            i = i + 1
            if i > 30:
                epsilon = 1
        return cov_matrix
    # NOTE(review): "stratetgy" is a typo but the name is kept for interface stability.
    def mutate_stratetgy_params(self, solution):
        """
        Mutate strategy parameters using Eqns 3 and 4 from ES handout.
        :param solution: tuple, solution[1] and solution[2] are the stds and rot_angles to be
        mutated
        :return: mutated stds and rot_angles
        """
        tau = 1/math.sqrt(2*math.sqrt(self.num_control_vars))
        tau_prime = 1/math.sqrt(2*self.num_control_vars)
        beta = 0.0873
        chi_0 = np.random.randn()
        chi_i = np.random.randn(self.num_control_vars)
        temp = np.sqrt(2)*np.random.randn(self.num_control_vars, self.num_control_vars)
        chi_ij = (temp - temp.T)/2  # skew-symmetric
        # multiplying temp by sqrt(2) to make chi_ij ~ N(0,1)
        stds = solution[1]
        new_stds = np.multiply(stds, np.exp(tau_prime * chi_0 + tau * chi_i))
        # For rotation angle matrices, only off-diagonal terms are relevant
        rot_angles = solution[2]
        new_rot_angles = rot_angles + beta * chi_ij
        return new_stds, new_rot_angles
    def mutate_solutions(self, parents):
        """
        Mutate parents before recombination. First mutate strategy params, then mutate control
        variables (Eqns 3, 4, 5, 6 from ES handout).
        :param parents: list of solutions (tuples) to mutate
        :return: list of mutated solutions
        """
        mutated_population = []
        for solution in parents:
            new_stds, new_rot_angles = self.mutate_stratetgy_params(solution)
            # Cov_matrix should be symmetric (and rot_angles matrix is skew-symmetric)
            cov_matrix = self.construct_cov_matrix(new_rot_angles, new_stds)
            n = np.random.multivariate_normal(np.zeros(self.num_control_vars), cov_matrix,
                                              check_valid='warn')
            new_control_vars = solution[0] + n
            mutated_population.append([new_control_vars, new_stds, new_rot_angles])
        return mutated_population
    def control_var_discrete_recombination(self, parent_control_vars1, parent_control_vars2):
        """
        Discrete recombination of control variables.
        :param parent_control_vars1: control variables of 1 of the 2 randomly sampled parents
        i.e. list
        :param parent_control_vars2: control variables of 1 of the 2 randomly sampled parents
        i.e. list
        :return: child control variables (list)
        """
        # Discrete recombination
        cross_points = np.random.rand(self.num_control_vars) < 0.5  # p(cross) = 0.5 (fair coin toss)
        child_control_vars = np.where(cross_points, parent_control_vars1, parent_control_vars2)
        return child_control_vars
    def global_discrete_recombination(self, parents):
        """
        Global discrete recombination of control variables and strategy parameters.
        :param parents: All num_parents parent from this generation, i.e. list of tuples.
        :return: child solution (tuple)
        """
        child_control_vars = []
        child_stds = []
        child_rot_angle = np.zeros((self.num_control_vars, self.num_control_vars))
        # ``fixed`` marks rotation-angle entries already inherited, so (i, j) and
        # (j, i) always come from the same parent (preserving skew-symmetry).
        fixed = np.zeros((self.num_control_vars, self.num_control_vars), dtype=bool)
        for i in range(self.num_control_vars):
            parent_choice_cv = np.random.randint(0, self.num_parents)
            child_control_vars.append(parents[parent_choice_cv][0][i])
            parent_choice_std = np.random.randint(0, self.num_parents)
            child_stds.append(parents[parent_choice_std][1][i])
        for i in range(self.num_control_vars):
            for j in range(self.num_control_vars):
                if not fixed[i, j]:
                    parent_choice_rot_angle = np.random.randint(0, self.num_parents)
                    child_rot_angle[i, j] = parents[parent_choice_rot_angle][2][i, j]
                    child_rot_angle[j, i] = parents[parent_choice_rot_angle][2][j, i]
                    fixed[i, j] = True
                    fixed[j, i] = True
        return np.array(child_control_vars), np.array(child_stds), child_rot_angle
    def strategy_params_intermediate_recombination(self, parent_strat_params1,
                                                   parent_strat_params2):
        """
        Intermediate recombination of strategy parameters.
        :param parent_strat_params1: strategy params of 1 of 2 randomly sampled parents
        (tuple of list and matrix)
        :param parent_strat_params2: strategy params of 1 of 2 randomly sampled parents
        (tuple of list and matrix)
        :return: child strategy params (tuple of list and matrix)
        """
        # Intermediate recombination of stds and rotation angles
        child_stds = self.recombination_weight * parent_strat_params1[0] + \
                     (1-self.recombination_weight) * parent_strat_params2[0]
        child_rot_angles = self.recombination_weight * parent_strat_params1[1] + \
                           (1-self.recombination_weight) * parent_strat_params2[1]
        return child_stds, child_rot_angles
    def strategy_params_discrete_recombination(self, parent_strat_params1,
                                               parent_strat_params2):
        """
        Discrete recombination of strategy parameters.
        :param parent_strat_params1:strategy params of 1 of 2 randomly sampled parents
        (tuple of list and matrix)
        :param parent_strat_params2: strategy params of 1 of 2 randomly sampled parents
        (tuple of list and matrix)
        :return: child strategy params (tuple of list and matrix)
        """
        std_cross_points = np.random.rand(self.num_control_vars) < 0.5  # p(cross) = 0.5 (fair coin toss)
        child_stds = np.where(std_cross_points, parent_strat_params1[0], parent_strat_params2[0])
        # Symmetrising ``temp`` keeps the crossover mask symmetric, so the child's
        # rotation-angle matrix stays skew-symmetric.
        temp = np.random.rand(self.num_control_vars, self.num_control_vars)
        temp = (temp + temp.T) / 2
        rot_angle_cross_points = temp < 0.5
        child_rot_angles = np.where(rot_angle_cross_points, parent_strat_params1[1],
                                    parent_strat_params2[1])
        return child_stds, child_rot_angles
    def recombination(self, parents):
        """
        Recombination between parents. Default recombination configuration is discrete
        recombination on control variables and intermediate recombination on strategy
        parameters.
        :param parents: list of tuples, each representing a single parent solution.
        :return: children, list of tuples, each representing a single solution,
        len = self.num_children
        """
        children = []
        for i in range(self.num_children):
            if self.global_recombination:
                # Global discrete recombination of all solution components
                child_control_vars, child_stds, child_rot_angles = self.global_discrete_recombination(parents)
                children.append([child_control_vars, child_stds, child_rot_angles])
            else:
                # Randomly sample 2 distinct parents
                parent_1 = 0
                parent_2 = 0
                while parent_1 == parent_2:
                    (parent_1, parent_2) = np.random.randint(0, self.num_parents, size=2)
                # Discrete recombination of control variables
                child_control_vars = self.control_var_discrete_recombination(parents[parent_1][0],
                                                                             parents[parent_2][0])
                if self.full_discrete_recombination:
                    # Discrete recombination of strategy params
                    child_stds, child_rot_angles = self.strategy_params_discrete_recombination(
                        parents[parent_1][1:], parents[parent_2][1:])
                    children.append([child_control_vars, child_stds, child_rot_angles])
                else:
                    # Intermediate recombination of strategy params
                    child_stds, child_rot_angles = self.strategy_params_intermediate_recombination(
                        parents[parent_1][1:], parents[parent_2][1:])
                    children.append([child_control_vars, child_stds, child_rot_angles])
        return children
    def optimise(self):
        """
        Perform evolutionary strategy algorithm.
        :return: best setting of control variables, best function value, history of control
        variable solutions in each generation, history of fvals in each generation, computation
        time for each generation.
        """
        children = self.generate_intial_population()
        # Store all control variable settings for each generation in history list
        children_control_vars_history = [[child[0] for child in children]]
        # Store all objective function values for each generation in history list
        children_fvals_history = []
        previous_parents = None
        total_func_evals = 0
        best_fval = np.inf
        best_control_vars = None
        generation_times = []
        start = time.time()
        while total_func_evals < self.allowed_evals:
            # Assess population and select parents
            parents, num_func_evals, children_fvals = self.select_parents(children,
                                                                          previous_parents)
            total_func_evals += num_func_evals
            children_fvals_history.append(children_fvals)
            # If best child solution found in this generation, save it.
            # Note that children CAN have control variable values outside [-500, 500], although
            # it is ensured that these invalid children are not selected to be parents.
            # Need to check to ensure that best child solution is a valid solution.
            # parents list is sorted - parents[0] is best child solution in this generation.
            best_generation_fval = self.objective_func(parents[0][0])
            total_func_evals += 1
            if best_generation_fval < best_fval:
                if np.all(parents[0][0] < 500) \
                        and np.all(parents[0][0] > -500):
                    best_fval = best_generation_fval
                    best_control_vars = parents[0][0]
            # Mutate parents
            mutated_parents = self.mutate_solutions(parents)
            # Recombination
            children = self.recombination(mutated_parents)
            previous_parents = mutated_parents.copy()
            children_control_vars_history.append([child[0] for child in children])
            # Recording times
            generation_times.append(time.time() - start)
        # Final assessment outside the loop - note: not same structure as Fig 1 in ES handout
        final_parents, _, final_children_fvals = self.select_parents(children,
                                                                     previous_parents)
        children_fvals_history.append(final_children_fvals)
        best_final_fval = self.objective_func(final_parents[0][0])
        if best_final_fval < best_fval:
            if np.all(final_parents[0][0] < 500) \
                    and np.all(final_parents[0][0] > -500):
                best_fval = best_final_fval
                best_control_vars = final_parents[0][0]
        generation_times.append(time.time() - start)
        return best_control_vars, best_fval, \
               children_control_vars_history, children_fvals_history, generation_times
| true |
fdf2084b20910b1c9903c7ad39f52d601858b794 | Python | dilshod/python-sitemap | /tests/test_urlset.py | UTF-8 | 1,747 | 2.796875 | 3 | [
"BSD-2-Clause"
] | permissive |
import unittest
import os
from urlparse import urlparse
from sitemap import *
class TestUrlSet(unittest.TestCase):
    """Exercises UrlSet parsing and creation against the fixture sitemap files."""
    def setUp(self):
        # Resolve fixture paths relative to this test module's location.
        self.base = os.path.dirname(os.path.abspath(__file__))
        self.fixtures = os.path.join(self.base, 'fixtures')
        self.small_sitemap = os.path.join(self.fixtures, 'sitemap.xml')
        self.google_sitemap = os.path.join(self.fixtures, 'google-sitemap.xml')
        self.large_sitemap = os.path.join(self.fixtures, 'large-sitemap.xml')
    def checkContent(self, urlset, expected_count=None):
        """Assert every url targets www.example.com and, optionally, the total count."""
        count = 0
        for url in urlset:
            count += 1
            parts = urlparse(url.loc)
            # assertEqual: assertEquals is a deprecated alias.
            self.assertEqual(parts.netloc, 'www.example.com')
        if expected_count is not None:
            self.assertEqual(count, expected_count)
    def testParseStandardSitemap(self):
        urlset = UrlSet.from_file(self.small_sitemap)
        self.checkContent(urlset, 4)
    def testParseLargeSitemap(self):
        urlset = UrlSet.from_file(self.large_sitemap)
        self.checkContent(urlset, 1620)
    def testParseGoogleSitemap(self):
        urlset = UrlSet.from_file(self.google_sitemap, validate=False)
        self.checkContent(urlset, 6)
    def testParseStandardSitemapAsString(self):
        # Fix: close the fixture file instead of leaking the handle.
        with open(self.small_sitemap) as handle:
            content = handle.read()
        urlset = UrlSet.from_str(content)
        self.checkContent(urlset, 4)
    def testCreateContainer(self):
        # Fix: removed an unused 'data' dict that served no purpose.
        urlset = UrlSet.empty_container()
        for i in range(0, 50):
            loc = "http://www.example.com/content/%d" % i
            urlset.append(UrlSetElement(loc=loc))
        self.checkContent(urlset, 50)
| true |
c5cacdd2bd2361330d50a963468c4b18b8445c93 | Python | ravenkls/Blob-Climbers | /main.py | UTF-8 | 6,129 | 2.703125 | 3 | [] | no_license | import pygame
from pygame.locals import *
import os
import entities
import json
import random
# Initialise all pygame modules before any display/surface calls below.
pygame.init()
class GridLayout:
    """2-D grid of entities.Block tiles anchored to the bottom of the game window.

    ``grid`` is a list of rows (top row first); row 0 is the highest layer and
    new layers are inserted above it as the world grows upward.
    """
    def __init__(self, tile_width, tile_height):
        """Fill the visible window with empty (non-solid) Block tiles of the given size."""
        self.tile_width = tile_width
        self.tile_height = tile_height
        # Requires the pygame display to already exist (pygame.display.set_mode).
        self.window_w, self.window_h = pygame.display.get_surface().get_size()
        self.grid = []
        for y in range(int(self.window_h/self.tile_height)):
            row = []
            for x in range(int(self.window_w/self.tile_width)):
                tile = entities.Block(self.tile_width, self.tile_height, 0, 0)
                row.append(tile)
            self.grid.append(row)
        self.set_grid_positions(self.grid)
    def set_dynamic_sprites(self):
        """Let every DynamicBlock pick its sprite based on its neighbours in the grid."""
        for block in filter(lambda x: isinstance(x, entities.DynamicBlock), self.blocks):
            block.set_dynamic_sprite(self.grid)
    def add_layer(self):
        """Insert a fresh row of empty tiles above the current top of the grid."""
        row = [entities.Block(self.tile_width, self.tile_height, 0, 0) for b in range(len(self.grid[0]))]
        self.grid.insert(0, row)
    def set_grid_positions(self, grid):
        """Recompute each tile's pixel rect so the grid's last row sits on the window bottom."""
        grid_height = len(grid)
        grid_width = len(grid[0])
        # Rows above the window get negative y coordinates (off-screen above).
        top_most_position = self.window_h - grid_height*self.tile_height
        for y in range(grid_height):
            for x in range(grid_width):
                block = grid[y][x]
                block.rect.x = x*self.tile_width
                block.rect.y = top_most_position + y*self.tile_height
    @property
    def blocks(self):
        """A fresh pygame sprite Group containing every tile in the grid (rebuilt per access)."""
        return pygame.sprite.Group(*[block for row in self.grid for block in row])
    @classmethod
    def from_json(cls, filename):
        """Build a GridLayout from a JSON level file.

        The file provides tile 'width'/'height', a 'references' map from tile
        codes to image paths, and a 'blueprint' grid of codes; a code's trailing
        character 's' marks the tile as solid. Empty codes become blank tiles.
        """
        with open(filename, 'r') as json_file:
            level = json.load(json_file)
        width, height = level['width'], level['height']
        references = level['references']
        grid_blueprint = level['blueprint']
        grid = []
        for row_blueprint in grid_blueprint:
            row = []
            for tile_repr in row_blueprint:
                if tile_repr:
                    tile_type = references[tile_repr[:-1]]
                    solid = tile_repr[-1] == 's'
                    row.append(entities.Block.from_image_file(tile_type, 0, 0, solid=solid))
                else:
                    row.append(entities.Block(width, height, 0, 0))
            grid.append(row)
        gridclass = cls(width, height)
        gridclass.grid = grid
        gridclass.set_grid_positions(grid)
        return gridclass
# --- Window and world configuration -----------------------------------------
WINDOW_WIDTH = 640
WINDOW_HEIGHT = 480
SKY_BLUE = (135,206,235)
game_running = True
window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('Blob Climbers V2')
clock = pygame.time.Clock()
# Camera offset applied to every sprite each frame, and the screen-edge margins
# (in pixels) at which the camera starts following the player.
camera_x = 0
camera_y = 0
camera_sensitivity_x = 150
camera_sensitivity_y = 100
grass_floor_thickness = 10
player = entities.PlayerEntity(spawn=(WINDOW_WIDTH/2, -100))
#grid = GridLayout.from_json('levels/level_1.json')
# 32x32 tiles -> a 20x15 grid for the 640x480 window.
grid = GridLayout(32, 32)
def generate_platform(block_type, i=0, start_platform_point='auto', max_length=9999):
    """Place one horizontal platform of *block_type* tiles on row *i* of the global grid.

    Does nothing (returns None) if the row already contains a solid block.
    When start_platform_point is 'auto' a random start column is chosen;
    otherwise the platform is clamped so it fits inside the row.
    Returns (start_column, platform_length) when a platform was placed.

    Fix: the clamp previously hard-coded ``19`` (last column of a 20-wide
    grid); it is now derived from the actual row width.
    """
    row = grid.grid[i]
    if any(block.solid for block in row):
        return None
    width = len(row)
    if start_platform_point != 'auto':
        # Clamp so the platform ends inside the row (last usable column is width-1).
        platform_length = min(random.randint(2, width // 2),
                              (width - 1) - start_platform_point,
                              max_length)
    else:
        platform_length = min(random.randint(2, width // 2), max_length)
        start_platform_point = random.randint(0, width - 1 - platform_length)
    end_platform_point = start_platform_point + platform_length
    grid.grid[i][start_platform_point:end_platform_point] = [block_type() for _ in range(platform_length)]
    grid.set_dynamic_sprites()
    return start_platform_point, platform_length
# Shared state for generate_realistic_platform: the column where the next
# platform should start, and the maximum length that keeps it reachable.
next_point = 'auto'
max_next_length = 999
def generate_realistic_platform(i=0):
    """Grow the world upward and place a platform near the previous one.

    Adds three empty layers, places a grass platform on row *i* using the shared
    next_point/max_next_length state, then picks a nearby start column for the
    following platform so consecutive platforms stay jumpable.
    """
    global next_point
    global max_next_length
    grid.add_layer()
    grid.add_layer()
    grid.add_layer()
    # NOTE(review): generate_platform can return None (row already solid),
    # which would make this unpacking raise TypeError — confirm intended.
    last_point, length = generate_platform(entities.GrassBlock, i, start_platform_point=next_point, max_length=max_next_length)
    next_point = -1
    # NOTE(review): next_point is a COLUMN index but the upper bound here is
    # len(grid.grid)-1, the ROW count; len(grid.grid[0])-1 looks intended.
    while 0 >= next_point or next_point >= len(grid.grid)-1:
        next_point = random.choice([random.randint(last_point-4, last_point-1), random.randint(last_point+1, last_point+4)])
    if next_point < last_point:
        max_next_length = length + next_point
    grid.set_grid_positions(grid.grid)
    return last_point, length
# Build the initial world: a solid grass floor, then a platform every third
# row of the starting grid, then 20 procedurally placed platforms above it.
for _ in range(grass_floor_thickness):
    grid.grid.append([entities.GrassBlock() for _ in range(len(grid.grid[0]))])
grid.set_dynamic_sprites()
for i in range(0, len(grid.grid), 3):
    generate_platform(entities.GrassBlock, i)
for _ in range(20):
    generate_realistic_platform()
def load_all_sprites():
    """Bundle the player and every grid tile into the sprite groups used each frame."""
    entity_group = pygame.sprite.Group(player)
    block_group = grid.blocks
    combined = pygame.sprite.Group(block_group, entity_group)
    return entity_group, block_group, combined
# Snapshot the sprite groups and position the grid before entering the loop.
all_entities, blocks, all_sprites = load_all_sprites()
grid.set_grid_positions(grid.grid)
# Keys currently held down; polled every frame for continuous movement.
keys_pressed = []
# --- Main game loop ----------------------------------------------------------
while game_running:
    # 1. Event handling: track held keys, stop horizontal motion on key release.
    for event in pygame.event.get():
        if event.type == QUIT:
            game_running = False
        elif event.type == KEYDOWN:
            keys_pressed.append(event.key)
        elif event.type == KEYUP:
            if event.key == K_RIGHT or event.key == K_LEFT:
                player.stop_moving()
            keys_pressed.remove(event.key)
    # 2. Apply movement for every key still held.
    for key in keys_pressed:
        if key == K_RIGHT:
            player.move_right(2)
        elif key == K_LEFT:
            player.move_left(2)
        elif key == K_UP:
            player.jump(4)
    pygame.display.update()
    window.fill(SKY_BLUE)
    # 3. Camera: scroll when the player crosses the sensitivity margins.
    if player.rect.left < camera_sensitivity_x: # too left
        camera_x += camera_sensitivity_x - player.rect.left
    elif player.rect.right > WINDOW_WIDTH - camera_sensitivity_x: # too right
        camera_x -= camera_sensitivity_x - (WINDOW_WIDTH - player.rect.right)
    else:
        camera_x = 0
    if player.rect.top < camera_sensitivity_y: # too high
        camera_y += camera_sensitivity_y - player.rect.top
    elif player.rect.bottom > WINDOW_HEIGHT - camera_sensitivity_y: # too low
        camera_y -= camera_sensitivity_y - (WINDOW_HEIGHT - player.rect.bottom)
    else:
        camera_y = 0
    # 4. Shift all sprites by the camera offset; retire sprites that scroll
    #    below the window and spawn a replacement platform when that happens.
    platform_removed = False
    for sprite in all_sprites:
        # Process All sprites
        sprite.rect.x += camera_x
        sprite.rect.y += camera_y
        if sprite.rect.top > WINDOW_HEIGHT:
            sprite.image.fill((0,0,0))
            sprite.image.set_colorkey((0,0,0))
            sprite.solid = False
            all_sprites.remove(sprite)
            platform_removed = True
    if platform_removed:
        generate_realistic_platform()
    # 5. Update, resolve collisions, draw, and cap the frame rate.
    blocks.update()
    all_entities.update()
    player.check_collisions(blocks)
    all_entities.draw(window)
    blocks.draw(window)
    clock.tick(120)
pygame.quit()
| true |
775f424d4ad8b46a74074dc65561fadfbb2eca4c | Python | elpiankova/oxwall_tests | /oxwall_helper.py | UTF-8 | 2,959 | 2.53125 | 3 | [] | no_license | from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from custom_wait_conditions import presence_of_N_elements_located
from page_objects.locators import InternalPagesLocators, SignInPageLocators
class OxwallApp:
    """ Class that aggregate actions on Oxwall site """
    def __init__(self, driver):
        """Wrap a Selenium WebDriver with a 3-second explicit wait and an action chain."""
        self.driver = driver
        self.wait = WebDriverWait(self.driver, 3)
        self.actions = ActionChains(self.driver)
    def sign_in(self, user):
        """
        Sign in with user
        parameters: user as a dictionary object with "username" and "password" keys
        """
        el = self.driver.find_element(*InternalPagesLocators.SIGN_IN_MENU)
        el.click()
        # el_submit = dr.find_element_by_id("input_iqaqewaj")
        el_name = self.driver.find_element(*SignInPageLocators.USERNAME_FIELD)
        el_name.clear()
        el_name.send_keys(user["username"])
        el_pass = self.driver.find_element(*SignInPageLocators.PASSWORD_FIELD)
        el_pass.clear()
        el_pass.send_keys(user["password"])
        el_confirm = self.driver.find_element(*SignInPageLocators.SIGN_IN_BUTTON)
        el_confirm.click()
        # Success criterion: the dashboard menu item becomes present.
        wait = WebDriverWait(self.driver, 3)
        wait.until(ec.presence_of_element_located(InternalPagesLocators.DASHBOARD_MENU))
    def sign_out(self):
        """Open the profile console dropdown and click the sign-out entry."""
        # sign out
        el_profile = self.driver.find_element_by_css_selector(".ow_console_item.ow_console_dropdown.ow_console_dropdown_hover")
        self.actions.move_to_element(el_profile)
        self.actions.perform()
        el_sign_out = self.driver.find_elements_by_css_selector("li.ow_dropdown_menu_item.ow_cursor_pointer")
        # NOTE(review): index 5 is a hard-coded position of "Sign Out" in the
        # dropdown — fragile if the menu layout changes.
        self.actions.move_to_element(el_sign_out[5])
        self.actions.click(el_sign_out[5])
        self.actions.perform()
    def wait_new_post_appear(self, number_of_posts_before):
        """Wait until one more feed post exists and return the newest post's text element."""
        # Wait new post appears
        expected_amount = number_of_posts_before + 1
        el_posts_new = self.wait.until(
            presence_of_N_elements_located((By.XPATH, "//li[contains(@id, 'action-feed')]"), expected_amount),
            message=f"Amount of posts is not {expected_amount}")
        # Newest post is rendered first in the feed.
        el_post_text = el_posts_new[0].find_element(By.CLASS_NAME, "ow_newsfeed_content")
        return el_post_text
    def create_new_post(self, input_text):
        """Type *input_text* into the status box and submit it."""
        # Create new post
        el_post = self.driver.find_element_by_name("status")
        el_post.send_keys(input_text)
        el_send = self.wait.until(ec.element_to_be_clickable((By.NAME, "save")),
                                  message="Clickable SEND button is not found")
        el_send.click()
    def count_posts(self):
        """Return the number of feed posts currently on the page."""
        # Amount of posts
        el_posts = self.driver.find_elements_by_xpath("//li[contains(@id, 'action-feed')]")
        number_of_posts_before = len(el_posts)
        return number_of_posts_before
| true |
4ebbe5aa44f9c4de69b8639ac1418dfdabde7fa7 | Python | miguepoloc/Mercury | /test/tiraled_test.py | UTF-8 | 813 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
#Librería de tiempo
import time
#Librería de los GPIO
import RPi.GPIO as GPIO
#Variable que controla el ciclo infinito de la vizualización de la tira led
tira_led = True
#Ponemos los pines en modo Board
GPIO.setmode(GPIO.BCM)
red = 25
green = 10
blue = 9
colores = ("000", "011", "001", "101", "100", "110", "010")
GPIO.setup(red, GPIO.OUT)
GPIO.setup(green, GPIO.OUT)
GPIO.setup(blue, GPIO.OUT)
def color(R, G, B):
"""Color"""
GPIO.output(red, R)
GPIO.output(green, G)
GPIO.output(blue, B)
print(" R G B\n--------------")
# Main loop
try:
while tira_led:
for todo in colores:
print ((todo[0], todo[1], todo[2]))
color(int(todo[0]), int(todo[1]), int(todo[2]))
time.sleep(2)
except KeyboardInterrupt:
tira_led = False
print ("Fin")
finally:
GPIO.cleanup() | true |
a511de44bbf6862bb7672f66f4515f71b5b46ecc | Python | diavy/twitter-science | /scripts/track_retweet_relation.py | UTF-8 | 4,334 | 2.65625 | 3 | [] | no_license | #! /usr/bin/python
# coding: utf-8
#######################################################################################################build up retweeting relation#############################################################################################################
from parse_tweet import *
import sys, os
from extract_user_relation import *
import networkx as nx
####################################################
class RetweetGraph:
    """Wrapper around a retweet graph stored as a networkx gpickle file."""

    def __init__(self, graph_file):
        """Load the graph previously saved with nx.write_gpickle."""
        with open(graph_file, 'r') as handle:
            self.graph = nx.read_gpickle(handle)

    def export_to_format(self, format_file, format="csv"):
        """Dump the graph's edge list to format_file (only csv supported)."""
        if format == 'csv':
            nx.write_edgelist(self.graph, format_file, data=False)

    def find_largest_component(self):
        """Return the weakly connected component with the most nodes."""
        components = nx.weakly_connected_component_subgraphs(self.graph)
        largest = components[0]
        for candidate in components[1:]:
            if nx.number_of_nodes(candidate) > nx.number_of_nodes(largest):
                largest = candidate
        return largest
#def draw_graph(self, use_largest=True):
# G = self.graph
def convert_graph_to_csv(G, cvs_file):
    """Write every directed edge of G to cvs_file, one 'source,target' line each."""
    rows = []
    for source, neighbours in G.adjacency_iter():
        for target in neighbours.keys():
            rows.append(source + ',' + target + '\n')
    with open(cvs_file, 'w') as outfile:
        outfile.writelines(rows)
def add_edge_to_graph(G, tweet, method='retweet'):
    """Add the retweet-propagation edges of one tweet to directed graph G.

    Consecutive users in the retweet chain yield an edge from the later
    user back to the earlier one; repeated pairs increase the edge weight.
    """
    chain = []
    if method == 'retweet':
        chain = extract_retweet_users(tweet, use_meta=True)
    if not chain:
        return
    # Walk consecutive pairs of the retweet chain.
    for earlier, later in zip(chain, chain[1:]):
        # Does not allow reverse propagation or self propagation.
        if earlier == later or (earlier, later) in G.edges():
            continue
        try:
            G[later][earlier]['weight'] += 1
        except KeyError:
            G.add_edge(later, earlier, weight=1)
def gen_retweet_graph(tweet_file, graph_dir, tweet_type='arxiv', use_id=None):
    """generate a retweet graph

    Each line of `tweet_file` is tab separated:
    <id> \t <mention count> \t <tweets joined by '&&&'>.

    Behaviour depends on `use_id`:
      - None:   accumulate every tweet into a single graph, saved as
                <tweet_type>.gpickle in `graph_dir`;
      - 'total': build one graph per id with at least 5 mentions, each
                saved as <id>.gpickle;
      - other:  build a graph only for the id equal to `use_id`.
    """
    infile = open(tweet_file, 'r')
    if not use_id:
        # Single shared graph accumulated across all input lines.
        G = nx.DiGraph()
    index = 0
    for line in infile:
        content = line.rstrip('\n').split('\t')
        id = content[0]
        freq = int(content[1])
        tweets = content[2].split('&&&')
        if not use_id:#####generate all retweet network##########
            for tweet in tweets:
                try:
                    add_edge_to_graph(G, tweet, method='retweet')
                except:
                    # NOTE(review): bare except aborts the whole run on the
                    # first tweet that fails to parse -- consider narrowing.
                    print tweet
                    exit(0)
        elif use_id == 'total':
            if freq < 5:####filter low mentioned ids
                continue
            print 'current id:', id
            # Fresh graph per id, written out immediately.
            G = nx.DiGraph()
            for tweet in tweets:
                add_edge_to_graph(G, tweet, method='retweet')
            gpath = os.path.join(graph_dir, id + '.gpickle')
            nx.write_gpickle(G, gpath)
            #csv_file = os.path.join(graph_dir, tweet_type + '.csv')
            #mG = g.find_largest_component()
            #nx.write_edgelist(G, csv_file, data=False)
        else:
            if id == use_id:
                G = nx.DiGraph()
                for tweet in tweets:
                    add_edge_to_graph(G, tweet, method='retweet')
                gpath = os.path.join(graph_dir, id + '.gpickle')
                nx.write_gpickle(G, gpath)
        index += 1
        if not (index % 1000):
            # Progress marker every 1000 input lines.
            print index
    if not use_id:
        gpath = os.path.join(graph_dir, tweet_type + '.gpickle')
        nx.write_gpickle(G, gpath)
        print len(G.nodes())
        print len(G.edges())
def main():
    """Build the per-id retweet graphs for the configured tweet type, then
    export every pickled graph in graph_dir to a csv edge list."""
    tweet_type = parse_args()
    tweet_file = os.path.join(dat_dir, tweet_type, 'id_tweets.txt')
    graph_dir = os.path.join(dat_dir, tweet_type, 'retweet_graphs')
    use_id = 'total'
    gen_retweet_graph(tweet_file, graph_dir, tweet_type, use_id)
    for r, d, files in os.walk(graph_dir):
        break  # only the top-level listing of graph_dir is needed
    for f in files:
        if '.gpickle' in f:
            gpath = os.path.join(r, f)
            g = RetweetGraph(gpath)
            # BUG FIX: str.rstrip strips a *character set*, not a suffix, so
            # 'total.gpickle'.rstrip('.gpickle') yields 'tota'.  Use
            # os.path.splitext to drop the extension instead.
            csv_file = os.path.join(graph_dir, os.path.splitext(f)[0] + '.csv')
            g.export_to_format(csv_file)
if __name__ == "__main__":
main()
| true |
a61c24b094d021586225d5f11967c2cea3b2d954 | Python | jin14/CS3245 | /HW3/index.py | UTF-8 | 3,635 | 2.71875 | 3 | [] | no_license | from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords as sw
import os
import string
import nltk
from collections import OrderedDict
import json
import math
from util import tf,L2norm
import sys
import getopt
import time
# Single shared Porter stemmer used to normalise tokens while indexing.
stemmer = PorterStemmer()
def make_dictionary(directory, dictionary_file, postings_file):
    """Index every document in `directory` and write dictionary/postings files.

    Each document's natural term frequencies are converted to log-weighted,
    cosine-normalised weights (the 'lnc' scheme for documents), and each
    term's idf is stored so queries can later be scored with 'ltc' weights.

    The dictionary file maps each term to its byte offset ('s'), byte
    length ('l') in the postings file, and its idf ('i'), enabling random
    access into the postings file at search time.
    """
    filepaths = os.listdir(directory)  # document ids double as file names
    results = {}
    for filepath in sorted(filepaths, key=int):
        doc_tokens = set()  # terms that occur in this document only
        with open(directory + filepath, 'r') as file:
            for line in file:
                # Sentence tokenisation followed by word tokenisation.
                sent = [nltk.word_tokenize(tokens) for tokens in nltk.sent_tokenize(line)]
                for tokens in sent:
                    for token in tokens:
                        token = stemmer.stem(token.lower())
                        doc_tokens.add(token)
                        if token not in results:
                            # First occurrence anywhere: start a postings map
                            # with the docid and natural term frequency.
                            results[token] = {'postings': {filepath: 1}}
                        elif filepath not in results[token]['postings']:
                            results[token]['postings'][filepath] = 1
                        else:
                            results[token]['postings'][filepath] += 1
        # Convert this document's natural term frequencies to log frequencies.
        # PERF: iterating only over doc_tokens avoids rescanning the entire
        # vocabulary for every document (the original was O(docs * vocab)).
        length = []  # log frequencies of the terms in this document
        for token in doc_tokens:
            tfreq = tf(results[token]['postings'][filepath])
            results[token]['postings'][filepath] = tfreq
            length.append(tfreq)
        if doc_tokens:
            # Cosine-normalise the document's weights ('lnc').
            norm = L2norm(length)
            for token in doc_tokens:
                results[token]['postings'][filepath] = results[token]['postings'][filepath] / norm
    # idf for every term; used to compute tf-idf of query terms at search time.
    for token in results:
        results[token]['idf'] = math.log10(len(filepaths) / len(results[token]['postings']))
    # Write postings sequentially, recording each term's byte offset and
    # length so the dictionary allows random access into the postings file.
    with open(dictionary_file, 'w') as dictionary, open(postings_file, 'w') as postings:
        new = {}
        for term, info in results.items():
            start = postings.tell()
            line = ''
            for docid, termf in info['postings'].items():
                line += docid + ',' + str(termf) + ' '
            postings.write(line)
            new[term] = {'s': start, 'l': len(line), 'i': info['idf']}
        json.dump(new, dictionary)
def usage():
    """Print the command-line usage string for this indexing script."""
    print("usage: {} -i directory-of-documents -d dictionary-file -p postings-file".format(sys.argv[0]))
# Parse -i/-d/-p command line options, then build the index, timing the run.
directory_of_documents = dictionary_file = postings_file = None
try:
    opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')
except getopt.GetoptError:
    # BUG FIX: the original `except (getopt.GetoptError, err):` referenced an
    # undefined name `err` (leftover Python 2 `except E, err` syntax) and
    # raised NameError instead of printing usage on bad arguments.
    usage()
    sys.exit(2)
for o, a in opts:
    if o == '-i':
        directory_of_documents = a
    elif o == '-d':
        dictionary_file = a
    elif o == '-p':
        postings_file = a
    else:
        assert False, "unhandled option"
# All three options are mandatory.
if directory_of_documents is None or dictionary_file is None or postings_file is None:
    usage()
    sys.exit(2)
start = time.time()
print("Creating index...")
make_dictionary(directory_of_documents, dictionary_file, postings_file)
end = time.time()
print("Time taken to index: " + str(end - start))
| true |
04718b8597c25ac5573b943cf66bd5e474e461c2 | Python | MatteRubbiani/peach-protein | /Models/WeightModel.py | UTF-8 | 2,196 | 2.859375 | 3 | [] | no_license | import operator
from db import db
from Models.SheetModel import SheetModel
from Models.ExerciseModel import ExerciseModel
import time
class WeightModel(db.Model):
    """A single weight logged for an exercise at a point in time."""

    # BUG FIX: SQLAlchemy reads `__tablename__` (no underscore before "name");
    # the original `__table_name__` was silently ignored, so the table was
    # auto-named after the class instead of "weights".
    # NOTE(review): databases created under the auto-derived name will need a
    # table rename/migration after this change.
    __tablename__ = "weights"

    id = db.Column(db.Integer, primary_key=True)
    exercise_id = db.Column(db.Integer)  # references ExerciseModel.id
    weight = db.Column(db.Integer)
    unit = db.Column(db.String(10))      # measurement unit label
    date = db.Column(db.Integer)         # unix timestamp of when it was logged

    def __init__(self, exercise_id, weight, unit):
        self.id = None  # assigned by the database on commit
        self.exercise_id = exercise_id
        self.weight = weight
        self.unit = unit
        self.date = int(time.time())

    @classmethod
    def find_by_id(cls, id):
        """Return the weight row with the given primary key, or None."""
        return cls.query.filter_by(id=id).first()

    @classmethod
    def find_lasts_by_sheet_id(cls, sheet_id):
        """Return the most recently logged weight for each exercise of a sheet."""
        weights = []
        for exercise in ExerciseModel.find_by_sheet_id(sheet_id):
            weights += list(cls.find_by_exercise_id(exercise.id))
        # Walk from newest to oldest, keeping only the first (latest) entry
        # seen for each exercise.
        exercise_ids_used = []
        lasts = []
        for w in reversed(sorted(weights, key=operator.attrgetter("date"))):
            if w.exercise_id not in exercise_ids_used:
                lasts.append(w)
                exercise_ids_used.append(w.exercise_id)
        return lasts

    @classmethod
    def find_by_workout_id(cls, workout_id):
        """Return every weight logged under any sheet of the given workout."""
        weights = []
        for sheet in SheetModel.find_by_workout_id(workout_id):
            for exercise in ExerciseModel.find_by_sheet_id(sheet.id):
                weights += list(cls.find_by_exercise_id(exercise.id))
        return weights

    @classmethod
    def find_by_exercise_id(cls, exercise_id):
        """Return a query over all weights for one exercise."""
        return cls.query.filter_by(exercise_id=exercise_id)

    @classmethod
    def find_all(cls):
        """Return a query over every weight row."""
        return cls.query.filter_by()

    @classmethod
    def delete_all(cls):
        """Delete every weight row (each delete commits individually)."""
        for row in cls.query:
            row.delete_from_db()

    def save_to_db(self):
        """Insert or update this row and commit."""
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this row and commit."""
        db.session.delete(self)
        db.session.commit()
ca794f355aa7c9da8e9a5f8feea93370544e2cea | Python | PatForAll/training_python | /basics/10-regular_expressions.py | UTF-8 | 2,209 | 3.578125 | 4 | [] | no_license | import re
## RE MATCHES
# patterns = ['term1','term2']
#
# text = 'This is a string with term1, not the other!'
# print('Text to be searched: "' + text + '"')
#
# for pattern in patterns:
# print("I'm searching for: " + pattern)
#
# if re.search(pattern, text):
# print('MATCH!')
# else:
# print('NO MATCH!')
#
# match = re.search('term1',text)
# print(type(match))
# print(match.start())
## RE SPLITS
# split_term = '@'
# email = 'user@gmail.com'
#
# print(re.split(split_term,email))
## RE FIND
# print(re.findall('match','test phrase match in middle'))
# print(re.findall('match','match will match twice here'))
## MULTI RE FIND
def multi_re_find(patterns, phrase):
    """Run every regex in `patterns` against `phrase`, printing each pattern
    followed by the list of all its non-overlapping matches."""
    for pattern in patterns:
        header = 'Searching for pattern {}'.format(pattern)
        matches = re.findall(pattern, phrase)
        print(header)
        print(matches)
        print('\n')
# test_phrase = 'sdsd..sssddd..sdddsddd...dsds..dsssss..sddddd'
#
# test_patterns1 = ['sd*']
# test_patterns2 = ['sd+']
# test_patterns3 = ['sd{3}']
# test_patterns4 = ['sd{1,3}']
# test_patterns5 = ['s[sd]+']
#
# multi_re_find(test_patterns1,test_phrase)
# multi_re_find(test_patterns2,test_phrase)
# multi_re_find(test_patterns3,test_phrase)
# multi_re_find(test_patterns4,test_phrase)
# multi_re_find(test_patterns5,test_phrase)
# Two sample phrases to exercise the character-class patterns against.
test_phrase1 = 'This is a string! But it has punctuation. How can we remove it?'
test_phrase2 = 'This is a string with numbers 12312 and a symbol #hashtag'

test_patterns1 = ['[^!.?]+']  # punctuation
test_patterns2 = ['[a-z]+']   # lower case letters
test_patterns3 = ['[A-Z]+']   # capital letters
test_patterns4 = [r'\d+']     # digits
test_patterns5 = [r'\D+']     # non-digits
test_patterns6 = [r'\s+']     # white space
test_patterns7 = [r'\S+']     # non-white space
test_patterns8 = [r'\w+']     # alphanumerical
test_patterns9 = [r'\W+']     # non-alphanumerical

# Patterns 1-3 run against the punctuation phrase, 4-9 against the
# numbers/symbol phrase -- same call order as invoking them one by one.
for patterns in (test_patterns1, test_patterns2, test_patterns3):
    multi_re_find(patterns, test_phrase1)
for patterns in (test_patterns4, test_patterns5, test_patterns6,
                 test_patterns7, test_patterns8, test_patterns9):
    multi_re_find(patterns, test_phrase2)
| true |
55e8afa503f765657086d7849e72b700b520db6a | Python | matheus-bernat/Visual1ze | /server/dbtest.py | UTF-8 | 4,880 | 2.703125 | 3 | [
"MIT"
] | permissive | import unittest
from datetime import datetime
from visualize import create_app
from visualize.models import *  # brings the model classes (Reader, Room, ...) into scope
app = create_app()
from visualize.models import db # database is created in models module
app.app_context().push() # make all test use this context
db.drop_all() # make sure first test starts with empty database
class TestDbModels(unittest.TestCase):
    """Tests that all database models behave as expected."""

    def setUp(self):
        """Rebuild all tables before test"""
        db.create_all()

    def tearDown(self):
        """Drop all tables before test"""
        db.session.rollback()
        db.drop_all()

    def test_no_readers(self):
        """Test the database is empty at start of test."""
        stored_readers = Reader.query.all()
        self.assertEqual(len(stored_readers), 0, "Expected database to be empty after droping tables.")

    def test_create_reader(self):
        new_reader = Reader("fakeuser@fakesite.com", "password", "name", "surname")
        db.session.add(new_reader)
        db.session.commit()
        self.assertEqual(len(Reader.query.all()), 1, "Expected database to have reader after commit.")

    def test_still_no_readers(self):
        """Another test to make sure database is always empty, and the first test is not only succeding because it ran first."""
        self.assertEqual(len(Reader.query.all()), 0, "Expected database to be empty in third test.")

    def test_reader_inheritance(self):
        """Add one user with each role and make sure they appear in a query"""
        role_users = [
            Reader("fakeuser@fakesite.com", "password", "name", "surname"),
            Approver("fakeuser2@fakesite.com", "password2", "name", "surname"),
            Admin("fakeuser3@fakesite.com", "password3", "name", "surname"),
        ]
        for user in role_users:
            db.session.add(user)
        db.session.commit()
        self.assertEqual(len(Reader.query.all()), 3, "Expected database to have 3 readers after commit.")
        self.assertEqual(len(Approver.query.all()), 2, "Expected database to have 2 approvers after commit.")
        self.assertEqual(len(Admin.query.all()), 1, "Expected database to have 1 admin after commit.")

    def test_add_card_reader(self):
        """Test adding two rooms and adding a connection"""
        first_room = Room(name="A", text_id="A")
        second_room = Room(name="B", text_id="B")
        db.session.add(first_room)
        db.session.add(second_room)
        db.session.commit()
        self.assertEqual(len(Room.query.all()), 2, "Expected database to have 2 rooms after commit.")
        connecting_reader = CardReader(room_a=first_room, room_b=second_room)
        db.session.add(connecting_reader)
        db.session.commit()
        self.assertEqual(len(CardReader.query.all()), 1, "Expected database to have 1 card reader after commit.")

    def test_responsible_for_room(self):
        """Test the relation between """
        approver = Approver("fakeuser2@fakesite.com", "password2", "name", "surname")
        rooms = [Room(name=label, text_id=label) for label in ("A", "B", "C")]
        db.session.add(approver)
        for room in rooms:
            db.session.add(room)
        db.session.commit()
        relations = [ResponsibleForRoom(approver, room, priority)
                     for priority, room in enumerate(rooms, start=1)]
        db.session.add(approver)
        for room in rooms:
            db.session.add(room)
        for relation in relations:
            db.session.add(relation)
        db.session.commit()
        # Look up the relation through the room, then through the approver.
        found_relation = ResponsibleForRoom.query.filter_by(room=rooms[1]).first()
        self.assertIsNotNone(found_relation)
        self.assertEqual(found_relation.room, rooms[1])
        self.assertEqual(found_relation.approver, approver)
        found_relations = ResponsibleForRoom.query.filter_by(approver=approver).all()
        self.assertEqual(len(found_relations), 3)
        related_rooms = [relation.room for relation in found_relations]
        for room in rooms:
            self.assertTrue(room in related_rooms)

    def test_has_access_to(self):
        """Test allowing one way passage between rooms for a reader"""
        reader = Reader("fakeuser@fakesite.com", "password", "name", "surname")
        origin = Room(name="A", text_id="A")
        destination = Room(name="B", text_id="B")
        db.session.add(reader)
        db.session.add(origin)
        db.session.add(destination)
        db.session.commit()
        forward_reader = CardReader(room_a=origin, room_b=destination)
        backward_reader = CardReader(room_a=destination, room_b=origin)
        db.session.add(forward_reader)
        db.session.add(backward_reader)
        db.session.commit()
        # Grant access through the forward card reader only.
        grant = HasAccessTo(reader=reader, card_reader=forward_reader, expiration_datetime=datetime.today())
        db.session.add(grant)
        db.session.commit()
        res = CardReader.query.join(HasAccessTo, HasAccessTo.card_reader_id == CardReader.id).filter_by(
            reader_id=reader.id).all()
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0], forward_reader)
# Allow running this module directly with `python dbtest.py`.
if __name__ == '__main__':
    unittest.main()
| true |
373a1a96cfe07abf17783489b46e08c25294e375 | Python | cp-helsinge/2020-attack | /common/dashboard.py | UTF-8 | 3,069 | 3.328125 | 3 | [] | permissive | """============================================================================
Dashboard
Show game and player status
============================================================================"""
import pygame
from common import globals
from game_objects import setting
class Dashboard:
    """Heads-up display strip showing the score, the player's health bar and
    the current level, pre-rendered onto a cached background surface."""

    def __init__(self):
        # Placement and size of the whole dashboard comes from game settings.
        self.rect = pygame.Rect(setting.dashboard_rectangle)
        # Create a background image
        self.background = pygame.Surface((self.rect.width, self.rect.height))
        # Paint background
        self.background.fill(setting.dashboard_background_color)
        # Geometry of the health-bar frame, centred in the dashboard.
        w = self.rect.width // 5 # Width
        h = self.rect.height // 2 # Height
        cx = self.rect.width // 2 # Center x
        cy = h # Center y
        bw = 2 # border line width
        # Draw health bar background: a hexagon-like frame with pointed
        # left/right ends (outer filled shape, then inner black cut-out).
        pygame.draw.polygon(
            self.background,
            setting.dashboard_color,
            (
                (cx - w//2, cy - h//2), # Top left
                (cx + w//2, cy - h//2), # Top right
                (cx + w//2 + 3 * bw , cy ), # middle right point
                (cx + w//2, cy + h//2), # bottom right
                (cx - w//2, cy + h//2), # Bottom left
                (cx - w//2 - 3 * bw , cy ), # middle left point
                (cx - w//2, cy - h//2), # Top left
            )
        )
        # Inner black polygon hollows out the frame, leaving only the border.
        pygame.draw.polygon(
            self.background,
            (0,0,0),
            (
                (cx - w//2 + bw, cy - h//2 + bw ), # Top left
                (cx + w//2 - bw, cy - h//2 + bw ), # Top right
                (cx + w//2 + bw , cy ), # middle right point
                (cx + w//2 - bw, cy + h//2 - bw ), # bottom right
                (cx - w//2 + bw, cy + h//2 - bw ), # Bottom left
                (cx - w//2 - bw , cy ), # middle left point
                (cx - w//2 + bw, cy - h//2 + bw ), # Top left
            )
        )
        # Fill area of the health bar.  NOTE(review): the y offset uses
        # "- 2 * bw" where the x offset uses "+ 2 * bw"; the position is
        # overwritten by the .center assignment on the next line, so only
        # the width and height of this rect actually matter.
        self.health_bar_rect = pygame.Rect((cx - w//2 + 2* bw, cy - h//2 - 2 * bw, w - 4 * bw, h - 4 * bw) )
        self.health_bar_rect.center = self.rect.center
        # Create a font
        self.font = pygame.font.Font(
            setting.dashboard_font,
            setting.dashboard_font_size
        )

    def draw(self, surface = False ):
        """Blit the dashboard onto `surface` (defaults to the game window)."""
        if( not surface ):
            surface = globals.game.window
        surface.blit( self.background, self.rect )
        # Paint Score in left middle of dashboard
        text = self.font.render(
            " Score: " + str(globals.game.score),
            True,
            setting.dashboard_color
        )
        text_rect = text.get_rect()
        text_rect.midleft = self.rect.midleft
        surface.blit( text, text_rect )
        # Paint health in the center of the dashboard: fill width scales
        # with the player's health, clamped to 100%.
        if globals.player.health > 0:
            hb_rect = pygame.Rect(self.health_bar_rect)
            hb_rect.width = (self.health_bar_rect.width * min(globals.player.health, 100)) // 100
            pygame.draw.rect(surface,(200,0,0),hb_rect)
        # Paint level middle right of dashboard
        text = self.font.render(
            "Level: " + str(globals.game.level) +" ",
            True,
            setting.dashboard_color
        )
        text_rect = text.get_rect()
        text_rect.midright = self.rect.midright
        surface.blit( text, text_rect )
6fa52af369ee4838e13a613512e941b874b1ec1a | Python | wangjcStrive/PYLeetCode | /LongestSubstringWithoutRepeatingCharacters.py | UTF-8 | 713 | 3.546875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 5/22/2018 9:02 AM
# @FileName: LongestSubstringWithoutRepeatingCharacters.py
# Info: LCode 3.
# solution: Hash
class Solution:
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of `s` with no
        repeating characters.

        Sliding-window scan: `window_start` marks the left edge of the
        current duplicate-free window, and `last_seen` maps each character
        to the index of its most recent occurrence.

        :type s: str
        :rtype: int
        """
        window_start = 0
        last_seen = {}
        best = 0
        for idx, ch in enumerate(s):
            prev = last_seen.get(ch)
            if prev is not None and prev >= window_start:
                # Duplicate inside the window: record its length and slide
                # the window just past the previous occurrence.
                best = max(best, idx - window_start)
                window_start = prev + 1
            last_seen[ch] = idx
        # The final window may be the longest one.
        return max(best, len(s) - window_start)
if __name__ == '__main__':
    # Smoke test: print the answer for a few sample strings.
    samples = ['aa', 'abcabcbb', 'pwwkew']
    solver = Solution()
    for sample in samples:
        print(solver.lengthOfLongestSubstring(sample))