blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
be984359813ff69a89cf37ca1c54303d1437c4e2 | Python | cgiroux86/leetcode- | /reverseParentheses.py | UTF-8 | 595 | 3.28125 | 3 | [] | no_license | class Solution:
def reverseParentheses(self, s: str) -> str:
res = ''
stack = []
for char in s:
if char == "(":
stack.append([])
elif char == ")":
if len(stack) == 1:
res += "".join(stack.pop()[::-1])
continue
word = stack[-1][::-1]
stack.pop()
stack[-1] += word
else:
if stack:
stack[-1].append(char)
else:
res += char
return res
| true |
69cc87cdedff5a6dbf19ef1d3b8f51ea1158c2b4 | Python | webclinic017/sagetrader_api | /mspt/apps/users/crud.py | UTF-8 | 1,580 | 2.53125 | 3 | [] | no_license | from typing import Optional
from sqlalchemy.orm import Session
from mspt.apps.users import models
from mspt.apps.users import schemas
from mspt.settings.security import verify_password, get_password_hash
from mspt.apps.mixins.crud import CRUDMIXIN
class CRUDUser(CRUDMIXIN[models.User, schemas.UserCreate, schemas.UserUpdate]):
    """CRUD helpers for User records plus credential verification."""

    def get_by_email(self, db_session: Session, *, email: str) -> Optional[models.User]:
        """Return the user with the given e-mail address, or None."""
        return (
            db_session.query(models.User)
            .filter(models.User.email == email)
            .first()
        )

    def create(self, db_session: Session, *, obj_in: schemas.UserCreate) -> models.User:
        """Persist a new user (password stored hashed) and return it."""
        new_user = models.User(
            first_name=obj_in.first_name,
            last_name=obj_in.last_name,
            email=obj_in.email,
            hashed_password=get_password_hash(obj_in.password),
            is_superuser=obj_in.is_superuser,
        )
        db_session.add(new_user)
        db_session.commit()
        # Refresh so DB-generated fields (e.g. the primary key) are loaded.
        db_session.refresh(new_user)
        return new_user

    def authenticate(
        self, db_session: Session, *, email: str, password: str
    ) -> Optional[models.User]:
        """Return the matching user when the password checks out, else None."""
        candidate = self.get_by_email(db_session, email=email)
        if candidate and verify_password(password, candidate.hashed_password):
            return candidate
        return None

    def is_active(self, user: models.User) -> bool:
        """Whether the account is active."""
        return user.is_active

    def is_superuser(self, user: models.User) -> bool:
        """Whether the account has superuser rights."""
        return user.is_superuser
# Module-level singleton used by the rest of the application.
user = CRUDUser(models.User)
| true |
f84081aeef5fdae433e2bbc77b0d5b699b7035c4 | Python | Ford-z/Nowcoder | /天弃之子.py | UTF-8 | 1,024 | 3.46875 | 3 | [] | no_license | #作者:一只酷酷熊
#链接:https://www.nowcoder.com/discuss/612463
#来源:牛客网
#Problem
#The game has n levels; level i has a_i buttons, only one of which clears
#the level. Pressing a wrong button restarts the game from the beginning,
#but the player remembers buttons learned through trial and error.
#Question: in the worst case (level i needs a_i attempts), how many button
#presses are required to finish the game?
#Analysis
#The crux is reading the statement carefully: once we know every level i
#takes a_i attempts, the counting is straightforward.
#Level i suffers a_i - 1 failures; each failure means replaying the first
#i - 1 levels plus the current one, i.e. i presses, contributing (a_i - 1) * i.
#Finally, the single successful run presses one button per level, adding n.
class Solution:
    def findMaxButtons(self, buttons):
        """Worst-case number of button presses needed to clear every level.

        Level i (1-based) fails buttons[i-1]-1 times; each failure costs i
        presses (replaying the prefix), and the final successful run costs
        one press per level.
        """
        total = buttons[0]
        for level, count in enumerate(buttons[1:], start=2):
            total += 1 + (count - 1) * level
        return total
| true |
047970cbc89ffb4bc43549e72f2f2853b8aa765b | Python | harsha444/toppr_training | /day_wise_work_done/22nd_june/python_prac/multiple_inheritance.py | UTF-8 | 720 | 4.1875 | 4 | [] | no_license | class Aquatic:
    def __init__(self, name):
        # Display name used by swim() and greet().
        self.name = name
def swim(self):
print(self.name + " is swimming")
def greet(self):
print(self.name + " from Sea")
class Amulatory:
    """Land-walking animal (name appears to be a misspelling of 'ambulatory')."""

    def __init__(self, name):
        self.name = name

    def walk(self):
        """Announce that this animal is walking."""
        announcement = self.name + " is walking"
        print(announcement)

    def greet(self):
        """Introduce this animal as coming from land."""
        announcement = self.name + " from land"
        print(announcement)
# With multiple inheritance, the class listed first in the base-class list
# gets higher priority in the MRO, so pingu.greet() prints "pingu from Sea"
# because Aquatic was passed first.
class Penguin(Aquatic, Amulatory):
    def __init__(self, name):
        # MRO is Penguin -> Aquatic -> Amulatory, so super().__init__
        # resolves to Aquatic.__init__ (both parents only store the name).
        super().__init__(name)
# Demo: the greeting comes from Aquatic because it is first in the MRO.
penguin = Penguin("pingu")
print(penguin.name)
penguin.greet()
| true |
28d22c2b07ab128d30bfbf99e0a727a97c89a635 | Python | zephod-exodius/pysecuritycenterdevelop | /examples/sc4/lce_wmi_tuner/wmi_config_gen.py | UTF-8 | 4,330 | 2.640625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
import securitycenter
import os
from ConfigParser import ConfigParser
# Main configuration template for the generated wmi_monitor.conf.
conf_tpl = '''
options = {
    log-directory {LOG_DIR}
    lce-server {LCE_HOST} {
        client-auth auth-secret-key {LCE_AUTH_KEY}
    }
    server-port {LCE_PORT}
    {SYSLOG_SERVERS}
    heartbeat-frequency {HEARTBEAT}
    statistics-frequency {STATISTICS}
    {DEBUG}
{WMI_HOSTS}
}
'''

# Per-host WMI configuration template.
wmi_tpl = '''
WMI-host {
    address {WMI_ADDRESS}
    {DOMAIN}
    username {USERNAME}
    password {PASSWORD}
    monitor {MONITOR}
}
'''

# First thing we need to do is open our own config file.
conf = ConfigParser()
conf.read(os.path.join(os.path.dirname(__file__), 'wmi_config_gen.conf'))

# Build the base configuration from the [LCE Settings] section.
wmi = conf_tpl
wmi = wmi.replace('{LOG_DIR}', conf.get('LCE Settings', 'log_directory'))
wmi = wmi.replace('{LCE_HOST}', conf.get('LCE Settings', 'lce_server'))
wmi = wmi.replace('{LCE_AUTH_KEY}', conf.get('LCE Settings', 'lce_auth_key'))
wmi = wmi.replace('{LCE_PORT}', conf.get('LCE Settings', 'lce_server_port'))
wmi = wmi.replace('{HEARTBEAT}', conf.get('LCE Settings', 'heartbeat'))
wmi = wmi.replace('{STATISTICS}', conf.get('LCE Settings', 'stats'))

# Add a syslog-server entry for every (comma-separated) server listed.
syslog_servers = conf.get('LCE Settings', 'syslog').split(',')
syslog_entries = []
for syslog in syslog_servers:
    syslog = syslog.strip()
    # BUG FIX: the original used the identity check `is not ''`, which only
    # worked by CPython string-interning accident; use equality instead.
    if syslog != '':
        syslog_entries.append('syslog-server %s' % syslog)
wmi = wmi.replace('{SYSLOG_SERVERS}', '\n'.join(syslog_entries))

# Emit the debug directive only when debug is enabled in the config.
if conf.getboolean('LCE Settings', 'debug'):
    wmi = wmi.replace('{DEBUG}', 'debug')
else:
    wmi = wmi.replace('{DEBUG}', '')

# Poll SecurityCenter for the IPs in the configured asset list and build
# one WMI-host stanza per address.
sc4 = securitycenter.SecurityCenter4(conf.get('SC4 Settings', 'host'),
                                     port=conf.get('SC4 Settings', 'port'))
sc4.login(conf.get('SC4 Settings', 'user'), conf.get('SC4 Settings', 'pass'))
assets = sc4.assets()['response']['assets']

# Find the configured asset by name, then pull its per-IP summary.
ip_list = []
for asset in assets:
    if asset['name'] == conf.get('SC4 Settings', 'asset'):
        ip_list = sc4.vuln_search(tool='sumip',
                                  startOffset=0,
                                  endOffset=100000,
                                  sourceType='cumulative',
                                  filters=[{
                                      'filterName': 'assetID',
                                      # BUG FIX: this was hard-coded to 6;
                                      # use the id of the asset we matched.
                                      'value': asset['id'],
                                      'operator': '='
                                  }])['response']['results']

# A simple catch in case we got an empty dataset.
if len(ip_list) < 1:
    exit()

# Build a filled-in template for each host and join them into the config.
hosts = []
for ip in ip_list:
    domain = ''
    host = wmi_tpl
    host = host.replace('{WMI_ADDRESS}', ip['ip'])
    host = host.replace('{USERNAME}', conf.get('WMI Settings', 'user'))
    host = host.replace('{PASSWORD}', conf.get('WMI Settings', 'pass'))
    host = host.replace('{MONITOR}', conf.get('WMI Settings', 'monitor'))
    if conf.get('WMI Settings', 'domain') != '':
        domain = 'domain %s' % conf.get('WMI Settings', 'domain')
    host = host.replace('{DOMAIN}', domain)
    hosts.append(host)
wmi = wmi.replace('{WMI_HOSTS}', '\n'.join(hosts))

# And lastly, write the generated config (with-block guarantees the close).
with open('wmi_monitor.conf', 'w') as wmi_file:
    wmi_file.write(wmi)
7ec96bb2bbfd69ae53fb3f3ba8462a852468be99 | Python | abhishekgupta5/loktra_task | /crawler/crawl.py | UTF-8 | 3,171 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/python3
#Standard library import
import sys
#Third party imports
import requests
from bs4 import BeautifulSoup as bs
class CrawlIt(object):
    """Scraper for www.shopping.com product-search results."""

    # Query 1: total number of results for a given keyword.
    def query_one(self, kw):
        """Return the total result count for keyword `kw`.

        `kw` uses '_' as a word separator; it is converted to '+' for the
        URL query string. Exits the process on a non-200 HTTP response.
        """
        # URL construction ('_'-separated words -> '+'-separated query).
        kw = '+'.join(kw.split('_'))
        baseurl = "http://www.shopping.com/products?KW={}"
        url = baseurl.format(kw)
        print("Crawling into www.shopping.com for query one...")
        # Fetch the results page.
        page = requests.get(url)
        # Bail out if the request failed.
        if (page.status_code != 200):
            print("Invalid page request")
            sys.exit()
        # Parse the HTML into a BeautifulSoup tree.
        soup = bs(page.text, 'html.parser')
        # The result count lives in an element with class "numTotalResults".
        # NOTE(review): assumes that element exists — IndexError otherwise.
        get_results = soup.find_all(class_="numTotalResults")[0].text
        # The count is the last whitespace-separated token of that text.
        return int(get_results.split()[-1])

    # Query 2: all product titles for a keyword on a specific results page.
    def query_two(self, kw, pn):
        """Return {position: title} for page `pn` of keyword `kw`.

        Returns 0 when the page holds no results.
        """
        # URL construction (page number embedded in the path).
        kw = '+'.join(kw.split('_'))
        baseurl = "http://www.shopping.com/products~PG-{}?KW={}"
        url = baseurl.format(pn, kw)
        print("Crawling into www.shopping.com for query two...")
        # Fetch the results page.
        page = requests.get(url)
        # Bail out if the request failed.
        if (page.status_code != 200):
            print("Invalid url")
            sys.exit()
        # Parse the HTML into a BeautifulSoup tree.
        soup = bs(page.text, 'html.parser')
        # Product names are in <span> elements with ids nameQA1, nameQA2, ...
        id_=1
        dic = {}
        # Collect titles until a consecutive id is missing.
        while True:
            product_title = soup.find('span', id='nameQA'+str(id_))
            if product_title is not None:
                # NOTE(review): [:-6] strips a fixed-length suffix from the
                # title text — verify it still matches the site's markup.
                dic[id_] = product_title.text[:-6]
                id_ += 1
            else:
                break
        # No product span at all means the page has no results.
        if id_ == 1:
            return 0
        # Mapping of 1-based position -> product title.
        return dic

    # Dispatch on the number of CLI arguments and run the matching query.
    def parse_arguments(self):
        """Run query_two for `keyword page` args, query_one for `keyword`."""
        try:
            # Query two: keyword plus page number.
            if len(sys.argv) == 3:
                keyword = str(sys.argv[1])
                page_number = int(sys.argv[2])
                get_dic = self.query_two(keyword, page_number)
                print()
                if get_dic != 0:
                    print(get_dic)
                else:
                    print('No results on Page', page_number)
                print()
            # Query one: keyword only.
            elif len(sys.argv) == 2:
                keyword = str(sys.argv[1])
                print("Number of results -", self.query_one(keyword))
            else:
                print("Incorrect number of arguments")
                sys.exit()
        except ValueError:
            # int(sys.argv[2]) failed, i.e. the page number was not numeric.
            print("Incorrect order/type of arguments")
if __name__ == '__main__':
    # Build the crawler and hand control to the CLI dispatcher.
    crawler = CrawlIt()
    crawler.parse_arguments()
| true |
57d2d3112aa79b188a38aaddcb7256531879aaca | Python | VargheseVibin/dabble-with-python | /Day38_ApiWorkoutTracker/main.py | UTF-8 | 1,702 | 3 | 3 | [] | no_license | import requests
import datetime
import os
# Nutritionix API details (credentials come from environment variables).
APP_ID = os.environ["NT_APP_ID"]
API_KEY = os.environ.get("NT_API_KEY")
# NOTE(review): this prints an API credential to stdout — consider removing.
print(f"APP_ID:{APP_ID}")
# Personal stats sent to the exercise parser for calorie estimation.
GENDER = "male"
WEIGHT_KG = 89.9
HEIGHT_CM = 178.2
AGE = 36
exercise_endpoint = "https://trackapi.nutritionix.com/v2/natural/exercise"
headers = {
    "x-app-id": APP_ID,
    "x-app-key": API_KEY,
    "Content-Type": "application/json",
}
# Free-text description of today's exercises, parsed by Nutritionix.
user_query = input("Tell me the exercises you did today:")
exercise_params = {
    "query": user_query,
    "gender": GENDER,
    "weight_kg": WEIGHT_KG,
    "height_cm": HEIGHT_CM,
    "age": AGE
}
exercise_resp = requests.post(url=exercise_endpoint, headers=headers, json=exercise_params)
exercise_resp.raise_for_status()
exercise_list = exercise_resp.json()["exercises"]
# Timestamp used for every row logged in this run.
now = datetime.datetime.now()
date_now = now.strftime("%d/%m/%Y")
time_now = now.strftime("%H:%M:%S")
# Sheety API details for appending rows to the workouts spreadsheet.
SHEET_ENDPOINT = "https://api.sheety.co/9cbbfd5fb78ba1c880bee3c677110bf8/myWorkouts/workouts"
SHEET_TOKEN = os.environ.get("SHEET_TOKEN")
sheet_headers = {
    "Authorization": "Bearer " + SHEET_TOKEN
}
# Log one spreadsheet row per parsed exercise.
for ex_item in exercise_list:
    sheet_log_data = {
        "workout": {
            "date": date_now,
            "time": time_now,
            "exercise": ex_item["name"].title(),
            "duration": float(ex_item["duration_min"]),
            "calories": float(ex_item["nf_calories"]),
        }
    }
    print(f"Ex Item:\n{ex_item}")
    sheet_update_resp = requests.post(url=SHEET_ENDPOINT, headers=sheet_headers, json=sheet_log_data)
    print(sheet_update_resp.text)
# Read back and display the full sheet contents.
sheet_get_resp = requests.get(url=SHEET_ENDPOINT, headers=sheet_headers)
print(sheet_get_resp.json())
| true |
# Simple joke picker: ask for a number from 1 to 3 and print the joke.
print("Do you want to hear a joke?")
print("Pick your favourite number!")
number = int(input("Choose a number between 1 and 3!:"))
if number == 1:
    print("Why did the chicken cross the road? To get to the other side!")
elif number == 2:
    # Typo fixes: "dont" -> "don't", "everthing" -> "everything".
    print("Why don't scientists trust atoms? Because they make up everything!")
elif number == 3:
    print("Why are pirates pirates? Because they arghhh!")
else:
    # BUG FIX: the original silently printed nothing for out-of-range input.
    print("That number is not between 1 and 3!")
| true |
efd978fb5993aa3ea94abb0cef90a165a669b460 | Python | Zombor00/tequila | /tests/test_binary_pauli.py | UTF-8 | 6,964 | 2.796875 | 3 | [
"MIT"
] | permissive | import tequila as tq
from tequila.hamiltonian import QubitHamiltonian, PauliString, paulis
from tequila.grouping.binary_rep import BinaryPauliString, BinaryHamiltonian
from collections import namedtuple
import numpy as np
# Lightweight record pairing a coefficient with its binary symplectic vector.
BinaryPauli = namedtuple("BinaryPauli", "coeff, binary")
def prepare_test_hamiltonian():
    '''
    Build a small fixed Hamiltonian together with its expected binary
    representation and coefficient vector.
    '''
    hamiltonian = (-1.0 * paulis.Z(0) * paulis.Z(1)
                   - 0.5 * paulis.Y(0) * paulis.Y(1)
                   + 0.1 * paulis.X(0) * paulis.X(1)
                   + 0.2 * paulis.Z(2))
    expected_coeffs = np.array([-1.0, -0.5, 0.1, 0.2])
    expected_binary = np.array([[0, 0, 0, 1, 1, 0], [1, 1, 0, 1, 1, 0],
                                [1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]])
    return hamiltonian, hamiltonian.n_qubits, expected_binary, expected_coeffs
def test_binarypauli_conversion():
    '''
    PauliString.binary must reproduce the known coefficients and
    binary vectors, and the identity maps to the all-zero vector.
    '''
    H, n_qubits, binary_sol, coeff_sol = prepare_test_hamiltonian()
    for idx in range(3):
        word = H.paulistrings[idx].binary(n_qubits)
        assert word.coeff == coeff_sol[idx]
        assert all(word.binary == binary_sol[idx, :])
    identity_word = paulis.I().paulistrings[0].binary(n_qubits)
    assert all(identity_word.binary == np.zeros(2 * n_qubits))
def test_binary_pauli():
    '''
    Exercise equality and commutation checks on binary Pauli strings.
    '''
    pauli_x1 = BinaryPauliString([1, 0], 1)
    pauli_x1_alt = BinaryPauliString(np.array([1, 0]), 2.1)
    assert pauli_x1.same_pauli(pauli_x1_alt)
    assert pauli_x1.commute(pauli_x1_alt)
    # X and Y on the same qubit anti-commute.
    pauli_y1 = BinaryPauliString([1, 1], 1)
    assert not pauli_x1.commute(pauli_y1)
    # XX and YY on two qubits commute.
    pauli_xx = BinaryPauliString([1, 1, 0, 0], 2)
    pauli_yy = BinaryPauliString([1, 1, 1, 1], 2.1 + 2j)
    assert pauli_xx.commute(pauli_yy)
def test_binary_hamiltonian_initialization():
    '''
    The binary form built from a qubit Hamiltonian must match the
    expected binary matrix and coefficient vector.
    '''
    H, _, binary_sol, coeff_sol = prepare_test_hamiltonian()
    binary_ham = BinaryHamiltonian.init_from_qubit_hamiltonian(H)
    assert (np.array(binary_ham.get_binary()) == binary_sol).all()
    assert (np.array(binary_ham.get_coeff()) == coeff_sol).all()
def test_to_qubit_hamiltonian():
    '''
    Round trip qubit -> binary -> qubit must preserve the Hamiltonian.
    '''
    H, _, _, _ = prepare_test_hamiltonian()
    round_tripped = BinaryHamiltonian.init_from_qubit_hamiltonian(H).to_qubit_hamiltonian()
    assert equal_qubit_hamiltonian(H, round_tripped)
def test_single_qubit_basis_transfrom():
    '''
    single_qubit_form must agree with the brute-force basis rotation.
    (Function name keeps the historical "transfrom" spelling so test
    identifiers remain stable.)
    '''
    cases = (
        prepare_test_hamiltonian()[0],
        -1.0 * paulis.X(0) * paulis.X(1) * paulis.X(2) + 2.0 * paulis.Y(0) * paulis.Y(1),
    )
    for H in cases:
        single_qub_H, old_basis, new_basis = BinaryHamiltonian.init_from_qubit_hamiltonian(
            H).single_qubit_form()
        expected = brute_force_transformation(H, old_basis, new_basis)
        assert equal_qubit_hamiltonian(single_qub_H.to_qubit_hamiltonian(), expected)
def brute_force_transformation(H, old_basis, new_basis):
    '''
    Sandwich H between the product of pair unitaries (a + b)/sqrt(2)
    formed from matching elements of the old and new bases.
    '''
    def pair_unitary(a, b):
        '''
        Accepts BinaryPauliStrings a, b and returns 1/sqrt(2) (a + b)
        as a qubit Hamiltonian.
        '''
        qubit_a = QubitHamiltonian.from_paulistrings(a.to_pauli_strings())
        qubit_b = QubitHamiltonian.from_paulistrings(b.to_pauli_strings())
        return (1 / 2) ** (1 / 2) * (qubit_a + qubit_b)

    U = QubitHamiltonian(1)
    for idx, old_elem in enumerate(old_basis):
        U *= pair_unitary(old_elem, new_basis[idx])
    return U * H * U
def equal_qubit_hamiltonian(a, b):
    """Return True when two qubit Hamiltonians agree termwise within 1e-6.

    BUG FIX: the original only iterated over ``a``, so terms present in
    ``b`` but absent from ``a`` were ignored and unequal Hamiltonians
    could compare equal. Both directions are now checked.
    """
    tiny = 1e-6
    for key, value in a.items():
        if key in b.keys():
            if not (abs(value - b[key]) < tiny):
                return False
        else:
            # Term only in a: its coefficient must be negligible.
            if not (abs(value) < tiny):
                return False
    for key, value in b.items():
        if key not in a.keys():
            # Term only in b: its coefficient must be negligible.
            if not (abs(value) < tiny):
                return False
    return True
def test_commuting_groups():
    '''
    Every part returned by commuting_groups must itself be commuting.
    '''
    base_ham = prepare_test_hamiltonian()[0] + paulis.X(0) + paulis.Y(0)
    binary_ham = BinaryHamiltonian.init_from_qubit_hamiltonian(base_ham)
    for part in binary_ham.commuting_groups():
        assert part.is_commuting()
def test_qubit_wise_commuting():
    '''
    is_qubit_wise_commuting must distinguish QWC sets from sets that
    merely commute as a whole.
    '''
    non_qwc_ham = BinaryHamiltonian.init_from_qubit_hamiltonian(
        -1.0 * paulis.Z(0) * paulis.Z(1) - 0.5 * paulis.Y(0) * paulis.Y(1))
    qwc_ham = BinaryHamiltonian.init_from_qubit_hamiltonian(
        paulis.Z(0) * paulis.Z(1) + paulis.Z(1) * paulis.Y(2))
    assert not non_qwc_ham.is_qubit_wise_commuting()
    assert qwc_ham.is_qubit_wise_commuting()
def test_get_qubit_wise():
    '''
    get_qubit_wise must return an all-Z Hamiltonian plus a rotation
    circuit that leaves expectation values (and optimized energies)
    unchanged.
    '''
    H, _, _, _ = prepare_test_hamiltonian()
    H = BinaryHamiltonian.init_from_qubit_hamiltonian(H)
    qwc, qwc_U = H.get_qubit_wise()
    # Every term of the rotated Hamiltonian must be composed of Z only.
    for term, val in qwc.items():
        for qub in term:
            assert qub[1] == 'Z'
    # Expectation values must agree between the original, the explicitly
    # rotated, and the measurement-optimized objectives.
    U = tq.gates.ExpPauli(angle="a", paulistring=tq.PauliString.from_string('X(0)Y(1)'))
    variables = {"a": np.random.rand(1) * 2 * np.pi}
    e_ori = tq.ExpectationValue(H=H.to_qubit_hamiltonian(), U=U)
    e_qwc = tq.ExpectationValue(H=qwc, U=U + qwc_U)
    e_integrated = tq.ExpectationValue(H=H.to_qubit_hamiltonian(), U=U, optimize_measurements=True)
    result_ori = tq.simulate(e_ori, variables)
    result_qwc = tq.simulate(e_qwc, variables)
    # BUG FIX: the original simulated e_qwc here a second time, so the
    # optimize_measurements objective was never actually exercised.
    result_integrated = tq.simulate(e_integrated, variables)
    assert np.isclose(result_ori, result_qwc)
    assert np.isclose(result_ori, result_integrated)
    # Optimized energies must also agree across the three objectives.
    initial_values = {k: np.random.uniform(0.0, 6.0, 1) for k in e_ori.extract_variables()}
    sol1 = tq.minimize(method='bfgs', objective=e_ori, initial_values=initial_values)
    sol2 = tq.minimize(method='bfgs', objective=e_qwc, initial_values=initial_values)
    sol3 = tq.minimize(method='bfgs', objective=e_integrated, initial_values=initial_values)
    assert np.isclose(sol1.energy, sol2.energy)
    assert np.isclose(sol1.energy, sol3.energy)
| true |
from datetime import date

from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, DateField, TextAreaField, IntegerField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError

# NOTE(review): Businesses must exist in app.models.models — verify.
from app.models.models import User, Businesses
# Sign-up form.
class RegisterForm(FlaskForm):
    """Registration form with uniqueness checks on username and e-mail."""
    username = StringField('username', validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Sign up')

    def validate_username(self, username):
        # Reject usernames that already exist in the database.
        existing = User.query.filter_by(username=username.data).first()
        if existing is not None:
            raise ValidationError('This username is already taken please use a different username')

    def validate_email(self, email):
        # Reject e-mail addresses that already exist in the database.
        existing = User.query.filter_by(email=email.data).first()
        if existing is not None:
            raise ValidationError('This email is already taken please use different one')
# Login form.
class LoginForm(FlaskForm):
    """Sign-in form: e-mail and password, with a remember-me option."""
    email = StringField('Email', validators=[DataRequired(),Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField('Remember me')
    submit = SubmitField('Login')
# Business registration form.
class BusinessForm(FlaskForm):
    """Form for registering a business.

    Fixes over the original:
    * ``default=date.today`` passes the callable, so the default start date
      is evaluated per request instead of once at import time.
    * the uniqueness validator is named ``validate_name`` so WTForms
      actually invokes it for the ``name`` field (``validate_business`` was
      never called), and it reads ``name.data`` instead of the undefined
      name ``form``.
    """
    name = StringField('Business name', validators=[DataRequired()])
    location = StringField('Location', validators=[DataRequired()])
    date = DateField('Start date', default=date.today, format='%d/%m/%Y',
                     validators=[DataRequired(message='Enter the start date of your business.')])
    business_description = TextAreaField('Give the description of your business')
    submit = SubmitField('Submit')

    def validate_name(self, name):
        # Reject duplicate business names.
        business = Businesses.query.filter_by(name=name.data).first()
        if business:
            raise ValidationError('Business already registered')
# Delete-business form.
class DeleteForm(FlaskForm):
    """Deletion form: carries the numeric id of the business to remove."""
    # NOTE: the field name `id` shadows the builtin, but only at class scope.
    id = IntegerField('Business id', validators=[DataRequired()])
    submit = SubmitField('Delete')
| true |
8bbcb87a69de46c370dfce48b018b0b98abb55e5 | Python | takecian/ProgrammingStudyLog | /hackerrank/30-days-of-code/day2.py | UTF-8 | 284 | 2.90625 | 3 | [] | no_license | import itertools
from collections import Counter
from collections import defaultdict
import bisect
def main():
    """Read meal cost, tip percent, and tax percent; print the rounded total."""
    meal_cost = float(input())
    tip_percent = int(input())
    tax_percent = int(input())
    total = meal_cost + meal_cost * tip_percent / 100 + meal_cost * tax_percent / 100
    print(round(total))
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| true |
771e5d93d4d0e53df2c0a8092a8244bb36e93759 | Python | spaceuniverse/QLSD | /CORE/fSandFun.py | UTF-8 | 4,264 | 2.828125 | 3 | [] | no_license | # ---------------------------------------------------------------------# IMPORTS
import numpy as np
# ---------------------------------------------------------------------# MAIN
class Features(object):
    """Feature extraction helpers for game-object state dictionaries."""

    @staticmethod
    def normal(wfn):
        """Scale a feature vector so its maximum becomes 1 (no-op for all-zero input)."""
        peak = np.max(wfn)
        if peak == 0.0:
            peak = 1.0
        scale = 1.0 / peak
        return wfn * scale

    @staticmethod
    def get(objState):
        """Build the flat feature vector for a 'blockplayer' state dict.

        Layout: 18 scaled environment cells, hit/heal counts, hit/heal
        distances, ignition level, and an idle marker (0.1 when nothing is
        visible and ignition is zero). Returns None for any other class.
        """
        if objState["class"] != "blockplayer":
            return None
        scaled_env = objState["environment"] / 10.0
        hit_zone = objState["environment"][0]
        heal_zone = objState["environment"][1]
        hit_dist = objState["environment_dist"][0]
        heal_dist = objState["environment_dist"][1]
        ignition = objState["ignition"] / 100.0
        # Marker distinguishing "nothing happening" from genuine zeros.
        if ignition == 0.0 and np.sum(scaled_env) == 0.0:
            idle_marker = 0.1
        else:
            idle_marker = 0.0
        vector = np.concatenate([
            np.reshape(scaled_env, 18),       # 2x3x3 environment grid
            [np.sum(hit_zone) / 100.0,        # number of hit objects
             np.sum(heal_zone) / 100.0,       # number of heal objects
             np.sum(hit_dist) / 1000.0,       # total hit distance
             np.sum(heal_dist) / 1000.0,      # total heal distance
             ignition,
             idle_marker],
        ])
        return vector
class Rewards(object):
    """Reward shaping for the agent (Python 2: note the print statements).

    Feature-vector index meanings below are inferred from Features.get:
    [18] hit count, [19] heal count, [20] hit distance, [21] heal
    distance, [22] ignition — TODO confirm against Features.get layout.
    """
    @staticmethod
    def get(objVector, objVectorOld):
        # Accumulators: total reward plus per-component debug sums.
        reward = 0.0
        rs = 0.0
        rb = 0.0
        rh = 0.0
        # Every square scan
        #for i in xrange(9):
        #    if objVector[i] > 0:
        #        reward -= 100 * objVector[i]
        #for i in xrange(9, 18):
        #    if objVector[i] > 0:
        #        reward += 100 * objVector[i]
        # More complex
        # Penalty proportional to the current ignition ("speed") level.
        if objVector[22] > 0: # and objVector[18] == 0 and objVector[19] == 0
            reward -= 100.0 * objVector[22] # 100 500
            rs -= 100.0 * objVector[22] # 100 500
        #if objVector[22] == 0:
        #    reward += 10.0
        #    rs += 10.0
        print "RSpeed: ", rs
        # Hit ("bullet") distance: penalize getting closer, reward moving
        # away, and penalize standing still while hits are present.
        if objVector[20] < objVectorOld[20] and objVector[18] == objVectorOld[18]:
            reward -= 100000.0 * (np.abs(objVectorOld[20] - objVector[20]))
            rb -= 100000.0 * (np.abs(objVectorOld[20] - objVector[20]))
        if objVector[20] > objVectorOld[20] and objVector[18] == objVectorOld[18]:
            reward += 100000.0 * (np.abs(objVector[20] - objVectorOld[20]))
            rb += 100000.0 * (np.abs(objVector[20] - objVectorOld[20]))
        if objVector[20] == objVectorOld[20] and objVector[18] == objVectorOld[18] and objVector[18] != 0:
            reward -= 100.0
            rb -= 100.0
        print "RBullet: ", rb
        # Heal distance: mirrored signs — reward approaching heals,
        # penalize retreating, penalize standing still while heals exist.
        if objVector[21] > objVectorOld[21] and objVector[19] == objVectorOld[19]:
            reward -= 100000.0 * (np.abs(objVector[21] - objVectorOld[21]))
            rh -= 100000.0 * (np.abs(objVector[21] - objVectorOld[21]))
        if objVector[21] < objVectorOld[21] and objVector[19] == objVectorOld[19]:
            reward += 100000.0 * (np.abs(objVectorOld[21] - objVector[21]))
            rh += 100000.0 * (np.abs(objVectorOld[21] - objVector[21]))
        if objVector[21] == objVectorOld[21] and objVector[19] == objVectorOld[19] and objVector[19] != 0:
            reward -= 100.0
            rh -= 100.0
        print "RHeal: ", rh
        # Debug: total reward should equal the sum of its components.
        print reward, rs + rb + rh
        return reward
# ---------------------------------------------------------------------# | true |
# Append 'ing' to the input string, or 'ly' if it already contains 'ing';
# strings shorter than 3 characters are echoed unchanged.
str1 = input("enter a string: ")
if len(str1) >= 3:
    # BUG FIX: the original wrapped this in `for i in str1:`, which
    # recomputed and printed the same result once per character.
    if 'ing' in str1:
        result = str1 + 'ly'
    else:
        result = str1 + 'ing'
    print(result)
else:
    print(str1)
c9adf542ceee354cf573cd411250fe6cc4fa8eab | Python | wanng-ide/Algorithms-and-Data-Structures | /SelectionSort.py | UTF-8 | 590 | 3.90625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 3 01:00:53 2019
@author: wanng
SelectionSort
O(n^2)
find the smallest #, and pop it, then find the smallest # in the rest
"""
def FindSmallest(array):
    """Return the index of the smallest element (first one on ties)."""
    best = 0
    for idx in range(1, len(array)):
        if array[idx] < array[best]:
            best = idx
    return best
def SelectionSort(array):
    """Selection sort: return a new list in ascending order.

    BUG FIX: the original popped elements straight out of the caller's
    list, leaving the input empty after sorting. We now work on a copy and
    leave `array` untouched.
    """
    remaining = list(array)
    ordered = []
    for _ in range(len(remaining)):
        smallest_index = FindSmallest(remaining)
        ordered.append(remaining.pop(smallest_index))
    return ordered
# Quick demo: print the list length, then the sorted list.
sample = [11, 2, 5, 7, 10, 1, 0]
print(len(sample))
print(SelectionSort(sample))
b867dc9323bf68d1800a3efea3f6c1486a1c1415 | Python | vskritsky/fasten | /like rates.py | UTF-8 | 993 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 23:05:45 2017
@author: administrator
"""
import numpy as np
import pandas as pd
# Initialize random like rates for 100 couriers.
couriers = np.random.rand(100, 1)
# Round the floats to 2 decimals in place.
np.around(couriers, decimals=2, out=couriers)
# 1-based index numeration.
index = [int(i) for i in range(1, len(couriers) + 1)]
# Build the data frame with a default "keep" verdict.
columns = ['likes rate']
df = pd.DataFrame(couriers, index=index, columns=columns)
df['to terminate'] = 'no'
# Mean like rate across all couriers.
mean = np.mean(df['likes rate'])
# Couriers with a like rate at or below 2/3 of the mean are advised for
# termination.
# BUG FIX: the original compared `3*df <= 2*mean` on the whole frame,
# which also multiplied/compared the string column 'to terminate'
# (TypeError on str vs float); the mask must use the numeric column only.
low_rated = 3 * df['likes rate'] <= 2 * mean
to_terminate = df[low_rated].copy()
df = df[~low_rated]
to_terminate['to terminate'] = 'yes'
# Concatenate both partitions and restore the original index order.
df_concatenated = pd.concat([df, to_terminate])
df_sorted = df_concatenated.sort_index(axis=0, kind='mergesort')
print(df_sorted)
| true |
0fb04792f8299f4e505db2d9ea62df0e12054c6b | Python | mrklees/market-agent | /script/train_model.py | UTF-8 | 4,121 | 2.828125 | 3 | [] | no_license | import sys
sys.path.append('.')
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from multiprocessing import Pool, freeze_support
from tqdm import tqdm
from MarketAgent.Market import Market, StockData
from MarketAgent.Trader import ValueTrader
# Enable memory growth on every visible GPU so TensorFlow allocates VRAM
# on demand instead of grabbing it all up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
# --- Training hyper-parameters ---
n_epochs = 25
n_episodes = 100
episode_size = 100
starting_assets = 10000
window_size = 10
memory_size = 1000000
exploration_ratio = 0.3
get_fresh_data = False
train_new_model = True
# Optionally refresh the cached stock series from the data provider.
if get_fresh_data:
    data = StockData(secret_path="./.secrets/alphavantage.key")
    stocks, meta = data.collect_data(
        asset="GOOGL",
        interval='daily',
        output='full'
    )
    stocks.to_csv("./.data/stock_series.csv", index=False)
stocks = pd.read_csv("./.data/stock_series.csv")
# Market simulation over the historical series.
market = Market(
    stocks,
    window_size=window_size,
    starting_assets=starting_assets
)
trader = ValueTrader(
    window_size=window_size,
    memory_size=memory_size
)
# Either build a fresh value network or reload a previously trained one.
if train_new_model:
    trader.build_value_network(window_size)
else:
    trader.model = tf.keras.models.load_model("value_10_model.tf")
def process_episode(
    episode,
    trader=trader,
    market=market,
    starting_assets=starting_assets
):
    """Run the trading policy over one episode and return it as a memory.

    For each timestep the trader either explores (random action) or picks
    the best-value action from {-1, 0, 1}; per-step assets, decisions, and
    a reward column are appended and the episode is returned as a numpy
    array suitable for replay memory.

    NOTE(review): module-level ``trader``/``market``/``starting_assets``
    are bound as default arguments so pool workers can call this directly.
    ``episode`` is presumably a DataFrame of market windows — confirm
    against Market.get_episode.
    """
    current_assets = starting_assets
    # Apply the policy to each timestep in the episode, making a
    # -1, 0, 1 decision
    decisions = []
    orders = []
    assets = []
    for row in episode.iterrows():
        # Combine market state with current assets as feature
        # for policy network
        state = np.concatenate([row[1], [current_assets]])
        if random.random() < exploration_ratio:
            # To help with exploration, we'll make random decisions
            # some times during training
            decision = trader.random_action()
        else:
            # Score each candidate action and pick via the policy.
            values = [
                trader.value(proposed_action, state)
                for proposed_action in [-1, 0, 1]
            ]
            decision = trader.policy(values)
        decisions.append(decision)
        # Determine the value of the decision at t_0
        order = decision * row[1][1]
        orders.append(order)
        # Update assets
        new_asset_value = current_assets + order
        assets.append(new_asset_value)
        current_assets = new_asset_value
    episode['assets'] = assets
    episode['decisions'] = decisions
    # If the cumulative sum is greater than zero, then we've sold more
    # stock than we've bought — an invalid state to be penalized.
    INVALID_SALE_PENALTY = -10000
    validate_sell = np.cumsum(episode['decisions']) > 0
    # If assets go below zero... we're broke.
    OUT_OF_MONEY_PENALTY = -10000
    out_of_money = episode['assets'] < 0
    # Net from orders
    net = np.array(orders).sum()
    # Penalties
    sale_penalty = validate_sell * INVALID_SALE_PENALTY
    money_penalty = out_of_money * OUT_OF_MONEY_PENALTY
    reward = net + sale_penalty + money_penalty
    # Save this episode as a memory we can train models from
    episode = np.concatenate(
        [
            episode,
            reward.values.reshape(-1, 1)
        ],
        axis=1
    )
    return episode
if __name__ == "__main__":
freeze_support()
pool = Pool(8)
for epoch in tqdm(range(n_epochs)):
#print(f"Starting Epock {epoch}")
episodes = [
market.get_episode(episode_size=episode_size)
for i in range(n_episodes)
]
# Each episode is independent, so we process them in parallel
memories = pool.map(
process_episode,
episodes
)
trader.memory.extend(list(memories))
trader.process_memory(window_size)
| true |
50ca2561d9be6a7ab3163931f8513d9c8d17d7a8 | Python | yukou-isshiki/aizu_online_judge | /JOI-Prelim/0663.py | UTF-8 | 210 | 3.5625 | 4 | [] | no_license | input_str_list = input().split(" ")
dict = {}
dict[1] = 0
dict[2] = 0
for input_str in input_str_list:
if input_str == "1":
dict[1] += 1
else:
dict[2] += 1
print(max(dict, key=dict.get)) | true |
968aedbebb9816db43510d813aeb74c4efd543c7 | Python | tgremi/queueSimulation | /Processador.py | UTF-8 | 1,597 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
-- Analise e desempenho de Software
"""
import numpy as np
import copy
import matplotlib.pyplot as plt
import time, threading
import random
class Processador:
    """Consumes packages from a queue for a bounded amount of time.

    Bug fixes versus the original:
    * ``processaFila`` was missing ``self`` yet used it in the body.
    * debug prints referenced the unbound name ``flagProcess``.
    * ``setFlagProcessamento`` compared the flag (==) instead of
      assigning it (its name and call sites indicate assignment).
    * ``fimProcesso``/``timeElapsed`` could be read before assignment.
    * the outer ``while (tempo > i)`` never updated ``i``, so the loop
      could not time out; elapsed time is now tracked.
    """

    # Class-level defaults shared by all instances.
    ocupedTime = 0
    packagesProcess = 0
    packagesFinished = 0
    flagProcess = False

    def setFlagProcessamento(self, flag):
        """Set the processing flag on this instance."""
        self.flagProcess = flag

    def processaFila(self, fila, tempo):
        """Process packages from queue ``fila`` for up to ``tempo`` seconds.

        Returns a dict with the number of processed packages and the busy
        time formatted with one decimal place.
        """
        packagesFinished = 0
        self.ocupedTime = random.randint(1, 5)  # kept from original; not used below
        start = time.time()
        elapsed = 0.0
        resultado = {"pacotesProcessados": 0, "tempoOcupado": 0}
        while tempo > elapsed:
            fimProcesso = False
            while not fila.empty():
                print("flagando 1:", self.flagProcess)
                self.setFlagProcessamento(True)
                print("flagando 2:", self.flagProcess)
                # Simulated per-package processing cost.
                time.sleep(random.random())
                packageNumber = fila.get()
                packagesFinished += 1
                timeElapsed = time.time() - start
                print('time : {:.1f}s'.format(timeElapsed))
                print(packageNumber)
                resultado = {"pacotesProcessados": packagesFinished,
                             "tempoOcupado": "{:.1f}".format(timeElapsed)}
                if tempo <= float("{:.1f}".format(timeElapsed)):
                    fimProcesso = True
                    break
            # NOTE(review): busy-waits while the queue is empty until the
            # time budget runs out — consider a short sleep here.
            elapsed = time.time() - start
            if fimProcesso:
                break
        return resultado
| true |
6b2967de3dbe281792c04729453d8c911660ea3b | Python | JaehunYoon/Study | /Programming Language/Python/Facebook/fb_sdk.py | UTF-8 | 1,641 | 3.265625 | 3 | [] | no_license | import facebook
# Fetch a user's recent Facebook posts via the Graph API and dump them to a
# text file.  (Comments translated from Korean; user-facing strings kept.)
# Create a client object using the generated access token passed as an
# argument, and store it in obj.
obj = facebook.GraphAPI(access_token="users-token")
limit = int(input("몇건의 게시물을 검색할까요? "))  # prompt: "How many posts should be fetched?"
# Run obj.get_connections on the facebook object.  get_connections fetches the
# data named by connection_name for the given id; the third argument, limit,
# caps how many posts are returned per call.  Here id is "me" and
# connection_name is "posts".  The result comes back as JSON.
response = obj.get_connections(id="me", connection_name="posts", limit=limit)
# /me?fields=posts.limit(1)
print(response)
# NOTE(review): hard-coded Windows output path; field labels below are Korean
# (author/id/update time/link/message/picture/description) and are runtime
# strings, so they are left untouched.
f = open("C:\\Jaehun\\fb.txt", "w")
for data in response["data"]:
    f.write("==" * 30 + "\n")
    f.write("게시물 작성자 : " + str(data["from"]["name"]) + "\n")
    f.write("게시물 아이디 : " + str(data["from"]["id"]) + "\n")
    f.write("최종 업데이트 시간 : " + str(data["updated_time"]) + "\n")
    f.write("게시물 링크 : " + str(data["actions"][0]["link"]) + "\n")
    if "message" in data:
        f.write("게시물 내용 : " + str(data["message"]) + "\n")
    if "picture" in data:
        f.write("게시물 사진 이름 : " + str(data["name"]) + "\n")
        f.write("사진 주소 : " + str(data["picture"]) + "\n")
    if "description" in data:
        f.write("사진 설명 : " + str(data["description"]) + "\n")
    f.write("==" * 30 + "\n")
f.close()
635d2977671951cee88434d30cb97c53f07000a8 | Python | wagamama/alg-practice | /hashtable.py | UTF-8 | 2,110 | 3.640625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
class HashTable(object):
    """Fixed-size hash map (11 buckets) using open addressing.

    Collisions are resolved by linear probing.  Deletion is "lazy": the key
    stays in ``slot`` (only the value is cleared) so probe chains remain
    intact.  The table does not grow; inserting a 12th distinct key loops.
    """

    def __init__(self):
        self.size = 11
        self.slot = [None] * self.size
        self.data = [None] * self.size

    def hashfunction(self, key):
        """Primary hash: plain modulo over the table size."""
        return key % self.size

    def rehash(self, oldhash):
        """Probe step: advance one bucket, wrapping around."""
        return (oldhash + 1) % self.size

    def put(self, key, data):
        """Insert *key* -> *data*, overwriting any existing entry for *key*."""
        idx = self.hashfunction(key)
        # Walk the probe chain until we hit this key or an empty bucket.
        while self.slot[idx] is not None and self.slot[idx] != key:
            idx = self.rehash(idx)
        self.slot[idx] = key
        self.data[idx] = data

    def get(self, key):
        """Return the value stored for *key*, or None when absent."""
        home = self.hashfunction(key)
        idx = home
        while self.slot[idx] is not None:
            if self.slot[idx] == key:
                return self.data[idx]
            idx = self.rehash(idx)
            if idx == home:
                # Probed every bucket without finding the key.
                break
        return None

    def __getitem__(self, key):
        return self.get(key)

    def __setitem__(self, key, data):
        self.put(key, data)

    def __delitem__(self, key):
        # Lazy delete: clear the value but keep the key in the slot array.
        self.put(key, None)
if __name__ == '__main__':
    # Demo (Python 2 print statements): fill the 11-slot table, then show
    # lookup, overwrite, and lazy deletion.
    h = HashTable()
    h[54] = 'cat'
    h[26] = 'dog'
    h[93] = 'lion'
    h[17] = 'tiger'
    h[77] = 'bird'
    h[31] = 'cow'
    h[44] = 'goat'
    h[55] = 'pig'
    h[20] = 'chicken'
    print h.slot
    print h.data
    print h[20]
    print h[17]
    # Overwrite an existing key in place.
    h[20] = 'duck'
    print h[20]
    print h.data
    # Lazy delete: value becomes None, the key stays in h.slot.
    del h[20]
    print h[20]
    print h.data
| true |
04b2eda4672b32051fd0071a1072b8e149675a3d | Python | alptureci/AWS-BOTO-PYTHON-S3 | /UploadFile.py | UTF-8 | 1,793 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
import boto
user_bucket_extension = '-ucscext-alptureciaws'
def isUserExists(username, password):
    """Return True when `username` is a key in the users bucket and
    `password` equals the first comma-separated field stored under it.

    NOTE(review): credentials appear to be stored as plain text in S3 --
    worth confirming and raising with the owner.
    """
    s3 = boto.connect_s3()
    #first check is users exists
    usersbucket = 'alptureci-users-bucket'
    bucket = s3.get_bucket(usersbucket)
    k = boto.s3.key.Key(bucket)
    k.key = username
    if k.exists():
        #second check is password true
        obj = k.get_contents_as_string()
        valueList = obj.split(',')
        if password == valueList[0]:
            print 'password is true! go on'
            return True
        else:
            print 'wrong password'
            return False
    else:
        print 'User does not exit!'
        return False
def uploadFile():
    """Upload the module-level `pathToFileToUpload` to the caller's per-user
    S3 bucket under key `fileKey`, printing a dot-based progress bar.

    Uses the module globals `username`, `fileKey`, `pathToFileToUpload` set
    in the `__main__` block.
    """
    s3 = boto.connect_s3()
    bucket = s3.get_bucket(username + user_bucket_extension)
    bucket_location = bucket.get_location()
    # Reconnecting to the bucket's own region avoids "Connection reset by
    # peer" on large uploads -- see https://github.com/boto/boto/issues/2207
    if bucket_location:
        conn = boto.s3.connect_to_region(bucket_location)
        # Consistency fix: use the shared constant instead of repeating the
        # '-ucscext-alptureciaws' suffix as a literal (same value today, but
        # the literal would silently diverge if the constant ever changed).
        bucket = conn.get_bucket(username + user_bucket_extension)
    newkey = bucket.new_key(fileKey)
    newkey.set_contents_from_filename(pathToFileToUpload, cb=percent_cb, num_cb=10)
def percent_cb(complete, total):
    """Upload progress callback: emit one dot per callback tick (unbuffered)."""
    sys.stdout.write('.')
    sys.stdout.flush()
if __name__ == "__main__":
if sys.argv.__len__() != 5:
print 'error in supplied paramters please provide 4 in the order of (username pass file-key path-to-file-to-upload)'
else:
username = sys.argv[1]
password = sys.argv[2]
fileKey = sys.argv[3]
pathToFileToUpload = sys.argv[4]
if isUserExists(username, password):
uploadFile()
| true |
5bf812d8cd60f89bd98cb3420695bf7806551a03 | Python | dovedevic/droiddevic | /Games/Gomoku.py | UTF-8 | 11,306 | 3.03125 | 3 | [
"MIT"
] | permissive | import datetime
import logging
import random
import re
from GameParent import Game
from GameParent import SetupFailure, SetupSuccess
# Module logger: writes DEBUG-and-up records to a timestamped file under
# ../logs/ (colons in the time are replaced so the name is filesystem-safe).
logger = logging.getLogger(__name__)
handler = logging.FileHandler('../logs/{}.log'.format(str(datetime.datetime.now()).replace(' ', '_').replace(':', 'h', 1).replace(':', 'm').split('.')[0][:-2]))
formatter = logging.Formatter('%(asctime)s::%(levelname)s::%(name)s::%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
class GameObject(Game):
    """
    Implements Gomoku: players take turns placing coloured emoji tokens on a
    10x10 board; the first to line up five of their tokens in a row, column,
    or diagonal wins.  Board cells are addressed as (col, row), 0-based.
    """

    @staticmethod
    def get_game_name():
        """Human-readable game name."""
        return "Gomoku"

    @staticmethod
    def how_to_play():
        """Help text shown to players."""
        return "Gomoku is a game where you place tokens on a 10 by 10 grid. The purpose of the game is to get five of your tokens in a row diagonally, horizontally, or vertically. If provided, each player can customize their token to one of the following: blue, red, brown, green, yellow, purple, yellow, white. "

    @staticmethod
    def get_game_short_name():
        """Short code used by the `play` command."""
        return "GU"

    async def setup(self, args):
        """Validate arguments, assign tokens, shuffle turn order, announce.

        Returns SetupSuccess(self) or SetupFailure(message).
        """
        self.__moves = 0
        self.__board = """
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        :black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle::black_circle:
        """
        self.__current_turn_index = 0
        self._tokens = {
            "blue": ":blue_circle:",
            "red": ":red_circle:",
            "green": ":green_circle:",
            "orange": ":orange_circle:",
            "purple": ":purple_circle:",
            "yellow": ":yellow_circle:",
            "white": ":white_circle:",
            "brown": ":brown_circle:",
        }
        self._player_tokens = []
        logger.info('Setting up a Gomoku game...')
        if len(args) == 1 and args[0].lower() == 'help':
            logger.debug('Could not setup game, invalid arguments or user requested help')
            return SetupFailure(f'**Command \'play {self.get_game_short_name()}\' Usage: **`>play {self.get_game_short_name()} [users-to-play] (tokens-colors-player)`')
        elif len(self.players) < 2:
            logger.debug('Could not setup game, user provided too few users to play')
            return SetupFailure('You can\'t play Gomoku by yourself.')
        elif len(args) == len(self.players) and all(type(arg) == str and arg in self._tokens for arg in args):
            # One valid colour name per player: honour the requested tokens.
            for arg in args:
                self._player_tokens.append(self._tokens[arg])
        elif len(args) == 0:
            # No colours given: hand out defaults in dictionary order.
            self._player_tokens = [self._tokens[k] for k in self._tokens][:len(self.players)]
        else:
            logger.debug('Could not setup game, invalid arguments or user requested help')
            return SetupFailure(f'**Command \'play {self.get_game_short_name()}\' Usage: **`>play {self.get_game_short_name()} [users-to-play] (tokens-colors-player)`')
        logger.debug('Passed standard checks setting up turn...')
        # Shuffle players and their tokens together so turn order is random
        # but each player keeps the token assigned above.
        c = list(zip(self.players, self._player_tokens))
        random.shuffle(c)
        self.players, self._player_tokens = zip(*c)
        self.players = list(self.players)
        self._player_tokens = list(self._player_tokens)
        self.__current_turn_index = 0
        await self.show()
        pidx = 0
        for player in self.players:
            if pidx == self.__current_turn_index:
                await self.channel.send(f'{player.mention} your token is {self._player_tokens[pidx]}, you go first! Good luck!')
            else:
                await self.channel.send(f'{player.mention} your token is {self._player_tokens[pidx]}, waiting for your turn...')
            pidx += 1
        return SetupSuccess(self)

    async def move(self, args, player):
        """Handle `>move col row` (1-based) for the player whose turn it is."""
        logger.debug("Checking command move")
        if player != self.players[self.__current_turn_index]:
            await self.channel.send('It is not your turn currently.')
            return
        if not args or len(args) <= 1 or type(args[0]) != int or type(args[1]) != int or args[0] > 10 or args[0] < 1 or args[1] > 10 or args[1] < 1:
            logger.debug("Invalid move or requested help, showing help menu...")
            await self.channel.send("**Command \'move\' Usage:** `>move [column(1-10)] [row(1-10)]`")
            return
        logger.debug("Checking if place is appropriate")
        if self._get_item_at(args[0] - 1, args[1] - 1) != ':black_circle:':
            logger.debug("Invalid move, column full")
            await self.channel.send("You can't put a piece in that spot, try somewhere else!")
            return
        logger.debug("Placing...")
        self._place_item_at(args[0] - 1, args[1] - 1, self._player_tokens[self.__current_turn_index])
        # Check for ending
        logger.debug("Placed, checking for next turn...")
        self.__moves += 1
        if self._contains_connect_five():
            logger.debug("Showing board...")
            await self.show()
            logger.debug("Placed piece resulted in a connect five!")
            await self.channel.send("**{0}** wins! It took {1} turns!".format(self.players[self.__current_turn_index].name, self.__moves))
            logger.debug("Clearing game...")
            await self.end_game()
        else:
            logger.debug("Going to next turn...")
            self.next_turn()
            logger.debug("Showing board...")
            await self.show()

    def next_turn(self):
        """Advance the turn index, wrapping around the player list."""
        self.__current_turn_index = (self.__current_turn_index + 1) % len(self.players)

    def get_current_player(self):
        """Return the player whose turn it currently is."""
        return self.players[self.__current_turn_index]

    async def show(self):
        """Post the current board and whose turn it is to the channel."""
        await self.channel.send("It's **{}'s** turn.".format(self.players[self.__current_turn_index].name) + self.__board)

    def _contains_connect_five(self):
        """Return True when the current player's token forms five in a row.

        A winning run is rewritten as :large_orange_diamond: on the board.
        _get_item_at returns None for out-of-range coordinates, which can
        never equal a token, so boundary probes are safe.
        """
        checker = self._player_tokens[self.__current_turn_index]
        # Runs along a column (col i fixed, rows j..j+4).
        for j in range(0, 10 - 4):
            for i in range(0, 10):
                if self._get_item_at(i, j) == checker and \
                   self._get_item_at(i, j + 1) == checker and \
                   self._get_item_at(i, j + 2) == checker and \
                   self._get_item_at(i, j + 3) == checker and \
                   self._get_item_at(i, j + 4) == checker:
                    logger.info("Found win! Placing win pieces!")
                    self._place_item_at(i, j, ':large_orange_diamond:')
                    self._place_item_at(i, j + 1, ':large_orange_diamond:')
                    self._place_item_at(i, j + 2, ':large_orange_diamond:')
                    self._place_item_at(i, j + 3, ':large_orange_diamond:')
                    self._place_item_at(i, j + 4, ':large_orange_diamond:')
                    return True
        # Runs along a row (row j fixed, cols i..i+4).
        # BUGFIX: the bound was `10 - 3`, letting i reach 6 and probe the
        # nonexistent column 10 (harmless only because _get_item_at returns
        # None there); `10 - 4` matches the other scans.
        for i in range(0, 10 - 4):
            for j in range(0, 10):
                if self._get_item_at(i, j) == checker and \
                   self._get_item_at(i + 1, j) == checker and \
                   self._get_item_at(i + 2, j) == checker and \
                   self._get_item_at(i + 3, j) == checker and \
                   self._get_item_at(i + 4, j) == checker:
                    logger.info("Found win! Placing win pieces!")
                    self._place_item_at(i, j, ':large_orange_diamond:')
                    self._place_item_at(i + 1, j, ':large_orange_diamond:')
                    self._place_item_at(i + 2, j, ':large_orange_diamond:')
                    self._place_item_at(i + 3, j, ':large_orange_diamond:')
                    self._place_item_at(i + 4, j, ':large_orange_diamond:')
                    return True
        # Diagonal runs (column decreasing, row increasing).
        for i in range(4, 10):
            for j in range(0, 10 - 4):
                if self._get_item_at(i, j) == checker and \
                   self._get_item_at(i - 1, j + 1) == checker and \
                   self._get_item_at(i - 2, j + 2) == checker and \
                   self._get_item_at(i - 3, j + 3) == checker and \
                   self._get_item_at(i - 4, j + 4) == checker:
                    logger.info("Found win! Placing win pieces!")
                    self._place_item_at(i, j, ':large_orange_diamond:')
                    self._place_item_at(i - 1, j + 1, ':large_orange_diamond:')
                    self._place_item_at(i - 2, j + 2, ':large_orange_diamond:')
                    self._place_item_at(i - 3, j + 3, ':large_orange_diamond:')
                    self._place_item_at(i - 4, j + 4, ':large_orange_diamond:')
                    return True
        # Diagonal runs (column and row both decreasing).
        for i in range(4, 10):
            for j in range(4, 10):
                if self._get_item_at(i, j) == checker and \
                   self._get_item_at(i - 1, j - 1) == checker and \
                   self._get_item_at(i - 2, j - 2) == checker and \
                   self._get_item_at(i - 3, j - 3) == checker and \
                   self._get_item_at(i - 4, j - 4) == checker:
                    logger.info("Found win! Placing win pieces!")
                    self._place_item_at(i, j, ':large_orange_diamond:')
                    self._place_item_at(i - 1, j - 1, ':large_orange_diamond:')
                    self._place_item_at(i - 2, j - 2, ':large_orange_diamond:')
                    self._place_item_at(i - 3, j - 3, ':large_orange_diamond:')
                    self._place_item_at(i - 4, j - 4, ':large_orange_diamond:')
                    return True
        return False

    def _get_item_at(self, col, row):
        """Return the emoji at (col, row), or None when out of range."""
        for idx, emoji in enumerate(re.compile(r':[a-zA-Z0-9_]+:').finditer(self.__board)):
            if idx % 10 == col:
                if idx // 10 == row:
                    return emoji.group()

    def _place_item_at(self, col, row, placer):
        """Overwrite the emoji at (col, row) with *placer* (no-op off-board)."""
        for idx, emoji in enumerate(re.compile(r':[a-zA-Z0-9_]+:').finditer(self.__board)):
            if idx % 10 == col:
                if idx // 10 == row:
                    self.__board = self.__board[:emoji.start()] + placer + self.__board[emoji.end():]
                    return
| true |
cc7e787ce99f60917564bfb13b2653d588f40a9d | Python | Stasnnm/dz1 | /Lesson2/5.py | UTF-8 | 134 | 3.265625 | 3 | [] | no_license | x = float(input('введите х '))
# Print the signum of x (read from stdin on the line above); zero gets its
# own message.
if x > 0:
    message = 'sign(x) = 1'
elif x < 0:
    message = 'sign(x) = -1'
else:
    message = 'x = 0'
print(message)
77d86174cbef14a8b2a852225756e6d77a9d0fa3 | Python | littlelienpeanut/DART_predicting_users_demographic_information | /kms_demo_KNN.py | UTF-8 | 12,846 | 2.59375 | 3 | [] | no_license | import pandas as pd
import itertools
import csv
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
from sklearn.cluster import KMeans
import random
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
import numpy as np
def load_data_v4(usernum):
    """Load each user's 24-hour activity ratios from ``<user>_daily_v4.csv``.

    Returns one list per user, each holding the first 24 values of the
    CSV's ``ratio`` column.
    """
    profiles = []
    for fname in usernum:
        ### type("%.2f" % data['ratio'][j]) is string not float!!! ###
        frame = pd.read_csv(fname + "_daily_v4.csv")
        profiles.append([frame["ratio"][hour] for hour in range(24)])
    return profiles
def load_data_v5(usernum):
    """Load each user's per-category ratios from ``<user>_daily_v5.csv``.

    Reads row 0 of every category column; column order follows
    ``load_cate_list()``, so all users share the same feature layout.
    """
    category_names = load_cate_list()
    profiles = []
    for fname in usernum:
        frame = pd.read_csv(fname + "_daily_v5.csv")
        profiles.append([float(frame[name][0]) for name in category_names])
    return profiles
def load_data_all(v4, v5):
    """Concatenate each user's hourly (v4) and category (v5) feature rows.

    Row i of the result is ``v4[i] + v5[i]``; v5 must be at least as long
    as v4.
    """
    return [v4[i] + v5[i] for i in range(len(v4))]
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render `cm` with per-class recall appended as an extra column and
    per-class precision (plus overall accuracy in the corner cell) as an
    extra row.  When `normalize` is set, cell values are shown as row
    percentages with the raw counts overlaid in bold.

    NOTE(review): an identical function is defined again later in this
    module; that later definition shadows this one at import time.
    """
    #compute overall accuracy
    oc = 0
    total = 0
    tmp_acc = 0
    for cm_i in range(len(cm)):
        tmp_acc += cm[cm_i][cm_i]
        for cm_j in range(len(cm)):
            total += cm[cm_i][cm_j]
    oc = float(tmp_acc) / float(total)
    tmp_recall = np.array(recall(cm))
    tmp_recall = tmp_recall.astype(float)
    tmp_precision = np.array(precision(cm, oc))
    tmp_precision = tmp_precision.astype(float)
    # Keep the raw counts around for the bold overlay below.
    cm_nn = cm
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    else:
        pass
    # Augment the matrix: recall column on the right, precision row below.
    tmp_recall = tmp_recall[:, np.newaxis]
    cm = np.hstack((cm, tmp_recall))
    cm = np.vstack((cm, tmp_precision))
    # NOTE(review): class_x / class_y are never used.
    class_x = []
    class_y = []
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes)+1)
    # `classes` is temporarily mutated to add the extra tick label, then
    # restored so the caller's list is unchanged.
    #add recall tick
    classes.append("recall")
    plt.xticks(tick_marks, classes)
    #delete recall tick
    classes.pop(len(classes)-1)
    #add precision tick
    classes.append("precision")
    plt.yticks(tick_marks, classes)
    #delete precision tick
    classes.pop(len(classes)-1)
    fmt = '2.1%' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 verticalalignment='bottom',
                 color="black")
        if i<cm.shape[0]-1 and j<cm.shape[0]-1:
            plt.text(j, i, cm_nn[i, j],
                     weight = 'bold',
                     horizontalalignment='center',
                     verticalalignment='top',
                     color="black")
        else:
            pass
    plt.ylabel('True label', fontsize=12)
    plt.xlabel('Predicted label', fontsize=12)
def load_cate_list():
    """Return the category names from ``cate_list_final.csv`` (column ``cate``).

    The original bound the DataFrame to a local named ``csv``, shadowing
    the ``csv`` module imported at the top of this file; renamed to avoid
    confusion.
    """
    frame = pd.read_csv("cate_list_final.csv")
    return [frame["cate"][i] for i in range(len(frame))]
def load_user_list():
    """Return the full user roster from ``user_list.csv`` as a DataFrame
    (callers index its ``id`` column by row position)."""
    user_list = pd.read_csv("user_list.csv")
    return user_list
def choose_user(user_demo_id):
    """Return "userN" labels (1-based row position in user_list.csv) for
    every user whose id appears in ``user_demo_id``, i.e. for users that
    have demographic data.

    Fixes vs. the original: a ``load_user_demo()`` call whose result was
    never used (and which re-read user_demo.csv) is dropped, and the bare
    ``except`` is narrowed to the KeyError raised by a missing id.
    """
    usernum = []
    user_id_list = load_user_list()  # roster of all users
    for i in range(len(user_id_list)):
        try:
            # ids present in user_demo_id are mapped to 0; this is purely a
            # membership test.
            if user_demo_id[user_id_list["id"][i]] == 0:
                usernum.append("user" + str(i + 1))
        except KeyError:
            # No demographic record for this user; skip it.
            pass
    return usernum
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render `cm` with per-class recall appended as an extra column and
    per-class precision (plus overall accuracy in the corner cell) as an
    extra row.

    NOTE(review): this is a byte-for-byte duplicate of the definition
    earlier in this module; being defined later, this copy is the one
    main() actually calls.  The duplicate should probably be removed.
    """
    #compute overall accuracy
    oc = 0
    total = 0
    tmp_acc = 0
    for cm_i in range(len(cm)):
        tmp_acc += cm[cm_i][cm_i]
        for cm_j in range(len(cm)):
            total += cm[cm_i][cm_j]
    oc = float(tmp_acc) / float(total)
    tmp_recall = np.array(recall(cm))
    tmp_recall = tmp_recall.astype(float)
    tmp_precision = np.array(precision(cm, oc))
    tmp_precision = tmp_precision.astype(float)
    # Raw counts kept for the bold overlay below.
    cm_nn = cm
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    else:
        pass
    tmp_recall = tmp_recall[:, np.newaxis]
    cm = np.hstack((cm, tmp_recall))
    cm = np.vstack((cm, tmp_precision))
    # NOTE(review): class_x / class_y are never used.
    class_x = []
    class_y = []
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes)+1)
    #add recall tick
    classes.append("recall")
    plt.xticks(tick_marks, classes)
    #delete recall tick
    classes.pop(len(classes)-1)
    #add precision tick
    classes.append("precision")
    plt.yticks(tick_marks, classes)
    #delete precision tick
    classes.pop(len(classes)-1)
    fmt = '2.1%' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 verticalalignment='bottom',
                 color="black")
        if i<cm.shape[0]-1 and j<cm.shape[0]-1:
            plt.text(j, i, cm_nn[i, j],
                     weight = 'bold',
                     horizontalalignment='center',
                     verticalalignment='top',
                     color="black")
        else:
            pass
    plt.ylabel('True label', fontsize=12)
    plt.xlabel('Predicted label', fontsize=12)
def load_user_demo():
    """Read ``user_demo.csv`` and return two structures:

    * a list with one dict per user holding the demographic fields plus id;
    * a dict mapping every seen user id to 0 (used as a membership set).
    """
    data = pd.read_csv("user_demo.csv")
    fields = ("age", "gender", "relationship", "income", "edu",
              "location", "occupation", "industry", "id")
    user_demo = []
    user_demo_id = {}
    for i in range(len(data)):
        record = {field: data[field][i] for field in fields}
        user_demo_id[data["id"][i]] = 0
        user_demo.append(record)
    return user_demo, user_demo_id
def recall(cnf):
    """Per-class recall from a square confusion matrix (rows = true labels).

    Class i's recall is the diagonal count divided by its row total.
    """
    return [float(cnf[i][i]) / float(sum(cnf[i])) for i in range(len(cnf[0]))]
def precision(cnf, oc):
    """Per-class precision (columns = predicted labels) with the overall
    accuracy *oc* appended as the final element.

    An all-zero column would divide by zero; matching the original guard,
    its total is treated as 1, yielding a precision of 0.0.
    """
    n = len(cnf[0])
    result = []
    for i in range(n):
        column_total = sum(cnf[j][i] for j in range(n))
        if column_total == 0:
            column_total = 1
        result.append(float(cnf[i][i]) / float(column_total))
    result.append(oc)
    return result
def main():
    """Cluster users with K-means, then (per cluster) predict demographics
    with KNN, finally plotting a confusion matrix per attribute at the
    best (k, cluster-count, dataset) combination found in earlier runs.
    """
    #variable
    ### user_demo[id]['demographic'] ###
    print("Loading users' info")
    user_demo = []
    user_demo_id = {}
    user_demo, user_demo_id = load_user_demo()
    usernum = choose_user(user_demo_id)
    # Feature sets: hourly activity (v4), per-category ratios (v5), both.
    data_v4 = load_data_v4(usernum)
    data_v5 = load_data_v5(usernum)
    kms_data = []
    data_all = load_data_all(data_v4, data_v5)
    demo_list = ['age', 'gender', 'relationship']
    #class name
    class_name = {'age':["1", "2", "3", "4", "5", "6", "7"], 'gender':["1", "2", "3"], 'relationship':["1", "2", "3", "4", "5"]}
    #main
    # Grid-search code (kept for reference) that found the best k per
    # cluster count and dataset:
    '''
    #find the best k in every clusters
    #hyper parameter settings
    #how many clusters?
    n_clusters = 6
    #which dataset?
    user_data = data_all
    # ------------------------------------------------------------------------#
    #kms model training
    for km4_i in range(len(user_data)):
        kms_data.append(user_data[km4_i])
    kms_model = KMeans(n_clusters, random_state=2018).fit(kms_data)
    kms_data = {}
    kms_demo = {}
    kms_user_list = {}
    kms_demo_label = {}
    demo_pred = {}
    demo_pred_res = {}
    demo_pred_score = {'age':[], 'gender':[], 'relationship':[], 'income':[], 'edu':[]}
    for c_num in range(n_clusters):
        kms_data.update({str(c_num):[]})
        kms_user_list.update({str(c_num):[]})
        kms_demo.update({str(c_num):{'age':[], 'gender':[], 'relationship':[], 'income':[], 'edu':[]}})
        demo_pred.update({str(c_num):{'age':[], 'gender':[], 'relationship':[], 'income':[], 'edu':[]}})
    for o_i in range(len(user_data)):
        kms_data[str(kms_model.labels_[o_i])].append(user_data[o_i]) #append user_daily
        kms_user_list[str(kms_model.labels_[o_i])].append(o_i)
        for demo_i in demo_list:
            kms_demo[str(kms_model.labels_[o_i])][demo_i].append(user_demo[o_i][demo_i])
    for k in range(1, 21, 1):
        ### classifier choosing
        demo_pred_res.update({'age':[], 'gender':[], 'relationship':[], 'income':[], 'edu':[]})
        kms_demo_label.update({'age':[], 'gender':[], 'relationship':[], 'income':[], 'edu':[]})
        clf = KNeighborsClassifier(n_neighbors=k)
        for c_num in range(n_clusters):
            for demo_i in demo_list:
                #f1-micro and f1-macro
                demo_pred_res[demo_i].extend(cross_val_predict(clf, kms_data[str(c_num)], kms_demo[str(c_num)][demo_i], cv=5))
                kms_demo_label[demo_i].extend(kms_demo[str(c_num)][demo_i])
                # print(demo_pred_res[str(c_num)][demo_i])
        for demo_i in demo_list:
            demo_pred_score[demo_i].append(f1_score(kms_demo_label[demo_i], demo_pred_res[demo_i], average='micro'))
    #print the best f1-micro with k_value
    print('')
    print('cnum: ' + str(n_clusters))
    for demo_i in demo_list:
        print(demo_i + ' / Best testing score: ' + str(max(demo_pred_score[demo_i])) + ' / k : ' + str(demo_pred_score[demo_i].index(max(demo_pred_score[demo_i]))+1))
    '''
    '''
    clf: knn
    the best score:
    age / Best testing score: 0.435 / k : 19 / data_v5 / cnum = 3
    gender / Best testing score: 0.618 / k : 14 / data_all / cnum = 5
    relationship / Best testing score: 0.482 / k : 2 / data_all / cnum = 3
    '''
    #plot confusion matrix at best k of microF1
    # best_k / best_cnum / best_data are aligned with demo_list:
    # one (k, cluster count, feature set) per demographic attribute,
    # hard-coded from the grid search above.
    best_k = [19, 14, 2]
    best_cnum = [3, 5, 3]
    best_data = [data_v5, data_all, data_all]
    for c in range(len(best_cnum)):
        kms_data = []
        user_data = best_data[c]
        n_clusters = best_cnum[c]
        for km4_i in range(len(user_data)):
            kms_data.append(user_data[km4_i])
        kms_model = KMeans(n_clusters, random_state=2018).fit(kms_data)
        # Re-bucket users (features + labels) by their cluster assignment.
        kms_data = {}
        kms_demo = {}
        kms_user_list = {}
        demo_pred = {}
        demo_pred_res = {}
        demo_pred_score = {'age':[], 'gender':[], 'relationship':[]}
        for c_num in range(n_clusters):
            kms_data.update({str(c_num):[]})
            kms_user_list.update({str(c_num):[]})
            kms_demo.update({str(c_num):{'age':[], 'gender':[], 'relationship':[]}})
            demo_pred.update({str(c_num):{'age':[], 'gender':[], 'relationship':[]}})
            demo_pred_res.update({str(c_num):{'age':[], 'gender':[], 'relationship':[]}})
        for o_i in range(len(user_data)):
            kms_data[str(kms_model.labels_[o_i])].append(user_data[o_i]) #append user_daily
            kms_user_list[str(kms_model.labels_[o_i])].append(o_i)
            for demo_i in demo_list:
                kms_demo[str(kms_model.labels_[o_i])][demo_i].append(user_demo[o_i][demo_i])
        # Cross-validated KNN predictions, pooled across clusters.
        demo_pred = []
        demo_label = []
        clf = KNeighborsClassifier(n_neighbors = best_k[c])
        for c_num in range(n_clusters):
            demo_pred.extend(cross_val_predict(clf, kms_data[str(c_num)], kms_demo[str(c_num)][demo_list[c]], cv=5))
            demo_label.extend(kms_demo[str(c_num)][demo_list[c]])
        cnf_matrix = confusion_matrix(demo_label, demo_pred)
        plt.figure()
        plt.tight_layout(pad=0.4, w_pad=1.0, h_pad=1.0)
        plot_confusion_matrix(cnf_matrix, classes=class_name[demo_list[c]], normalize=True, title=demo_list[c] + ' in k = ' + str(best_k[c]) + ' and cluster number = ' + str(n_clusters))
        plt.savefig('kms_knn_' + demo_list[c] + '.eps', format='eps', dpi=1000)
        #plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| true |
925681aecdce742d9d981b56dbbe1acbd5c1e56e | Python | moret/peuler | /run | UTF-8 | 965 | 2.90625 | 3 | [] | no_license | #! /usr/bin/env python
from __future__ import division
import sys
from subprocess import call
import timeit
def sh(cmd):
try:
call(cmd)
except:
pass
def clear():
sh('find . -name "__pycache__" -delete')
sh('find . -name "*.pyc" -delete')
sh('find . -name "*~" -delete')
def main():
clear()
if not (len(sys.argv) == 2 or len(sys.argv) == 3):
print 'usage: run.py n <y>'
print 'where:'
print '\tn is the exercise number'
print '\ty the number of times to measure performance (default 1000)'
sys.exit(-1)
m = 1000
if len(sys.argv) == 3:
m = int(sys.argv[2])
problem = __import__('p' + sys.argv[1]).problem
t = timeit.Timer(stmt=problem.run)
try:
print 'avg time: %.5fms' % (1000 * t.timeit(number=m) / m)
print 'result: ' + str(problem.run())
except:
t.print_exc()
clear()
if __name__ == '__main__':
main()
| true |
c03904fddc708b458edd749711c9b566ea5cd029 | Python | sreeshavenkat/Machine-Learning | /hw7/low_rank/low_rank.py | UTF-8 | 522 | 2.515625 | 3 | [] | no_license | import scipy.io
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
# Plot the reconstruction error (sum of squared residuals) of rank-r SVD
# approximations of face.jpg, for r = 1..100.
ranks = list(range(1, 101))
MSE = []
# PERFORMANCE FIX: read the image and factor it once.  The original
# re-read the file and recomputed the full SVD on every one of the 100
# loop iterations, always producing the same factors.
data = imread("face.jpg")
U, sigma, V = np.linalg.svd(data, full_matrices=False)  # NOTE: V here is V^T as returned by numpy
for r in ranks:
    print(r)
    # Keep only the r largest singular values (zero the tail), exactly as
    # the original's element-wise loop did.
    truncated = sigma.copy()
    truncated[r:] = 0
    img = np.matmul(U, np.multiply(V.T, truncated).T)
    dist = np.matrix(np.square(data - img))
    MSE.append(dist.sum())
plt.figure()
plt.plot(ranks, MSE)
plt.xlabel("Ranks")
plt.ylabel("MSE")
plt.grid()
plt.show()
| true |
2c74c9f3eb867ca4ab799c5c2f04f9cf1d4c6b32 | Python | JernejHenigman/Machine-Learning-6-Homeworks | /DN1/linear_regression.py | UTF-8 | 2,929 | 3.234375 | 3 | [] | no_license | __author__ = 'Jernej'
import time
import Orange
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
def load_data():
    """Load "alc_elim.dat.txt" and return (y, X) as one-column matrices.

    y is column 1 (blood alcohol elimination rate) and X is column 0
    (breath alcohol elimination rate).  The file is parsed once -- the
    original called np.loadtxt twice on the same file.
    """
    data = np.loadtxt("alc_elim.dat.txt")
    y = data[:, 1:2]
    X = data[:, 0:1]
    return y, X
def add_constant_column(X):
    """Append a column of 1s to X (intercept term) and return the result.

    (Docstring fix: the original said "row of 1s"; the code appends a
    column, as axis=1 shows.)
    """
    X = np.array(X)
    return np.append(X, np.ones((X.shape[0], 1)), axis=1)
def gradient(X, y, theta):
    """Return a gradient using an analytically computed function."""
    # NOTE(review): despite the name, this evaluates the fitted line
    # k[0]*theta + k[1] at the points in `theta`, where k are the
    # least-squares coefficients from analytical(X, y).  plot_graph uses
    # it to draw the regression line over a range of x values.
    k = analytical(X,y)
    return k[0]*theta + k[1]
def grad_approx(J, X, y, theta, eps=1e-1):
    """Estimate the gradient of J at `theta` by central finite differences.

    Component i is (J(theta + e_i*eps) - J(theta - e_i*eps)) / (2*eps).
    """
    offsets = np.identity(len(theta)) * eps
    return np.array([(J(theta + step, X, y) - J(theta - step, X, y)) / (2 * eps)
                     for step in offsets])
def gradient_descent(X, y, alpha=0.1, epochs=100000):
    """Fit y ~ theta0 + theta1*X by batch gradient descent.

    Returns the pair (theta0, theta1) after `epochs` full-batch updates
    with learning rate `alpha`.
    """
    m = len(y)
    intercept, slope = 0, 0
    for _ in range(epochs):
        # Residuals of the current line against the targets.
        residual = (intercept + slope * X) - y
        step0 = sum(residual)[0]
        step1 = sum(residual * X)[0]
        intercept = intercept - (alpha * step0) / m
        slope = slope - (alpha * step1) / m
    return intercept, slope
def plot_graph(X, y, thetas, filename="tmp.pdf"):
    """Scatter the data and overlay the fitted regression line.

    `thetas` is a range of x values; the line's y values come from
    gradient(X, y, thetas).  NOTE(review): `filename` is accepted but
    never used -- plt.show() blocks instead of saving.
    """
    plt.xlabel('Breath alcohol elimination rates (mg per litre per hour)')
    plt.ylabel('Blood alcohol elimination rates (g per litre per hour)')
    plt.scatter(X[:,0], y)
    plt.plot(thetas, gradient(X,y,thetas));
    plt.show()
def analytical(X, y):
    """Closed-form least squares via the normal equations:
    theta = (X^T X)^{-1} X^T y."""
    gram = X.T @ X
    return np.linalg.inv(gram) @ (X.T @ y)
def J(theta, X, y):
    """Mean squared error of the line theta[0] + theta[1]*X against y.

    Returns a length-1 array (the column-wise sum divided by the sample
    count), matching the original's use of the built-in sum over rows.
    """
    residuals = y - (theta[0] + theta[1] * X)
    return (residuals ** 2).sum(axis=0) / len(y)
def dJ(theta, X, y):
    """Gradient of J with respect to (theta[0], theta[1]); returns shape (2,)."""
    m = len(y)
    residual = theta[0] + theta[1] * X - y
    return np.array([residual.sum() / m, (residual * X).sum() / m])
# Driver (Python 2): compare gradient descent, the analytical solution, and
# L-BFGS on the alcohol-elimination dataset, timing each approach.
# (Printed labels are Slovenian: "Gradientni spust" = gradient descent,
# "Analiticna resitev" = analytical solution, "cas" = time.)
y, X = load_data()
X1 = add_constant_column(X)
theta0 = np.array([0,0])
start = time.time()
print "Gradientni spust: "
print gradient_descent(X,y,alpha=0.5,epochs=100000)
end = time.time()
print "Gradientni spust cas: "
print end - start
print "Analiticna resitev: "
start = time.time()
print analytical(X1, y)
end = time.time()
print "Analiticna resitev cas: "
print end - start
print "Optimizacija L-BFGS: "
start = time.time()
print fmin_l_bfgs_b(J,theta0,dJ,args=(X,y))
end = time.time()
print "Optimizacija L-BFGS cas: "
print end - start
thetas = np.arange(0.04, 0.12, 0.00001)
plot_graph(X1,y,thetas)
# load the data
# add the constand column to X
# compare analytical computation of gradient with that by finite differences
# compute theta by gradient descent
# plot the graph h(x) (show points and the regression line)
# compare computed theta with the analytical solution and the one obtained
# by lbfgs
| true |
5711ef324e9e997e3fb83db18ae5ddc5225f4d37 | Python | miguel-mzbi/computer-vision | /P5/picture.py | UTF-8 | 1,798 | 3.0625 | 3 | [] | no_license | import numpy as np
import cv2
from matplotlib import pyplot as plt
def getMask(hsvImage):
    """Binary mask of red pixels in an HSV image.

    Red hue wraps around 180 in OpenCV's HSV space, so two bands
    (0-20 and 160-180) are thresholded and summed.
    """
    lowBand = cv2.inRange(hsvImage, (0, 55, 40), (20, 255, 255))
    highBand = cv2.inRange(hsvImage, (160, 55, 40), (180, 255, 255))
    return lowBand + highBand
def applyMask(mask, hsvImage):
    """Return hsvImage with everything outside *mask* zeroed out.

    Uses cv2.bitwise_and with the mask argument -- the standard OpenCV
    idiom that was already the live code path here.  (A commented-out
    per-channel split-and-multiply implementation has been removed.)
    """
    return cv2.bitwise_and(hsvImage, hsvImage, mask=mask)
def processFrame(image):
    """Segment red regions of a BGR image and plot morphology variants.

    Shows a 2x2 matplotlib grid: opening, closing, the raw segmentation,
    and the original image.
    """
    hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = getMask(hsvImage)
    segmentedImageHSV = applyMask(mask, hsvImage)
    segmentedImage = cv2.cvtColor(segmentedImageHSV, cv2.COLOR_HSV2RGB)
    # 5x5 structuring element; two iterations for opening/closing.
    kernel = np.ones((5,5),np.uint8)
    opening = cv2.morphologyEx(segmentedImage, cv2.MORPH_OPEN, kernel, iterations=2)
    closing = cv2.morphologyEx(segmentedImage, cv2.MORPH_CLOSE, kernel, iterations=2)
    # NOTE(review): `gradient` is computed but never displayed or returned.
    gradient = cv2.morphologyEx(segmentedImage, cv2.MORPH_GRADIENT, kernel, iterations=1)
    plt.title('Segmented')
    plt.subplot(2,2,3)
    plt.imshow(segmentedImage)
    plt.xticks([]),plt.yticks([])
    plt.subplot(2,2,1)
    plt.title('Opening (Erosion->Dilation)')
    plt.imshow(opening)
    plt.xticks([]),plt.yticks([])
    plt.subplot(2,2,2)
    plt.title('Closing (Dilation->Erosion)')
    plt.imshow(closing)
    plt.xticks([]),plt.yticks([])
    plt.subplot(2,2,4)
    plt.title('Original')
    # Convert BGR -> RGB so matplotlib shows correct colours.
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.xticks([]),plt.yticks([])
if __name__ == "__main__":
img = cv2.imread('shadows.jpg', 1)
processFrame(img)
plt.show() | true |
0817de1991218f4d7dd8b94924436416c1764635 | Python | eacevedof/prj_python37 | /platziventas/pv_pruebas/decorators.py | UTF-8 | 928 | 3.921875 | 4 | [] | no_license | PASSWORD = "agua"
def password_required(func):
    """Decorator that prompts for a password and calls ``func`` only on a match.

    When the password is wrong, a message is printed and the wrapper
    implicitly returns None.
    """
    def envoltorio():
        password = input("Cual es tu contrasena? ")
        if password == PASSWORD:
            # Correct password: delegate to the wrapped function.
            return func()
        else:
            print("La contraseña no es correcta.")
    # Return the wrapper itself (not its result).
    return envoltorio
# end def password_required
@password_required
def needs_password():
    # Runs only after the decorator validates the password.
    print("La contraseña es correcta")
# end def needs_password
def upper(func):
    """Decorator that upper-cases the wrapped function's string result."""
    import functools

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original version lost.
    @functools.wraps(func)
    def envoltorio(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return envoltorio
@upper
def say_my_name(name):
    # The @upper decorator converts this greeting to upper case.
    return "Hola, {}".format(name)
if __name__ == "__main__":
    # needs_password() only runs if its decorator accepts the password.
    # needs_password()
print(say_my_name("David")) | true |
7eaada350c88c1848a64225f5d02494c203196f0 | Python | adishavit/cvxpy | /cvxpy/atoms/elementwise/elementwise.py | UTF-8 | 2,391 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
from typing import Tuple
import numpy as np
import scipy.sparse as sp
import cvxpy.lin_ops.lin_utils as lu
import cvxpy.utilities as u
from cvxpy.atoms.atom import Atom
class Elementwise(Atom):
    """ Abstract base class for elementwise atoms. """
    # NOTE: Python 2 metaclass spelling; it has no effect under Python 3.
    __metaclass__ = abc.ABCMeta
    def shape_from_args(self) -> Tuple[int, ...]:
        """Shape is the same as the sum of the arguments.
        """
        return u.shape.sum_shapes([arg.shape for arg in self.args])
    def validate_arguments(self) -> None:
        """
        Verify that all the shapes are the same
        or can be promoted.
        """
        # sum_shapes raises when the argument shapes are incompatible.
        u.shape.sum_shapes([arg.shape for arg in self.args])
        super(Elementwise, self).validate_arguments()
    def is_symmetric(self) -> bool:
        """Is the expression symmetric?
        """
        symm_args = all(arg.is_symmetric() for arg in self.args)
        return self.shape[0] == self.shape[1] and symm_args
    @staticmethod
    def elemwise_grad_to_diag(value, rows, cols):
        """Converts elementwise gradient into a diagonal matrix for Atom._grad()
        Args:
            value: A scalar or NumPy matrix.
        Returns:
            A SciPy CSC sparse matrix.
        """
        if not np.isscalar(value):
            # Flatten in Fortran (column-major) order before placing the
            # values on the main diagonal.
            value = value.ravel(order='F')
        return sp.dia_matrix((value, [0]), shape=(rows, cols)).tocsc()
    @staticmethod
    def _promote(arg, shape: Tuple[int, ...]):
        """Promotes the lin op if necessary.
        Parameters
        ----------
        arg : LinOp
            LinOp to promote.
        shape : tuple
            The shape desired.
        Returns
        -------
        tuple
            Promoted LinOp.
        """
        # Promote only when the shapes actually differ.
        if arg.shape != shape:
            return lu.promote(arg, shape)
        else:
            return arg
| true |
30098b9b5431d6aac57eec15ae1ed9eba0cedff3 | Python | mapooon/SLC | /slc/modeling/classification/SLCsvm.py | UTF-8 | 2,283 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
import os
sys.path.append(os.getcwd()+"/modeling/common")
from Model import Classification
from sklearn import svm
import pickle
class SLCsvm(Classification):
    """
    Support vector machine (classification) wrapper class.
    """
    def __init__(self):
        super().__init__()
    def make_parser(self):
        """
        Called internally by parse_args().
        Runs super().make_parser() to add the common options, then returns
        the parser extended with the SVM-specific options.
        """
        # common options
        parser=super().make_parser()
        # SVM-specific options
        parser.add_argument("-c", "--C", dest="C",help="setting penalty parameter C of the error term", default=1.0, type=float)
        parser.add_argument(
            "-k", "--kernel", dest="kernel", default="rbf", type=str,
            help="setting kernel function", choices=['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']
        )
        parser.add_argument("-g", "--gamma", dest="gamma", default=-1, type=float, # TODO: document the valid choices!
                            help="setting kernel coefficient for ‘rbf’, ‘poly’ and ‘sigmoid’. If gamma is ‘auto’ then 1/n_features will be used instead.")
        return parser
    def set_parsed_args_unique(self,parsed):
        """
        Store the SVM-specific options as attributes.
        :param parsed: parsed command-line arguments
        """
        self.kernel = parsed.kernel
        self.C = parsed.C
        # -1 is the sentinel meaning "use sklearn's 'auto' gamma".
        self.gamma = "auto" if parsed.gamma==-1 else parsed.gamma
    def main(self,args):
        """
        Main routine:
        - parse the command-line arguments
        - build the model
        - run cross-validation
        - fit the model
        """
        # parse command-line arguments
        parsed=self.parse_args(args)
        # common options
        self.set_parsed_args_common(parsed)
        # SVM-specific options
        self.set_parsed_args_unique(parsed)
        # build the model
        self.model=svm.SVC(C=self.C, kernel=self.kernel, gamma=self.gamma, probability=True)
        # cross-validation
        self.cross_validation(self.x_train,self.y_train,self.n_splits,self.scoring)
        # fit the model
        self.model=self.model.fit(self.x_train,self.y_train)
        return
if __name__=="__main__":
    # Train from the command-line arguments and persist the result.
    slcsvm=SLCsvm()
    slcsvm.main(sys.argv[1:])
    slcsvm.write()
| true |
182e29040aa2dc8a583eaa49e199ad50e1457200 | Python | bellerophons-pegasus/ssciwr_sdc_team14 | /src/team14-software/statistics14.py | UTF-8 | 2,387 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*- #
# statistics14.py
# docstring with sphinx and napoleon
"""
Module for statistic analyses.
Does correlation and euclidean distance.
"""
# import pandas
import numpy as np
def correlatedata(data, corrmethod='pearson', dropcols=None):
    """Compute pairwise correlation of data with pandas.DataFrame.corr.

    Returns a Series indexed by (column, row) pairs, with the diagonal and
    lower triangle removed, sorted by absolute correlation (sign kept).

    Args:
        data (pandas dataframe): Data to be correlated.
        corrmethod (str): Method to be used for correlation \
            (pandas: pearson, kendall, spearman), \
            defaults to 'pearson'
        dropcols (list): A list with labels of columns to be dropped\
            from the dataframe, defaults to None (drop nothing)
    """
    if dropcols is None:
        # Avoid a mutable default argument.
        dropcols = []
    # correlate data once
    corrmat = data.corr(method=corrmethod)
    # drop the requested columns from the correlation matrix
    for col in dropcols:
        print('Removing column: {}'.format(col))
        corrmat = corrmat.drop([col], axis=1)
    # collect the diagonal and lower-triangle cells to be removed
    drop_values = set()
    cols = corrmat.columns
    for i in range(0, corrmat.shape[1]):
        for j in range(0, i + 1):
            drop_values.add((cols[i], cols[j]))
    print('Values to drop')
    print(drop_values)
    # Bug fix: the original called .corr() a second time here, correlating
    # the correlation matrix itself; we only pivot (unstack) it.
    flat = corrmat.unstack()
    # sort by absolute values but keep sign
    result = flat.drop(labels=drop_values).sort_values(ascending=False,
                                                       key=lambda col: col.abs())
    return result
# used from team0
def euclidean_distance(list_ref, list_comp, vectors):
    """Calculate the Euclidean distance (L2 norm) between pairs of vectors.

    Args:
        list_ref (integer list): Indices of the reference vectors.
        list_comp (integer list): Indices of the vectors to compare to.
        vectors (numpy array): 2-D array of row vectors.

    Returns:
        numpy array: One distance per reference index (comparison vs.
            reference vectors).
    """
    vecs = np.asarray(vectors)
    ref_idx = np.asarray(list_ref, dtype=int)
    # Like the original loop, only the first len(list_ref) comparison
    # indices are used.
    comp_idx = np.asarray(list_comp, dtype=int)[:ref_idx.size]
    if ref_idx.size == 0:
        return np.zeros(0)
    # Vectorized: one fancy-indexed subtraction plus row-wise norms
    # replaces the Python-level loop.
    return np.linalg.norm(vecs[comp_idx] - vecs[ref_idx], axis=1)
| true |
524790ca4befee15264eb8bb86ef23d407ab41dc | Python | jedzej/tietopythontraining-basic | /students/adam_wulw/lesson_05_lists/comma_code.py | UTF-8 | 244 | 3.046875 | 3 | [] | no_license | spam = ['apples', 'bananas', 'tofu', 'cats']
def coma_code(_list):
    """Join items as ' a, b, c' (leading space kept for compatibility).

    Bug fix: the original compared each item to the last *value*
    (item != _list[-1]), so any duplicate of the last element lost its
    comma; compare positions instead.
    """
    pieces = []
    last = len(_list) - 1
    for idx, item in enumerate(_list):
        piece = ' ' + str(item)
        if idx != last:
            piece += ','
        pieces.append(piece)
    return ''.join(pieces)
print coma_code(spam)
| true |
dee251e88e2a4774fd31f0b4fa354317a7e172c2 | Python | javicercasi/computacion1 | /58004-Cercasi Javier/clase02/test_pipe.py | UTF-8 | 936 | 3.015625 | 3 | [] | no_license | import unittest
from pipe_fixer import pipe_fix
class TestPipeFixing(unittest.TestCase):
    """Unit tests for pipe_fixer.pipe_fix, which must fill every gap in an
    ascending integer sequence with the missing consecutive values."""
    def test_fix_simple_pipe(self):
        fixed_pipe = pipe_fix([1, 2, 3, 5, 6, 8, 9])
        self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9],fixed_pipe)
    def test_fix_complex_pipe(self):
        fixed_pipe = pipe_fix([1, 2, 3, 12])
        self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],fixed_pipe)
    def test_fix_short_from_6_pipe(self):
        fixed_pipe = pipe_fix([6, 9])
        self.assertEqual([6, 7, 8, 9],fixed_pipe)
    def test_fix_simple_and_negative_pipe(self):
        fixed_pipe = pipe_fix([-1, 4])
        self.assertEqual([-1, 0, 1, 2, 3, 4],fixed_pipe)
    def test_fix_already_fixed_pipe(self):
        # A gap-free input must be returned unchanged.
        fixed_pipe = pipe_fix([1, 2, 3])
        self.assertEqual([1, 2, 3],fixed_pipe)
if __name__ == '__main__':
unittest.main() | true |
18a5d269c64addc2a4da52da6a14123295495cf2 | Python | lasseleth/prakprog | /num/LinearEquations/main.py | UTF-8 | 1,561 | 2.890625 | 3 | [] | no_license |
import sympy as sy
import numpy as np
from GR import qr_gs_decomp, qr_gs_solve, qr_gs_inverse
# Demo script: exercises the Gram-Schmidt QR routines from the GR module
# on random matrices (decomposition, linear solve, inverse).
n = int(5) # Number of lines
m = int(4) # Number of columns
A = np.random.rand(n, m)
print('\nQR Decomposition\n')
print('Random 5x4 matrix A:\n')
print(A)
# Decomposition QR
(Q, R) = qr_gs_decomp(A)
print('\nMatrix Q 5x4:\n')
print(Q)
print('\nMatrix upper triangular R 4x4:\n')
print(R)
# Check Qt*Q = 1 and Q*R = A
print('\nCheck that Qt*Q = I:\n')
QtQ = np.matmul(Q.T, Q)
print(QtQ)
print('\nCheck that Q*R = A:\n')
QR = np.matmul(Q, R)
print(QR)
# Linear equation solve
print('\nSolving linear equation system\n')
ns = int(4)
# NOTE(review): x, RR, B and the first b2 allocation below are never used.
x = np.empty(ns)
b = np.random.rand(ns)
AA = np.random.rand(ns, ns)
RR = np.empty((ns, ns))
print('Random matrix A:\n')
print(AA)
# Decomposition QR
(Qb, Rb) = qr_gs_decomp(AA)
# System solving
x_sol = qr_gs_solve(Qb, Rb, b)
print('\nRandom vector b:\n')
print(b)
print('\nSolution (x) of the system of linear equations A*x = Q*Rx = b:\n')
print(x_sol)
# Check that we get b back after A*x
print('\nCheck that A*x = b:\n')
b2 = np.empty(ns)
A2 = np.matmul(Qb, Rb)
b2 = np.matmul(A2, x_sol)
print(b2)
#print(np.linalg.solve(A2, b))
# Inverse calculation
print('\nInverse calculation:\n')
# We will use the same A matrix from the equation system
B = np.empty((ns, ns))
B_sol = qr_gs_inverse(AA)
print('Inverse matrix of A (from the linear equation system), B:\n')
print(B_sol)
print('\nChecking that A*B = I:\n')
I1 = np.matmul(AA, B_sol)
print(I1)
print('\nChecking that B*A = I:\n')
I2 = np.matmul(B_sol, AA)
print(I2) | true |
056f03a85d56d640467011d2b3c91005867ebf4b | Python | ipavel83/Python | /031TypeCheck.py | UTF-8 | 629 | 3.796875 | 4 | [
"MIT"
] | permissive | #py3.7
import types #from types import MethodType, FunctionType
#i = 2
#type(i) is int #not recommended
#isinstance(i, int)
class SomeClass:
    # NOTE: 'fun' takes no 'self'; accessed via the class it is a plain
    # function object (see the isinstance checks below).
    def fun():
        pass
# In Python 3, a function looked up on a class is a plain FunctionType,
# not an (unbound) MethodType -- demonstrated by the checks below.
print('what type is SomeClass.fun?', type(SomeClass.fun)) #<class 'function'>
if isinstance( SomeClass.fun, types.MethodType): #False
    print('fun is types.MethodType')
if isinstance( SomeClass.fun, types.FunctionType): #True
    print('fun is types.FunctionType')
print()
def justFun():
    pass
print('what type is justFun?', type(justFun))
if isinstance( justFun, types.FunctionType): #True
print('justFun is types.FunctionType') | true |
6663f11c10550ea5832ba706a1f101213490d56e | Python | O-oBigFace/HK-VQA | /misc/ques_layer.py | UTF-8 | 1,370 | 2.5625 | 3 | [] | no_license | """
author: W J-H (jiangh_wu@163.com)
time: Mar 8, 2020 at 11:42:18 PM
-----------------------------------
句子级别特征
"""
import torch.nn as nn
import torch
from misc.helper import fact_extract
class QuesLayer(nn.Module):
    """Sentence-level question feature layer for a VQA model.

    Combines RNN-encoded question features with image features via the
    injected attention/pooling modules, and extracts a fact bank.
    """
    def __init__(self, hidden_size, rnn, img_attn, word_attn, mlp, fact_attn, pooling, dropout):
        super(QuesLayer, self).__init__()
        # All submodules are injected, making the layer architecture-agnostic.
        self.hidden_size = hidden_size
        self.rnn = rnn
        self.img_attn = img_attn
        self.word_attn = word_attn
        self.mlp = mlp
        self.fact_attn = fact_attn
        self.pooling = pooling
        self.dropout = dropout
    def forward(self, word_embeddings, word_masks, img_embeddings, fact_embeddings, fact_masks, bank_w, feat_p, bank_p):
        # NOTE(review): word_masks is accepted but unused here -- confirm.
        # Assumes batch-first tensors; exact shapes depend on the injected
        # modules -- TODO confirm against the callers.
        self.rnn.flatten_parameters()
        ques_feat, _ = self.rnn(word_embeddings)
        # Cross attention: image attended by question, then question
        # attended by the image-attended features.
        img_att_q = self.img_attn(img_embeddings, ques_feat, ques_feat)
        ques_att_q = self.word_attn(ques_feat, img_att_q, img_att_q)
        img_att_q, ques_att_q = self.pooling(img_att_q), self.pooling(ques_att_q)
        # Fuse with the phrase-level feature feat_p, then project.
        feat_q = nn.Dropout(self.dropout)(torch.cat((img_att_q + ques_att_q, feat_p), dim=1))
        feat_q = nn.Tanh()(self.mlp(feat_q))
        bank_q = fact_extract(fact_embeddings, fact_masks, [img_att_q, ques_att_q, feat_q, bank_w, bank_p], self.fact_attn, self.pooling)
        return feat_q, bank_q
| true |
5d4aa4fbbfdb2f3e0a5ca7c51aef92d2c77d4554 | Python | schneebergerlab/toolbox | /Support/Misc/tinytools | UTF-8 | 9,687 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 15:36:01 2017
@author: goel
"""
import argparse
import os
import sys
def unlist(nestedList):
    """Take a nested-list as input and return a 1d list of all elements in it"""
    outList = []
    for item in nestedList:
        # isinstance (vs. the original exact type() check) also accepts
        # subclasses of list/ndarray.
        if isinstance(item, (list, np.ndarray)):
            outList.extend(unlist(item))
        else:
            outList.append(item)
    return outList
def getValues(l, index):
    """from list l get the values at indices specified by index"""
    return list(map(l.__getitem__, index))
def getColors(colorPalette, numOfCol):
    """Sample numOfCol evenly spaced colors from a palette callable."""
    samples = []
    for step in range(numOfCol):
        samples.append(colorPalette(step / numOfCol))
    return samples
def plotDensity(data):
    """Plot a Gaussian kernel density estimate of 1-D data and show it."""
    density = gaussian_kde(data)
    xs = np.linspace(min(data), max(data),1000)
    # Fixed bandwidth factor of 0.2 (overrides scipy's automatic choice);
    # _compute_covariance is a private scipy API -- may break on upgrade.
    density.covariance_factor = lambda : .2
    density._compute_covariance()
    plt.plot(xs,density(xs))
    plt.show()
def subList(lst1, lst2):
    """Element-wise lst1 - lst2 (truncated to the shorter list)."""
    return [a - b for a, b in zip(lst1, lst2)]
def intersect(*lists):
    """Return the sorted common elements of all the given sequences."""
    sequences = list(lists)
    return reduce(np.intersect1d, sequences)
def extractSeq(args):
    """Extract subsequences from a FASTA file and write them to args.o.

    With --fin, reads a headerless table of (chr, start, end) rows;
    otherwise extracts a single region.

    NOTE(review): the single-region branch reads args.chr/args.start/args.end,
    but the CLI below only defines --loc (nargs=3) -- confirm this branch is
    reachable with the current parser.
    """
    import pandas as pd
    filePath = args.fasta.name
    if args.fin == None:
        seqID = args.chr
        start = args.start
        end = args.end
        querySeq = [fasta for fasta in SeqIO.parse(filePath,'fasta') if fasta.id == seqID][0]
        querySeq.seq = querySeq.seq[start:end+1]
        SeqIO.write(querySeq,args.o.name,"fasta")
    else:
        fin = pd.read_table(args.fin.name, header = None)
        fin.columns = ["chr","start","end"]
        fin.sort_values(["chr","start","end"], inplace = True)
        outF = deque()
        for fasta in SeqIO.parse(filePath,'fasta'):
            if fasta.id in fin.chr.values:
                chrData = fin.loc[fin.chr == fasta.id]
                for row in chrData.itertuples(index=False):
                    outF.append(SeqRecord(seq=fasta.seq[row.start:row.end], id="_".join(map(str,row)), description=""))
        SeqIO.write(outF,args.o.name,"fasta")
def subnuc(args):
    """Replace character args.q with args.t in every sequence of the FASTA file.

    Output goes to args.o, or to '<input>_edited' when -o is omitted; lines
    are wrapped at 60 characters.
    NOTE(review): -o is declared with type=argparse.FileType('w') in the CLI,
    yet open(fout,"w") below expects a path string -- confirm.
    """
    fasta = args.fasta.name
    # NOTE: the comprehension variable shadows the 'fasta' path above.
    querySeq = [fasta for fasta in SeqIO.parse(fasta,'fasta')]
    for i in range(len(querySeq)):
        querySeq[i].seq = Seq(str(querySeq[i].seq).replace(args.q, args.t))
    # print(querySeq)
    if args.o == None:
        fout = fasta+"_edited"
    else:
        fout = args.o
    with open(fout,"w") as f:
        spacer = ""
        for seq in querySeq:
            f.write(spacer)
            f.write(">"+seq.id+" "+seq.description+"\n")
            f.write('\n'.join(str(seq.seq)[i:i+60] for i in range(0, len(seq.seq), 60)))
            if spacer == "":
                spacer = "\n"
    # SeqIO.write(querySeq,,"fasta")
    # else:
    #     SeqIO.write(querySeq,args.o.name,"fasta")
def fileRemove(fName):
    """Delete fName if it exists; a missing file is silently ignored.

    Any other OSError still propagates, matching the original errno==2 check.
    """
    try:
        os.remove(fName)
    except FileNotFoundError:
        # FileNotFoundError is exactly OSError with errno ENOENT (2).
        pass
def total_size(o, handlers={}, verbose=False):
    """ Returns the approximate memory footprint an object and all of its contents.
    Automatically finds the contents of the following builtin containers and
    their subclasses:  tuple, list, deque, dict, set and frozenset.
    To search other containers, add handlers to iterate over their contents:
        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}
    """
    # The mutable default 'handlers' is only read (copied via update), never
    # mutated, so the shared-default pitfall does not apply here.
    dict_handler = lambda d: chain.from_iterable(d.items())
    all_handlers = {tuple: iter,
                    list: iter,
                    deque: iter,
                    dict: dict_handler,
                    set: iter,
                    frozenset: iter,
                   }
    all_handlers.update(handlers)     # user handlers take precedence
    seen = set()                      # track which object id's have already been seen
    default_size = getsizeof(0)       # estimate sizeof object without __sizeof__
    def sizeof(o):
        if id(o) in seen:       # do not double count the same object
            return 0
        seen.add(id(o))
        s = getsizeof(o, default_size)
        if verbose:
            print(s, type(o), repr(o), file=stderr)
        # Recurse into the first matching container type only.
        for typ, handler in all_handlers.items():
            if isinstance(o, typ):
                s += sum(map(sizeof, handler(o)))
                break
        return s
    return sizeof(o)
def getScaf(args):
    """Split a genome FASTA into args.n scaffolds at random cut sites.

    Cut positions are sampled at least 10 kb away from chromosome ends;
    each fragment is written with the id '<chrom>_<cut position>'.
    NOTE(review): chromosomes shorter than ~20 kb would make the
    randint range empty -- confirm inputs are large enough.
    """
    fin = args.fasta.name
    n = args.n
    fout = args.o
    gen = {fasta.id:fasta for fasta in parse(fin,'fasta')}
    chromLen = {chrom.id:len(chrom.seq) for chrom in gen.values()}
    chrID = [np.random.choice(list(chromLen.keys()), 1)[0] for i in range(n)]
    cutoff = [np.random.randint(10000,chromLen[i] - 10000, 1)[0] for i in chrID]
    coords = sorted(list(zip(chrID, cutoff)))
    cid = ""
    pos = -1
    scafGenome = []
    for i in range(len(coords)):
        coord = coords[i]
        if coord[0] != cid:
            # Flush the tail of the previous chromosome before switching.
            if cid in gen:
                scafGenome.append(SeqRecord(seq=gen[cid][pos:chromLen[cid]].seq, id = cid+"_"+str(chromLen[cid]), description=""))
            cid = coord[0]
            pos = 0
        scafGenome.append(SeqRecord(seq=gen[cid][pos:coord[1]].seq, id = cid+"_"+str(coord[1]), description=""))
        pos = coord[1]
    # Tail of the last chromosome processed.
    scafGenome.append(SeqRecord(seq=gen[cid][pos:chromLen[cid]].seq, id = cid+"_"+str(chromLen[cid]), description=""))
    write(scafGenome,fout,"fasta")
def seqsize(args):
    """Print '<id>\\t<length>' for every sequence in the FASTA file."""
    fin = args.fasta.name
    # Plain loop instead of the original list comprehension that was used
    # only for its print side effects.
    for fasta in parse(fin, 'fasta'):
        print(fasta.id, len(fasta.seq), sep="\t")
def filsize(args):
    """
    Remove molecules which are smaller than the specified threshold
    """
    fin = args.fasta.name
    size= args.size
    # Keep only sequences strictly longer than the cutoff.
    gen = [fasta for fasta in parse(fin,'fasta') if len(fasta.seq) > size]
    # Output name: input basename with .fna replaced by .filtered.fna,
    # written into the current working directory.
    write(gen, fin.split(".fna")[0].split("/")[-1]+".filtered.fna", "fasta")
if __name__ == '__main__':
    # CLI front-end: one subcommand per utility function above.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    subparsers = parser.add_subparsers()
    parser_exseq = subparsers.add_parser("exseq", help="extract sequence from fasta")
    parser_getscaf = subparsers.add_parser("getscaf", help="generate scaffolds from a given genome")
    parser_seqsize = subparsers.add_parser("seqsize", help="get size of dna sequences in a fasta file")
    parser_filsize = subparsers.add_parser("filsize", help="filter out smaller molecules")
    parser_subnuc = subparsers.add_parser("subnuc", help="Change character (in all sequences) in the fasta file")
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        sys.exit()
    parser_exseq.set_defaults(func=extractSeq)
    parser_exseq.add_argument("fasta", help="fasta file", type=argparse.FileType('r'))
    group = parser_exseq.add_mutually_exclusive_group(required = True)
    group.add_argument("--loc", help="Location to extract: chr start end",nargs = 3)
    # parser_exseq.add_argument()
    # parser_exseq.add_argument("start", help = "start", type=int, default = 0)
    # parser_exseq.add_argument("end",help="end",type = int, default=-1)
    group.add_argument("--fin", help="File containing locations to extract", type=argparse.FileType('r'))
    parser_exseq.add_argument("-o", help="Output file name", type=argparse.FileType('w'), default="out.fasta")
    parser_getscaf.set_defaults(func=getScaf)
    parser_getscaf.add_argument("fasta", help="genome fasta file", type=argparse.FileType('r'))
    parser_getscaf.add_argument("n", help="number of scaffolds required", type=int)
    parser_getscaf.add_argument("-o",help="output file name", default="scaf.fasta")
    parser_seqsize.set_defaults(func=seqsize)
    parser_seqsize.add_argument("fasta", help="genome fasta file", type=argparse.FileType('r'))
    parser_filsize.set_defaults(func=filsize)
    parser_filsize.add_argument("fasta", help="genome fasta file", type=argparse.FileType('r'))
    parser_filsize.add_argument("size", help="molecule cut-off in bp, all smaller molecules will be filtered out", type=int,default=0)
    parser_subnuc.set_defaults(func=subnuc)
    parser_subnuc.add_argument("fasta", help = "genome fasta file", type=argparse.FileType('r'))
    parser_subnuc.add_argument("q",help="character to change", default="", type=str)
    parser_subnuc.add_argument("t",help="character to change to", default="", type = str)
    parser_subnuc.add_argument("-o",help="output file name", type=argparse.FileType('w'))
    args = parser.parse_args()
    # print(args)
    # Heavy third-party imports are deferred until after argument parsing
    # so that `--help` stays fast; the same list is repeated in the else
    # branch for library-style imports of this module.
    try:
        from reprlib import repr
    except ImportError:
        pass
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.stats import gaussian_kde
    import operator as op
    from functools import reduce
    from Bio import SeqIO
    from itertools import cycle
    from sys import getsizeof, stderr
    from itertools import chain
    from collections import deque
    from Bio.SeqIO import parse, write
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    # print(args)
    args.func(args)
else:
    try:
        from reprlib import repr
    except ImportError:
        pass
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.stats import gaussian_kde
    import operator as op
    from functools import reduce
    from Bio import SeqIO
    from itertools import cycle
    from sys import getsizeof, stderr
    from itertools import chain
    from collections import deque
    from Bio.SeqIO import parse, write
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
| true |
8e615161bfa114c34a58f7c4cdde595502b0b10c | Python | mrojas2005/TP1-TDA | /PycharmProjects/TP1/digrafo.py | UTF-8 | 3,360 | 3.625 | 4 | [] | no_license |
class Digrafo:
    """Directed graph with a fixed number of vertices.

    Vertices are always non-negative integers; the first vertex is 0.
    The graph is created empty and edges are added with agregarArista().
    Once created, edges cannot be removed, but new edges can always be
    added."""
    def __init__(self, v):
        self.vertices = v
        self.aristas = 0
        self.adyacentes = dict()
        self.numeroDeCFC = 0
        self.componentesConexas = []
        for i in range(0, v, 1):
            self.adyacentes[i] = []
    def __str__(self):
        return str(self.adyacentes)
    def obtenerNumeroDeVertices(self):
        return self.vertices
    def obtenerNumeroDeAristas(self):
        return self.aristas
    def agregarArista(self, verticeOrigen, verticeDestino):
        self.adyacentes[verticeOrigen].append(verticeDestino)
        self.aristas += 1
    def adyacentesAlVertice(self, v):
        return self.adyacentes[v]
    def transponer(self):
        # Build the transpose graph: every edge (i, j) becomes (j, i).
        transpuestaDelGrafo = Digrafo(self.vertices)
        for i in self.adyacentes:
            for j in self.adyacentes[i]:
                transpuestaDelGrafo.agregarArista(j,i)
        return transpuestaDelGrafo
    # Render each vertex with its outgoing edges.
    def obtenerGrafo(self):
        s = "Vertice -> Adyacentes\n"
        for i in self.adyacentes:
            s += str(i) + " -> "
            for j in self.adyacentes[i]:
                s += str(j) + " "
            s += "\n"
        return s
    def dfsGrafoTranspuesto(self, v, visitado):
        # Mark the current node as visited and record it in the component.
        visitado[v] = True
        self.componentesConexas.append(v)
        # Repeat for every vertex adjacent to the current one.
        for i in self.adyacentes[v]:
            if visitado[i] == False:
                self.dfsGrafoTranspuesto(i, visitado)
    def dfsParaCalcularTf(self, v, visitado, pila):
        # Mark the current node as visited.
        visitado[v] = True
        # Repeat for every vertex adjacent to the current one.
        for i in self.adyacentes[v]:
            if visitado[i] == False:
                self.dfsParaCalcularTf(i, visitado, pila)
        pila.append(v)
    # Main routine (Kosaraju): find and print all strongly connected
    # components (SCCs).
    def imprimirCFC(self):
        pila = []
        # Mark all vertices as unvisited (DFS over G).
        visitado = [False] * (self.vertices)
        # Fill the stack with vertices ordered by finishing time.
        for i in range(self.vertices):
            if visitado[i] == False:
                self.dfsParaCalcularTf(i, visitado, pila)
        # Compute the transpose of the graph.
        grafoTranspuesto = self.transponer()
        # Mark all vertices as unvisited (DFS over Gt).
        visitado = [False] * (self.vertices)
        # Process all vertices in the order defined by the stack.
        while pila:
            i = pila.pop()
            if visitado[i] == False:
                grafoTranspuesto.dfsGrafoTranspuesto(i, visitado)
                self.numeroDeCFC += 1
                print(grafoTranspuesto.componentesConexas)
                grafoTranspuesto.componentesConexas.clear()
def obtenerNumeroDeCFC(self):
return self.numeroDeCFC | true |
e3f4bf2ac8fdab12102b6b1e6696fad51c013427 | Python | FrancoIII/Pavages | /Anciens programmes/substitutions_dim2.py | UTF-8 | 1,719 | 3.015625 | 3 | [] | no_license | # francois oder le 22 juin 2017
import math
def fibo(n):
    """n-th Fibonacci number via Binet's closed formula (float result)."""
    sqrt5 = math.sqrt(5)
    golden = (1 + sqrt5) / 2
    conjugate = (1 - sqrt5) / 2
    return (1 / sqrt5) * (golden ** n - conjugate ** n)
def iterer(L, n):
    """One substitution step on a square tile pattern.

    Maps the q x q grid L (q = fibo(n+2)) onto a larger m x m grid
    (m = fibo(n+3)), rewriting each cell value 1..4 into a block of new
    cells; indices are taken modulo m so blocks wrap around the grid.
    NOTE(review): exact substitution semantics inferred only from the
    index arithmetic below -- confirm against the tiling reference.
    """
    m = int(fibo(n+3))
    q = int(fibo(n+2))
    d = m - q
    M = [[0 for i in range(m)] for i in range(m)]
    d_y = 0
    d_y_cond = False
    # Rows are processed bottom-up (i = q-1 .. 0).
    for u in range(q):
        i = q-u-1
        d_x = 0
        for j in range(q):
            if L[i][j] == 1:
                # A '1' cell expands into a 2x2 block of values 1..4.
                M[(i - d_y + d)%m][(j + d_x)%m] = 1
                M[(i - d_y - 1 + d) % m][(j + d_x)%m] = 2
                M[(i - d_y + d) % m][(j + d_x + 1)%m] = 3
                M[(i - d_y - 1 + d) % m][(j + d_x + 1)%m] = 4
                d_y_cond = True
                d_x += 1
            elif L[i][j] == 2:
                # A '2' cell expands horizontally into (1, 3).
                M[(i - d_y + d) % m][(j + d_x) % m] = 1
                M[(i - d_y + d) % m][(j + d_x + 1) % m] =3
                d_y_cond = False
                d_x += 1
            elif L[i][j] == 3:
                # A '3' cell expands vertically into (1, 2).
                M[(i - d_y + d) % m][(j + d_x) % m] = 1
                M[(i - d_y - 1 + d) % m][(j + d_x) % m] = 2
            else:
                M[(i - d_y + d)%m][(j + d_x)%m] = 1
        if d_y_cond :
            d_y += 1
    return M
def affiche(M):
    """Print each row of M on its own line."""
    for row in M:
        print(row)
def est_dans(P, G):
    """Return True if P equals one of the elements of G.

    `in` performs the same element-wise == comparison as the original
    explicit loop.
    """
    return P in G
def complexité(M, n):
    """Count the distinct n x n sub-blocks of the grid M (pattern complexity)."""
    K = []
    p = 0
    for i in range(len(M)-n):
        for j in range(len(M)-n):
            # Extract the n x n block whose top-left corner is (i, j).
            L = []
            for x in range(n):
                L += [M[i+x][j:j+n]]
            if not est_dans(L, K):
                p += 1
                K += [L]
    return p
# Start from the single-cell seed tile and apply four substitution steps,
# then print the resulting grid.
M = [[1]]
for n in range(4):
    M = iterer(M, n)
affiche(M)
| true |
11033913cdb030ec16a5ba2a9fcaaae60931efb6 | Python | GabrielAranhaMello2007/Login_Cadastro | /Login_Cadastro0.py | UTF-8 | 4,397 | 3.1875 | 3 | [] | no_license | # Login_Cadastro
# Um programa em que é possível fazer Login(Usa outro arquivo como banco de dados) e Cadastro
# Para fazer a instalação do "PySimpleGui"
# Copie "pip install PySimpleGUI" e logo em seguida cole isso no terminal do Python
import PySimpleGUI as sg
from Banco_de_dados import *
import PySimpleGUI as Sg
from Banco_de_dados import *
import time
def janela_login():
    """Build the login window: login/password inputs, red feedback labels
    (keys 'text1'/'text2'), and the 'Entrar'/'Cadastro' buttons."""
    Sg.theme('DarkBlack')
    layout = [
        [Sg.Text('Login', size=(30, 1))],
        [Sg.Input(key='login', size=(25, 1))],
        [Sg.Text('', text_color='red', size=(25, 1), key='text1')],
        [Sg.Text('Senha', size=(30, 1))],
        [Sg.Input(size=(25, 1), key='password', password_char='*')],
        [Sg.Text('', text_color='red', key='text2', size=(30, 1))],
        [Sg.Text(size=(29, 10))],
        [Sg.Button('Entrar')],
        [Sg.Button('Cadastro')],
        [Sg.Text(size=(0, 1))]
    ]
    return Sg.Window('Janela Login', finalize=True, layout=layout)
def janela_login_correct():
    """Build the 'access granted' window shown after a successful login."""
    Sg.theme('DarkBlack')
    layout = [
        [Sg.Text(size=(32, 1))],
        [Sg.Text(size=(5, 0)), Sg.Text('Acesso concedido\n'
                                       'A janela será encerrada em ...5 segundos', text_color='green', key='jlc')],
        [Sg.Text()]
    ]
    return Sg.Window('Acesso concedido', finalize=True, layout=layout)
def janela_cadastro_correct():
    """Build the confirmation window shown after a successful signup."""
    Sg.theme('DarkBlack')
    layout = [
        [Sg.Text(size=(32, 1))],
        [Sg.Text(size=(5, 1)), Sg.Text('Conta criada com sucesso', text_color='green', key='jcc')],
        [Sg.Text()]
    ]
    return Sg.Window('Acesso concedido', finalize=True, layout=layout)
def janela_cadastro():
    """Build the signup window: new login, password + confirmation inputs,
    red feedback labels (keys 'text3'/'text4'), and a 'Cadastrar' button."""
    Sg.theme('DarkBlack')
    layout = [
        [Sg.Text('Novo login', size=(30, 1))],
        [Sg.Input('', key='login_cadastro', size=(25, 1))],
        [Sg.Text('', key='text3', text_color='red', size=(25, 1))],
        [Sg.Text('Crie sua senha', size=(30, 1))],
        [Sg.Input(key='password_cadastro_1', size=(25, 1), password_char='*')],
        [Sg.Text('Confirme sua senha', size=(30, 1))],
        [Sg.Input(key='password_cadastro_2', size=(25, 1), password_char='*')],
        [Sg.Text('', key='text4', text_color='red', size=(30, 1))],
        [Sg.Text(size=(29, 10))],
        [Sg.Button('Cadastrar')],
        [Sg.Text(size=(0, 1))]
    ]
    return Sg.Window('Janela Cadastro', finalize=True, layout=layout)
# Pre-create the login window; the other windows are created on demand.
janela1, janela2, janela3, janela4 = janela_login(), None, None, None
def verifica_login():
    """Validate the login form against the imported ``users`` dict.

    Reads the module-level ``window``, ``value`` and ``event`` globals set
    by the event loop; updates the feedback labels and returns True only
    when the user exists, the password matches and 'Entrar' was pressed.
    """
    win = False
    if window == janela1:
        try:
            # KeyError means the login is unknown.
            users[value['login']]
        except KeyError:
            window['text1'].update('Usuário não encontrado')
        else:
            window['text1'].update('')
            if users[value['login']] != value['password']:
                window['text2'].update('Senha incorreta')
            else:
                window['text2'].update('')
                if event == 'Entrar':
                    win = True
    return win
def verifica_cadastro():
    """Validate the signup form; on success append the new credentials to
    Banco_de_dados.py and return True.

    Reads the module-level ``window``, ``value`` and ``event`` globals set
    by the event loop.
    """
    win = False
    if window == janela2:
        if value['login_cadastro'] in users:
            window['text3'].update('Login já em uso')
        else:
            window['text3'].update('')
        if value['password_cadastro_1'] != value['password_cadastro_2']:
            window['text4'].update('As senhas não coincidem')
        else:
            window['text3'].update('')
            if event == 'Cadastrar':
                # Bug fix: the file handle was never closed; a context
                # manager guarantees the appended credentials are flushed.
                with open('Banco_de_dados.py', 'a') as red:
                    red.write(f'login = "{value["login_cadastro"]}"\n'
                              f'senha = "{value["password_cadastro_1"]}"\n'
                              'users.update({login: senha})\n')
                win = True
    return win
# Main GUI event loop: dispatches events from whichever window is active.
while True:
    window, event, value = Sg.read_all_windows()
    if event == Sg.WINDOW_CLOSED:
        break
    if window == janela1:
        verifica_login()
        if event == 'Cadastro':
            janela1.hide()
            janela2 = janela_cadastro()
    if window == janela2:
        # NOTE(review): verifica_cadastro() is called twice here, so a
        # successful signup appends the credentials to the file twice --
        # confirm whether the first call can be dropped.
        verifica_cadastro()
        if verifica_cadastro() is True:
            janela2.hide()
            janela4 = janela_cadastro_correct()
            time.sleep(5)
            break
    if verifica_login() is True:
        janela1.hide()
        janela3 = janela_login_correct()
        time.sleep(5)
        break
| true |
a8918620b5eba6b0b33a9854054622615a1266fd | Python | EvgenySenkevich/DynamicList | /test_module.py | UTF-8 | 1,505 | 3.25 | 3 | [] | no_license | import unittest
import main
class TestList(unittest.TestCase):
    """Unit tests for main.DynArray: capacity starts at 16 ('capasity' is
    the attribute name used by the implementation), grows on append past
    capacity, and shrinks when enough elements are deleted."""
    def test_append(self):
        dy = main.DynArray()
        self.assertEqual(dy.capasity, 16)
        for i in range(16):
            dy.append(i)
            self.assertEqual(dy[i], i)
    def test_append2(self):
        # Appending past the initial capacity must grow the buffer.
        dy = main.DynArray()
        self.assertEqual(dy.capasity, 16)
        for i in range(100):
            dy.append(i)
            self.assertEqual(dy[i], i)
        self.assertGreater(dy.capasity, 100)
    def test_insert(self):
        # Inserting at an out-of-range index must be ignored.
        dy = main.DynArray()
        dy.append(1)
        dy.append(1)
        dy.append(1)
        dy.insert(10, 3)
        for i in range(len(dy)):
            self.assertNotEqual(dy[i], 3)
            self.assertEqual(dy[i], 1)
    def test_delete(self):
        dy = main.DynArray()
        dy.append(1)
        dy.append(1)
        dy.append(1)
        dy.append(1)
        self.assertEqual(dy.capasity, 16)
        dy.delete(2)
        self.assertEqual(dy.count, 3)
        self.assertEqual(dy.capasity, 16)
    def test_delete2(self):
        # Deleting half of a grown array must shrink its capacity.
        dy = main.DynArray()
        for i in range(100):
            dy.append(i)
        self.assertGreater(dy.capasity, 100)
        for i in range(50):
            dy.delete(i)
        self.assertLess(dy.capasity, 70)
    def test_delete3(self):
        # Deleting an out-of-range index must be a no-op.
        dy = main.DynArray()
        dy.append(1)
        dy.append(1)
        dy.append(1)
        self.assertEqual(dy.count, 3)
        dy.delete(10)
self.assertEqual(dy.count, 3) | true |
f578ef3b88b45338a5d092fce12d10390f98e86c | Python | o-kei/design-computing-aij | /ch5/facility.py | UTF-8 | 1,028 | 3.03125 | 3 | [
"MIT"
] | permissive | import numpy as np # モジュールnumpyをnpという名前で読み込み
import csv # モジュールcsvの読み込み
from scipy import optimize # scipy内のoptimizeモジュールを読み込み
filename = 'out2'  # output file name
writer = csv.writer(open(filename + '.csv', 'w', newline=''))  # create the output csv file
writer.writerow(['step', 'f(x)', 'x1', 'x2'])  # write the header row
def f(x):
    """Objective: total distance from x to the points (2, 4) and (3, 2)."""
    d1 = ((2 - x[0]) ** 2 + (4 - x[1]) ** 2) ** 0.5
    d2 = ((3 - x[0]) ** 2 + (2 - x[1]) ** 2) ** 0.5
    return d1 + d2
def g(x): # inequality constraints; the point is feasible when every entry >= 0
    return np.array([-2 * x[0] - 3 * x[1] + 7, x[0], -x[0] + 2, x[1], -x[1] + 2])
def callbackF(x): # record the intermediate result at each optimization step
    global step
    step += 1
    writer.writerow([step, f(x), x[0], x[1]])
# Initial guess, then run the solver; callbackF logs every iteration.
x = np.array([0.0, 0.0])
step = 0
writer.writerow([step, f(x), x[0], x[1]])
optimize.fmin_slsqp(f, x, f_ieqcons=g, iprint=2, callback=callbackF) # sequential least squares programming (SLSQP)
| true |
cb446e07e8738201a91f01a04a0adba7c72cb9ab | Python | UchinoMENG/PersonalLearn | /PAT/python版/1069.py | UTF-8 | 555 | 3.046875 | 3 | [] | no_license | num = input().split()
# PAT exercise: presumably num = [total entries, winner interval, first
# winner position] -- TODO confirm against the problem statement.
for i in range(len(num)):
    num[i] = int(num[i])
result = []
sign = 0
hh=0
for i in range(num[0]):
    name = input()
    # The first winner is at position num[2]; sign flags that it was found.
    if i+1==num[2] and sign==0:
        sign=1
        result.append(name)
        continue
    elif sign==1:
        hh+=1
        if(hh==num[1]):
            # Every num[1]-th entry after the first winner also wins,
            # unless the name already won.
            if name not in result:
                result.append(name)
            hh=0
        else:
            # Name was a duplicate winner candidate slot; roll the
            # counter back (net effect: skip this entry).
            hh-=1
            continue
if sign==0:
    print("Keep going...")
else:
    for i in result:
        print(i)
| true |
a65da99e81cc5f28f1d70bd5f97364ad07f0172a | Python | Fowres-Co/personal-movie-dashboard | /app.py | UTF-8 | 2,461 | 2.609375 | 3 | [] | no_license | import movieSpider as spidy
import IMDB_scraper
#--- testing custom logger
from xlogger import Logger
xLogger = Logger(__name__, 'info')
logger = xLogger.log #getting logging object
#---
BASEPATH = 'C:\\movietest\\'
MEDIAEXTS = ['.mp4','.mkv','.avi']
METAFILE = 'metadata.vif'
scrapy = IMDB_scraper.IMDBscraper()
logger.info("Initialized scraper")
#metadata format - {title1: {'name':'', other things from details..,'filedata':{file info here}}, title2: ..}
metaData = {}
loadedBaseNames = []
updateMetaFile = False
totalCount, fetchCount = 0, 0
#----------Initializing App------------
#load pickled file here
# NOTE(review): metaData is always {} at this point (assigned just above),
# so the else branch always runs -- confirm the intended pickle preload.
if metaData:
    logger.info("Metadata already loaded.")
else:
    metaData = spidy.loadMetaData(BASEPATH+METAFILE)
    loadedBaseNames = [val['filedata']['base'] for val in list(metaData.values())]
    logger.info('Loading metadata finished' + str(metaData))
#walk current directory
mediaFiles = spidy.getMediaFiles(BASEPATH, MEDIAEXTS)
logger.info('Walk completed')
#check what all is new
for file in mediaFiles:
    if file['base'] not in loadedBaseNames:
        totalCount += 1
        updateMetaFile = True
        #cleaning base name for search
        cleanedName, year = spidy.nameCleaner(file['base'])
        logger.info('Found new - ' + str(cleanedName))
        #trying to fetch details
        try:
            logger.debug('Fetching: '+str(cleanedName)+' year:'+str(year))
            title, details = scrapy.movieDetails(cleanedName + ' ' + year)
            logger.info('Fetched details')
            fetchCount += 1
            #adding fetched details
            metaData[title] = {'filedata': file}
            for key in details.keys():
                metaData[title][key] = details[key]
            logger.info('Added to metadata')
        except Exception as e:
            # Best-effort: one failed fetch must not abort the whole scan.
            logger.exception('Error while fetching' + file['base'])
#saving if changes were made
if updateMetaFile:
    # totalCount >= 1 whenever updateMetaFile is True, so no ZeroDivision.
    logger.info('Fetch Accuracy: ' + str(fetchCount/totalCount))
    spidy.saveMetaData(BASEPATH+METAFILE, metaData)
    logger.info('Saving Finished')
    updateMetaFile = False
else:
    logger.info('No new file')
#testing
# if len(metaData.keys()) > 0:
#     logger.debug(str(metaData[list(metaData.keys())[0]]))
#----------Utility functions here------
#sorting
#grouping
#show copies
#some gui here if using python based | true |
ef0eac8b2f63951303472113e335c36fcf54b754 | Python | puneet87m/Python-Basics | /Python-Basic(local)/ListOverlap.py | UTF-8 | 404 | 3.59375 | 4 | [] | no_license | import random
# Overlap of two fixed lists, keeping order and duplicates of the first list.
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
l = [value for value in a if value in b]
print("static list match", l)
# Same overlap on two random 5-element samples drawn from range(10).
x = random.sample(range(10), 5)
y = random.sample(range(10), 5)
ls = [value for value in x if value in y]
print("Dynamic list 1", x)
print("Dynamic list 2", y)
print("dynamic list match", ls)
| true |
f1857bbddbbd7011a9289bd202b2a09a0d92430a | Python | xiaoge56/plot_beta | /beta_distribution_plot.py | UTF-8 | 1,858 | 3.109375 | 3 | [] | no_license | # coding=utf-8
from scipy.stats import beta
import matplotlib.pyplot as plt
import numpy as np
import math
class beta_distribution(object):
    """Plot Beta(a, b) prior densities over (0, 1) with matplotlib."""
    def __init__(self):
        # 1000 evaluation points strictly inside (0, 1) to avoid the endpoints.
        self.x = (np.linspace(0.001, 0.999,1000))
        self.colors = "bgrcmykw"  # one matplotlib color code per curve
        self.colors_index=0
        self.hyperparameter=self.choice_hyperparameter()
    def prior_beta(self,a,b):
        """Return the Beta(a, b) pdf evaluated at every point of self.x."""
        p_x=[]
        for theta in self.x :
            p_x.append(beta.pdf(theta, a, b))
        return p_x
    def plot_bete(self,data,color):
        """Add one curve to the current axes (does not show the figure)."""
        plt.axis([-0.1, 1, -0.1,6])
        # NOTE(review): the label always uses hyperparameter[0] and [1], so
        # multi-curve legends repeat the first pair — confirm intended.
        plt.plot(self.x,data,color,label='a=%s,b=%s'%(self.hyperparameter[0],self.hyperparameter[1]),linewidth=2.0)
        plt.legend(loc='best')
    def show_pic(self):
        """Display the accumulated figure with a grid."""
        plt.grid(True)
        plt.show()
    def plot_single_curve(self,a=1,b=1):
        """Plot one Beta(a, b) curve and show it."""
        p_x=self.prior_beta(a,b)
        self.plot_bete(p_x,self.colors[self.colors_index])
        self.show_pic()
    def plot_multi_curve(self):
        """Plot every (a, b) pair from choice_hyperparameter in its own color."""
        for (a,b) in self.hyperparameter:
            p_x=self.prior_beta(a,b)
            self.plot_bete(p_x,self.colors[self.colors_index])
            self.colors_index+=1
        self.show_pic()
        return 0
    def choice_hyperparameter(self):
        """Fixed demo list of (a, b) hyperparameter pairs."""
        return [(5,1),(2,2),(0.5,0.5),(2,2),(2,5)]
#----------------------------------------------------------------
class likelihood(object):
    """Placeholder for likelihood plotting; all methods are unimplemented stubs."""
    def __init__(self):
        self.binomial_theta=[0.5]           # candidate theta for a binomial likelihood
        self.d=0
        self.multi_theta=[0.1,0.2,0.3,0.4]  # candidate thetas for a multinomial likelihood
    def binomial(self,):
        # Stub: returns None.
        return
    def plot_likelihood_beta(self):
        # Stub: not implemented yet.
        pass
    def plot_likelihood_multinomial(self):
        # Stub: not implemented yet.
        pass
def nCr(n, r):
    """Return the binomial coefficient C(n, r) as an exact integer."""
    f = math.factorial
    # Integer division: the original `/ f(r) / f(n-r)` yields a float under
    # Python 3 and loses precision for large n.
    return f(n) // (f(r) * f(n - r))
def main():
    """Demo: plot the fixed family of Beta priors, then a single Beta(1.25, 8.75)."""
    prior_beta=beta_distribution()
    prior_beta.plot_multi_curve()
    prior_beta.plot_single_curve(1.25,8.75)
if __name__=="__main__":
main()
| true |
56222617db3aa420108c971153c061487df0fa08 | Python | it-innoo/data-analysis | /hy-data-analysis-with-python-summer-2019/part01-e06_triple_square/src/triple_square.py | UTF-8 | 419 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env python3
def triple(x):
    """Return *x* scaled by a factor of three."""
    return x * 3
def square(x):
    """Return *x* multiplied by itself."""
    return x * x
def main():
    """Print triple(n) vs square(n) for n = 1..10, stopping as soon as the
    square first exceeds the triple."""
    for n in range(1, 11):
        sq = square(n)
        tr = triple(n)
        if sq > tr:
            break  # from here on the square always wins
        print("triple({})=={} square({})=={}".format(n, tr, n, sq))
if __name__ == "__main__":
main()
| true |
d3de506bf5ea07d11999f4ec5b23fcfa714e8e4a | Python | Alfinus/crash_course_in_python | /chapter_5/5-1 conditional_tests.py | UTF-8 | 1,531 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# 5-1 conditional_tests.py
#
# Copyright 2018 Devon <Devon@BETSY>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Exercises from "Python Crash Course" ch. 5: print the outcome of simple
# equality/inequality tests together with a prediction of each result.
car = 'subaru'
print('Is car == "subaru"? I predict True.')
print(car == 'subaru')
print("\n Is car == 'audi'? I predict False!")
print (car == 'audi')
if car != 'audi':
	print('\nwhoops, seems we took a wrong direction')
print('\nlets add another variable')
car_1 = 'audi'
car_2 = 'BMW'
car_3 = 'Toyota'
print('\nIs car_1 == "audi"? I predict true!')
print(car_1 == 'audi')
print('\nThere we go')
print('\nLets try another one')
print("\nIs car_2 == 'subaru'? I predict False.")
print(car_2 == 'subaru')
print('\nwhoops there we go again....')
print("\nIs car_2 == 'BMW'? I predict True!")
print(car_2 == 'BMW')
| true |
72f2d7c6c105702c3a9b86a0d4c4baebad13e033 | Python | maet3608/nuts-ml | /nutsml/reader.py | UTF-8 | 12,144 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | """
.. module:: reader
:synopsis: Reading of sample data and images
"""
from __future__ import absolute_import
import os
import pandas as pd
import numpy as np
from glob import glob
from collections import namedtuple
from fnmatch import fnmatch
from nutsml.imageutil import load_image
from nutsml.fileutil import reader_filepath
from nutsflow import NutSource, nut_function, nut_source
from nutsflow.common import as_set
@nut_source
def ReadLabelDirs(basedir, filepattern='*', exclude='_*'):
    """
    Read file paths from label directories.

    Typically used when classification data is organized in folders,
    where the folder name represents the class label and the files in
    the folder the data samples (images, documents, ...) for that class.

    >>> from __future__ import print_function
    >>> from nutsflow import Sort
    >>> read = ReadLabelDirs('tests/data/labeldirs', '*.txt')
    >>> samples = read >> Sort()
    >>> for sample in samples:
    ...     print(sample)
    ...
    ('tests/data/labeldirs/0/test0.txt', '0')
    ('tests/data/labeldirs/1/test1.txt', '1')
    ('tests/data/labeldirs/1/test11.txt', '1')

    :param string basedir: Path to folder that contains label directories.
    :param string filepattern: Pattern for filepaths to read from
       label directories, e.g. '*.jpg', '*.txt'
    :param string exclude: Pattern for label directories to exclude.
       Default is '_*' which excludes all label folders prefixed with '_'.
    :return: iterator over labeled file paths
    :rtype: iterator
    """
    for label in os.listdir(basedir):
        # Only directories are label folders; plain files in basedir are ignored.
        if os.path.isdir(os.path.join(basedir, label)):
            if fnmatch(label, exclude):
                continue  # e.g. '_unlabeled' folders are skipped
            pathname = os.path.join(basedir, label, filepattern)
            for filepath in glob(pathname):
                # Normalize Windows separators so paths are stable across OSes.
                yield filepath.replace('\\', '/'), label
@nut_function
def ReadNumpy(sample, columns, pathfunc=None, allow_pickle=False):
    """
    Load numpy arrays from filesystem.

    Note that the loaded numpy arrays replace the file name|path in the
    sample.

    >>> from nutsflow import Consume, Collect, PrintType
    >>> samples = ['tests/data/img_arrays/nut_color.jpg.npy']
    >>> samples >> ReadNumpy(None) >> PrintType() >> Consume()
    (<ndarray> 213x320x3:uint8)
    >>> samples = [('tests/data/img_arrays/nut_color.jpg.npy', 'class0')]
    >>> samples >> ReadNumpy(0) >> PrintType() >> Consume()
    (<ndarray> 213x320x3:uint8, <str> class0)
    >>> filepath = 'tests/data/img_arrays/*.jpg.npy'
    >>> samples = [(1, 'nut_color'), (2, 'nut_grayscale')]
    >>> samples >> ReadNumpy(1, filepath) >> PrintType() >> Consume()
    (<int> 1, <ndarray> 213x320x3:uint8)
    (<int> 2, <ndarray> 213x320:uint8)
    >>> pathfunc = lambda s: 'tests/data/img_arrays/{1}.jpg.npy'.format(*s)
    >>> samples >> ReadNumpy(1, pathfunc) >> PrintType() >> Consume()
    (<int> 1, <ndarray> 213x320x3:uint8)
    (<int> 2, <ndarray> 213x320:uint8)

    :param tuple|list sample: ('nut_data', 1)
    :param None|int|tuple columns: Indices of columns in sample to be replaced
       by numpy array (based on fileid in that column)
       If None then a flat sample is assumed and
       a tuple with the numpy array is returned.
    :param string|function|None pathfunc: Filepath with wildcard '*',
      which is replaced by the file id/name provided in the sample, e.g.
      'tests/data/img_arrays/*.jpg.npy' for sample ('nut_grayscale', 2)
      will become 'tests/data/img_arrays/nut_grayscale.jpg.npy'
      or
      Function to compute path to numpy file from sample, e.g.
      lambda sample: 'tests/data/img_arrays/{1}.jpg.npy'.format(*sample)
      or
      None, in this case the file id/name is taken as the filepath.
    :param bool allow_pickle: Allow loading pickled object arrays in npy files.
    :return: Sample with file ids/names replaced by numpy arrays.
    :rtype: tuple
    """
    def load(filename):
        """Load numpy array for given fileid"""
        filepath = reader_filepath(sample, filename, pathfunc)
        return np.load(filepath, allow_pickle=allow_pickle)
    if columns is None:
        return (load(sample),)  # numpy array as tuple with one element
    # Replace only the indicated columns; everything else passes through unchanged.
    colset = as_set(columns)
    elems = enumerate(sample)
    return tuple(load(e) if i in colset else e for i, e in elems)
@nut_function
def ReadImage(sample, columns, pathfunc=None, as_grey=False, dtype='uint8'):
    """
    Load images from filesystem for samples.

    Loads images in jpg, gif, png, tif and bmp format.
    Images are returned as numpy arrays of shape (h, w, c) or (h, w) for
    color images or gray scale images respectively.
    See nutsml.imageutil.load_image for details.

    Note that the loaded images replace the image file name|path in the
    sample. If the image file paths are directly provided (not as a tuple
    sample) tuples with the loaded image are still returned.

    >>> from nutsflow import Consume, Collect
    >>> from nutsml import PrintColType
    >>> images = ['tests/data/img_formats/nut_color.gif']
    >>> images >> ReadImage(None) >> PrintColType() >> Consume()
    item 0: <tuple>
      0: <ndarray> shape:213x320x3 dtype:uint8 range:0..255
    >>> samples = [('tests/data/img_formats/nut_color.gif', 'class0')]
    >>> img_samples = samples >> ReadImage(0) >> Collect()
    >>> imagepath = 'tests/data/img_formats/*.gif'
    >>> samples = [(1, 'nut_color'), (2, 'nut_grayscale')]
    >>> samples >> ReadImage(1, imagepath) >> PrintColType() >> Consume()
    item 0: <tuple>
      0: <int> 1
      1: <ndarray> shape:213x320x3 dtype:uint8 range:0..255
    item 1: <tuple>
      0: <int> 2
      1: <ndarray> shape:213x320 dtype:uint8 range:20..235
    >>> pathfunc = lambda s: 'tests/data/img_formats/{1}.jpg'.format(*s)
    >>> img_samples = samples >> ReadImage(1, pathfunc) >> Collect()

    :param tuple|list sample: ('nut_color', 1)
    :param None|int|tuple columns: Indices of columns in sample to be replaced
       by image (based on image id in that column)
       If None then a flat sample is assumed and
       a tuple with the image is returned.
    :param string|function|None pathfunc: Filepath with wildcard '*',
      which is replaced by the imageid provided in the sample, e.g.
      'tests/data/img_formats/*.jpg' for sample ('nut_grayscale', 2)
      will become 'tests/data/img_formats/nut_grayscale.jpg'
      or
      Function to compute path to image file from sample, e.g.
      lambda sample: 'tests/data/img_formats/{1}.jpg'.format(*sample)
      or
      None, in this case the image id is taken as the filepath.
    :param bool as_grey: If true, load as grayscale image.
    :param dtype dtype: Numpy data type of the image.
    :return: Sample with image ids replaced by image (=ndarray)
      of shape (h, w, c) or (h, w)
    :rtype: tuple
    """
    def load(filename):
        """Load image for given fileid"""
        filepath = reader_filepath(sample, filename, pathfunc)
        return load_image(filepath, as_grey=as_grey, dtype=dtype)
    if columns is None:
        return (load(sample),)  # image as tuple with one element
    # Replace only the indicated columns; everything else passes through unchanged.
    colset = as_set(columns)
    elems = enumerate(sample)
    return tuple(load(e) if i in colset else e for i, e in elems)
class ReadPandas(NutSource):
    """
    Read data as Pandas table from file system.
    """
    def __init__(self, filepath, rows=None, colnames=None, dropnan=True,
                 replacenan=False, rowname='Row', **kwargs):
        """
        Create reader for Pandas tables.

        The reader returns the table contents as an iterator over named tuples,
        where the column names are derived from the table columns. The order
        and selection of columns can be changed.

        >>> from nutsflow import Collect, Consume, Print
        >>> filepath = 'tests/data/pandas_table.csv'
        >>> ReadPandas(filepath) >> Print() >> Consume()
        Row(col1=1.0, col2=4.0)
        Row(col1=3.0, col2=6.0)
        >>> (ReadPandas(filepath, dropnan=False, rowname='Sample') >>
        ...  Print() >> Consume())
        Sample(col1=1.0, col2=4.0)
        Sample(col1=2.0, col2=nan)
        Sample(col1=3.0, col2=6.0)
        >>> ReadPandas(filepath, replacenan=None) >> Print() >> Consume()
        Row(col1=1.0, col2=4.0)
        Row(col1=2.0, col2=None)
        Row(col1=3.0, col2=6.0)
        >>> colnames=['col2', 'col1']  # swap order
        >>> ReadPandas(filepath, colnames=colnames) >> Print() >> Consume()
        Row(col2=4.0, col1=1.0)
        Row(col2=6.0, col1=3.0)
        >>> ReadPandas(filepath, rows='col1 > 1', replacenan=0) >> Collect()
        [Row(col1=2.0, col2=0), Row(col1=3.0, col2=6.0)]

        :param str filepath: Path to a table in CSV, TSV, XLSX or
           Pandas pickle format. Depending on file extension (e.g. .csv)
           the table format is picked.
           Note tables must have a header with the column names.
        :param str rows: Rows to filter. Any Pandas filter expression. If
           rows = None all rows of the table are returned.
        :param list colnames: List of names for the table columns to return.
           For colnames = None all columns are returned.
        :param bool dropnan: If True all rows that contain NaN are dropped.
        :param object replacenan: If not False all NaNs are replaced by
           the value of replacenan
        :param str rowname: Name of named tuple returned as rows.
        :param kwargs kwargs: Key word arguments passed on to the Pandas
           methods for data reading, e.g, header=None.
           See pandas/pandas/io/parsers.py for details
        """
        self.filepath = filepath
        self.rows = rows
        self.colnames = colnames
        self.dropnan = dropnan
        self.replacenan = replacenan
        self.rowname = rowname
        self.kwargs = kwargs
        self.dataframe = self._load_table(filepath)
    @staticmethod
    def isnull(value):
        """
        Return true if values is NaN or None.

        >>> import numpy as np
        >>> ReadPandas.isnull(np.NaN)
        True
        >>> ReadPandas.isnull(None)
        True
        >>> ReadPandas.isnull(0)
        False

        :param value: Value to test
        :return: Return true for NaN or None values.
        :rtype: bool
        """
        return pd.isnull(value)
    def _replacenan(self, row):
        """
        Replace NaN values in row by self.replacenan.

        :param iterable row: Any iterable.
        :return: Row with self.replacenan instead of NaN
        :rtype: tuple
        """
        value = self.replacenan
        return tuple(value if pd.isnull(v) else v for v in row)
    def _load_table(self, filepath):
        """
        Load table from file system, dispatching on the file extension.

        :param str filepath: Path to table in CSV, TSV, XLSX or
          Pandas pickle format.
        :return: Pandas table
        :rtype: pandas.core.frame.DataFrame
        """
        _, ext = os.path.splitext(filepath.lower())
        if ext == '.tsv':
            return pd.read_csv(filepath, sep='\t', **self.kwargs)
        if ext == '.csv':
            return pd.read_csv(filepath, **self.kwargs)
        if ext == '.xlsx':
            return pd.read_excel(filepath, engine='openpyxl', **self.kwargs)
        return pd.read_pickle(filepath, **self.kwargs)  # fallback: pickle
    def __iter__(self):
        """
        Return iterator over rows in table.

        :return: Iterator over rows.
        :rtype: iterator
        """
        df = self.dataframe
        rows = df.query(self.rows) if self.rows else df
        series = rows[self.colnames] if self.colnames else rows
        Row = namedtuple(self.rowname, series.columns.to_list())
        # NaN replacement takes precedence over dropnan (see class doctests).
        if self.replacenan is not False:
            values = (self._replacenan(row) for row in series.values)
        elif self.dropnan:
            values = series.dropna().values
        else:
            values = series.values
        return (Row(*v) for v in values)
| true |
be5d10132aebbf197177489930df5a8ea58ca2cc | Python | lishuang1994/-1807 | /02day/05-乘法口诀表面向对象.py | UTF-8 | 486 | 3.59375 | 4 | [] | no_license | '''
class mouse:
def lei(self):
for i in range(10):
for j in range i:
if i*j= k:
print("%d * %d = %d"%(i,j,k),end=\n)
ls = mouse()
ls.lei()
'''
class mouse():
    """Prints the lower-triangle 9x9 multiplication table."""

    def lei(self):
        """Emit one line per multiplicand; columns separated by a tab-space."""
        for row in range(1, 10):
            for col in range(1, row + 1):
                print("%d * %d = %d" % (row, col, row * col), end="\t ")
            print(" ")
ls = mouse()  # build the printer and emit the table at import time
ls.lei()
| true |
ebc26444c79cee37ee54e58d6e33271a00f9db0f | Python | genialis/resolwe-bio-py | /src/resdk/exceptions.py | UTF-8 | 783 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | """.. Ignore pydocstyle D400.
==========
Exceptions
==========
Custom ReSDK exceptions.
.. autoclass:: ValidationError
"""
from slumber.exceptions import SlumberHttpBaseException
class ValidationError(Exception):
    """An error raised while validating data (custom ReSDK exception)."""
class ResolweServerError(Exception):
    """Error response from the Resolwe API (custom ReSDK exception)."""
def handle_http_exception(func):
    """Decorator that translates slumber HTTP errors into ``ResolweServerError``."""
    from functools import wraps  # local import keeps module-level deps unchanged

    @wraps(func)  # preserve the wrapped callable's name/docstring for callers
    def wrapper(*args, **kwargs):
        """Transform slumber errors into ReSDK errors.

        Use content of the HTTP response as exception error.
        """
        try:
            return func(*args, **kwargs)
        except SlumberHttpBaseException as exception:
            raise ResolweServerError(exception.content)
    return wrapper
| true |
d8797319f06670bb3953a57aaf3d330b13c582fd | Python | zemo20/guitarshop | /database_seed.py | UTF-8 | 1,164 | 2.71875 | 3 | [] | no_license | from flask import Flask, render_template, request, redirect, url_for, jsonify
from sqlalchemy import *
from database_setup import Base, Category, Item
from sqlalchemy.orm import sessionmaker
# Bind an SQLAlchemy session to the local catalog database.
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Empty the tables
session.query(Category).delete()
session.query(Item).delete()
# Add categories (primary keys 1..5, referenced by the items below)
sample_categories = ['guitars', 'pianos', 'drums',
                     'accessories', 'books']
for category_name in sample_categories:
    category = Category()
    category.name = category_name
    session.add(category)
session.commit()
# Each entry is (item title, category_id of its parent category).
items = [['cort g110 stratocaster', 1],
         ['Yamaha P155 Contemporary Piano', 2],
         ['Riot PODRT522BK 5-Piece Drum Set', 3],
         ['Guitar capo', 4],
         ['Fingerstyle & Slide guitar in open tunings', 5]]
# Iterate the items themselves: the old `range(0, 4)` silently dropped the
# fifth (book) item.
for title, category_id in items:
    item = Item()
    item.name = title
    item.description = 'description'
    item.category_id = category_id
    session.add(item)
session.commit()
| true |
9e2c55cb6e15f89ff2b73a78d5f15310d3cac672 | Python | demohack/yute | /done/18-2-python-ds-practice/23_list_check.py | UTF-8 | 254 | 3.921875 | 4 | [
"MIT"
] | permissive | def list_check(lst):
"""Are all items in lst a list?
>>> list_check([[1], [2, 3]])
True
>>> list_check([[1], "nope"])
False
"""
t = [1 if isinstance(x, list) else 0 for x in lst]
return len(lst) == sum(t) | true |
d402b9280146486fcda2a1563379f074c4228b00 | Python | southpawgeek/perlweeklychallenge-club | /challenge-194/robert-dicicco/python/ch-2.py | UTF-8 | 693 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python
'''
AUTHOR: Robert DiCicco
DATE: 2022-12-06
Challenge 194 Frequency Equalizer ( Python )
SAMPLE OUTPUT
python .\FrequencyEqualizer.py
Input: $s = abbc
Output: 1
Input: $s = xyzyyxz
Output: 1
Input: $s = xzxz
Output: 0
'''
ss = ["abbc", "xyzyyxz", "xzxz"]
x = 0
for s in ss :
x = 0
seen = dict()
print(f"Input: $s = {s}")
ln =len(s)
while x < ln :
zsub = s[x:x+1]
if zsub in seen :
seen[zsub] += 1
else :
seen[zsub] = 1
x += 1
highest = max(seen.values())
lowest = min(seen.values())
if (lowest + 1 == highest) :
print("Output: 1\n")
else :
print("Output: 0\n")
| true |
138b36a7b1167f6dcf26fcd83693104960c716b2 | Python | beat-machine/beat-machine | /beatmachine/utils.py | UTF-8 | 268 | 2.984375 | 3 | [
"MIT"
] | permissive | import itertools
import typing as t
# `typing` exposes no public `T`; `t.T` breaks on Python 3.9+ where the
# internal name was removed, so declare a proper TypeVar.
_T = t.TypeVar("_T")


def chunks(iterable: t.Iterable[_T], size: int) -> t.Generator[t.List[_T], None, None]:
    """Yield successive lists of at most *size* items from *iterable*.

    The final chunk may be shorter when the input length is not a
    multiple of *size*.
    """
    iterator = iter(iterable)
    for first in iterator:
        # Chain the already-consumed first element back with the next size-1 items.
        yield list(itertools.chain([first], itertools.islice(iterator, size - 1)))
| true |
deea2e6d9ae1fe341e13a80449c94576667819e5 | Python | SanGlebovskii/lesson_5 | /homework_5_7.py | UTF-8 | 343 | 3.03125 | 3 | [] | no_license | from random import randint
# Build a 6x6 matrix of random digits 1..9, then overwrite each diagonal
# element with the maximum of its own row.
n = 6
m = 6
matrix_one = []
for i in range(n):
    matrix_second = []
    for j in range(m):
        matrix_second.append(randint(1, 9))
    matrix_one.append(matrix_second)
print(matrix_one)
for i in range(len(matrix_one)):
    max_element = max(matrix_one[i])
    matrix_one[i][i] = max_element  # relies on the matrix being square
print(matrix_one)
| true |
4631969d60d57d7e3face53bd08cc96cbbebf629 | Python | aalexsmithh/exfoliated-neurons | /baseline.py | UTF-8 | 1,083 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 4 01:33:24 2016
@author: Sandy Wong
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
import csv
from sklearn.model_selection import cross_val_score
# Load the raw training images: 100000 samples x 3600 uint8 pixel features.
x = np.fromfile('train_x.bin', dtype='uint8')
print(x.shape)
x = x.reshape(100000, 3600)
# Labels: the second CSV column holds the integer class id.
y_raw = []
with open('train_y.csv', 'r') as f_in:
    csvreader = csv.reader(f_in)
    for row in csvreader:
        y_raw.append(int(row[1]))
y = np.array(y_raw)
print(x.shape, " ", y.shape)
logreg = linear_model.LogisticRegression(C=1e5)
header = "Logistic Regression on 3600 bytes raw features 3 fold cross validation"
# `with` guarantees the results file is closed even if cross-validation fails
# (the original left it open on any exception).
with open('results_logreg.txt', 'w') as f:
    print(header)
    f.write(header + "\n")
    scores = cross_val_score(logreg, x, y, cv=3)
    for score in scores:
        print(score)
        f.write(str(score) + "\n")
    print("Mean: ", scores.mean())
    f.write("Mean: " + str(scores.mean()) + "\n")
| true |
aaac7d86603bd7ada9706f46fb9808d54aff6df1 | Python | michelelt/carsharing-prediction-moduled | /source/Vancouver/regression_svr.py | UTF-8 | 3,798 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 12:25:25 2019
@author: mc
"""
import pandas as pd
from sklearn.svm import SVR
# =============================================================================
# import datasets
# =============================================================================
city = 'Vancouver'
data_path = './../../data/'
train = pd.read_csv('%s%s/Regression/dataset_train_emer.csv'%(data_path, city))
valid = pd.read_csv('%s%s/Regression/dataset_test_emer.csv'%(data_path, city))
# Train and test are merged: validation folds are rebuilt below leave-one-out.
complete_dataset = train.append(valid, ignore_index=True)
# =============================================================================
# SVR models params
# =============================================================================
kernel_labels = ['RBF', 'Linear', 'Polynomial']
svr_rbf = SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1)
svr_lin = SVR(kernel='linear', C=100, gamma='auto')
svr_poly = SVR(kernel='poly', C=100, gamma='auto', degree=3, epsilon=.1,
               coef0=1)
svrs = [svr_rbf, svr_lin, svr_poly]
metrics = pd.DataFrame(
        columns=['FID_valid', 'y_pred_valid', 'y_valid', 'er_r_pred_train', 'kernel']
        )
norm = False  # toggles the z-score normalization branch inside the loop
# Targets are every bookings column (c_start*/c_final*) of the dataset.
targets = []
for c in complete_dataset.columns:
    if 'c_start' in c or 'c_final' in c:
        targets.append(c)
s = pd.Series(index=metrics.columns)
for target in targets:
    print(target)
    # Leave-one-out: row i is the validation sample, the rest is training data.
    for i in range(len(complete_dataset)):
        valid = complete_dataset.loc[i].to_frame().T
        train = complete_dataset.loc[~complete_dataset.index.isin([i])]
        mean, std = train.mean(), train.std()
# =============================================================================
#       preprocess data
# =============================================================================
        train_target = train[target]
        valid_target = valid[target]
        # Drop target-like, identifier and geometry columns from the features.
        for c in train.columns:
            if ('sum' in c) or ('count' in c)\
            or ('start' in c) or ('final' in c)\
            or ('Gi_' in c) or ('m_age' in c)\
            or ('f_age' in c) or ('NAME' in c)\
            or ('MAPID' in c) or ('FID' in c)\
            or ('geometry' in c):
                train = train.drop(c, axis=1)
                valid = valid.drop(c,axis=1)
        if norm==True:
            #rebuild to norm
            train[target] = train_target
            valid[target] = valid_target
            #norm (z-score with the training fold's mean/std)
            mean, std = train.mean(), train.std()
            train = (train-mean)/std
            valid = (valid-mean)/std
            #resplit
            train_target = train[target]
            valid_target = valid[target]
# =============================================================================
#       SVR regression: fit each kernel and record one metrics row per kernel
# =============================================================================
        for ix, svr in enumerate(svrs):
            regressor = svr.fit(train, train_target)
            y_pred_train = svr.predict(train)
            # Mean relative training error (undefined if any target is 0).
            er_r_pred_train = sum(abs(y_pred_train-train_target)/train_target)/(len(train_target))
            y_pred_valid = svr.predict(valid)
            s['FID_valid'] = i
            s['y_pred_valid'] = y_pred_valid[0]
            s['y_valid'] = valid_target.values[0]
            s['er_r_pred_train'] = er_r_pred_train
            s['kernel'] = kernel_labels[ix]
            s['target'] = target
            s['mean_target'] = mean[target]
            s['std_target'] = std[target]
            s['is_normed'] = norm
            metrics = metrics.append(s, ignore_index=True)
metrics.to_csv('%s%s/Regression/output_svr/metrics_svr_norm_%s.csv'%(data_path, city, norm))
| true |
cab1a0ec5b264bc5e297308b6b4c8da63ce14a2a | Python | sativa/SPEED | /mod14g_spatial_statistics.py | UTF-8 | 4,354 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SPEED: Module 14: Spatial Statistics
GitHub repository: https://github.com/maplion/SPEED
@author: Ryan Dammrose aka MapLion
"""
import threading
from matplotlib.pylab import *
import speedcalc
import speedcli
from testcalculations import time
__author__ = "Ryan Dammrose"
__copyright__ = "Copyright 2015"
__license__ = "MIT"
sc_ss = speedcalc.SpatialStatistics()
s_cli = speedcli.SpeedCLI(description="SPEED Spatial Statistics")
# Progress bar variables
stop = False
kill = False
class ProgressBarLoading(threading.Thread):
    """
    Self-made animated spinner for the purpose of letting the user know that something
    is processing; built for an unknown process time with no known gauges.

    Controlled via the module-level ``stop``/``kill`` globals: setting ``stop``
    ends the animation; ``kill`` selects the ABORT message over Done.
    """
    def run(self):
        global stop
        global kill
        i = 0
        # Cycle through / - \ | every 0.2s until the main thread sets `stop`.
        while not stop:
            if (i % 4) == 0:
                sys.stdout.write('Loading... /')
                time.sleep(0.2)
                sys.stdout.write('\r')
            elif (i % 4) == 1:
                sys.stdout.write('Loading... -')
                time.sleep(0.2)
                sys.stdout.write('\r')
            elif (i % 4) == 2:
                sys.stdout.write('Loading... \\')
                time.sleep(0.2)
                sys.stdout.write('\r')
            elif (i % 4) == 3:
                sys.stdout.write('Loading... |')
                time.sleep(0.2)
                sys.stdout.write('\r')
            sys.stdout.flush()
            time.sleep(0.1)
            i += 1
        # Python 2 print statements: trailing comma suppresses the newline.
        if kill:
            print 'ABORT!\n\n',
        else:
            print 'Done!\n\n',
class Usage(Exception):
    """Command-line usage error carrying the message in ``msg``."""
    def __init__(self, msg):
        self.msg = msg
def main(argv=None):
    """
    This is the main function for Module 14: parse CLI args, compute Moran's I
    spatial autocorrelation for an NDVI raster, and save two figures.
    @param argv: incoming arguments
    @return: void (2 on usage error)
    """
    # Start time and Progress Bar (Python 2: time.clock is valid here)
    start_time = time.clock()
    p = ProgressBarLoading()
    p.start()
    global stop
    global kill
    # Declare local main Variables
    if argv is None:
        argv = sys.argv
    try:
        # Parse command line arguments
        arguments = s_cli.arg_parse(argv)
        # Get File
        if arguments.file is None:
            sys.exit("No file name given.")
        else:
            filename = arguments.inputFilePath + "/" + arguments.file
            if ".tif" not in filename:
                filename += ".tif"
        # Read Raster into an Array
        rasterArray = sc_ss.read_raster_as_array(filename)
        # Process Spatial Statistics
        Morans_I = sc_ss.calc_morans_i(rasterArray)
        # Save array out to Spatial Raster (GeoTiff)
        if arguments.outputFile is None:
            if ".tif" in arguments.file:
                # NOTE(review): str.replace returns a new string; this result
                # is discarded, so '.tif' is never stripped here — confirm.
                arguments.file.replace(".tif", "")
            arguments.outputFile = arguments.file + "_MoransI"
        # outputFilename = arguments.outputFilePath + "/" + arguments.outputFile + ".tif"
        # sc_ss.save_raster_array_to_geotiff(rasterArray, outputFilename)
        outputImage = None
        if ".tif" in arguments.file:
            outputImage = arguments.outputFilePath + "/" + arguments.file.replace(".tif", "")
        end_time = time.clock()
        execution_time = end_time - start_time
        time.sleep(1)
        stop = True  # stops the spinner thread
        print "\n\nProcess time: {0} seconds".format(round(execution_time, 2))
        lag_distance = sc_ss.get_lag_distance_for_plot(30, 330)
        # Save out image of related NDVI
        figure(1)
        title('NDVI Image {0}'.format(arguments.file))
        imshow(rasterArray)
        savefig(outputImage + "_NDVI.jpg", dpi=300)
        # show()
        # Save out image of Plot of Lag Distance vs. Moran's I number
        figure(2)
        plot(lag_distance, Morans_I, 'bo')
        title('Spatial Autocorrelation of NDVI for Image {0}'.format(arguments.file))
        xlabel('Lag Distance(m)')
        ylabel('Morans I')
        savefig(outputImage + "_plot.jpg", dpi=300)
        show()
    except Usage, err:
        print >>sys.stderr, err.msg
        print >>sys.stderr, "for help use --help"
        return 2
    # NOTE(review): `KeyboardInterrupt or EOFError` evaluates to just
    # KeyboardInterrupt, so EOFError is NOT caught here — confirm intent.
    except KeyboardInterrupt or EOFError:
        kill = True
        stop = True
| true |
6b8b188e59fa314f65f18cbdc3fc2d1f4bcf7cc5 | Python | bary321/hongheishu | /honghei_err.py | UTF-8 | 582 | 2.890625 | 3 | [] | no_license | # coding:utf-8
__author__ = 'bary'
class LengthException(Exception):
    """Raised when two structures that must match differ in length."""

    def __init__(self, err='length not equal'):
        super().__init__(err)
class RootNoBlack(Exception):
    """Raised when the tree root is not colored black."""

    def __init__(self, err="root not black"):
        super().__init__(err)
class HongChild(Exception):
    """Raised when a red node's child is not black."""

    def __init__(self, err="child not black"):
        super().__init__(err)
class FatherErr(Exception):
    """Raised when a node's parent link is inconsistent."""

    def __init__(self, err="father err"):
        super().__init__(err)
| true |
afa240f3ed62cbbc8f1c7fdcdce565b9bf29ef2d | Python | PratylenClub/celegans3000 | /connectome_manager/neural_network_manager.py | UTF-8 | 7,180 | 3.125 | 3 | [] | no_license | import pandas as pd
import pickle as p
import numpy as np
INPUT_INDEX = 0   # column index of the origin cell in the connection tables
OUTPUT_INDEX = 1  # column index of the target cell
INITIAL_CELL_STATE = []  # every cell starts with an empty state history
# Cell-category labels; not referenced in the visible code — presumably used
# to tag network nodes elsewhere (verify against callers).
NEURON_TYPE = "Neuron"
SENSORIAL_INPUT_TYPE = "Sensorial"
SENSORIAL_NEURON_TYPE = "Sensorial_Neuron"
MOTOR_NEURONS_TYPE = "Motor_Neuron"
MUSCLE_TYPE = "Muscle"
SENSORY_MOTOR_NEURON_TYPE = "Sensorial_Motor_Neuron"
def connectome_data_to_NN_model(connectome_file,
muscles_file,
sensor_file,
neurotransmitters_pickle=None,
muscle_2_motor_pickle=None,
sensory_cells_2_sensors_pickle=None,
model_file="model.pickle"):
"""
Receives three datasets describing a connectome and pickle file names containing dictionnaries to build the neural network model
:param connectome_file: Connectome CSV file with 4 columns: (Origin neuron, Target neuron, Number of connections, Neurotransmitter)
:param muscles_file: Connections between neurons and muscles CSV file with 4 columns: (Origin neuron, Target muscule, Number of connections, Neurotransmitter)
:param sensor_file: Connections between sensorial information and neurons CSV file with 4 columns: (Origin sensorial function, Target neuron, Weight of the connection, Neurotransmitter)
:param neurotransmitters_pickle: Name of pickle file containing a dictionary with (keys : neurotransmitter type, values: corresponding weight)
:param muscle_2_motor_pickle: Name of pickle file containing a dictionary with (keys : muscles, values: motor action)
:param sensory_cells_2_sensors_pickle: Name of pickle file containing a dictionary with (keys : sensorial function, values: robot sensorial information)
:param model_file: Name of the neural network model pickle file
type connectome_file: String
:type muscles_file: String
:type sensory_file: String
:type neurotransmitters_pickle: String
:type muscle_2_motor_pickle: String
:type sensory_cells_2_sensors_pickle: String
:type model_file: String
"""
connectome = pd.read_csv(connectome_file,index_col=0).fillna("nan")
muscles = pd.read_csv(muscles_file,index_col=0).fillna("nan")
sensor = pd.read_csv(sensor_file,index_col=0).fillna("nan")
# Initialize weights
muscles["Weight"] = 1
connectome["Weight"] = 1
# Take neurotransmitter into accound
if neurotransmitters_pickle is not None:
neurotransmitters = p.load(open(neurotransmitters_pickle,"rb"))
if "Neurotransmitter" in muscles.columns:
muscle_neurotransmitter_values = np.asarray(map(neurotransmitters.get,muscles["Neurotransmitter"]))
muscles["Weight"] = muscle_neurotransmitter_values
if "Neurotransmitter" in connectome.columns:
connectome_neurotransmitter_values = np.asarray(map(neurotransmitters.get,connectome["Neurotransmitter"]))
connectome["Weight"] = connectome_neurotransmitter_values
# Take number of connections into account
if "Number of Connections" in muscles.columns:
muscles["Weight"] *= muscles["Number of Connections"]
if "Number of Connections" in connectome.columns:
connectome["Weight"] *= connectome["Number of Connections"]
# Convert muscles names to motor actions
if muscle_2_motor_pickle is not None:
muscle_2_motor = p.load(open(muscle_2_motor_pickle,"rb"))
muscles.iloc[:,OUTPUT_INDEX] = np.asarray(map(muscle_2_motor.get,muscles.iloc[:,OUTPUT_INDEX]))
# Convert sensorial signals to sensors
if sensory_cells_2_sensors_pickle is not None:
sensory_cells_2_sensors = p.load(open(sensory_cells_2_sensors_pickle,"rb"))
sensor.iloc[:,INPUT_INDEX] = np.asarray(map(sensory_cells_2_sensors.get,sensor.iloc[:,INPUT_INDEX]))
Neural_Network = {}
Cells_state = {}
Cells_types = {}
for neuron in set(connectome.iloc[:,INPUT_INDEX]).union(set(connectome.iloc[:,OUTPUT_INDEX])):
Neural_Network[neuron] = {}
Cells_state[neuron] = INITIAL_CELL_STATE
Cells_types[neuron] = NEURON_TYPE
for cell in set(muscles.iloc[:,OUTPUT_INDEX]):
Neural_Network[cell] = {}
Cells_state[cell] = INITIAL_CELL_STATE
Cells_types[cell] = MUSCLE_TYPE
for cell in set(muscles.iloc[:,INPUT_INDEX]):
Neural_Network[cell] = {}
Cells_state[cell] = INITIAL_CELL_STATE
Cells_types[cell] = MOTOR_NEURONS_TYPE
for cell in set(sensor.iloc[:,INPUT_INDEX]):
Neural_Network[cell] = {}
Cells_state[cell] = INITIAL_CELL_STATE
Cells_types[cell] = SENSORIAL_INPUT_TYPE
for cell in set(sensor.iloc[:,OUTPUT_INDEX]):
Neural_Network[cell] = {}
Cells_state[cell] = INITIAL_CELL_STATE
Cells_types[cell] = SENSORIAL_NEURON_TYPE
for cell in set(muscles.iloc[:,INPUT_INDEX]).intersection(set(sensor.iloc[:,OUTPUT_INDEX])):
Neural_Network[cell] = {}
Cells_state[cell] = INITIAL_CELL_STATE
Cells_types[cell] = SENSORY_MOTOR_NEURON_TYPE
for i in xrange(connectome.index.size):
Neural_Network[connectome.iloc[i,INPUT_INDEX]][connectome.iloc[i,OUTPUT_INDEX]] = connectome.iloc[i,:]["Weight"]
for i in xrange(muscles.index.size):
Neural_Network[muscles.iloc[i,INPUT_INDEX]][muscles.iloc[i,OUTPUT_INDEX]] = muscles.iloc[i,:]["Weight"]
for i in xrange(sensor.index.size):
Neural_Network[sensor.iloc[i,INPUT_INDEX]][sensor.iloc[i,OUTPUT_INDEX]] = sensor.iloc[i,:]["Weight"]
p.dump({"Neural Network":Neural_Network, "Cells_state":Cells_state, "Cells_types":Cells_types} ,open(model_file,"wb"))
if __name__ == "__main__":
    # The triple-quoted block below keeps the older "clean data"
    # configuration around for reference; it is intentionally dead code.
    """
    connectome_file = "data/connectome_clean_data/Connectome.csv"
    muscles_file = "data/connectome_clean_data/Neurons_to_Muscles.csv"
    sensor_file = "data/connectome_clean_data/Sensory.csv"
    neurotransmitters_pickle = "data/connectome_clean_data/Neurotransmiters_2_coefficient.pickle"
    muscle_2_motor_pickle = "data/connectome_clean_data/muscle_2_motor.pickle"
    sensory_cells_2_sensors_pickle = "data/connectome_clean_data/sensory_2_sensors.pickle"
    model_file = "models/celegans3000.pickle"
    connectome_data_to_NN_model(connectome_file,muscles_file,sensor_file,neurotransmitters_pickle,muscle_2_motor_pickle,sensory_cells_2_sensors_pickle,model_file)
    connectome_file = "data/connectome_clean_data/Connectome.csv"
    muscles_file = "data/connectome_clean_data/Neurons_to_Muscles.csv"
    sensor_file = "data/connectome_clean_data/Sensory.csv"
    model_file = "models/celegans3000_full.pickle"
    connectome_data_to_NN_model(connectome_file,muscles_file,sensor_file,model_file=model_file)
    """
    # Build the weighted model: neurotransmitter coefficients plus the
    # muscle->motor and sensory->sensor mappings are applied.
    connectome_file = "data/connectome_neuroml/Connectome.csv"
    muscles_file = "data/connectome_neuroml/Neurons_to_Muscles.csv"
    sensor_file = "data/connectome_neuroml/Sensory.csv"
    neurotransmitters_pickle = "data/connectome_neuroml/Neurotransmiters_2_coefficient.pickle"
    muscle_2_motor_pickle = "data/connectome_neuroml/muscle_2_motor.pickle"
    sensory_cells_2_sensors_pickle = "data/connectome_neuroml/sensory_2_sensors.pickle"
    model_file = "models/celegans3000_neuroml.pickle"
    connectome_data_to_NN_model(connectome_file,muscles_file,sensor_file,neurotransmitters_pickle,muscle_2_motor_pickle,sensory_cells_2_sensors_pickle,model_file)
    # Build a second, "full" model without any of the optional mappings
    # (weights fall back to the connection counts only).
    connectome_file = "data/connectome_neuroml/Connectome.csv"
    muscles_file = "data/connectome_neuroml/Neurons_to_Muscles.csv"
    sensor_file = "data/connectome_neuroml/Sensory.csv"
    model_file = "models/celegans3000_full_neuroml.pickle"
    connectome_data_to_NN_model(connectome_file,muscles_file,sensor_file,model_file=model_file)
| true |
8edf3bc82b9356b4be6c257d10798ee365fbad54 | Python | AngelPerezRodriguezRodriguez/CYPAngelPRR | /libro/Ejemplo3_02.py | UTF-8 | 205 | 3.359375 | 3 | [] | no_license | nomina = 0
for i in range(1, 11, 1):
sue = float(input("Ingresa el sueldo: "))
nomina += sue #nomina = nomina + sue
print(f"La nómina de la empresa es de: {nomina}")
| true |
2ab3ca717276288e8d6af19d080e53bff8bbed75 | Python | alexliyang/caffe2_android | /lamia_scripts/make_src.py | UTF-8 | 1,471 | 2.515625 | 3 | [] | no_license | import os
import sys
required_src = ['operators', 'android', 'core', 'test']
def list_dir(path):
    """Return the names of the entries in directory `path`."""
    entries = os.listdir(path)
    return entries
def backend(file):
    """Return the file extension of `file` (including the dot), or ''."""
    _root, ext = os.path.splitext(file)
    return ext
def open_file(filename):
    """Open `filename` for writing, truncating any existing content.

    The caller is responsible for closing the returned file object.
    """
    # Mode 'w' already truncates an existing file, so the previous
    # os.path.exists()/os.remove() dance was redundant and has been dropped.
    return open(filename, 'w')
def add_to_file(folder, file, fp, filename, by_line = True):
    """Append the path `folder/file` to the open manifest `fp`.

    Entries are terminated by a newline when `by_line` is true, otherwise
    by a single space. `filename` is only used in the progress message.
    """
    entry = os.path.join(folder, file)
    separator = '\n' if by_line else ' '
    fp.writelines(entry + separator)
    print('file: {} add to {}'.format(entry, filename))
def save_file_name(prefix, folders):
    """Write three manifest files under `prefix` listing the C/C++ sources.

    For every folder in `folders` that is one of `required_src` and exists
    under `prefix`:
      - .c  files are listed in `prefix`/c_file
      - .cc files are listed in `prefix`/cc_file
      - the expected .o object paths for both go into `prefix`/o_file
    """
    c_file = 'c_file'
    cc_file = 'cc_file'
    o_file = 'o_file'
    c_fp = open_file(os.path.join(prefix, c_file))
    cc_fp = open_file(os.path.join(prefix, cc_file))
    o_fp = open_file(os.path.join(prefix, o_file))
    try:
        for folder in folders:
            if folder not in required_src:
                continue
            src_path = os.path.join(prefix, folder)
            # Bug fix: the directory check previously used the bare folder
            # name (relative to the CWD) instead of the path under `prefix`,
            # so every folder was skipped whenever prefix != './'.
            if not os.path.isdir(src_path):
                continue
            for file in list_dir(src_path):
                if backend(file) == '.c':
                    add_to_file(folder, file, c_fp, c_file)
                    add_to_file(os.path.join(prefix, folder), file + '.o', o_fp, o_file, False)
                elif backend(file) == '.cc':
                    add_to_file(folder, file, cc_fp, cc_file)
                    add_to_file(os.path.join(prefix, folder), file + '.o', o_fp, o_file, False)
    finally:
        # Close (and flush) the manifests; previously the handles leaked.
        c_fp.close()
        cc_fp.close()
        o_fp.close()
if __name__ == '__main__':
    # Scan the current directory and emit the c_file/cc_file/o_file manifests.
    prefix = './'
    gl_dir = list_dir(prefix)
    save_file_name(prefix, gl_dir)
| true |
1d708ab083e8260866b75c20db60cf2fce749ceb | Python | alexdaube/MurphysBot | /glo/tests/common/map/decomposition_map/test_decomposition_cell.py | UTF-8 | 10,728 | 2.953125 | 3 | [] | no_license | import unittest
from mock.mock import Mock, MagicMock
from shapely.geometry import LineString
from common.map.decomposition_map.decomposition_cell import DecompositionCell
from common.map.position import Position
class TestDecompositionCell(unittest.TestCase):
    """Unit tests for the geometry helpers of DecompositionCell.

    The cell under test is the 15x15 square with corners (0,0)-(15,15);
    it is rebuilt fresh for every test in setUp. The class attributes below
    are shared, read-only fixtures (probe positions and neighbour cells).
    """

    # Corners of the 15x15 cell under test.
    top_left = Position(0, 15)
    top_right = Position(15, 15)
    bottom_right = Position(15, 0)
    bottom_left = Position(0, 0)
    # Probe positions located around the cell.
    over_top = Position(7, 20)
    below_bottom = Position(7, -5)
    position_on_right = Position(25, 7)
    position_on_left = Position(-7, 7)
    cell = None
    # A cell sharing the top edge with the cell under test, and one far away.
    cell_intersecting = DecompositionCell(top_left, top_right, Position(15, 25), Position(0, 25))
    cell_not_intersecting = DecompositionCell(Position(100, 100), Position(100, 125), Position(90, 125),
                                              Position(90, 100))
    position_center = Position(15.0 / 2, 15.0 / 2)
    position_inside_cell = Position(5, 5)
    position_on_the_edge_of_cell = Position(0, 0)
    position_outside_cell = Position(150, 250)

    def setUp(self):
        # Fresh cell for every test so mutations (e.g. passed_through) do not leak.
        self.cell = DecompositionCell(self.top_left, self.top_right, self.bottom_right, self.bottom_left)

    # --- construction and passed-through flag ---

    def test_after_initialisation_cell_contain_points(self):
        self.assertEqual(self.bottom_left, self.cell.bottom_left)
        self.assertEqual(self.bottom_right, self.cell.bottom_right)
        self.assertEqual(self.top_left, self.cell.top_left)
        self.assertEqual(self.top_right, self.cell.top_right)

    def test_after_initialisation_cell_passed_through_is_false(self):
        self.assertFalse(self.cell.has_passed_through())

    def test_after_set_has_passed_through_cell_passed_through_is_true(self):
        self.cell.set_has_passed_through()
        self.assertTrue(self.cell.has_passed_through())

    def test_after_reset_passed_through_cell_passed_through_is_false(self):
        self.cell.passed_through = True
        self.cell.reset_pass_through()
        self.assertFalse(self.cell.has_passed_through())

    # --- containment checks (delegation to the shapely polygon) ---

    def test_when_checking_if_position_inside_or_border_call_polygon_contain(self):
        self.cell.polygon = MagicMock()
        self.cell.contain_position_inside_cell_and_borders(self.position_inside_cell)
        assert self.cell.polygon.contains.called, "contains() call not found"

    def test_when_checking_if_position_inside_or_border_call_with_position_on_edge_call_intersects(self):
        self.cell.polygon = MagicMock()
        self.cell.polygon.contains.return_value = False
        self.cell.contain_position_inside_cell_and_borders(self.position_inside_cell)
        assert self.cell.polygon.intersects.called, "intersects() call not found"

    def test_when_checking_if_on_cell_border_call_intersects(self):
        self.cell.polygon = MagicMock()
        self.cell.contain_position_on_cell_borders(self.position_on_the_edge_of_cell)
        assert self.cell.polygon.intersects.called, "intersects() call not found"

    def test_when_checking_if_on_cell_border_call_contains(self):
        self.cell.polygon = MagicMock()
        self.cell.contain_position_on_cell_borders(self.position_on_the_edge_of_cell)
        assert self.cell.polygon.contains.called, "contains() call not found"

    def test_when_checking_if_cell_within_another_cell_call_contains(self):
        self.cell.polygon = MagicMock()
        self.cell.contain_cell(self.cell)
        assert self.cell.polygon.contains.called, "contains() call not found"

    # --- centers and intersections ---

    def test_when_getting_cell_center_return_calculated_cell_center(self):
        self.assertEqual(self.position_center, self.cell.get_cell_center())

    def test_calculating_center_of_intersection_2_cell_call_polygon_intersection(self):
        polygon1_mock = Mock()
        self.cell.polygon = polygon1_mock
        self.cell.get_two_cell_intersection_center(self.cell_intersecting)
        polygon1_mock.intersection.assert_called_with(self.cell_intersecting.polygon)

    def test_calculating_center_of_intersection_of_2_cell(self):
        self.assertEqual(Position(7.5, 15), self.cell.get_two_cell_intersection_center(self.cell_intersecting))

    def test_calculating_center_of_intersection_of_2_cell_not_intersection_return_none(self):
        self.assertIsNone(self.cell.get_two_cell_intersection_center(self.cell_not_intersecting))

    def test_is_cell_intersection_line_call_polygon_intersecting(self):
        cell_polygon_mock = Mock()
        cell_polygon_mock.intersects.return_value = False
        position1 = Position(1.0, 1.0)
        position2 = Position(2.0, 2.0)
        self.cell.polygon = cell_polygon_mock
        self.cell.is_cell_intersecting_line(position1, position2)
        assert cell_polygon_mock.intersects.called, "intersects() call not found"

    def test_when_is_intersection_a_line_call_redirect_call_to_polygon_intersection(self):
        self.cell.polygon = MagicMock()
        position1 = Position(1.0, 1.0)
        position2 = Position(2.0, 2.0)
        self.cell.is_intersection_a_line(position1, position2)
        assert self.cell.polygon.intersection.called, "intersection() call not found"

    def test_when_is_intersection_a_line_call_and_intersection_is_a_line_return_true(self):
        self.cell.polygon = MagicMock()
        position1 = Position(1.0, 1.0)
        position2 = Position(2.0, 2.0)
        line = LineString([[position1.X, position1.Y], [position2.X, position2.Y]])
        self.cell.polygon.intersection.return_value = line
        self.assertTrue(self.cell.is_intersection_a_line(position1, position2))

    def test_when_is_intersection_a_line_call_and_intersection_is_not_a_line_return_false(self):
        self.cell.polygon = MagicMock()
        position1 = Position(1.0, 1.0)
        position2 = Position(2.0, 2.0)
        self.cell.polygon.intersection.return_value = position1
        self.assertFalse(self.cell.is_intersection_a_line(position1, position2))

    # --- border-only line checks ---

    def test_when_is_line_passing_only_on_border_return_false(self):
        self.assertFalse(self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_right))

    def test_when_is_line_passing_only_on_border_call_polygon_intersection(self):
        self.cell.polygon = MagicMock()
        self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_right)
        assert self.cell.polygon.intersection.called, "intersection() call not found"

    def test_when_is_line_passing_only_on_border_while_not_intersection_found_dosent_call_contains(self):
        self.cell.polygon = MagicMock()
        self.cell.polygon.intersection.return_value = None
        self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_right)
        assert not self.cell.polygon.contains.called, "intersection() called while it shall ne be"

    def test_when_is_line_passing_only_on_border_with_intersection_found(self):
        self.cell.polygon = MagicMock()
        intersection_line = LineString([(self.bottom_left.X, self.bottom_left.Y), (self.top_left.X, self.top_left.Y)])
        self.cell.polygon.intersection.return_value = intersection_line
        self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_right)
        assert self.cell.polygon.contains.called, "contains() call not found"

    def test_when_is_line_passing_only_on_border_with_line_only_on_border_return_true(self):
        self.assertTrue(self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_left))

    def test_when_is_line_passing_only_on_border_with_line_only_not_on_border_return_false(self):
        self.assertFalse(self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_right))

    def test_when_is_line_passing_only_on_border_with_point_intersection_only_on_border_return_false(self):
        self.assertTrue(self.cell.is_line_passing_only_on_cell_border(self.bottom_left, Position(-100, -100)))

    def test_when_is_line_passing_only_on_border_while_not_intersection_found_return_false(self):
        self.cell.polygon = MagicMock()
        self.cell.polygon.intersection.return_value = None
        self.cell.is_line_passing_only_on_cell_border(self.bottom_left, self.top_right)

    # --- distances and crossing checks ---

    def test_when_calculate_distance_between_cells_certers_call_second_cell_get_cell_center(self):
        second_cell_mock = Mock()
        second_cell_mock.get_cell_center.return_value = self.position_outside_cell
        self.cell.calculate_distance_between_cell_center(second_cell_mock)
        assert second_cell_mock.get_cell_center.called, "get_cell_center() call not found"

    def test_when_is_cell_crossing_top_or_down_with_line_crossing_both_return_true(self):
        self.assertTrue(self.cell.is_line_crossing_cell_top_or_bottom(self.over_top, self.below_bottom))

    def test_when_is_cell_crossing_top_or_down_with_line_crossing_only_top_return_true(self):
        self.assertTrue(self.cell.is_line_crossing_cell_top_or_bottom(self.over_top, self.position_inside_cell))

    def test_when_is_cell_crossing_top_or_down_with_line_crossing_only_bottom_return_true(self):
        self.assertTrue(self.cell.is_line_crossing_cell_top_or_bottom(self.below_bottom, self.position_inside_cell))

    def test_when_is_cell_crossing_top_or_down_with_line_crossing_none_return_false(self):
        self.assertFalse(
            self.cell.is_line_crossing_cell_top_or_bottom(self.position_on_left, self.position_inside_cell))

    def test_when_is_line_crossing_cell_right_and_line_crossing_return_true(self):
        self.assertTrue(self.cell.is_line_crossing_cell_right(self.position_on_right, self.position_inside_cell))

    def test_when_is_line_crossing_cell_right_and_line_crossing_return_false(self):
        self.assertFalse(self.cell.is_line_crossing_cell_right(self.over_top, self.position_inside_cell))

    def test_when_is_line_crossing_cell_left_and_line_crossing_return_true(self):
        self.assertTrue(self.cell.is_line_crossing_cell_left(self.position_on_left, self.position_inside_cell))

    def test_when_is_line_crossing_cell_left_and_line_crossing_return_false(self):
        self.assertFalse(self.cell.is_line_crossing_cell_left(self.over_top, self.position_inside_cell))

    def test_when_is_cell_intersecting_cell_call_polygon_intersect(self):
        polygon_mock = MagicMock()
        cell_mock = MagicMock()
        self.cell.polygon = polygon_mock
        self.cell.is_cell_intersecting_cell(cell_mock)
        assert polygon_mock.intersects.called, "intersects() call not found"

    def test_distance_from_position_call_polygon_distance(self):
        polygon_mock = MagicMock()
        self.cell.polygon = polygon_mock
        self.cell.distance_from_position(self.position_outside_cell)
        assert polygon_mock.distance.called, "distance() call not found"
| true |
d14ba8decc89dd33950235a9304b92468eba2c9b | Python | Justw8/webtesten | /fileHandling.py | UTF-8 | 1,688 | 4.21875 | 4 | [] | no_license | fileName = 'test.txt' # Het bestand waarin word geschreven
linebreak = '\n' # enter in een variable zetten voor duidelijkheid
def getItemsFromFile(): # Bestand Uitlees functie
try:
file = open(fileName, "r") # proberen het bestand te openen met als regel: r - Read
except:
return [] # Als het niet lukt, leeg array terug geven aan de functie uitkomst
listOfItems = [] # Array aanmaken voor de gevonden gegevens
for item in file: # Door elke regel van het bestand heen loopen
listOfItems.append(item.strip(linebreak).split(', ')) # linebreak weghalen, en aan het einde van de array toevoegen, gesplitst met ,'s)
file.close() # bestand afsluiten
return listOfItems # Array met gelezen regels/items terug geven aan de functie uitkomst
def writeItemsToFile(listOfItems): # Bestand Schrijf functie
try:
file = open(fileName, "w") # proberen het bestand te openen met als regel: w - Write
except:
return False # Lukt het niet dan False teruggeven aan de functie uitkomst
for item in listOfItems: # Door alle items loopen die aan deze functe zijn meegegeven
file.write((', '.join(map(str, item))) + linebreak) # Van ieder item een string maken, en een linebreak achter het item plaatsen
file.close() # bestand afsluiten
return True # True teruggeven aan de functie, omdat het is uitgevoerd
items = [['a,99,22'], ['b,34,dd'], ['c,5,21']]  # Demo data: one single-field item per row
writeItemsToFile(items)  # Persist the demo items (note: runs at import time)
items = getItemsFromFile()  # Round-trip: read the items back from disk
print(items)  # Show what was read
| true |
41eab3f4c0418e50ef1ae8c725e03283d179030c | Python | anoukvlug/oggm | /oggm/sandbox/distribute_2d.py | UTF-8 | 12,419 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive | import logging
import warnings
import oggm.cfg as cfg
from oggm import utils
import numpy as np
import xarray as xr
from scipy import ndimage
from scipy.stats import mstats
from oggm.core.gis import gaussian_blur
from oggm.utils import ncDataset, entity_task
# Module-level logger, named after this module so log output is attributable.
log = logging.getLogger(__name__)
def filter_nan_gaussian_conserving(arr, sigma=1):
    """Apply a gaussian filter to an array with nans.

    Source: https://stackoverflow.com/a/61481246/4057931

    Intensity is only shifted between not-nan pixels and is hence conserved:
    the blur redistributes each pixel's value according to the gaussian
    weights of the *available* (non-nan) neighbours, and every nan in the
    input stays nan in the output.

    One comment on StackOverflow indicates that it may not work as well
    for other values of sigma. To investigate.
    """
    missing = np.isnan(arr)
    # Fraction of each pixel's gaussian mass that falls on nan neighbours.
    weight_loss = ndimage.gaussian_filter(missing.astype(float), sigma=sigma,
                                          mode='constant', cval=1)
    # Blur with nans zeroed out, then restore the nans.
    filled = np.where(missing, 0.0, arr)
    smoothed = ndimage.gaussian_filter(filled, sigma=sigma,
                                       mode='constant', cval=0)
    smoothed[missing] = np.nan
    # Give back to each valid pixel the mass that would have leaked to nans.
    return smoothed + weight_loss * arr
@entity_task(log, writes=['gridded_data'])
def add_smoothed_glacier_topo(gdir, outline_offset=-40,
                              smooth_radius=1):
    """Smooth the glacier topography while ignoring surrounding slopes.

    It is different from the smoothing that occurs in the 'process_dem'
    function, that generates 'topo_smoothed' in the gridded_data.nc file.

    This is of importance when redistributing thicknesses to the 2D grid,
    because the sides of a glacier tongue can otherwise be higher than the
    tongue middle, resulting in an odd shape once the glacier retreats
    (e.g. with the middle of the tongue retreating faster that the edges).

    Write the `glacier_topo_smoothed` variable in `gridded_data.nc`.

    Source: https://stackoverflow.com/a/61481246/4057931

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    outline_offset : float, optional
        add an offset to the topography on the glacier outline mask. This
        allows to obtain better looking results because topography on the
        outline is often affected by surrounding slopes, and artificially
        reducing its elevation before smoothing makes the border pixel melt
        earlier. -40 seems good, but it may depend on the glacier.
    smooth_radius : int, optional
        the gaussian radius. One comment on StackOverflow indicates that it
        may not work well for other values than 1. To investigate.
    """
    # Mask out off-glacier topography so it cannot influence the smoothing.
    with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
        raw_topo = xr.where(ds.glacier_mask == 1, ds.topo, np.nan)
        if outline_offset is not None:
            # Lower the outline pixels so they melt earlier (see docstring).
            raw_topo += ds.glacier_ext * outline_offset
        raw_topo = raw_topo.data
    # NaN-aware smoothing: off-glacier NaNs do not leak into the result.
    smooth_glacier = filter_nan_gaussian_conserving(raw_topo, smooth_radius)
    # Create (or overwrite) the output variable in gridded_data.nc.
    with ncDataset(gdir.get_filepath('gridded_data'), 'a') as nc:
        vn = 'glacier_topo_smoothed'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x',))
        v.units = 'm'
        v.long_name = 'Glacier topo smoothed'
        v.description = ("DEM smoothed just on the glacier. The DEM outside "
                         "the glacier doesn't impact the smoothing.")
        v[:] = smooth_glacier
@entity_task(log, writes=['gridded_data'])
def assign_points_to_band(gdir, topo_variable='glacier_topo_smoothed',
                          elevation_weight=1.003):
    """Assigns glacier grid points to flowline elevation bands and ranks them.

    Creates two variables in gridded_data.nc:

    `band_index`, which assigns one number per grid point (the index to
    which band this grid point belongs). This ordering is done to preserve
    area by elevation, i.e. point elevation does matter, but not strictly.
    What is more important is the "rank" by elevation, i.e. each flowline band
    has the correct gridded area.

    `rank_per_band`, which assigns another index per grid point: the
    "rank" within a band, from thinner to thicker and from bottom to top.
    This rank indicates which grid point will melt faster within a band.

    There is one arbitrary parameter here, which is by how much to weight the
    elevation factor (see Parameters)

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    topo_variable : str
        the topography to read from `gridded_data.nc` (could be smoothed, or
        smoothed differently).
    elevation_weight : float
        how much weight to give to the elevation of the grid point versus the
        thickness. Arbitrary number, might be tuned differently.
    """
    # We need quite a few data from the gridded dataset
    with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
        topo_data = ds[topo_variable].data.copy()
        glacier_mask = ds.glacier_mask.data == 1
        topo_data_flat = topo_data[glacier_mask]
        band_index = topo_data * np.nan  # container
        per_band_rank = topo_data * np.nan  # container
        distrib_thick = ds.distributed_thickness.data
    # For the flowline we need the model flowlines only
    fls = gdir.read_pickle('model_flowlines')
    assert len(fls) == 1, 'Only works with one flowline.'
    fl = fls[0]
    # number of pixels per band along flowline
    npix_per_band = fl.bin_area_m2 / (gdir.grid.dx ** 2)
    nnpix_per_band_cumsum = np.around(npix_per_band[::-1].cumsum()[::-1])
    # Rank the glacier pixels by elevation and cut the ranking into bands so
    # that each band receives (about) the same gridded area as its flowline bin.
    rank_elev = mstats.rankdata(topo_data_flat)
    bins = nnpix_per_band_cumsum[nnpix_per_band_cumsum > 0].copy()
    bins[0] = len(topo_data_flat) + 1
    band_index[glacier_mask] = np.digitize(rank_elev, bins, right=False) - 1
    # Some sanity checks for now
    # Area gridded and area flowline should be similar
    assert np.allclose(nnpix_per_band_cumsum.max(), len(topo_data_flat), rtol=0.1)
    # All bands should have pixels in them
    # Below not allways working - to investigate
    # rgi_ids = ['RGI60-11.03887']  # This is Marmlolada
    # base_url = 'https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.6/
    # L3-L5_files/2023.1/elev_bands/W5E5'
    # assert np.nanmax(band_index) == len(bins) - 1
    # assert np.nanmin(band_index) == 0
    assert np.all(np.isfinite(band_index[glacier_mask]))
    # Ok now assign within band using ice thickness weighted by elevation
    # We rank the pixels within one band by elevation, but also add
    # a penalty to higher elevation grid points
    min_alt = np.nanmin(topo_data)
    # Bug fix: the weight used to be hard-coded to 1.003, silently ignoring
    # the `elevation_weight` parameter documented above.
    weighted_thick = ((topo_data - min_alt + 1) * elevation_weight) * distrib_thick
    for band_id in np.unique(np.sort(band_index[glacier_mask])):
        # We work per band here
        is_band = band_index == band_id
        per_band_rank[is_band] = mstats.rankdata(weighted_thick[is_band])
    # Store both containers in gridded_data.nc (create or overwrite).
    with ncDataset(gdir.get_filepath('gridded_data'), 'a') as nc:
        vn = 'band_index'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x',))
        v.units = '-'
        v.long_name = 'Points grouped by band along the flowline'
        v.description = ('Points grouped by band along the flowline, '
                         'ordered from top to bottom.')
        v[:] = band_index
        vn = 'rank_per_band'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x',))
        v.units = '-'
        v.long_name = 'Points ranked by thickness and elevation within band'
        v.description = ('Points ranked by thickness and elevation within each '
                         'band.')
        v[:] = per_band_rank
@entity_task(log, writes=['gridded_data'])
def distribute_thickness_from_simulation(gdir, input_filesuffix='',
                                         ys=None, ye=None,
                                         smooth_radius=None):
    """Redistributes the simulated flowline area and volume back onto the 2D grid.

    For this to work, the glacier cannot advance beyond its initial area!

    We assume that add_smoothed_glacier_topo and assign_points_to_band have
    been run before, and that the user stored the data from their simulation
    in a flowline diagnostics file (turned off per default).

    The algorithm simply melts each flowline band onto the
    2D grid points, but adds some heuristics (see :py:func:`assign_points_to_band`)
    as to which grid points melts faster. Currently it does not take elevation
    into account for the melt *within* one band, a downside which is somehow
    mitigated with smoothing (the default is quite some smoothing).

    Writes a new variable to gridded_data.nc (simulation_distributed_thickness)
    together with a new time dimension. If a variable already exists we
    will try to concatenate.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    input_filesuffix : str
        the filesuffix of the flowline diagnostics file.
    ys : int
        pick another year to start the series (default: the first year
        of the diagnostic file)
    ye : int
        pick another year to end the series (default: the last year
        of the diagnostic file)
    smooth_radius : int
        pixel size of the gaussian smoothing. Default is to use
        cfg.PARAMS['smooth_window'] (i.e. a size in meters). Set to zero to
        suppress smoothing.
    """
    fp = gdir.get_filepath('fl_diagnostics', filesuffix=input_filesuffix)
    with xr.open_dataset(fp) as dg:
        assert len(dg.flowlines.data) == 1, 'Only works with one flowline.'
    with xr.open_dataset(fp, group=f'fl_0') as dg:
        if ys or ye:
            # Bug fix: the slice previously started at `ye`, which ignored
            # `ys` and reduced the series to a single year.
            dg = dg.sel(time=slice(ys, ye))
        dg = dg.load()
    with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
        band_index_mask = ds.band_index.data
        rank_per_band = ds.rank_per_band.data
        glacier_mask = ds.glacier_mask.data == 1
        orig_distrib_thick = ds.distributed_thickness.data
    band_ids, counts = np.unique(np.sort(band_index_mask[glacier_mask]), return_counts=True)
    dx2 = gdir.grid.dx**2
    out_thick = np.zeros((len(dg.time), *glacier_mask.shape))
    for i, yr in enumerate(dg.time):
        dgy = dg.sel(time=yr)
        # `residual_pix` carries the fractional pixel debt from one band to
        # the next so that total area is conserved across bands.
        residual_pix = 0
        area_cov = 0
        new_thick = out_thick[i, :]
        for band_id, npix in zip(band_ids.astype(int), counts):
            band_area = dgy.area_m2.values[band_id]
            band_volume = dgy.volume_m3.values[band_id]
            if band_area != 0:
                # We have some ice left
                pix_cov = (band_area / dx2) + residual_pix
                # Keep the `pix_cov` highest-ranked pixels of this band.
                mask = (band_index_mask == band_id) & (rank_per_band >= (npix - pix_cov))
                residual_pix = pix_cov - mask.sum()
                vol_orig = np.where(mask, orig_distrib_thick, 0).sum() * dx2
                area_dis = mask.sum() * dx2
                # Uniform per-band correction so the band volume matches.
                thick_cor = (vol_orig - band_volume) / area_dis
                area_cov += area_dis
                new_thick[mask] = orig_distrib_thick[mask] - thick_cor
                # Make sure all glacier covered cells have the minimum thickness
                new_thick[mask] = utils.clip_min(new_thick[mask], 1)
        this_glacier_mask = new_thick > 0
        this_vol = dgy.volume_m3.values.sum()
        # Smooth
        dx = gdir.grid.dx
        if smooth_radius != 0:
            if smooth_radius is None:
                smooth_radius = np.rint(cfg.PARAMS['smooth_window'] / dx)
            new_thick = gaussian_blur(new_thick, int(smooth_radius))
            new_thick[~this_glacier_mask] = np.nan
        # Conserve volume (smoothing redistributes but must not create ice).
        tmp_vol = np.nansum(new_thick) * dx2
        new_thick *= this_vol / tmp_vol
        out_thick[i, :] = new_thick
    with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
        ds = ds.load()
    ds['time'] = dg['time']
    vn = "simulation_distributed_thickness" + input_filesuffix
    if vn in ds:
        warnings.warn(f'Overwriting existing variable {vn}')
    ds[vn] = (('time', 'y', 'x',), out_thick)
    ds.to_netcdf(gdir.get_filepath('gridded_data'))
| true |
061434d24b3a8f1fbfbe3510ac4fa74b5729c203 | Python | YashRunwal/Understanding-Open-CV | /Edge Detection/edge_detect_and_gradient.py | UTF-8 | 1,022 | 2.90625 | 3 | [] | no_license | # Import libs
import cv2
def read_images(image1):
    """Load the image at path `image1` and return it as a BGR array.

    Returns None if the file cannot be read (cv2.imread convention).
    """
    return cv2.imread(image1)
def apply_gradient(show_laplacian, show_sobel):
    """Compute Laplacian and Sobel gradients of pikachu.png.

    Always shows the source image; the gradient windows are only shown
    when the corresponding flag equals 1.
    """
    source = read_images('pikachu.png')
    cv2.imshow('Pikachu', source)

    lap = cv2.Laplacian(source, cv2.CV_64F)
    if show_laplacian == 1:
        cv2.imshow('laplacian', lap)

    grad_x = cv2.Sobel(source, cv2.CV_64F, 1, 0, ksize=5)
    grad_y = cv2.Sobel(source, cv2.CV_64F, 0, 1, ksize=5)
    if show_sobel == 1:
        cv2.imshow('sobel_x', grad_x)
        cv2.imshow('sobel_y', grad_y)
def edge_detect(show_edge, save_image):
    """Run Canny edge detection (thresholds 100/200) on pikachu.png.

    Shows the edge map when show_edge == 1 and writes it to
    'Edge_Pikachu.jpg' when save_image == 1.
    """
    edge_map = cv2.Canny(read_images('pikachu.png'), 100, 200)
    if show_edge == 1:
        cv2.imshow('Edges', edge_map)
    if save_image == 1:
        cv2.imwrite('Edge_Pikachu.jpg', edge_map)
if __name__ == '__main__':
    # Show and save the Canny edge map; gradients are available via apply_gradient().
    edge_detect(show_edge=1, save_image=1)
| true |
d1a60a5e447c472946e4b5699a6f2f4ac7eab3ef | Python | alan-lynch52/3rdYrProject | /toxic-comments/eda.py | UTF-8 | 570 | 3.046875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#load in toxic comments training data
train = pd.read_csv('train.csv')
LABELS = ["toxic","severe_toxic","obscene","threat","insult","identity_hate"]
y = train[LABELS]
#engineer clean labels
y['clean'] = (train[LABELS].sum(axis=1)==0)
c_dist = y.sum()
print(c_dist)
#plot distribution of labels
x = np.arange(7)
plt.bar(x, c_dist)
plt.xticks(x,['toxic','sev toxic','obscene','threat','insult','id hate','clean'], rotation=30)
plt.ylabel("Frequency")
plt.xlabel("Labels")
plt.show()
| true |
84261c1349f332f79eb215a8ed154a1681a408ad | Python | jpcarranza94/tiny_yolo3_masks | /generate_report.py | UTF-8 | 2,812 | 2.828125 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
from mdutils.mdutils import MdUtils
from mdutils.tools import Html
import markdown2
df = pd.read_csv("out.csv")
def labelParse():
    """Parse the stringified label lists in df['label'] into Python lists.

    Uses ast.literal_eval instead of eval so that arbitrary code embedded
    in the CSV cannot be executed (the CSV is untrusted input).
    """
    import ast
    labels = []
    for i in range(0, len(df['label'])):
        label_list = ast.literal_eval(df['label'][i])
        labels.append(label_list)
    return labels
def timestampParse():
    """Return df['timestamp'] entries reformatted as 'HH:MM:SS' strings."""
    fmt_in = "Timestamp: %Y-%m-%d %H:%M:%S"
    timestamp_list = []
    for i in range(0, len(df['timestamp'])):
        parsed = datetime.datetime.strptime(df['timestamp'][i], fmt_in)
        timestamp_list.append(parsed.strftime("%H:%M:%S"))
    return timestamp_list
def predictionsPerSecond():
labels = labelParse()
timestamps = timestampParse()
unique_timestamps = set(timestamps)
clean_labels = []
clean_timestamps = []
for i in range(0, len(labels)):
for j in range(0, len(labels[i])):
clean_timestamps.append(timestamps[i])
clean_labels.append(labels[i][j])
df_clean = pd.DataFrame({'labels':clean_labels, 'timestamps': clean_timestamps})
table = pd.crosstab(df_clean.labels, df_clean.timestamps)
dictionary_masks = {0: 'Mascarillas correcta', 1: 'Mascarilla incorrecta', 2: 'Sin Mascarilla', 3: 'other'}
columns = table.columns.to_list()
prediction_list = []
for i in range(0,len(columns)):
array = np.array(table.iloc[:, i])[0:3]
if array[1:].any() > 0:
result = dictionary_masks.get(np.argmax(array[1:])+1)
prediction_list.append(result)
else:
result = dictionary_masks.get(0)
prediction_list.append(result)
str_timestamp = columns[i]
final_predictions_per_s = pd.DataFrame({'Label': prediction_list, 'Time': columns})
return final_predictions_per_s
def generate_report():
df_final = predictionsPerSecond()
explode = (0.1, 0.1, 0.1)
plt.pie(df_final.Label.value_counts(), explode=explode, labels= df_final.Label.value_counts().index.to_list(), autopct='%1.1f%%', shadow=True, startangle=90)
plt.title('Tiempo de uso de mascarilla')
plt.savefig('predictions.png')
mdFile = MdUtils(file_name = 'report', title = 'Reporte de predicción de uso correcto de mascarilla')
mdFile.new_line(mdFile.new_inline_image(text = 'Predicciones',path = 'predictions.png'))
mdFile.new_header(title = 'Tablas de resultados', level = 1)
mdFile.new_line('Juan Pablo Carranza Hurtado')
mdFile.new_line('José Alberto Ligorría Taracena')
mdFile.create_md_file()
f = open("report.html", "w")
f.write(markdown2.markdown_path('report.md'))
f.write(pd.crosstab(df_final.Time, df_final.Label).to_html())
f.write('<h1> Cantidad de segundos de utilización de mascarilla </h1>')
f.write(pd.DataFrame(df_final.Label.value_counts()).to_html())
f.close() | true |
e3d322c207494edb875e6400896c429f52754c47 | Python | dubian98/Cursos-Python | /serializar_objetos.py | UTF-8 | 910 | 3.453125 | 3 | [] | no_license | #creacion
import pickle
class vehiculos():
def __init__(self, marca, modelo):
self.marca=marca
self.modelo=modelo
self.enmarcha=False
self.acelera=False
self.frena=False
def arranca(self):
self.enmarcha=True
def acelera(self):
self.acelera=True
def frena(self):
self.frena=True
def estado(self):
print("Marca: ",self.marca,"\nModelo: ",self.modelo,"\nEnmarcha: ",self.enmarcha,"\nAcelerea: ",self.acelera,"\nFrena: ",self.frena )
coche1=vehiculos("mazda","mx5")
coche2=vehiculos("renauld","logan")
coche3=vehiculos("ford","f-150")
coches=[coche1,coche2,coche3]
fichero=open("loscoches","wb")
pickle.dump(coches,fichero)
fichero.close()
del fichero
#lectura
import pickle
fichero2=open("loscoches","rb")
micoches=pickle.load(fichero2)
fichero2.close()
for i in micoches:
print(c.estado())
print("hola") | true |
e1702f81fcf60b22fa54fd212152c40fca776f46 | Python | DreamingFuture/python-crawler | /日常/模拟浏览器尝试.py | UTF-8 | 565 | 2.65625 | 3 | [] | no_license | # 作者 :孔庆杨
# 创建时间 :2019/1/2215:08
# 文件 :模拟浏览器尝试.py
# IDE :PyCharm
import re
import time
from selenium import webdriver
browser = webdriver.Chrome()
browser.get('https://tieba.baidu.com/p/2125145202#!/l/p1')
for i in range(0, 5):
browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
time.sleep(1)
print(browser.page_source) # browser.page_source是获取网页的全部html
#htmls = re.findall('http[\s\S].png', browser.page_source)
#print(htmls)
browser.close()
| true |
e1d1800c56d7be2728bb645abf0dd0e64a36a949 | Python | kyjp/api_python | /section4/api.py | UTF-8 | 710 | 2.734375 | 3 | [] | no_license | # ホットペッパーapi
import os
from dotenv import load_dotenv
import requests
import pandas as pd
URL = 'http://webservice.recruit.co.jp/hotpepper/gourmet/v1/'
load_dotenv()
API_KEY = os.environ['RECUEST_API_KEY']
params = {
'key': API_KEY,
'keyword': '沖縄',
'format': 'json',
'count': 100
}
res = requests.get(URL, params)
result = res.json()
items = result['results']['shop']
print(len(items))
# 表形式に変形
pd.DataFrame()
df = pd.DataFrame(items)
# print(df)
# print(df.head())
# 欲しい情報をオブジェクトから取得
df = df[['name', 'address', 'wifi']]
print(df)
# インデックスを削除してcsvに書き出し
df.to_csv('hotpepper.csv', index=False)
| true |
352de48e6edebf1e0c0e01671bf7928aba418dd2 | Python | nikhil7127/AIO | /website/auth.py | UTF-8 | 2,398 | 2.53125 | 3 | [] | no_license | from flask import Blueprint, render_template, request, redirect, url_for, flash
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from .models import User
from sqlalchemy import exc
from flask_login import login_user, logout_user, current_user, login_required
auth = Blueprint("auth", __name__)
@auth.route("/login", methods=["POST", "GET"])
def login():
if request.method == "POST":
userName = request.form.get("email")
password = request.form.get("password")
currentUser = User.query.filter_by(username=userName).first()
try:
if check_password_hash(currentUser.password, password):
flash("Logged in successfully", "success")
login_user(currentUser,remember=True)
return redirect(url_for("views.home"))
else:
flash("Password doesn't match", "error")
return redirect(url_for("auth.login"))
except Exception:
flash("Account doesn't exist", "error")
return redirect(url_for("auth.login"))
return render_template("login.html",presentUser=current_user)
@auth.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("auth.login"))
@auth.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
data = request.form
new_user = User(username=data.get("username").strip(), email=data.get("email").strip(),
password=generate_password_hash(data.get("password").strip(), method="sha256"))
try:
db.session.add(new_user)
db.session.commit()
login_user(new_user,remember=True)
flash("Logged in successfully", "success")
return redirect(url_for("views.home"))
except exc.IntegrityError:
db.session.rollback()
if User.query.filter_by(username=data.get("username").strip()).first():
flash("Username already taken", "error")
elif User.query.filter_by(email=data.get("email").strip()).first():
flash("Email already taken", "error")
else:
flash("Account already exists", "error")
return redirect(url_for("auth.register"))
return render_template("register.html",presentUser=current_user)
| true |
d64da6146a53f2a2ae42be364a798c1b4645dd39 | Python | MaxAntony/ApuntesPythonCodigoFacilito | /7.funciones/1.definiendo.py | UTF-8 | 461 | 4.1875 | 4 | [] | no_license | def crear_mensaje(nombre):
return 'hola {}, bienvenido al curso'.format(nombre)
# si dejamos sin argumentos dara un error
nuevo_mensaje = crear_mensaje('max')
print(nuevo_mensaje)
def suma(val1, val2, val3):
return val1+val2+val3
print(suma(10, 20, 30))
# retornando multiples valores
def obtener_curso():
return 'curso de python', 'basico', 3.6
print(obtener_curso())
curso, nivel, version = obtener_curso()
print(curso, nivel, version)
| true |
274a7a511eec0908eaed17d0cda839930b824178 | Python | jesellier-shell/point | /utils.py | UTF-8 | 670 | 2.953125 | 3 | [] | no_license | class StepFunction1D:
def __init__(self, time, values) :
self.time = time
self.values = values
def value(self, T) :
for (t, v) in zip(self.time, self.values):
if(T < t):
return v
return self.values[-1]
def integral(self, T):
cumul = 0
prev_t = 0
for (t, v) in zip(self.time, self.values):
if(T < t):
return cumul + (T-prev_t)*v
cumul = cumul + (t-prev_t)*v
prev_t = t
cumul = cumul + (T-prev_t)*v
return cumul
| true |
db3ec1a37a7ebd546aa60be8247affdbdc3fad56 | Python | Peng-YM/pymoo | /pymoo/usage/problems/usage_tsp.py | UTF-8 | 2,037 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
from pymoo.algorithms.so_genetic_algorithm import GA
from pymoo.model.repair import Repair
from pymoo.operators.crossover.order_crossover import OrderCrossover
from pymoo.operators.mutation.inversion_mutation import InversionMutation
from pymoo.operators.sampling.random_permutation_sampling import PermutationRandomSampling
from pymoo.optimize import minimize
from pymoo.problems.single.traveling_salesman import visualize, create_random_tsp_problem
from pymoo.util.termination.default import SingleObjectiveDefaultTermination
from pymoo.visualization.video.callback_video import AnimationCallback
problem = create_random_tsp_problem(50, 100, seed=1)
class StartFromZeroRepair(Repair):
def _do(self, problem, pop, **kwargs):
X = pop.get("X")
I = np.where(X == 0)[1]
for k in range(len(X)):
i = I[k]
x = X[k]
_x = np.concatenate([x[i:], x[:i]])
pop[k].set("X", _x)
return pop
class PathVisualization(AnimationCallback):
def notify(self, algorithm):
if algorithm.n_gen % 10 == 0:
fig, ax = plt.subplots()
x = algorithm.opt[0].get("X")
visualize(problem, x, fig=fig, ax=ax, show=False)
ax.set_title(f"Generation: {algorithm.n_gen}")
self.video.record(fig=fig)
algorithm = GA(
pop_size=20,
sampling=PermutationRandomSampling(),
mutation=InversionMutation(),
crossover=OrderCrossover(),
repair=StartFromZeroRepair(),
eliminate_duplicates=True
)
# if the algorithm did not improve the last 200 generations then it will terminate (and disable the max generations)
termination = SingleObjectiveDefaultTermination(n_last=200, n_max_gen=np.inf)
res = minimize(
problem,
algorithm,
termination,
# UNCOMMENT to save the visualization
# callback=PathVisualization(fname="tsp.mp4"),
verbose=False
)
print(res.F)
print(res.algorithm.evaluator.n_eval)
visualize(problem, res.X)
| true |
6e4e818a21336f471e4896a0d4007bfaaeeecb5e | Python | ganye/modulus_old | /lib/path.py | UTF-8 | 532 | 2.734375 | 3 | [] | no_license | '''
Created on Feb 6, 2014
@author: xinv
'''
import os
import re
__all__ = ['get_base_dir','get_file_path','get_dir_path',]
def get_base_dir():
return os.path.dirname(os.path.dirname(__file__))
def get_file_path(*args):
pattern = re.compile('/+')
path = []
for arg in args:
path.append('/' + arg)
path = ''.join(path).replace('//','/').rstrip('/')
while '//' in path:
path = path.replace('//','/')
return path
def get_dir_path(*args):
return get_file_path(*args) + '/' | true |
f1d74b8538865cf65b8218ee2b83d262b549edaf | Python | Lokeshwarrobo/Algorithms | /Sorting/InsertionSort.py | UTF-8 | 298 | 4.0625 | 4 | [] | no_license | array = [10, 9, 8, 6, 7, 5, 0, 1, 2, 3, 4]
def Insertion_Sort(array):
for i in range(1, len(array)):
j = i
while j > 0 and array[j] < array[j - 1]:
swap(j, j-1, array)
j -= 1
return array
def swap(i, j, array):
array[i], array[j] = array[j], array[i]
print(Insertion_Sort(array))
| true |
ab39cd578ac1cb302e58687465bc94be762b8328 | Python | Vtneang/StockResearch | /RandoTests/Searching.py | UTF-8 | 538 | 3.296875 | 3 | [] | no_license | # Performing google search using Python code
class Gsearch_python:
def __init__(self,name_search):
self.name = name_search
def Gsearch(self):
count = 0
try :
from googlesearch import search
except ImportError:
print("No Module named 'google' Found")
for i in search(query=self.name,tld='co.in',lang='en',num=10,stop=3,pause=1):
count += 1
print (count)
print(i + '\n')
if __name__=='__main__':
gs = Gsearch_python("Tutorialspoint Python")
gs.Gsearch() | true |
0a071e7a3b70446a742a0229747d5434b467f7ec | Python | Samatki/PyProjects-webTest | /webTest.py | UTF-8 | 275 | 2.6875 | 3 | [] | no_license | from sys import argv
import httplib as h
x = argv[1]
conn = h.HTTPConnection(x)
conn.request("GET","/")
y = conn.getresponse()
if 200<=y.status and y.status<400:
z = 'OK'
else:
z= 'BAD REQUEST'
print x, ' *** ', z,'\n\t', y.status, ' *** ', y.reason
| true |
f582c0358912e2edc26a79584fc51b9b10b2bad6 | Python | GrayJoKing/BeeBot | /discordBot/Randomcog.py | UTF-8 | 4,777 | 3.3125 | 3 | [] | no_license | import discord
from discord.ext import commands
#All
import random
#Dice
from math import ceil
#Cat
import json
#Dog and Cat
import aiohttp
#Dog
from re import search
#B.ook + clean function
import secrets
#roll
from functools import reduce
class Random():
def __init__(self, bot):
self.bot = bot
##Roll
##Rolls dice
@commands.command(aliases=['die','dice'])
async def roll(self, *, dice):
'''roll <number>
- Rolls number of dice
e.g. b.roll 5
OR
b.roll [<number1>d<number2>]
- Rolls number1 dice with number2 sides
e.g. b.roll 5d3 6d4 2d7 ...'''
if len(dice) == 0:
await self.bot.say("You rolled a ``" + str(ceil(random.random()*6)) + "`")
return
dice = dice.split(" ")
results = []
if len(dice) == 1:
try:
int(dice[0])
dice = int(dice[0])
if dice >= 100:
await self.bot.say("No more than 100 dice please")
return
for i in range(0,dice):
results.append(str(random.randint(1,6)))
text = "You rolled `" + str(dice) + "` dice. The results were `" + ", ".join(results) + "`"
await self.bot.say(text)
return
except:
pass
counter = 0
for die in dice:
die = die.split("d")
if len(die) != 2:
await self.bot.say("error when processing "+ secrets.clean('d'.join(die)))
return
else:
if die[0] == '':
die[0] = '1'
counter += int(die[0])
if die[0] == '':
die[0] = 0
elif not die[0].isnumeric():
await self.bot.say("error when processing "+ secrets.clean('d'.join(die)))
return
elif int(die[0]) > 100 or counter > 100:
await self.bot.say("No more than 100 dice please")
return
elif int(die[1]) > 100:
await self.bot.say("Dice with values no more than 100 please")
return
tmp = []
for i in range(0,int(die[0])):
tmp.append(str(random.randint(1,int(die[1]))))
results.append("You rolled `" + str(die[0]) + "` dice with `" + str(die[1]) + "` sides. The results were `" + ", ".join(tmp) + "` for a total of `" + str(reduce(lambda x,y: int(x)+int(y), tmp)) + "`")
await self.bot.say('\n'.join(results))
results = []
##Choose {phrase} or {phrase} or ...
@commands.command(pass_context = True, aliases = ['choice'])
async def choose(self, ctx, *, words):
'''choose <choice> {"or" <choice>}
- Replies with a random choice from those given
e.g. b.choose Option 1 or Option 2 or Option 3'''
await self.bot.say("`{}`".format(secrets.clean(random.choice(words.split(" or ")))))
##Flip
##Flips a coin
@commands.command(aliases = ['coin'])
async def flip(self, table = None):
'''flip
- Flips a coin'''
if table == "table":
await self.bot.say("(╯:bee:)╯︵ ┻━┻")
return
await self.bot.say(random.choice(["Heads","Tails"]))
##Scramble
@commands.command()
async def scramble(self, *, msg):
'''scramble <message>
- Scrambles your message'''
await self.bot.say(":twisted_rightwards_arrows: `" + secrets.clean(random.shuffle(msg)) + "`")
#Dog
@commands.command(aliases = ["itch", "ark", "ite"], pass_context = True)
async def dog(self, ctx):
'''dog
- Gets a picture of a random \U0001F436'''
msg = await self.bot.say("Getting a \U0001F436 or two!")
async with aiohttp.get('http://random.dog/woof.json') as r:
if r.status == 200:
js = await r.json()
embed = discord.Embed()
embed.set_image(url = js['url'])
print(js['url'])
await self.bot.edit_message(msg, new_content=" " if ctx.invoked_with == "dog" else random.choice(["Grrr...", "Woof!", "Bark!"]),embed = embed)
else:
await self.bot.edit_message(msg, new_content="Error while getting image.")
#Cat
@commands.command()
async def cat(self):
'''cat
- Gets a picture of a random \U0001F431'''
msg = await self.bot.say("Getting a \U0001F431 or two!")
async with aiohttp.get('http://random.cat/meow') as r:
if r.status == 200:
js = await r.json()
embed = discord.Embed()
embed.set_image(url = js['file'])
await self.bot.edit_message(msg, new_content=" ",embed = embed)
else:
await self.bot.edit_message(msg, new_content="Error while getting image.")
@commands.command()
async def acronym(self, acro):
'''acronym <letters>
- Returns a random matching the letters given'''
if not acro.isalpha():
await self.bot.say("Error: Letters only")
return
if len(acro) > 10:
await self.bot.say("Error: Word too long")
return
wordList = []
for letter in acro.lower():
word = random.choice(list(filter(lambda word: word[0] == letter and word.capitalize() not in wordList, (self.bot.wordLists['long'] + self.bot.wordLists['medium']))))
wordList.append(word.capitalize())
await self.bot.say("The acronym " + ".".join(list(acro.upper())) + " means:\n\n`" + ' '.join(wordList) + "`")
def setup(bot):
bot.add_cog(Random(bot))
| true |
2614c8d9fd238d33b81e76d6fd795eb8f0b450a0 | Python | jerryhan88/py_source | /python_source/src/gc.py | UTF-8 | 1,596 | 3.015625 | 3 | [] | no_license | from __future__ import division
import wx, time
class Node:
def __init__(self, _id, x, y):
self.id = _id
self.x, self.y = x, y
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, 'test', size=(640, 480))
MyPanel(self)
self.Show(True)
class MyPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.SetBackgroundColour(wx.WHITE)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.GP = None
def OnPaint(self, evt):
dc = wx.PaintDC(self)
gc = wx.GraphicsContext.Create(dc)
gc.SetPen(wx.BLACK_PEN)
gc.SetBrush(wx.BLUE_BRUSH)
gc.DrawRectangle(100, 100, 100, 100)
if self.GP == None:
gpath1 = gc.CreatePath()
gpath1.MoveToPoint(0, 0)
gpath1.AddLineToPoint(0, 100)
gpath1.AddLineToPoint(100, 100)
gpath1.AddLineToPoint(-50, 0)
gpath1.CloseSubpath()
gpath2 = gc.CreatePath()
gpath2.MoveToPoint(150, 0)
gpath2.AddLineToPoint(200, 0)
gpath2.AddLineToPoint(200, 50)
gpath2.CloseSubpath()
self.GP = [gpath1, gpath2]
#
gc.Translate(300, 100)
gc.SetBrush(wx.RED_BRUSH)
gc.DrawPath(self.GP[0])
gc.Translate(0, 0)
gc.SetBrush(wx.GREEN_BRUSH)
gc.DrawPath(self.GP[1])
if __name__ == '__main__':
app = wx.App(False)
app.frame = MainFrame()
app.MainLoop()
| true |
4329634a680a1067864152d7968894c870dd66a5 | Python | h-varma/Team-11 | /src/unit3/hridya_rectangle.py | UTF-8 | 260 | 3.984375 | 4 | [
"MIT"
] | permissive | def rectangle(length, width):
if length <= 0 or width <= 0:
raise ValueError("The side length cannot be negative or zero!")
measures = {"area": length * width}
print(f'The rectangle has area {format(measures["area"])}')
return measures
| true |
b6a126ab5a58eacb015311cead742ef6f43593e2 | Python | crystalDf/Automate-the-Boring-Stuff-with-Python-Chapter-06-String | /rawString.py | UTF-8 | 142 | 3.375 | 3 | [] | no_license | # A raw string completely ignores all escape characters and prints
# any backslash that appears in the string
print(r'That is Carol\'s cat.')
| true |
367c23676752e69644120df702bc95672722ef74 | Python | ctmackay/aesthetic_twitterbot | /bb_markov.py | UTF-8 | 5,603 | 3.21875 | 3 | [] | no_license | # Body Building Markov Model
# Charles MacKay
# this module will build 4 markov models based muscle group we selected.
# the source is the text we scraped from the body building website
# to produce 4 steps in our exercise.
# input: muscle group
# output: 4 markov models that can output a generated sentence corresponding to each step of the exercise
import markovify
import os
import random
import re
import time
word_title_array = []
random_exercise_name_list = []
muscle_list = ["Biceps", "Shoulders", "Calves", "Neck", "Lats",
"Triceps", "Hamstrings", "Adductors", "Quadriceps",
"Chest", "Middle Back", "Abductors", "Lower Back",
"Glutes", "Forearms", "Traps"]
DATA_DIR = r"C:\Users\Charl\Dropbox\Python scripts\twitterbot\laptop"
input_file_list = []
title_dict = dict()
#builds an array of titles
def build_title_array(title_path):
wta = []
# Get raw text as string.
with open(title_path) as f:
for line in f:
for word in line.split():
wta.append(word)
return wta
#picks a random muscle group from the list
def generate_new_muscle_group():
return muscle_list[random.randint(0, len(muscle_list)-1)]
#input a array of titles, outputs a randomly generated exercise name
def generate_new_exercise_name(wta):
empty_list = []
new_name=''
for i in range (5):
s = wta[random.randint(0,len(wta)-1)]
if s not in empty_list:
empty_list.append(s)
new_name += s + ' '
return new_name
def build_paths_to_inputs(muscle_chosen):
step1_exercise_path = DATA_DIR + '\\' + muscle_chosen +'1.txt'
step2_exercise_path = DATA_DIR + '\\' + muscle_chosen +'2.txt'
step3_exercise_path = DATA_DIR + '\\' + muscle_chosen +'3.txt'
step4_exercise_path = DATA_DIR + '\\' + muscle_chosen +'4.txt'
if os.path.exists(step1_exercise_path) and os.path.exists(step2_exercise_path) and os.path.exists(step3_exercise_path) and os.path.exists(step4_exercise_path):
return step1_exercise_path,step2_exercise_path,step3_exercise_path,step4_exercise_path
else:
raise Exception(IOError)
def build_text_files(p1,p2,p3,p4):
with open(p1) as f:
step1_text = f.read()
with open(p2) as f:
step2_text = f.read()
with open(p3) as f:
step3_text = f.read()
with open(p4) as f:
step4_text = f.read()
return (step1_text, step2_text, step3_text, step4_text)
def build_markov_models(step1_text, step2_text, step3_text, step4_text):
# Build the models.
global step1_model
global step2_model
global step3_model
global step4_model
step1_model = markovify.Text(step1_text, state_size=3)
step2_model = markovify.Text(step2_text, state_size=3)
step3_model = markovify.Text(step3_text, state_size=3)
step4_model = markovify.Text(step4_text, state_size=3)
#PUBLIC functions for twitter interface
def get_current_muscle():
global muscle_chosen
return muscle_chosen
def get_exercise_name():
global random_exercise_name
return random_exercise_name
def give_me_step1():
return step1_model.make_short_sentence(140)
def give_me_step2():
return step2_model.make_short_sentence(140)
def give_me_step3():
return step3_model.make_short_sentence(140)
def give_me_step4():
return step4_model.make_short_sentence(140)
def list_all_muscles():
return muscle_list
def generate_specific_model(muscle):
global muscle_chosen
muscle_chosen = muscle
print "building a markov chain based on: ",
print muscle_chosen
#find the corresponding titles file for that muscle
title_path = title_dict[muscle_chosen]
global random_exercise_name
random_exercise_name = generate_new_exercise_name(build_title_array(title_path))
#build paths to the description files
(step1_path,step2_path,step3_path,step4_path) = build_paths_to_inputs(muscle_chosen)
# now lets generate our workout markov chain
# open the text files for reading
(step1_text, step2_text, step3_text, step4_text) = build_text_files(step1_path,step2_path,step3_path,step4_path)
# Build the models.
build_markov_models(step1_text, step2_text, step3_text, step4_text)
## START MAIN PROGRAM ##
#find titles and build dictionary
for f in os.listdir(DATA_DIR):
if f.endswith("titles.txt"):
mus = (os.path.splitext(f)[0])[:-7]
full_path = os.path.abspath(os.path.join(DATA_DIR, f))
title_dict[mus] = full_path
#find exercise description files
for f in os.listdir(DATA_DIR):
if f.endswith(".txt"):
input_file_list.append(f)
#pick a muscle group
muscle_chosen = generate_new_muscle_group()
#print muscle_chosen
#find the corresponding titles file for that muscle
title_path = title_dict[muscle_chosen]
random_exercise_name = generate_new_exercise_name(build_title_array(title_path))
#build paths to the description files
(step1_path,step2_path,step3_path,step4_path) = build_paths_to_inputs(muscle_chosen)
# now lets generate our workout markov chain
# open the text files for reading
(step1_text, step2_text, step3_text, step4_text) = build_text_files(step1_path,step2_path,step3_path,step4_path)
# Build the models.
build_markov_models(step1_text, step2_text, step3_text, step4_text)
#now the program is ready to be used with a random muscle initialized
##print "Muscle group chosen: ",
##print get_current_muscle()
##print get_exercise_name()
##
##print give_me_step1()
##print give_me_step2()
##print give_me_step3()
##print give_me_step4()
| true |
260cfa7dc8066348847b5c85e75cb18b0847ec23 | Python | kristellef/FYPcode_backup | /createcsv.py | UTF-8 | 1,032 | 2.703125 | 3 | [] | no_license | import json, csv, os
mapping_csv = csv.reader(open('/Users/macbook/Desktop/mapping0.csv', 'rt'), delimiter=',')
count = 0
data = []
for row in mapping_csv:
count +=1
if count%50000==0:
print(count,'/346.516.753')
if len(row) <=3:
path = "/Users/macbook/Desktop/csvs/"+str(row[0][0:3])+".csv"
if os.path.isfile(path) :
writer = csv.writer(open(path,'a'))
writer.writerow(row)
else:
temp_csp = csv.writer(open(path,'w+'))
temp_csp.writerow(row)
if len(row)>3:
id = row[len(row)-2]
counter = row[len(row)-1]
for i in range(0,len(row)-2):
path = "/Users/macbook/Desktop/csvs/" + str(row[i][0:3]) + ".csv"
data = [row[i], id, counter]
if os.path.isfile(path):
writer = csv.writer(open(path,'a'))
writer.writerow(data)
else:
temp_csp = csv.writer(open(path,'w+'))
temp_csp.writerow(data)
print('done')
| true |
66a6a0935bc2228c049014958cfae6dd77a2306d | Python | Seneda/GameJam2020 | /References/wabley/game_funtions.py | UTF-8 | 1,512 | 3.21875 | 3 | [] | no_license | import sys
import pygame
def check_keydown_events(event, car):
"""Respond to keypresses."""
if event.key == pygame.K_RIGHT:
#rotate car clockwise
car.rotating_clockwise = True
elif event.key == pygame.K_LEFT:
#rotate car anticlockwise
car.rotating_anticlockwise = True
elif event.key == pygame.K_UP:
#move the car forward
car.forward_motion = True
def check_keyup_events(event, car):
if event.key == pygame.K_RIGHT:
car.rotating_clockwise = False
elif event.key == pygame.K_LEFT:
car.rotating_anticlockwise = False
elif event.key == pygame.K_UP:
car.forward_motion = False
def check_events(car):
"""Respond to keypresses and mouse events"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, car)
elif event.type == pygame.KEYUP:
check_keyup_events(event, car)
def update_screen(fm_settings, screen, car,monkeys, pieces):
"""Update images on the screen and flip to the new screen."""
#Redraw the screen during each pass through the loop.
screen.fill(fm_settings.screen_bg_colour)
for piece in pieces.sprites():
piece.draw_piece()
car.blitme()
for monkey in monkeys:
# monkey.update(car.rect.centerx,car.rect.centery)
monkey.blitme()
#Make the most recent screen visible.
pygame.display.flip() | true |
b0fa516fde2e1f7ec36bdca5f2d0cee4b2e11907 | Python | hcutler/civictech-blog-analysis | /initial-analysis/techpresident/tp-article-parse.py | UTF-8 | 2,345 | 2.65625 | 3 | [] | no_license | import urllib2
import time
from bs4 import BeautifulSoup as bsoup
from bs4 import BeautifulSoup
from yaml import load, Loader
import requests as rq
import re
import unicodedata
import sys
#write article urls to textfile
all_text = ""
secretURLs = []
with open("tp-333.txt", "r") as file:
data = file.read()
url_list = data.split('\n')
for x in url_list: #change range to e.g. [0:2] to test
html_doc = urllib2.urlopen(x).read()
soup = BeautifulSoup(html_doc, "lxml")
#get title
# title = soup.find('div', attrs={"class": "boxshadow "})
# title = str(title)
# t_start = title.find('<h1>')
# t_end = title.find('</h1>')
# print title[t_start + 4 :t_end]
#get author
try:
# author = soup.find('span', attrs={"id": "techauth"})
# author_name = author.text + ' '
# get article content
content = soup.find('div', attrs={"id": "story-content"})
text = content.text
# str_text = unicodedata.normalize('NFKD', text).encode('ascii','ignore')
# all_text += author_name
# all_text += text
# all_text = unicodedata.normalize('NFKD', all_text).encode('ascii','ignore')
# print 'Success: ', x
except:
# print 'This article has no techauth tag'
secretURLs.append(x)
# author_name = ''
text = ''
pass
# author = str(author)
# a_start = author.find('">')
# a_end = author.find('</a>')
#print author[a_start+3:a_end]
# # get article content
# content = soup.find('div', attrs={"id": "story-content"})
# text = content.text
# content = str(content)
# soup2 = BeautifulSoup(content, "lxml")
# text = soup2.get_text()
#str_text = unicodedata.normalize('NFKD', text).encode('ascii','ignore')
# auth_name = unicodedata.normalize('NFKD', author_name).encode('ascii','ignore')
txt = unicodedata.normalize('NFKD', text).encode('ascii','ignore')
# print author_name
print txt
# all_text += author_name
# all_text += text
#all_text = unicodedata.normalize('NFKD', all_text).encode('ascii','ignore')
#print 'Success: ', x
# with open("tp0_content.txt", "w") as outfile:
# outfile.write(all_text)
# with open("tp0_secretURLs.txt", "w") as outfile:
# outfile.write(secretURLs)
#print secretURLs
| true |
47d0a1694196a05fec47aa7fee1b130908b6e1f7 | Python | sunita6/python-assignment3 | /assignment3/pythonassign31.py | UTF-8 | 129 | 3.859375 | 4 | [] | no_license | n=int(input("enter the number:"))
sum1=0
while(n>0):
sum1=sum1+n
n=n-1
print("the sum of n natural number is",sum1)
| true |
9840caee869be9ffd0275ed3983cd5830d57eacd | Python | sumfish/music_pre-content | /model.py | UTF-8 | 16,140 | 2.6875 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
class Tripletnet(nn.Module):
    """Triplet wrapper: embeds anchor/positive/negative with a shared network
    and returns the L2 distances between the paired embeddings.
    """

    def __init__(self, embeddingnet):
        super(Tripletnet, self).__init__()
        # Single embedding network shared across all three triplet inputs.
        self.embeddingnet = embeddingnet

    def forward(self, A, P, N):
        """Embed the triplet (anchor, positive, negative).

        Returns:
            dist_AP: Euclidean distance between anchor and positive embeddings.
            dist_AN: Euclidean distance between anchor and negative embeddings.
            plus the three embeddings themselves (used for the training loss).
        """
        emb_anchor = self.embeddingnet(A)
        emb_positive = self.embeddingnet(P)
        emb_negative = self.embeddingnet(N)
        # p=2 -> Euclidean distance over the embedding dimension.
        dist_ap = F.pairwise_distance(emb_anchor, emb_positive, 2)
        dist_an = F.pairwise_distance(emb_anchor, emb_negative, 2)
        return dist_ap, dist_an, emb_anchor, emb_positive, emb_negative

    def get_embedding(self, x):
        """Embed a single input with the shared embedding network."""
        return self.embeddingnet(x)
class Net_s(nn.Module):  ##internet
    """Small LeNet-style CNN (taken from an online example).

    Expects input of shape (N, 1, 28, 28) and returns raw class logits of
    shape (N, 10).  The debug ``print`` statements of the original forward
    pass have been removed: they polluted stdout and slowed every call.
    """

    def __init__(self):
        super(Net_s, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)  # 320 = 20 channels * 4 * 4 spatial
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # (N,1,28,28) -> conv(k5) -> (N,10,24,24) -> maxpool -> (N,10,12,12)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        # -> conv(k5)+dropout -> (N,20,8,8) -> maxpool -> (N,20,4,4)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        # Flatten to (N, 320) for the fully-connected head.
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        # Dropout only active in training mode.
        x = F.dropout(x, training=self.training)
        return self.fc2(x)
#####################################################
class res_block(nn.Module):
    """1-D residual block: two convolutions plus a (possibly projected) skip path.

    Args:
        inp: number of input channels.
        out: number of output channels.
        kernel: convolution kernel size (odd; padding=kernel//2 preserves length
            at stride 1).
        stride_list: [stride of conv1, stride of conv2].  The second stride also
            sets the temporal downsampling that the skip path must match.

    The debug ``print('in!=out')`` of the original ``__init__`` has been removed
    (a construction-time side effect); the downsample flag is now computed
    directly.
    """

    def __init__(self, inp, out, kernel, stride_list):
        super(res_block, self).__init__()
        self.conv1 = nn.Conv1d(inp, inp, kernel_size=kernel, stride=stride_list[0], padding=kernel//2)
        self.bn2 = nn.BatchNorm1d(inp)
        self.conv2 = nn.Conv1d(inp, out, kernel_size=kernel, stride=stride_list[1], padding=kernel//2)
        self.bn3 = nn.BatchNorm1d(out)
        # 1-D projection used on the skip path when the channel count changes.
        self.add_conv = nn.Conv1d(inp, out, kernel_size=kernel, stride=stride_list[1], padding=kernel//2)
        # When inp != out the identity cannot be added directly, so the skip
        # path is routed through add_conv instead.
        self.downsample = inp != out

    def forward(self, x):
        residual = x
        out = F.relu(self.bn2(self.conv1(x)))
        out = F.relu(self.bn3(self.conv2(out)))
        if self.downsample:
            # Project the skip path to the new channel count / length.
            # NOTE(review): bn3 is shared between the main branch and the skip
            # projection (as in the original); its running statistics mix both
            # paths -- confirm this is intended.
            out = F.relu(out + self.bn3(self.add_conv(residual)))
        else:
            # Same channel count: average-pool the input so its length matches
            # the stride-2 main branch (ceil_mode handles odd lengths).
            # Assumes stride_list[1] == 2 on this path -- TODO confirm.
            out = out + F.avg_pool1d(residual, kernel_size=2, ceil_mode=True)
        return out
class Encoder_v4(nn.Module):
    """1-D convolutional encoder, no fully-connected layers.

    Projects the 512-channel input down to a single channel through a 1x1
    projection followed by a stack of residual blocks, each halving the
    temporal length.  Input is (N, 1, 512, T); output is (N, T') with the
    singleton channel squeezed away.
    """

    def __init__(self):
        super(Encoder_v4, self).__init__()
        # Channel widths along the pipeline.
        self.c_in = 512   # input feature (mel) dimension
        self.c_out = 1    # final channel count
        self.c_m1 = 128   # width after the 1x1 projection
        self.c_m2 = 64    # width before the output block
        self.kernel = 5
        self.stride = [1, 2]

        self.conv1 = nn.Conv1d(self.c_in, self.c_m1, kernel_size=1)
        self.norm_layer = nn.BatchNorm1d(self.c_m1)
        self.act = nn.ReLU()
        self.drop_out = nn.Dropout(p=0.25)
        self.conv_last2 = res_block(self.c_m1, self.c_m2, self.kernel, self.stride)
        self.conv_last1 = res_block(self.c_m2, self.c_out, self.kernel, self.stride)
        self.head = nn.Sequential(
            res_block(self.c_m1, self.c_m1, self.kernel, self.stride),
            res_block(self.c_m1, self.c_m1, self.kernel, self.stride),
            res_block(self.c_m1, self.c_m1, self.kernel, self.stride),
        )

    def forward(self, _input):
        # Drop the singleton dim: (N, 1, C, T) -> (N, C, T).
        h = _input.view(-1, _input.size(2), _input.size(3))
        # 1x1 projection to the working width, then norm / activation / dropout.
        h = self.drop_out(self.act(self.norm_layer(self.conv1(h))))
        # Residual stack: three same-width blocks, then two narrowing blocks.
        h = self.head(h)
        h = self.conv_last1(self.conv_last2(h))
        # Squeeze the single remaining channel: (N, 1, T') -> (N, T').
        return h.view(-1, h.size(2))
class Encoder_v5(nn.Module): #new ,1d ,no connected layer, 512
    """Fully-convolutional 1-D encoder (512-channel input, no FC layer).

    A 1x1 projection (512 -> 128 channels) followed by a res_block stack
    narrowing 128 -> 64 -> 1.  forward() flattens leading dims, runs the
    stack, and squeezes the singleton output channel, returning (N', T').
    """
    def __init__(self):
        super(Encoder_v5, self).__init__()
        self.c_in=512 ### input feature dimension
        self.c_out=1   # final channel count
        self.c_m1=128  # width after the 1x1 input projection
        self.c_m2=64   # intermediate width inside the residual stack
        self.kernel=5
        self.stride=[1,2]
        # BUG FIX: the original referenced undefined attributes self.c_in2
        # and self.c_m (AttributeError the moment the module was built).
        # They are replaced by the declared c_m1/c_m2, matching the
        # 128/64 channel plan used by Encoder_v4 above.
        self.conv1 = nn.Conv1d(self.c_in, self.c_m1, kernel_size=1)
        self.norm_layer = nn.BatchNorm1d(self.c_m1)
        self.act = nn.ReLU()
        self.drop_out = nn.Dropout(p=0.25)
        self.head = nn.Sequential(
            res_block(self.c_m1, self.c_m1, self.kernel, self.stride),
            res_block(self.c_m1, self.c_m1, self.kernel, self.stride),
            res_block(self.c_m1, self.c_m2, self.kernel, self.stride),
            res_block(self.c_m2, self.c_m2, self.kernel, self.stride),
            res_block(self.c_m2, self.c_out, self.kernel, self.stride)
            #res_block(self.c_m2, self.c_out, self.kernel, [1,1])#####new
        )
    def forward(self, _input):
        """Encode a batched input to shape (N', time')."""
        x = _input
        #print('original:{}'.format(x.shape))
        # Collapse leading dims: expects a 4-D input -- TODO confirm layout.
        x = x.view(-1, x.size(2), x.size(3))
        #print('after view:{}'.format(x.shape))
        #### conv bank??????
        #### dimension up
        x = self.conv1(x)
        x = self.norm_layer(x)
        x = self.act(x)
        x = self.drop_out(x)
        #### residual
        x = self.head(x)
        #print('level 1(after res):{}'.format(x.shape))
        # Drop the singleton channel dim (c_out == 1).
        x = x.view(-1, x.size(2))
        #print('level 1(after res):{}'.format(x.shape))
        #input()
        return x
###################################################################
# frame-level paper
class block_in(nn.Module):
    """Pre-activation 2-D residual block using InstanceNorm.

    Convolutions use kernel (k, 1): they act along the frequency axis
    only.  The shortcut conv kernel (1 for k == 3, else 5) is chosen so
    both branches shrink the frequency dim identically: k == 3 with
    padding 1 keeps the size; k == 5 with padding 1 shrinks it by 4, as
    does the unpadded 5-kernel shortcut.
    """
    def __init__(self, inp, out, kernel):
        super(block_in, self).__init__()
        if kernel==3:
            last_kernel=1
        else:
            last_kernel=5
        self.in1 = nn.InstanceNorm2d(inp)
        self.conv1 = nn.Conv2d(inp, out, (kernel,1), padding=(1,0))
        self.in2 = nn.InstanceNorm2d(out)
        self.conv2 = nn.Conv2d(out, out, (kernel,1), padding=(1,0))
        self.in3 = nn.InstanceNorm2d(out)
        self.up = nn.Conv2d(inp, out, (last_kernel,1), padding=(0,0))  # shortcut projection
        self.in4 = nn.InstanceNorm2d(out)
    def forward(self, x):
        #print('in')
        #print('block x:{}'.format(x.shape)) #shape(N,C,128,87)
        out = self.conv1(self.in1(x)) #before is a cnn layer
        #print('f(x):{}'.format(out.shape))
        out = self.conv2(F.relu(self.in2(out)))
        out = self.in3(out)
        #print('f(x):{}'.format(out.shape))
        #print('f(x):{}'.format(self.up(x).shape))
        # Projected, normalized shortcut added in place.
        out += self.in4(self.up(x)) ##########################
        #print('block x:{}'.format(out.shape))
        return out
class Encoder_v2(nn.Module): ##add instance normalize
    """2-D CNN encoder (InstanceNorm variant of Encoder_v1).

    A block_in residual stack over (N, 1, freq, time) input, global
    average pooling, then two linear layers.  forward() returns both the
    final content embedding and the pre-projection hidden layer.
    """
    def __init__(self):
        super(Encoder_v2, self).__init__()
        fre = 64  # base channel width
        middle_size=50
        content_size=10
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # global average pool
        self.fc1 = nn.Linear(fre*3, middle_size)
        self.fc2 = nn.Linear(middle_size, content_size)
        #self.fc2 = nn.Linear(zsize, num_classes)
        self.lin_drop = nn.Dropout(p=0.5)
        self.head = nn.Sequential(
            #nn.BatchNorm2d(inp), ###############
            #nn.Conv2d(1, fre, (3,1), padding=(1,0)),
            nn.Conv2d(1, fre, (5,1), padding=(1,0)),
            block_in(fre, fre*2, 5),
            nn.Dropout(p=0.25),
            nn.MaxPool2d((3,1),(3,1)), #(42,T)
            block_in(fre*2, fre*3, 3),
            #nn.Dropout(p=0.3),
            ####nn.BatchNorm2d(fre*3),
            nn.ReLU(inplace=True),
            #nn.MaxPool2d((3,1),(3,1)),
            #nn.Conv2d(fre*3, fre*2, (3,1), padding=(1,0))
        )
    def forward(self, _input):
        '''
        Observed shape trace:
        original:torch.Size([16, 1, 128, 87])
        level 1(after res):torch.Size([16, 192, 40, 87])
        level 2:torch.Size([16, 192, 1, 1])
        level 3:torch.Size([16, 192])
        level 4:torch.Size([16, 50])
        '''
        x = _input
        #print('original:{}'.format(x.shape))
        x = self.head(x)
        #print('level 1(after res):{}'.format(x.shape))
        x = self.avgpool(x)##############
        #print('level 2:{}'.format(x.shape))
        #x = x.view(-1, 192)
        x = torch.flatten(x, 1)
        #print('level 3:{}'.format(x.shape))
        last_layer = self.lin_drop(F.relu(self.fc1(x)))
        #print('level 4:{}'.format(x.shape))
        #out = F.softmax(self.fc2(last_layer), dim=0) ####classifier
        out = self.fc2(last_layer)
        #print('level 5:{}'.format(x.shape))
        # Return the embedding plus the hidden layer it was projected from.
        return out, last_layer
#############################################################################
class block_1d(nn.Module):
    """Pre-activation 1-D residual block using BatchNorm.

    The shortcut conv kernel (1 for k == 3, else 5) matches the length
    change of the two padded main-branch convs: with k == 3 and padding 1
    the length is preserved; with k == 5 and padding 1 each conv shrinks
    the length by 2 (total 4), as does the unpadded 5-kernel shortcut.
    """
    def __init__(self, inp, out, kernel):
        super(block_1d, self).__init__()
        if kernel==3:
            last_kernel=1
        else:
            last_kernel=5
        self.bn1 = nn.BatchNorm1d(inp)
        #self.conv1 = nn.Conv2d(inp, out, (kernel,1), padding=(1,0))
        self.conv1 = nn.Conv1d(inp, out, kernel_size=kernel, padding=1)
        self.bn2 = nn.BatchNorm1d(out)
        #self.conv2 = nn.Conv2d(out, out, (kernel,1), padding=(1,0))
        self.conv2 = nn.Conv1d(out, out, kernel_size=kernel, padding=1)
        self.bn3 = nn.BatchNorm1d(out)
        self.up = nn.Conv1d(inp, out, kernel_size=last_kernel)  # shortcut projection
        #self.up = nn.Conv2d(inp, out, (last_kernel,1), padding=(0,0))
    def forward(self, x):
        #print('in')
        #print('block x:{}'.format(x.shape)) #shape(N,C,128,87)
        out = self.conv1(self.bn1(x)) #before is a cnn layer
        #print('f(x):{}'.format(out.shape))
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.bn3(out)
        #print('f(x):{}'.format(out.shape))
        #print('f(x):{}'.format(self.up(x).shape))
        # Projected shortcut added in place.
        out += self.up(x) ##########################
        #print('block x:{}'.format(out.shape))
        return out
class Encoder_v3(nn.Module): ########## 1d conv
    """1-D CNN encoder over (N, 128, T) sequences.

    A block_1d residual stack, adaptive average pooling over time, then
    two linear layers.  Unlike Encoder_v1/v2, forward() returns only the
    final projection (the hidden-layer return is commented out).
    """
    def __init__(self):
        super(Encoder_v3, self).__init__()
        fre = 64  # base channel width
        msize=50
        zsize=10
        self.avgpool = nn.AdaptiveAvgPool1d(1)  # pool over the time axis
        self.fc1 = nn.Linear(fre*3, msize)
        self.fc2 = nn.Linear(msize, zsize)
        #self.fc2 = nn.Linear(zsize, num_classes)
        self.lin_drop = nn.Dropout(p=0.5)
        self.head = nn.Sequential(
            #nn.BatchNorm2d(inp), ###############
            #nn.Conv2d(1, fre, (3,1), padding=(1,0)),
            #nn.Conv2d(1, fre, (5,1), padding=(1,0)),
            nn.Conv1d(128, fre, 5), ######### 128=dictionary dimension
            block_1d(fre, fre*2, 5),
            nn.Dropout(p=0.25),
            #nn.MaxPool2d((3,1),(3,1)), #(42,T)
            block_1d(fre*2, fre*3, 3),
            #nn.Dropout(p=0.3),
            nn.BatchNorm1d(fre*3),
            nn.ReLU(inplace=True),
            #nn.MaxPool2d((3,1),(3,1)),
            #nn.Conv2d(fre*3, fre*2, (3,1), padding=(1,0))
        )
    def forward(self, _input):
        x = _input
        #print('original:{}'.format(x.shape))
        # Collapse leading dims: expects a 4-D input -- TODO confirm layout.
        x = x.view(-1, x.size(2), x.size(3))
        #print('after view:{}'.format(x.shape))
        x = self.head(x)
        #print('level 1(after res):{}'.format(x.shape))
        x = self.avgpool(x)##############
        #print('level 2:{}'.format(x.shape))
        #x = x.view(-1, 192)
        x = torch.flatten(x, 1)
        #print('level 3:{}'.format(x.shape))
        last_layer = self.lin_drop(F.relu(self.fc1(x)))
        #print('level 4:{}'.format(x.shape))
        out = self.fc2(last_layer)
        #return out, last_layer
        return out
#############################################################################
# frame-level paper
class block(nn.Module):
    """Pre-activation 2-D residual block using BatchNorm.

    Convolutions use kernel (k, 1): they act along the frequency axis
    only.  The shortcut conv kernel (1 for k == 3, else 5) is chosen so
    both branches shrink the frequency dim identically (k == 3 with
    padding 1 keeps the size; k == 5 with padding 1 shrinks it by 4, as
    does the unpadded 5-kernel shortcut).
    """
    def __init__(self, inp, out, kernel):
        super(block, self).__init__()
        if kernel==3:
            last_kernel=1
        else:
            last_kernel=5
        self.bn1 = nn.BatchNorm2d(inp)
        self.conv1 = nn.Conv2d(inp, out, (kernel,1), padding=(1,0))
        self.bn2 = nn.BatchNorm2d(out)
        self.conv2 = nn.Conv2d(out, out, (kernel,1), padding=(1,0))
        self.bn3 = nn.BatchNorm2d(out)
        self.up = nn.Conv2d(inp, out, (last_kernel,1), padding=(0,0))  # shortcut projection
    def forward(self, x):
        #print('in')
        #print('block x:{}'.format(x.shape)) #shape(N,C,128,87)
        out = self.conv1(self.bn1(x)) #before is a cnn layer
        #print('f(x):{}'.format(out.shape))
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.bn3(out)
        #print('f(x):{}'.format(out.shape))
        #print('f(x):{}'.format(self.up(x).shape))
        # Projected shortcut added in place.
        out += self.up(x) ##########################
        #print('block x:{}'.format(out.shape))
        return out
class Encoder_v1(nn.Module):
    """2-D CNN encoder (BatchNorm variant; see Encoder_v2 for InstanceNorm).

    A `block` residual stack over (N, 1, freq, time) input, global
    average pooling, then two linear layers.  forward() returns both the
    final embedding and the pre-projection hidden layer.
    """
    def __init__(self):
        super(Encoder_v1, self).__init__()
        fre = 64  # base channel width
        msize=50
        zsize=10
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # global average pool
        self.fc1 = nn.Linear(fre*3, msize)
        self.fc2 = nn.Linear(msize, zsize)
        #self.fc2 = nn.Linear(zsize, num_classes)
        self.lin_drop = nn.Dropout(p=0.5)
        self.head = nn.Sequential(
            #nn.BatchNorm2d(inp), ###############
            #nn.Conv2d(1, fre, (3,1), padding=(1,0)),
            nn.Conv2d(1, fre, (5,1), padding=(1,0)),
            block(fre, fre*2, 5),
            nn.Dropout(p=0.25),
            nn.MaxPool2d((3,1),(3,1)), #(42,T)
            block(fre*2, fre*3, 3),
            #nn.Dropout(p=0.3),
            nn.BatchNorm2d(fre*3),
            nn.ReLU(inplace=True),
            #nn.MaxPool2d((3,1),(3,1)),
            #nn.Conv2d(fre*3, fre*2, (3,1), padding=(1,0))
        )
    def forward(self, _input):
        '''
        Observed shape trace:
        original:torch.Size([16, 1, 128, 87])
        level 1(after res):torch.Size([16, 192, 40, 87])
        level 2:torch.Size([16, 192, 1, 1])
        level 3:torch.Size([16, 192])
        level 4:torch.Size([16, 50])
        '''
        x = _input
        #print('original:{}'.format(x.shape))
        x = self.head(x)
        #print('level 1(after res):{}'.format(x.shape))
        x = self.avgpool(x)##############
        #print('level 2:{}'.format(x.shape))
        #x = x.view(-1, 192)
        x = torch.flatten(x, 1)
        #print('level 3:{}'.format(x.shape))
        last_layer = self.lin_drop(F.relu(self.fc1(x)))
        #print('level 4:{}'.format(x.shape))
        #out = F.softmax(self.fc2(last_layer), dim=0) ####classifier
        out = self.fc2(last_layer)
        #print('level 5:{}'.format(x.shape))
        # Return the embedding plus the hidden layer it was projected from.
        return out, last_layer
08a4163ed74ee8b33c38b443e0307074c042b32b | Python | dgpllc/leetcode-python | /learnpythonthehardway/997-find-the-town-judge.py | UTF-8 | 1,873 | 3.78125 | 4 | [] | no_license | # In a town, there are N people labelled from 1 to N. There is a rumor that one of these people is secretly the town
# judge.
#
# If the town judge exists, then:
#
# The town judge trusts nobody.
# Everybody (except for the town judge) trusts the town judge.
# There is exactly one person that satisfies properties 1 and 2.
# You are given trust, an array of pairs trust[i] = [a, b] representing that the person labelled a trusts the person
# labelled b.
#
# If the town judge exists and can be identified, return the label of the town judge. Otherwise, return -1.
#
#
#
# Example 1:
#
# Input: N = 2, trust = [[1,2]]
# Output: 2
# Example 2:
#
# Input: N = 3, trust = [[1,3],[2,3]]
# Output: 3
# Example 3:
#
# Input: N = 3, trust = [[1,3],[2,3],[3,1]]
# Output: -1
# Example 4:
#
# Input: N = 3, trust = [[1,2],[2,3]]
# Output: -1
# Example 5:
#
# Input: N = 4, trust = [[1,3],[1,4],[2,3],[2,4],[4,3]]
# Output: 3
#
#
# Note:
#
# 1 <= N <= 1000
# trust.length <= 10000
# trust[i] are all different
# trust[i][0] != trust[i][1]
# 1 <= trust[i][0], trust[i][1] <= N
import collections
class Solution(object):
    def findJudge(self, N, trust):
        """Return the label of the town judge, or -1 if there is none.

        The judge trusts nobody (out-degree 0) and is trusted by everyone
        else (in-degree N - 1).  Simple degree counting: O(N + len(trust))
        time, O(N) space.  (The original built dicts of sets and used the
        Python-2-only `xrange`; `range` works on both Python 2 and 3.)

        :type N: int
        :type trust: List[List[int]]
        :rtype: int
        """
        out_degree = [0] * (N + 1)  # index 0 unused; people are 1..N
        in_degree = [0] * (N + 1)
        for a, b in trust:
            out_degree[a] += 1
            in_degree[b] += 1
        for person in range(1, N + 1):
            if out_degree[person] == 0 and in_degree[person] == N - 1:
                return person
        return -1
if __name__ == '__main__':
    # Smoke tests against the problem statement's examples
    # (Python 2 print-statement syntax; expected output: 3, -1, -1, 3).
    print Solution().findJudge(3, [[1, 3], [2, 3]])
    print Solution().findJudge(3, [[1, 3], [2, 3], [3, 1]])
    print Solution().findJudge(3, [[1, 2], [2, 3]])
    print Solution().findJudge(4, [[1, 3], [1, 4], [2, 3], [2, 4], [4, 3]])
| true |
1c9301d3e6092d1aa3ba777ac5a7212f3ae76def | Python | byceps/byceps | /byceps/blueprints/admin/attendance/views.py | UTF-8 | 2,110 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | """
byceps.blueprints.admin.attendance.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2023 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort
from byceps.services.brand import brand_service
from byceps.services.party import party_service
from byceps.services.ticketing import ticket_attendance_service
from byceps.services.user import user_service
from byceps.util.framework.blueprint import create_blueprint
from byceps.util.framework.templating import templated
from byceps.util.views import permission_required
# Blueprint for the attendance section of the admin UI.
blueprint = create_blueprint('attendance_admin', __name__)
@blueprint.get('/brands/<brand_id>')
@permission_required('admin.access')
@templated
def view_for_brand(brand_id):
    """Show most frequent attendees for parties of this brand."""
    brand = brand_service.find_brand(brand_id)
    if brand is None:
        abort(404)

    parties = party_service.get_parties_for_brand(brand.id)

    # The party that started last, if any parties exist at all.
    most_recent_party = (
        max(parties, key=lambda party: party.starts_at) if parties else None
    )

    return {
        'brand': brand,
        'party_total': len(parties),
        'most_recent_party': most_recent_party,
        'top_attendees': _get_top_attendees(brand.id),
    }
def _get_top_attendees(brand_id):
    """Look up the brand's most frequent attendees as (user, count) pairs."""
    id_count_pairs = ticket_attendance_service.get_top_attendees_for_brand(
        brand_id
    )
    attendees = _replace_user_ids_with_users(id_count_pairs)

    # Highest attendance count first; ties broken by screen name.
    return sorted(attendees, key=lambda entry: (-entry[1], entry[0].screen_name))
def _replace_user_ids_with_users(attendee_ids):
    """Resolve each (user ID, attendance count) pair to (user, count)."""
    distinct_user_ids = {uid for uid, _count in attendee_ids}

    users_by_id = user_service.get_users_indexed_by_id(
        distinct_user_ids, include_avatars=False
    )

    return [(users_by_id[uid], count) for uid, count in attendee_ids]
| true |
521eb77c555736fbe679230d9374f162dddf4e52 | Python | lolozor/GeekBrains_courses_HW | /Алгоритмы и структуры данных на PYTHON/lesson_1/lesson1_hw6.py | UTF-8 | 1,041 | 4.46875 | 4 | [] | no_license | # 6.
# По длинам трех отрезков, введенных пользователем, определить возможность существования треугольника,
# составленного из этих отрезков.
# Если такой треугольник существует, то определить,
# является ли он разносторонним, равнобедренным или равносторонним.
a = int(input('Введите сторону 1: '))
b = int(input('Введите сторону 2: '))
c = int(input('Введите сторону 3: '))
if (a+b <= c) or (a+c <= b) or (b+c <= a):
print('Треугольник не существует')
elif (a != b) and (a != c) and (b != c):
print('Это разносторонний треугольник.')
elif (a == b) and (b == c):
print('Это равносторонний треугольник.')
else:
print('Это равнобедренный треугольник.'); | true |
868d2fd2a462a2601c29509f61b640b82632deb5 | Python | milena-mathew/coba-CCPO | /coba/tests/test_registry.py | UTF-8 | 7,739 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | import unittest
from coba.registry import CobaRegistry, coba_registry_class
class TestObject:
    """Dummy registry target that records its construction arguments."""

    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs
class TestArgObject:
    """Dummy registry target that requires exactly one positional argument."""

    def __init__(self, arg):
        self.arg = arg
class CobaRegistry_Tests(unittest.TestCase):
    """Tests for CobaRegistry registration and recipe-based construction.

    Recipes are JSON-like: a bare name constructs with no arguments; a
    dict maps a registered name to args/kwargs (scalars become a single
    positional arg); "method": "foreach" builds one object per argument
    set; nested names are constructed recursively.
    """
    def setUp(self) -> None:
        CobaRegistry.clear() #make sure the registry is fresh each test
    # --- registration / retrieval -------------------------------------
    def test_endpoint_loaded(self):
        klass = CobaRegistry.retrieve("NoneSink")
        self.assertEqual("NoneSink", klass.__name__)
    def test_endpoint_loaded_after_decorator_register(self):
        @coba_registry_class("MyTestObject")
        class MyTestObject(TestObject): pass
        klass = CobaRegistry.retrieve("NoneSink")
        self.assertEqual("NoneSink", klass.__name__)
    def test_register_decorator(self):
        @coba_registry_class("MyTestObject")
        class MyTestObject(TestObject): pass
        klass = CobaRegistry.construct("MyTestObject")
        self.assertIsInstance(klass, MyTestObject)
        self.assertEqual(klass.args, ())
        self.assertEqual(klass.kwargs, {})
    # --- basic construction from recipes ------------------------------
    def test_registered_create(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct("test")
        self.assertEqual(klass.args, ())
        self.assertEqual(klass.kwargs, {})
    def test_registered_create_args1(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": [1,2,3] })
        self.assertEqual(klass.args, (1,2,3))
        self.assertEqual(klass.kwargs, {})
    def test_registered_create_args2(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": 1 })
        self.assertEqual(klass.args, (1,))
        self.assertEqual(klass.kwargs, {})
    def test_registered_create_kwargs(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": {"a":1} })
        self.assertEqual(klass.args, ())
        self.assertEqual(klass.kwargs, {"a":1})
    def test_registered_create_args3(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": "abc" })
        self.assertEqual(klass.args, ("abc",))
        self.assertEqual(klass.kwargs, {})
    def test_registered_create_args_kwargs(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": [1,2,3], "kwargs": {"a":1} })
        self.assertEqual(klass.args, (1,2,3))
        self.assertEqual(klass.kwargs, {"a":1})
    def test_registered_create_name_args_kwargs(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "name": "test", "args": [1,2,3], "kwargs": {"a":1} })
        self.assertEqual(klass.args, (1,2,3))
        self.assertEqual(klass.kwargs, {"a":1})
    # --- "foreach" recipes: one object per argument set ----------------
    def test_registered_create_foreach1(self):
        CobaRegistry.register("test", TestObject)
        recipe = { "test":[[1,2,3]], "kwargs": {"a":1}, "method":"foreach" }
        klasses = CobaRegistry.construct(recipe)
        self.assertEqual(len(klasses), 1)
        self.assertEqual(klasses[0].args, (1,2,3))
        self.assertEqual(klasses[0].kwargs, {"a":1})
    def test_registered_create_foreach2(self):
        CobaRegistry.register("test", TestObject)
        recipe = { "test":[1,2,3], "kwargs": {"a":1}, "method":"foreach" }
        klasses = CobaRegistry.construct(recipe)
        self.assertEqual(len(klasses), 3)
        self.assertEqual(klasses[0].args, (1,))
        self.assertEqual(klasses[0].kwargs, {"a":1})
        self.assertEqual(klasses[1].args, (2,))
        self.assertEqual(klasses[1].kwargs, {"a":1})
        self.assertEqual(klasses[2].args, (3,))
        self.assertEqual(klasses[2].kwargs, {"a":1})
    def test_registered_create_foreach3(self):
        CobaRegistry.register("test", TestObject)
        recipe = { "test":[1,2], "kwargs": [{"a":1},{"a":2}], "method":"foreach" }
        klasses = CobaRegistry.construct(recipe)
        self.assertEqual(len(klasses), 2)
        self.assertEqual(klasses[0].args, (1,))
        self.assertEqual(klasses[0].kwargs, {"a":1})
        self.assertEqual(klasses[1].args, (2,))
        self.assertEqual(klasses[1].kwargs, {"a":2})
    def test_registered_create_foreach4(self):
        CobaRegistry.register("test", TestObject)
        recipe = { "test":[[1,2],3], "method":"foreach" }
        klasses = CobaRegistry.construct(recipe)
        self.assertEqual(len(klasses), 2)
        self.assertEqual(klasses[0].args, (1,2))
        self.assertEqual(klasses[0].kwargs, {})
        self.assertEqual(klasses[1].args, (3,))
        self.assertEqual(klasses[1].kwargs, {})
    # --- recursive recipes: registered names nested inside recipes -----
    def test_registered_create_recursive1(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": "test" })
        self.assertEqual(1, len(klass.args))
        self.assertEqual(klass.kwargs, {})
        self.assertIsInstance(klass.args[0], TestObject)
        self.assertEqual(klass.args[0].args, ())
        self.assertEqual(klass.args[0].kwargs, {})
    def test_registered_create_recursive2(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": {"test":1} })
        self.assertEqual(1, len(klass.args))
        self.assertEqual(klass.kwargs, {})
        self.assertIsInstance(klass.args[0], TestObject)
        self.assertEqual(klass.args[0].args, (1,))
        self.assertEqual(klass.args[0].kwargs, {})
    def test_registered_create_recursive3(self):
        CobaRegistry.register("test", TestObject)
        klass = CobaRegistry.construct({ "test": {"a": "test"} })
        self.assertEqual(klass.args, ())
        self.assertEqual(1, len(klass.kwargs))
        self.assertIsInstance(klass.kwargs["a"], TestObject)
        self.assertEqual(klass.kwargs["a"].args, ())
        self.assertEqual(klass.kwargs["a"].kwargs, {})
    def test_registered_create_array_arg(self):
        CobaRegistry.register("test", TestArgObject)
        klass = CobaRegistry.construct({ "test": [1,2,3] })
        self.assertEqual(klass.arg, [1,2,3])
    def test_registered_create_dict_arg(self):
        CobaRegistry.register("test", TestArgObject)
        with self.assertRaises(Exception):
            klass = CobaRegistry.construct({ "test": {"a":1} })
    # --- error cases ----------------------------------------------------
    def test_not_registered(self):
        CobaRegistry.register("test", TestObject)
        with self.assertRaises(Exception) as cm:
            CobaRegistry.construct("test2")
        self.assertEqual("Unknown recipe test2", str(cm.exception))
    def test_invalid_recipe1(self):
        CobaRegistry.register("test", TestObject)
        recipe = {"test":[1,2,3], "args":[4,5,6] }
        with self.assertRaises(Exception) as cm:
            CobaRegistry.construct(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))
    def test_invalid_recipe2(self):
        CobaRegistry.register("test", TestObject)
        recipe = {"test":[1,2,3], "name":"test", "args":[4,5,6]}
        with self.assertRaises(Exception) as cm:
            CobaRegistry.construct(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))
    def test_invalid_recipe3(self):
        CobaRegistry.register("test", TestObject)
        recipe = {"test":{"a":1}, "name":"test", "kwargs":{"a":1}}
        with self.assertRaises(Exception) as cm:
            CobaRegistry.construct(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
49705456258829f99aad487e64b2eed099440b4d | Python | KshanaRules/NOX | /NOX 0.1.py | UTF-8 | 2,619 | 3.328125 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
def menor(datos):
    """Find the first zero crossing of *datos* within a band around zero.

    Scans for the first value inside [-0.19, 0.21] whose successor lies on
    the other side of (or exactly at) zero, i.e. where the series changes
    sign.  Returns a two-element list [value, index] for that position.

    If no crossing is found, the last value and len(datos) are returned
    (matching the original loop's fall-through state).  An empty input
    yields [None, 0].

    Fixes over the original: no IndexError when an in-band value sits at
    the last position (datos[poz+1] ran past the end), and no NameError
    on empty input.
    """
    if not datos:
        return [None, 0]
    for poz, dato in enumerate(datos[:-1]):
        siguiente = datos[poz + 1]  # next sample in the series
        # Threshold band around 0 on the current sample...
        if -0.19 <= dato <= 0.21:
            # ...and a sign change (or a touch of zero) to the next sample.
            if (dato >= 0 and siguiente <= 0) or (dato <= 0 and siguiente >= 0):
                return [dato, poz]
    # No crossing found: mirror the original loop's final state.
    return [datos[-1], len(datos)]
Datos = [] # Holds every HR record of the current species for processing
nEspecies = 0
nEspecies2 = nEspecies + 1
archivoXLS = pd.read_excel('datos.xlsx') # Read the file with the 12 available species; the goal is to determine the entropy from the values of each one
pos = 16 # Row position for the species' name and data; pandas skips the file's first row (A,B,C...), otherwise position 0 would be (Groups name, , 0, 0.02...)
pos2 = pos + 1
columnas = archivoXLS.columns # Column names of the Excel file
especies = columnas[0:1] # Value of column 1: the species names, 'Group name'
columnas = columnas[2:-2] # Columns holding the harvest rates (HR); text columns filtered out
while(nEspecies<12): # Read each species, then process and plot its data
    Nespecie = archivoXLS[nEspecies:nEspecies2][especies].values # Species array, filtered by 'Group name'
    Nespecie = Nespecie[0][0] # Extract the value from the array (a string)
    HR = archivoXLS[pos:pos2][columnas].values # HR values for this species
    ren,col = HR.shape # Data size: ren=1, col=50 => 1 species, 50 HR values
    # Fill the array holding each HR value of the species
    c = 0
    while (c<col):
        Datos.append(HR[0,c])
        c = c+1
    # Position where the HR values change sign; gives the location and value.
    dato,poz = menor(Datos)
    # Plot the species
    # NOTE(review): poz+1 may run past the columns when menor() finds no
    # crossing -- confirm against the data file.
    fig, ax = plt.subplots()
    ax.plot(columnas, Datos, 'go--',linewidth=.5,markersize=3, label='HR')
    ax.annotate('CAMBIO',xy=(columnas[poz+1],dato),xytext=(.6,.504),arrowprops=(dict(facecolor='black',arrowstyle='simple')))
    ax.set_facecolor('#eafff5')
    ax.tick_params(labelcolor='tab:red')
    plt.title(Nespecie)
    plt.xlabel("Tasa de cosecha")
    plt.ylabel("Cambio de entropía")
    fig.savefig(Nespecie + '.png')
    plt.legend()
    plt.show()
    nEspecies = nEspecies + 1
    nEspecies2 = nEspecies + 1
    Datos = []
    pos = pos +1
    pos2 = pos2 + 1
f06eea176a3b053dffe88aa44a948f8d02c9b0b2 | Python | haolloyin/projecteuler | /solutions/1-9/p7XthPrime.py | UTF-8 | 389 | 3.203125 | 3 | [] | no_license |
# Project Euler - Problem 7
import prime
import time
start = time.time()  # wall-clock start for the timing report below
count = 0  # primes found so far
p = 1
# Test successive integers until the 10001st prime is found.
# (Python 2 print-statement syntax; relies on the local `prime` module.)
while True:
    p = p+1
    if prime.isPrime(p) == True:
        count = count+1
        print "%d : %d " % (count, p)
        if count == 10001:
            print "the 10001st prime is %d " % p
            break
print "%.8f Secs" % (time.time() - start)
print time.asctime()
print p
#104743
| true |
206d5f87188ba5e287c076aebb7a03c658226aa2 | Python | chorwonkim/__Algorithms__ | /BOJ/DataS/7785.py | UTF-8 | 445 | 3.59375 | 4 | [] | no_license | from sys import stdin
Read = stdin.readline  # fast line reader for stdin
# With a list, insertion and deletion each cost O(n),
# so the total could reach O(n^2); a set is used instead (O(1) add/remove).
d = set()
# Each log line is "<name> enter" or "<name> leave".
for _ in range(int(Read())):
    name, status = map(str, Read().split())
    if status == "enter":
        d.add(name)
    else:
        d.remove(name)
# Print everyone still present, in reverse alphabetical order.
result = list(d)
result.sort(reverse=True)
for item in result:
    print(item)
cdf60e5f0b42fa8302094590fc00de5dc945044e | Python | Aasthaengg/IBMdataset | /Python_codes/p03331/s296434499.py | UTF-8 | 140 | 3.328125 | 3 | [] | no_license | n = int(input())
s = 0
while True:
s += n % 10
n = n // 10
if n == 0:
break
if s == 1:
print(10)
else:
print(s) | true |
d1badc6f211cdcfcafb39a5044a3a64484dbb9ea | Python | MustafaIsmaill/road_info_osm_extract | /scripts/map_extract.py | UTF-8 | 4,585 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
import osmnx as ox
from road_info_osm_extract.msg import point
from road_info_osm_extract.msg import points
from road_info_osm_extract.msg import pointsList
class map_extract:
    def __init__(self, node_name, place_name, publish_rate):
        """Set up the ROS node, publishers, and download the street graph.

        node_name: name for the ROS node.
        place_name: place query passed to OSMnx (e.g. a city name).
        publish_rate: publishing frequency in Hz.
        """
        #initialise class variables
        self._place_name = place_name
        self._ros_node_name = node_name
        self._road_info = pointsList()
        self._road_points = pointsList()
        self._publish_rate = publish_rate
        #create publisher to the topic "road_info"
        self._road_info_publisher = rospy.Publisher("road_info", pointsList, queue_size=100)
        self._road_points_publisher = rospy.Publisher("road_points", pointsList, queue_size=100)
        #create a node
        rospy.init_node(self._ros_node_name, anonymous=False)
        self._publish_rate = rospy.Rate(self._publish_rate) # NOTE(review): rebinds the numeric Hz value to a rospy.Rate object
        #convert place in a map to a graph (drivable roads only)
        self._graph = ox.graph_from_place(self._place_name, network_type='drive')
        self._graph_proj = ox.project_graph(self._graph)
        #extract edges and nodes
        #edges define the geometry of the road
        #nodes define the start and end points of each road
        self._nodes, self._edges = ox.graph_to_gdfs(self._graph_proj, nodes=True, edges=True)
    def _publish_roads_data(self):
        """Parse the graph once, then publish both topics at the fixed
        rate until ROS shuts down (blocking loop)."""
        self._parse_road_info()
        self._parse_road_points()
        while not rospy.is_shutdown():
            self._road_info_publisher.publish(self._road_info)
            self._road_points_publisher.publish(self._road_points)
            self._publish_rate.sleep()
#parses the edges and nodes into readable lists
    def _parse_road_info(self):
        """Fill self._road_info with one 5-slot record per edge.

        Slot layout per edge (each slot is a `point` msg used as a pair):
          0: (x, y) of the start node    1: (x, y) of the end node
          2: (lat, lon) of the start     3: (lat, lon) of the end
          4: (edge length, one-way flag as 0.0/1.0)
        """
        #loop through all roads or "edges"
        for i in range(0,len(self._edges)):
            pointXYstart = point()
            pointXYend = point()
            pointLatLonStart = point()
            pointLatLonEnd = point()
            pointLengthOneWay = point()
            pointXYstart.x = self._get_start_x(i)
            pointXYstart.y = self._get_start_y(i)
            pointXYend.x = self._get_end_x(i)
            pointXYend.y = self._get_end_y(i)
            pointLatLonStart.x = self._get_start_lat(i)
            pointLatLonStart.y = self._get_start_lon(i)
            pointLatLonEnd.x = self._get_end_lat(i)
            pointLatLonEnd.y = self._get_end_lon(i)
            pointLengthOneWay.x = self._get_edge_length(i)
            pointLengthOneWay.y = self._get_edge_direction(i)
            points_array = points()
            points_array.pt.insert(0, pointXYstart)
            points_array.pt.insert(1, pointXYend)
            points_array.pt.insert(2, pointLatLonStart)
            points_array.pt.insert(3, pointLatLonEnd)
            points_array.pt.insert(4, pointLengthOneWay)
            self._road_info.points_list.insert(i, points_array)
    def _parse_road_points(self):
        """Fill self._road_points with every (x, y) vertex of each edge's
        geometry, one `points` message per edge, in edge order."""
        for i in range(0,len(self._edges)):
            x_points = self._edges[:].geometry[i].xy[0]
            y_points = self._edges[:].geometry[i].xy[1]
            points_xy = points()
            # Pair up the coordinate arrays of the edge's geometry.
            for xy in range(0,len(x_points)):
                point_xy = point()
                point_xy.x = x_points[xy]
                point_xy.y = y_points[xy]
                points_xy.pt.insert(xy, point_xy)
            self._road_points.points_list.insert(i, points_xy)
#returns the x-coordinate of the start node
def _get_start_x(self, edge_id):
start_node_id = self._edges[:].u[edge_id]
return self._nodes[:].x[start_node_id]
#returns the y-coordinate of the start node
def _get_start_y(self, edge_id):
start_node_id = self._edges[:].u[edge_id]
return self._nodes[:].y[start_node_id]
#returns the x-coordinate of the end node
def _get_end_x(self, edge_id):
start_node_id = self._edges[:].v[edge_id]
return self._nodes[:].x[start_node_id]
#returns the y-coordinate of the end node
def _get_end_y(self, edge_id):
start_node_id = self._edges[:].v[edge_id]
return self._nodes[:].y[start_node_id]
#returns the latitude of the start node
def _get_start_lat(self, edge_id):
start_node_id = self._edges[:].u[edge_id]
return self._nodes[:].lat[start_node_id]
#returns the longitude of the start node
def _get_start_lon(self, edge_id):
start_node_id = self._edges[:].u[edge_id]
return self._nodes[:].lon[start_node_id]
#returns the latitude of the end node
def _get_end_lat(self, edge_id):
start_node_id = self._edges[:].v[edge_id]
return self._nodes[:].lat[start_node_id]
#returns the longitude of the end node
def _get_end_lon(self, edge_id):
start_node_id = self._edges[:].v[edge_id]
return self._nodes[:].lon[start_node_id]
#returns the length of the edge or road
def _get_edge_length(self, edge_id):
return self._edges[:].length[edge_id]
#returns a boolean indicating whether a road is a one way or not
def _get_edge_direction(self, edge_id):
direction = self._edges[:].oneway[edge_id]
direction = float(direction)
return direction | true |