| blob_id (string, length 40) | language (string, 1 class) | repo_name (string, length 5–133) | path (string, length 2–333) | src_encoding (string, 30 classes) | length_bytes (int64, 18–5.47M) | score (float64, 2.52–5.81) | int_score (int64, 3–5) | detected_licenses (list, length 0–67) | license_type (string, 2 classes) | text (string, length 12–5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
| d907d58d96d89dae7c7aed5d1b6018504a7c8906 | Python | MaEvGoR/datamining_project | /Spectrum.py | UTF-8 | 13,129 | 2.765625 | 3 | [] | no_license |
import math
import numpy as np
class Spectrum:
def __init__(self, price_step):
self.price_step = price_step
self.buy_bins = []
self.all_buy_bins = []
self.all_buy_volumes = []
self.all_sell_bins = []
self.all_sell_volumes = []
self.sell_bins = []
self.best_sell = -1
self.best_buy = -1
self.buy_volumes = [0 for i in range(50)]
self.sell_volumes = [0 for i in range(50)]
self.spectrum_sell_bins = []
self.spectrum_buy_bins = []
self.spectrum_sell = [0 for i in range(10)]
self.spectrum_buy = [0 for i in range(10)]
def insert_to_buy(self, price, volume):
max_price = self.all_buy_bins[0]
# new one is greater than the currently recorded maximum
if price > max_price:
dist = math.ceil((price - max_price) / self.price_step)
self.all_buy_bins = [i for i in np.arange(price, max_price, -self.price_step)] + self.all_buy_bins
self.all_buy_volumes = [0 for i in
range(len(self.all_buy_bins) - len(self.all_buy_volumes))] + self.all_buy_volumes
self.all_buy_volumes[0] += volume
return 0
else:
idx = math.ceil((max_price - price) / self.price_step)
if idx < len(self.all_buy_bins):
self.all_buy_volumes[idx] += volume
return idx
else:
dist = idx - len(self.all_buy_bins) + 1
self.all_buy_bins = self.all_buy_bins + [i for i in
np.arange(self.all_buy_bins[-1] - self.price_step, price - 1,
-self.price_step)]
self.all_buy_volumes = self.all_buy_volumes + [0 for i in range(
len(self.all_buy_bins) - len(self.all_buy_volumes))]
self.all_buy_volumes[idx] += volume
return idx
def insert_to_sell(self, price, volume):
min_price = self.all_sell_bins[0]
# new one is less than the currently recorded minimum
if price < min_price:
dist = math.ceil((min_price - price) / self.price_step)
self.all_sell_bins = [i for i in np.arange(price, min_price, self.price_step)] + self.all_sell_bins
self.all_sell_volumes = [0 for i in range(
len(self.all_sell_bins) - len(self.all_sell_volumes))] + self.all_sell_volumes
self.all_sell_volumes[0] += volume
return 0
else:
idx = math.ceil((price - min_price) / self.price_step)
if idx < len(self.all_sell_bins):
self.all_sell_volumes[idx] += volume
return idx
else:
dist = idx - len(self.all_sell_bins) + 1
self.all_sell_bins = self.all_sell_bins + [i for i in
np.arange(self.all_sell_bins[-1] + self.price_step,
price + 1, self.price_step)]
self.all_sell_volumes = self.all_sell_volumes + [0 for i in range(
len(self.all_sell_bins) - len(self.all_sell_volumes))]
self.all_sell_volumes[idx] += volume
return idx
def delete_from_buy(self, price, volume):
max_price = self.all_buy_bins[0]
idx = math.ceil((max_price - price) / self.price_step)
if 0 <= idx < len(self.all_buy_bins):
if volume < self.all_buy_volumes[idx]:
self.all_buy_volumes[idx] -= volume
return idx
# find first non-zero element
else:
self.all_buy_volumes[idx] = 0
while self.all_buy_volumes[idx] == 0:
if (idx == len(self.all_buy_volumes) - 1):
break
idx += 1
return idx
else:
return -1
def delete_from_sell(self, price, volume):
min_price = self.all_sell_bins[0]
idx = math.ceil((price - min_price) / self.price_step)
if 0 <= idx < len(self.all_sell_bins):
if volume < self.all_sell_volumes[idx]:
self.all_sell_volumes[idx] -= volume
return idx
# find first non-zero element
else:
self.all_sell_volumes[idx] = 0
while self.all_sell_volumes[idx] == 0:
if (idx == len(self.all_sell_volumes) - 1):
break
idx += 1
return idx
else:
return -1
def find_idx_sell(self, price):
k = math.ceil((price - self.best_sell) / self.price_step)
if k == 50:
k = 49
return int(k)
def find_idx_buy(self, price):
k = math.ceil((self.best_buy - price) / self.price_step)
if k == 50:
k = 49
return int(k)
def find_idx_spectrum_sell(self, price):
k = math.ceil((price - self.best_sell) / self.price_step) // 5
if k == 10:
k = 9
return k
def find_idx_spectrum_buy(self, price):
k = math.ceil((self.best_buy - price) / self.price_step) // 5
if k == 10:
k = 9
return k
def recalc_spectrum_sell(self):
self.spectrum_sell_bins = [self.sell_bins[i] for i in range(0, 50, 5)]
self.spectrum_sell = [sum(self.sell_volumes[i:i + 5]) for i in range(0, 50, 5)]
def recalc_spectrum_buy(self):
self.spectrum_buy_bins = [self.buy_bins[i] for i in range(0, 50, 5)]
self.spectrum_buy = [sum(self.buy_volumes[i:i + 5]) for i in range(0, 50, 5)]
def new_sell_order(self, price, volume):
# no sell orders recorded yet
if self.best_sell == -1:
self.best_sell = price
max_sell = self.best_sell + 50 * self.price_step
self.sell_bins = [p for p in np.arange(self.best_sell, max_sell, self.price_step)]
self.spectrum_sell_bins = [p for p in np.arange(self.best_sell, max_sell, self.price_step * 5)]
self.sell_volumes[0] = volume
self.spectrum_sell[0] = volume
self.all_sell_bins = self.sell_bins.copy()
self.all_sell_volumes = self.sell_volumes.copy()
else:
# sell order falls somewhere in the existing bins
if self.best_sell <= price < self.best_sell + 50 * self.price_step:
idx = self.find_idx_sell(price)
if idx == 50:
idx = 49
self.sell_volumes[idx] += volume
spect_idx = self.find_idx_spectrum_sell(price)
self.spectrum_sell[spect_idx] += volume
_ = self.insert_to_sell(price, volume)
else:
# found new best, update everything
if self.best_sell > price:
idx = self.insert_to_sell(price, volume)
self.best_sell = price
if idx + 50 < len(self.all_sell_bins):
self.sell_bins = self.all_sell_bins[idx:idx + 50]
self.sell_volumes = self.all_sell_volumes[idx:idx + 50]
else:
self.sell_bins = [p for p in np.arange(self.best_sell, self.best_sell + 50 * self.price_step,
self.price_step)]
self.sell_volumes = self.all_sell_volumes[idx:] + [0 for i in
range(50 - len(self.all_sell_volumes) + idx)]
self.recalc_spectrum_sell()
                # price beyond the 50-step window: record it in the full book for later use
else:
_ = self.insert_to_sell(price, volume)
def new_buy_order(self, price, volume):
# no buy orders recorded yet
if self.best_buy == -1:
self.best_buy = price
min_buy = self.best_buy - 50 * self.price_step
self.buy_bins = [p for p in np.arange(self.best_buy, min_buy, -self.price_step)]
self.spectrum_buy_bins = [p for p in np.arange(self.best_buy, min_buy, -self.price_step * 5)]
self.buy_volumes[0] = volume
self.spectrum_buy[0] = volume
self.all_buy_bins = self.buy_bins.copy()
self.all_buy_volumes = self.buy_volumes.copy()
else:
# buy order falls somewhere in the existing bins
if self.best_buy >= price > self.best_buy - 50 * self.price_step:
idx = self.find_idx_buy(price)
if idx == 50:
idx = 49
self.buy_volumes[idx] += volume
spect_idx = self.find_idx_spectrum_buy(price)
self.spectrum_buy[spect_idx] += volume
_ = self.insert_to_buy(price, volume)
else:
# found new best, update everything
if self.best_buy < price:
idx = self.insert_to_buy(price, volume)
self.best_buy = price
if idx + 50 < len(self.all_buy_bins):
self.buy_bins = self.all_buy_bins[idx:idx + 50]
self.buy_volumes = self.all_buy_volumes[idx:idx + 50]
else:
self.buy_bins = [p for p in np.arange(self.best_buy, self.best_buy - 50 * self.price_step,
-self.price_step)]
self.buy_volumes = self.all_buy_volumes[idx:] + [0 for i in
range(50 - len(self.all_buy_volumes) + idx)]
self.recalc_spectrum_buy()
                # price below the 50-step window: record it in the full book for later use
else:
_ = self.insert_to_buy(price, volume)
def delete_sell_order(self, price, volume):
# does not remove current best
if self.best_sell + 50 * self.price_step > price > self.best_sell or price == self.best_sell and volume < \
self.sell_volumes[0]:
idx = self.find_idx_sell(price)
self.sell_volumes[idx] = max(0, self.sell_volumes[idx] - volume)
spect_idx = self.find_idx_spectrum_sell(price)
self.spectrum_sell[spect_idx] = max(0, self.spectrum_sell[spect_idx] - volume)
else:
# if removes current best
if price == self.best_sell and volume >= self.sell_volumes[0]:
idx = self.delete_from_sell(price, volume)
self.best_sell = self.all_sell_bins[idx]
if idx + 50 < len(self.all_sell_bins):
self.sell_bins = self.all_sell_bins[idx:idx + 50]
self.sell_volumes = self.all_sell_volumes[idx:idx + 50]
else:
self.sell_bins = [p for p in
np.arange(self.best_sell, self.best_sell + 50 * self.price_step, self.price_step)]
self.sell_volumes = self.all_sell_volumes[idx:] + [0 for i in
range(50 - len(self.all_sell_volumes) + idx)]
self.recalc_spectrum_sell()
# if does not fall in 50 steps
elif price > self.best_sell + 50 * self.price_step:
_ = self.delete_from_sell(price, volume)
def delete_buy_order(self, price, volume):
# does not remove current best
if self.best_buy - 50 * self.price_step < price < self.best_buy or price == self.best_buy and volume < \
self.buy_volumes[0]:
idx = self.find_idx_buy(price)
self.buy_volumes[idx] = max(0, self.buy_volumes[idx] - volume)
spect_idx = self.find_idx_spectrum_buy(price)
self.spectrum_buy[spect_idx] = max(0, self.spectrum_buy[spect_idx] - volume)
else:
# if removes current best
if price == self.best_buy and volume >= self.buy_volumes[0]:
idx = self.delete_from_buy(price, volume)
self.best_buy = self.all_buy_bins[idx]
if idx + 50 < len(self.all_buy_bins):
self.buy_bins = self.all_buy_bins[idx:idx + 50]
self.buy_volumes = self.all_buy_volumes[idx:idx + 50]
else:
self.buy_bins = [p for p in
np.arange(self.best_buy, self.best_buy - 50 * self.price_step, -self.price_step)]
self.buy_volumes = self.all_buy_volumes[idx:] + [0 for i in
range(50 - len(self.all_buy_volumes) + idx)]
self.recalc_spectrum_buy()
# if does not fall in 50 steps
        elif price < self.best_buy - 50 * self.price_step:
_ = self.delete_from_buy(price, volume)
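# A minimal usage sketch (illustrative prices and volumes, not from the
# original project): seed both sides of the book, then remove the best ask.
if __name__ == "__main__":
    book = Spectrum(price_step=0.5)
    book.new_sell_order(101.0, 10)    # first ask seeds the 50-bin sell window
    book.new_sell_order(100.0, 5)     # better ask: the window shifts down to 100.0
    book.new_buy_order(99.0, 7)       # first bid seeds the 50-bin buy window
    book.delete_sell_order(100.0, 5)  # removing the whole best ask restores 101.0
    print(book.best_buy, book.best_sell)  # 99.0 101.0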
| true |
| 0bfc3a6dedfd160096540ca46b1ab7c7f55db3a5 | Python | SandaruwanWije/cryptography | /ceasar_cipher/ceasar_cipher.py | UTF-8 | 2,180 | 3.1875 | 3 | [] | no_license |
import sys
def help():
print("python3 ceasar_cipher.py -e -m \"Enter Mesage Here\" -k 3")
print("python3 ceasar_cipher.py -d -c \"ciper text\" -k 3")
print("-k should me 0 26")
def encrypt(msg, key):
cipher = ""
for letter in msg:
int_ltr = ord(letter)
if int_ltr > 64 and int_ltr < 91:
new_int_ltr = int_ltr + key
if new_int_ltr > 90:
new_int_ltr = 64 + (key - (90 - int_ltr))
cipher = cipher + chr(new_int_ltr)
elif int_ltr > 96 and int_ltr < 123:
new_int_ltr = int_ltr + key
if new_int_ltr > 122:
new_int_ltr = 96 + (key - (122 - int_ltr))
cipher = cipher + chr(new_int_ltr)
else:
cipher = cipher + letter
return cipher
def decrypt(cipher, key):
msg = ""
for letter in cipher:
int_ltr = ord(letter)
if int_ltr > 64 and int_ltr < 91:
new_int_ltr = int_ltr - key
if new_int_ltr < 65:
new_int_ltr = 91 - (65 - new_int_ltr)
msg = msg + chr(new_int_ltr)
elif int_ltr > 96 and int_ltr < 123:
new_int_ltr = int_ltr - key
if new_int_ltr < 97:
new_int_ltr = 123 - (97 - new_int_ltr)
msg = msg + chr(new_int_ltr)
else:
msg = msg + letter
return msg
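# Round-trip sanity check (illustrative): with key 3, "Hello, World!"
# encrypts to "Khoor, Zruog!" and decrypts back unchanged.
assert decrypt(encrypt("Hello, World!", 3), 3) == "Hello, World!"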
if __name__ == "__main__":
try:
argv = sys.argv
if "-h" in argv:
help()
elif "-e" in argv:
msg_index = argv.index("-m") + 1
msg = argv[msg_index].replace("\"", "")
key_index = argv.index("-k") + 1
key = argv[key_index]
cipher = encrypt(msg, int(key))
print(cipher)
else:
cipher_index = argv.index("-c") + 1
cipher = argv[cipher_index].replace("\"", "")
key_index = argv.index("-k") + 1
key = argv[key_index]
msg = decrypt(cipher, int(key))
print(msg)
except Exception as e:
print("Something went wrong. python3 ceasar_cipher.py -h for help")
print("\nHere the errand log:\n")
print(e)
| true |
| 4e95a9a726881c765106344fca263a5f0118729e | Python | ifebuche/2NYP-DS-Project | /onboard.py | UTF-8 | 2,991 | 2.625 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 2020 11:13:37
Updated on Sat Oct 31, 2020 11:14:29
@author: Fesh
"""
###################################################################################
##1. This Script finds and assembles all Excel files sent over by streamers which are products of meating.py
##2. The joined dataset is written to our database and the excel files moved to 'Warehoused' folder
##################################################################################
print("Onboarding script pipline for 2NYP DS Project v1")
print("Importing modules...")
import pyodbc, time, os, sys
import pandas as pd
from datetime import datetime
import urllib
from sqlalchemy import create_engine, types
#Make connection
params = urllib.parse.quote_plus(r'DRIVER={ODBC Driver 17 for SQL Server};SERVER=FESH\FESHQL; DATABASE=feshdb; Trusted_Connection=yes')
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
files = [line for line in os.listdir() if line.endswith('xlsx')]
if not files:
print(f"We could not find any Excel files.\nExiting")
time.sleep(2)
sys.exit()
#Checker
checker = ['id', 'text', 'source', 'in_reply_to_status_id', 'create_date', 'create_time', 'followers',
'fav_count', 'statuses_count', 'loacation', 'verified', 'name', 'screen_name']
#Assemble all files
dfs = []
for item in files:
df = pd.read_excel(item)
if df.columns.tolist() == checker:
dfs.append(df)
print(f"{item} done")
else:
print(f"Inconsistent column names in {item}")
print("Trying to correct it")
        #Check if the two columns later dropped by Twitter are present in an old-format df
try:
if 'extended_tweet' in df.columns and 'display_text_range' in df.columns:
del df['extended_tweet']
del df['display_text_range']
dfs.append(df)
print(f"{item} done")
else:
print('Was not what we thought.\nExiting.')
time.sleep(2)
sys.exit()
except:
print('Was not what we thought.\nExiting.')
time.sleep(2)
sys.exit()
combo = pd.concat(dfs).reset_index(drop=True)
#Rename id to tweet_id
combo = combo.rename(columns = {'id':'tweet_id'})
combo.keys()
print("Commencing write to warehouse")
#Write data to the _2NYP table, appending if it exists
try:
combo.to_sql('_2NYP', engine, if_exists='append', index=False)
print("Successfully writtent to warehouse")
#Move file Warehoused folder
for item in files:
namer = item[:-5]
print(f"Moving {item} to the 'Warehoused' folder...")
time.sleep(2)
now = datetime.now()
name_marker = datetime.strftime(now, '%Y_%m_%d_%H_%M')
path_now = os.getcwd() + '\\'
os.replace(path_now + item, path_now + 'Warehoused\\' + namer + '_' + name_marker + '.xlsx')
except Exception as e:
print("Something bad occurred!")
print(e)
| true |
| ae7ecfd0f92e3e8ab0323f456b5f4cb4513172e0 | Python | lipug/misc | /tinkering/background_tasks.py | UTF-8 | 5,594 | 3.234375 | 3 | [] | no_license |
import asyncio, socket
def schedule_coroutine(target, *, loop=None):
"""Schedules target coroutine in the given event loop
If not given, *loop* defaults to the current thread's event loop
Returns the scheduled task.
"""
if asyncio.iscoroutine(target):
return asyncio.ensure_future(target, loop=loop)
raise TypeError("target must be a coroutine, "
"not {!r}".format(type(target)))
def call_in_background(target, *, loop=None, executor=None):
"""Schedules and starts target callable as a background task
If not given, *loop* defaults to the current thread's event loop
If not given, *executor* defaults to the loop's default executor
Returns the scheduled task.
"""
if loop is None:
loop = asyncio.get_event_loop()
if callable(target):
return loop.run_in_executor(executor, target)
raise TypeError("target must be a callable, "
"not {!r}".format(type(target)))
def run_in_foreground(task, *, loop=None):
"""Runs event loop in current thread until the given task completes
Returns the result of the task.
For more complex conditions, combine with asyncio.wait()
To include a timeout, combine with asyncio.wait_for()
"""
if loop is None:
loop = asyncio.get_event_loop()
return loop.run_until_complete(asyncio.ensure_future(task, loop=loop))
async def handle_tcp_echo(reader, writer):
data = await reader.read(100)
message = data.decode()
addr = writer.get_extra_info('peername')
print("-> Server received %r from %r" % (message, addr))
print("<- Server sending: %r" % message)
writer.write(data)
await writer.drain()
print("-- Terminating connection on server")
writer.close()
async def tcp_echo_client(message, port, loop=None):
reader, writer = await asyncio.open_connection('127.0.0.1', port, loop=loop)
print('-> Client sending: %r' % message)
writer.write(message.encode())
data = (await reader.read(100)).decode()
print('<- Client received: %r' % data)
print('-- Terminating connection on client')
writer.close()
return data
def tcp_echo_client_sync(message, port):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('-> Client connecting to port: %r' % port)
conn.connect(('127.0.0.1', port))
print('-> Client sending: %r' % message)
conn.send(message.encode())
data = conn.recv(100).decode()
print('<- Client received: %r' % data)
print('-- Terminating connection on client')
conn.close()
return data
"""
make_server = asyncio.start_server(handle_tcp_echo, '127.0.0.1')
server = run_in_foreground(make_server)
server.sockets[0]
port = server.sockets[0].getsockname()[1]
make_server2 = asyncio.start_server(handle_tcp_echo, '127.0.0.1')
server2 = run_in_foreground(make_server2)
server2.sockets[0]
port2 = server2.sockets[0].getsockname()[1]
print(run_in_foreground(tcp_echo_client('Hello World!', port)))
print(run_in_foreground(tcp_echo_client('Hello World!', port2)))
echo1 = schedule_coroutine(tcp_echo_client('Hello World!', port))
echo2 = schedule_coroutine(tcp_echo_client('Hello World!', port2))
run_in_foreground(asyncio.wait([echo1, echo2]))
echo1.result()
echo2.result()
query_server = partial(tcp_echo_client_sync, "Hello World!", port)
query_server2 = partial(tcp_echo_client_sync, "Hello World!", port2)
bg_call = call_in_background(query_server)
bg_call2 = call_in_background(query_server2)
run_in_foreground(asyncio.wait([bg_call, bg_call2]))
bg_call.result()
bg_call2.result()
"""
"""
import socket
def blocking_tcp_client(message, port):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('-> Client connecting to port: {}'.format(port))
conn.connect(('127.0.0.1', port))
print('-> Client sending: {!r}'.format(message))
conn.send(message.encode())
response = conn.recv(100).decode()
print('<- Client received: {!r}'.format(response))
print('-- Terminating connection on client')
conn.close()
return response
import asyncio
async def handle_tcp_echo(reader, writer):
message = (await reader.read(100)).decode()
print('-> Server received: {!r}'.format(message))
client = writer.get_extra_info('peername')
print("<- Server sending {!r} to {}".format(message, client))
writer.write(message.encode())
await writer.drain()
print("-- Terminating connection on server")
writer.close()
loop = asyncio.get_event_loop()
make_server = asyncio.start_server(handle_tcp_echo, '127.0.0.1')
server = loop.run_until_complete(make_server)
server.sockets
port = server.sockets[0].getsockname()[1]
from functools import partial
query_server = partial(blocking_tcp_client, "Hello World!", port)
background_call = loop.run_in_executor(None, query_server)
response = loop.run_until_complete(background_call)
response
async def tcp_echo_client(message, port):
reader, writer = await asyncio.open_connection('127.0.0.1', port)
print('-> Client sending: {!r}'.format(message))
writer.write(message.encode())
response = (await reader.read(100)).decode()
print('<- Client received: {!r}'.format(response))
print('-- Terminating connection on client')
writer.close()
return response
response = loop.run_until_complete(tcp_echo_client('Hello World!', port))
response
def echo_range(stop):
tasks = (asyncio.ensure_future(tcp_echo_client(str(i), port)) for i in range(stop))
return asyncio.gather(*tasks)
responses = list(loop.run_until_complete(echo_range(10)))
responses
"""
| true |
| ec0d764500c36af6b6dfbad031c0470922b8d7a0 | Python | jtsw1990/oop_vending_machine | /main.py | UTF-8 | 2,066 | 2.515625 | 3 | ["MIT"] | permissive |
from vending_machine.vending_machine import DataReader, VendingMachine
from vending_machine.customer_simulation import CustomerArrival, CustomerChoice
import time
import csv
import errno  # needed by the makedirs guard below
from datetime import datetime
import os
import json
with open("config.json") as f:
config = json.load(f)
PERIOD_IN_HOURS = config["HOURS"]
ts = datetime.now().strftime("%d-%m-%y_%H-%M")
output_folder = "./analytics/{}".format(ts)
vending_one = VendingMachine(config["MAX_CAPACITY"])
vending_one.load_drinks("./vending_machine/drinks_list.csv")
sample_customer = CustomerArrival()
sample_customer_choice = CustomerChoice()
if not os.path.exists(output_folder):
try:
os.makedirs(output_folder)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open("{}/report_{}.csv".format(output_folder, ts, ts), mode="w+") as csv_file:
colnames = ["drink", "price", "hour", "arrival_no"]
writer = csv.DictWriter(csv_file, fieldnames=colnames)
writer.writeheader()
for hour in range(PERIOD_IN_HOURS):
arrived_per_unit_time = sample_customer.calculate_arrivals()
print("{} customers arrived in time {}".format(
arrived_per_unit_time, hour + 1))
for customer in range(arrived_per_unit_time):
drinks_displayed = vending_one.drinks_displayed
drink_choice = sample_customer_choice.choose_drink(
vending_one.display_stock())
            if (drink_choice is not None) and (vending_one.dispense_drink(drink_choice) == drink_choice):  # check for None first; 'and' short-circuits where '&' did not
print("Customer: {} from time: {} chose: {}".format(
customer + 1, hour + 1, drink_choice))
print("Drinks left: {}, Cumulative Earnings: {}".format(
vending_one.current_stock, vending_one.current_earnings))
writer.writerow(
{
"drink": drink_choice,
"price": vending_one.stock_list[drink_choice][0],
"hour": hour,
"arrival_no": customer
}
)
else:
print("{} was chosen but not available".format(drink_choice))
writer.writerow(
{
"drink": "missed_sale",
"price": 0,
"hour": hour,
"arrival_no": customer
}
)
| true |
| 581934820e5e49bec35507639f3ffda488b1f4f9 | Python | GazzolaLab/PyElastica | /examples/RigidBodyCases/RodRigidBodyContact/post_processing.py | UTF-8 | 28,770 | 2.59375 | 3 | ["MIT"] | permissive |
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.patches import Circle  # used by the 2D views in plot_video_with_surface
from typing import Dict, Sequence
from tqdm import tqdm
def make_data_for_cylinder_along_y(cstart, cradius, cheight):
center_x, center_z = cstart[0], cstart[1]
y = np.linspace(0, cheight, 5)
theta = np.linspace(0, 2 * np.pi, 20)
theta_grid, y_grid = np.meshgrid(theta, y)
x_grid = cradius * np.cos(theta_grid) + center_x
z_grid = cradius * np.sin(theta_grid) + center_z
y_grid += cstart[2]
return [x_grid, y_grid, z_grid]
def plot_video(
rod_history: dict,
cylinder_history: dict,
video_name="video.mp4",
margin=0.2,
fps=60,
step=1,
*args,
**kwargs,
): # (time step, x/y/z, node)
cylinder_start = np.array(cylinder_history["position"])[0, ...]
cylinder_radius = kwargs.get("cylinder_radius")
cylinder_height = kwargs.get("cylinder_height")
cylinder_direction = kwargs.get("cylinder_direction")
XC, YC, ZC = make_data_for_cylinder_along_y(
cylinder_start, cylinder_radius, cylinder_height
)
import matplotlib.animation as manimation
plt.rcParams.update({"font.size": 22})
# Should give a (n_time, 3, n_elem) array
positions = np.array(rod_history["position"])
# (n_time, 3) array
com = np.array(rod_history["com"])
cylinder_com = np.array(cylinder_history["com"])
cylinder_origin = cylinder_com - 0.5 * cylinder_height * cylinder_direction
print("plot video")
FFMpegWriter = manimation.writers["ffmpeg"]
metadata = dict(title="Movie Test", artist="Matplotlib", comment="Movie support!")
writer = FFMpegWriter(fps=fps, metadata=metadata)
dpi = 50
# min_limits = np.roll(np.array([0.0, -0.5 * cylinder_height, 0.0]), _roll_key)
fig = plt.figure(1, figsize=(10, 8), frameon=True, dpi=dpi)
ax = plt.axes(projection="3d") # fig.add_subplot(111)
ax.grid(which="minor", color="k", linestyle="--")
ax.grid(which="major", color="k", linestyle="-")
# plt.axis("square")
i = 0
(rod_line,) = ax.plot(positions[i, 0], positions[i, 1], positions[i, 2], lw=3.0)
XC, YC, ZC = make_data_for_cylinder_along_y(
cylinder_origin[i, ...], cylinder_radius, cylinder_height
)
surf = ax.plot_surface(XC, YC, ZC, color="g", alpha=0.5)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
min_limits = np.array([0.0, 0.0, -0.5 * cylinder_height])
min_limits = -np.abs(min_limits)
max_limits = min_limits + cylinder_height
ax.set_xlim([min_limits[0], max_limits[0]])
ax.set_ylim([min_limits[1], max_limits[1]])
ax.set_zlim([min_limits[2], max_limits[2]])
with writer.saving(fig, video_name, dpi):
with plt.style.context("seaborn-white"):
for i in range(0, positions.shape[0], int(step)):
rod_line.set_xdata(positions[i, 0])
rod_line.set_ydata(positions[i, 1])
rod_line.set_3d_properties(positions[i, 2])
XC, YC, ZC = make_data_for_cylinder_along_y(
cylinder_origin[i, ...], cylinder_radius, cylinder_height
)
surf.remove()
surf = ax.plot_surface(XC, YC, ZC, color="g", alpha=0.5)
writer.grab_frame()
from matplotlib.patches import Circle
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
i = 0
cstart = cylinder_origin
(rod_line,) = ax.plot(positions[i, 0], positions[i, 1], lw=3.0)
(tip_line,) = ax.plot(com[i, 0], com[i, 1], "k--")
min_limits = np.array([0.0, 0.0, -0.5 * cylinder_height])
max_limits = min_limits + cylinder_height
ax.set_xlim([min_limits[0], max_limits[0]])
ax.set_ylim([min_limits[2], max_limits[2]])
circle_artist = Circle((cstart[i, 0], cstart[i, 2]), cylinder_radius, color="g")
ax.add_artist(circle_artist)
ax.set_aspect("equal")
video_name = "2D_" + video_name
with writer.saving(fig, video_name, dpi):
with plt.style.context("fivethirtyeight"):
for i in range(0, positions.shape[0], int(step)):
rod_line.set_xdata(positions[i, 0])
rod_line.set_ydata(positions[i, 2])
tip_line.set_xdata(com[:i, 0])
tip_line.set_ydata(com[:i, 2])
circle_artist.center = cstart[i, 0], cstart[i, 2]
writer.grab_frame()
def plot_cylinder_rod_position(
rod_history,
cylinder_history,
cylinder_radius,
rod_base_radius,
TIP_COLLISION,
TIP_CHOICE,
_roll_key=0,
):
cylinder_start = np.array(cylinder_history["position"])[0, ...]
positions = np.array(rod_history["position"])
sim_time = np.array(rod_history["time"])
n_elem = positions.shape[-1]
fig = plt.figure(figsize=(10, 8), frameon=True, dpi=150)
plt.rcParams.update({"font.size": 18})
ax = fig.add_subplot(111)
colliding_element_idx = n_elem // 2
if TIP_COLLISION:
colliding_element_idx = 0 if TIP_CHOICE == 1 else -1
colliding_element_history = positions[:, :, colliding_element_idx]
# fig = plt.figure(3, figsize=(8, 5))
# ax = fig.add_subplot(111)
ax.plot(sim_time, colliding_element_history[:, _roll_key], label="rod")
ax.hlines(
cylinder_start[_roll_key] - cylinder_radius - rod_base_radius,
sim_time[0],
sim_time[-1],
"k",
linestyle="dashed",
label="cylinder",
)
plt.xlabel("Time [s]", fontsize=20)
plt.ylabel("Position", fontsize=20)
fig.legend(prop={"size": 20})
plt.show()
def plot_velocity(
plot_params_rod_one: dict,
plot_params_rod_two: dict,
filename="velocity.png",
SAVE_FIGURE=False,
):
time = np.array(plot_params_rod_one["time"])
avg_velocity_rod_one = np.array(plot_params_rod_one["com_velocity"])
avg_velocity_rod_two = np.array(plot_params_rod_two["com_velocity"])
total_energy_rod_one = np.array(plot_params_rod_one["total_energy"])
total_energy_rod_two = np.array(plot_params_rod_two["total_energy"])
fig = plt.figure(figsize=(12, 10), frameon=True, dpi=150)
axs = []
axs.append(plt.subplot2grid((4, 1), (0, 0)))
axs.append(plt.subplot2grid((4, 1), (1, 0)))
axs.append(plt.subplot2grid((4, 1), (2, 0)))
axs.append(plt.subplot2grid((4, 1), (3, 0)))
axs[0].plot(time[:], avg_velocity_rod_one[:, 0], linewidth=3, label="rod_one")
axs[0].plot(time[:], avg_velocity_rod_two[:, 0], linewidth=3, label="rod_two")
axs[0].plot(
time[:],
avg_velocity_rod_one[:, 0] + avg_velocity_rod_two[:, 0],
"--",
linewidth=3,
label="total",
)
axs[0].set_ylabel("x velocity", fontsize=20)
axs[1].plot(
time[:],
avg_velocity_rod_one[:, 1],
linewidth=3,
)
axs[1].plot(
time[:],
avg_velocity_rod_two[:, 1],
linewidth=3,
)
axs[1].plot(
time[:],
avg_velocity_rod_one[:, 1] + avg_velocity_rod_two[:, 1],
"--",
linewidth=3,
)
axs[1].set_ylabel("y velocity", fontsize=20)
axs[2].plot(
time[:],
avg_velocity_rod_one[:, 2],
linewidth=3,
)
axs[2].plot(
time[:],
avg_velocity_rod_two[:, 2],
linewidth=3,
)
axs[2].plot(
time[:],
avg_velocity_rod_one[:, 2] + avg_velocity_rod_two[:, 2],
"--",
linewidth=3,
)
axs[2].set_ylabel("z velocity", fontsize=20)
axs[3].semilogy(
time[:],
total_energy_rod_one[:],
linewidth=3,
)
axs[3].semilogy(
time[:],
total_energy_rod_two[:],
linewidth=3,
)
axs[3].semilogy(
time[:],
np.abs(total_energy_rod_one[:] - total_energy_rod_two[:]),
"--",
linewidth=3,
)
axs[3].set_ylabel("total_energy", fontsize=20)
axs[3].set_xlabel("time [s]", fontsize=20)
plt.tight_layout()
# fig.align_ylabels()
fig.legend(prop={"size": 20})
# fig.savefig(filename)
# plt.show()
plt.close(plt.gcf())
if SAVE_FIGURE:
fig.savefig(filename)
def plot_video_with_surface(
rods_history: Sequence[Dict],
video_name="video.mp4",
fps=60,
step=1,
vis2D=True,
**kwargs,
):
plt.rcParams.update({"font.size": 22})
folder_name = kwargs.get("folder_name", "")
# 2d case <always 2d case for now>
import matplotlib.animation as animation
# simulation time
sim_time = np.array(rods_history[0]["time"])
# Rod
n_visualized_rods = len(rods_history) # should be one for now
# Rod info
rod_history_unpacker = lambda rod_idx, t_idx: (
rods_history[rod_idx]["position"][t_idx],
rods_history[rod_idx]["radius"][t_idx],
)
# Rod center of mass
    com_history_unpacker = lambda rod_idx, t_idx: rods_history[rod_idx]["com"][t_idx]
# Generate target sphere data
sphere_flag = False
if kwargs.__contains__("sphere_history"):
sphere_flag = True
sphere_history = kwargs.get("sphere_history")
n_visualized_spheres = len(sphere_history) # should be one for now
sphere_history_unpacker = lambda sph_idx, t_idx: (
sphere_history[sph_idx]["position"][t_idx],
sphere_history[sph_idx]["radius"][t_idx],
)
# color mapping
sphere_cmap = cm.get_cmap("Spectral", n_visualized_spheres)
# video pre-processing
print("plot scene visualization video")
FFMpegWriter = animation.writers["ffmpeg"]
metadata = dict(title="Movie Test", artist="Matplotlib", comment="Movie support!")
writer = FFMpegWriter(fps=fps, metadata=metadata)
dpi = kwargs.get("dpi", 100)
xlim = kwargs.get("x_limits", (-1.0, 1.0))
ylim = kwargs.get("y_limits", (-1.0, 1.0))
zlim = kwargs.get("z_limits", (-0.05, 1.0))
difference = lambda x: x[1] - x[0]
max_axis_length = max(difference(xlim), difference(ylim))
# The scaling factor from physical space to matplotlib space
scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
if kwargs.get("vis3D", True):
fig = plt.figure(1, figsize=(10, 8), frameon=True, dpi=dpi)
ax = plt.axes(projection="3d")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
ax.set_zlim(*zlim)
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
# for reference see
# https://stackoverflow.com/questions/48172928/scale-matplotlib-pyplot
# -axes-scatter-markersize-by-x-scale/48174228#48174228
scaling_factor = (
ax.get_window_extent().width / (max_axis_length) * 72.0 / fig.dpi
)
rod_scatters[rod_idx] = ax.scatter(
inst_position[0],
inst_position[1],
inst_position[2],
# for circle s = 4/pi*area = 4 * r^2
s=4 * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
scaling_factor = (
ax.get_window_extent().width / (max_axis_length) * 72.0 / fig.dpi
)
sphere_artists[sphere_idx] = ax.scatter(
sphere_position[0],
sphere_position[1],
sphere_position[2],
s=4 * (scaling_factor * sphere_radius) ** 2,
)
# sphere_radius,
# color=sphere_cmap(sphere_idx),)
ax.add_artist(sphere_artists[sphere_idx])
# ax.set_aspect("equal")
video_name_3D = folder_name + "3D_" + video_name
with writer.saving(fig, video_name_3D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_scatters[rod_idx]._offsets3d = (
inst_position[0],
inst_position[1],
inst_position[2],
)
scaling_factor = (
ax.get_window_extent().width
/ (max_axis_length)
* 72.0
/ fig.dpi
)
# rod_scatters[rod_idx].set_offsets(inst_position[:2].T)
rod_scatters[rod_idx].set_sizes(
4 * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx]._offsets3d = (
sphere_position[0],
sphere_position[1],
sphere_position[2],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
if kwargs.get("vis2D", True):
max_axis_length = max(difference(xlim), difference(ylim))
# The scaling factor from physical space to matplotlib space
scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_lines[rod_idx] = ax.plot(
inst_position[0], inst_position[1], "r", lw=0.5
)[0]
inst_com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx] = ax.plot(inst_com[0], inst_com[1], "k--", lw=2.0)[0]
scaling_factor = (
ax.get_window_extent().width / (max_axis_length) * 72.0 / fig.dpi
)
rod_scatters[rod_idx] = ax.scatter(
inst_position[0],
inst_position[1],
s=4 * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = Circle(
(sphere_position[0], sphere_position[1]),
sphere_radius,
color=sphere_cmap(sphere_idx),
)
ax.add_artist(sphere_artists[sphere_idx])
ax.set_aspect("equal")
video_name_2D = folder_name + "2D_xy_" + video_name
with writer.saving(fig, video_name_2D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_lines[rod_idx].set_xdata(inst_position[0])
rod_lines[rod_idx].set_ydata(inst_position[1])
com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx].set_xdata(com[0])
rod_com_lines[rod_idx].set_ydata(com[1])
rod_scatters[rod_idx].set_offsets(inst_position[:2].T)
scaling_factor = (
ax.get_window_extent().width
/ (max_axis_length)
* 72.0
/ fig.dpi
)
rod_scatters[rod_idx].set_sizes(
4 * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx].center = (
sphere_position[0],
sphere_position[1],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
# Plot zy
max_axis_length = max(difference(zlim), difference(ylim))
# The scaling factor from physical space to matplotlib space
scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_xlim(*zlim)
ax.set_ylim(*ylim)
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_lines[rod_idx] = ax.plot(
inst_position[2], inst_position[1], "r", lw=0.5
)[0]
inst_com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx] = ax.plot(inst_com[2], inst_com[1], "k--", lw=2.0)[0]
scaling_factor = (
ax.get_window_extent().width / (max_axis_length) * 72.0 / fig.dpi
)
rod_scatters[rod_idx] = ax.scatter(
inst_position[2],
inst_position[1],
s=4 * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = Circle(
(sphere_position[2], sphere_position[1]),
sphere_radius,
color=sphere_cmap(sphere_idx),
)
ax.add_artist(sphere_artists[sphere_idx])
ax.set_aspect("equal")
video_name_2D = folder_name + "2D_zy_" + video_name
with writer.saving(fig, video_name_2D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_lines[rod_idx].set_xdata(inst_position[2])
rod_lines[rod_idx].set_ydata(inst_position[1])
com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx].set_xdata(com[2])
rod_com_lines[rod_idx].set_ydata(com[1])
rod_scatters[rod_idx].set_offsets(
np.vstack((inst_position[2], inst_position[1])).T
)
scaling_factor = (
ax.get_window_extent().width
/ (max_axis_length)
* 72.0
/ fig.dpi
)
rod_scatters[rod_idx].set_sizes(
4 * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx].center = (
sphere_position[2],
sphere_position[1],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
# Plot xz
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_xlim(*xlim)
ax.set_ylim(*zlim)
# The scaling factor from physical space to matplotlib space
max_axis_length = max(difference(zlim), difference(xlim))
scaling_factor = (2 * 0.1) / (max_axis_length) # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_lines[rod_idx] = ax.plot(
inst_position[0], inst_position[2], "r", lw=0.5
)[0]
inst_com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx] = ax.plot(inst_com[0], inst_com[2], "k--", lw=2.0)[0]
scaling_factor = (
ax.get_window_extent().width / (max_axis_length) * 72.0 / fig.dpi
)
rod_scatters[rod_idx] = ax.scatter(
inst_position[0],
inst_position[2],
s=4 * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = Circle(
(sphere_position[0], sphere_position[2]),
sphere_radius,
color=sphere_cmap(sphere_idx),
)
ax.add_artist(sphere_artists[sphere_idx])
ax.set_aspect("equal")
video_name_2D = folder_name + "2D_xz_" + video_name
with writer.saving(fig, video_name_2D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_lines[rod_idx].set_xdata(inst_position[0])
rod_lines[rod_idx].set_ydata(inst_position[2])
com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx].set_xdata(com[0])
rod_com_lines[rod_idx].set_ydata(com[2])
rod_scatters[rod_idx].set_offsets(
np.vstack((inst_position[0], inst_position[2])).T
)
scaling_factor = (
ax.get_window_extent().width
/ (max_axis_length)
* 72.0
/ fig.dpi
)
rod_scatters[rod_idx].set_sizes(
4 * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx].center = (
sphere_position[0],
sphere_position[2],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
def plot_force_vs_energy(
normalized_force,
total_final_energy,
friction_coefficient,
filename="energy_vs_force.png",
SAVE_FIGURE=False,
):
fig = plt.figure(figsize=(12, 10), frameon=True, dpi=150)
axs = []
axs.append(plt.subplot2grid((1, 1), (0, 0)))
axs[0].plot(
normalized_force,
total_final_energy,
linewidth=3,
)
plt.axvline(x=friction_coefficient, linewidth=3, color="r", label="threshold")
axs[0].set_ylabel("total energy", fontsize=20)
axs[0].set_xlabel("normalized force", fontsize=20)
plt.tight_layout()
# fig.align_ylabels()
fig.legend(prop={"size": 20})
# fig.savefig(filename)
# plt.show()
plt.close(plt.gcf())
if SAVE_FIGURE:
fig.savefig(filename)
| true |
| 81119c5abb6f84b2f51c1e3a9b5ac9837cb0bcd3 | Python | YuRiTan/rsgeo | /rsgeo-py/tests/test_rsgeo.py | UTF-8 | 391 | 2.515625 | 3 | ["MIT"] | permissive |
import numpy as np
from rsgeo import contains, distance # noqa
def test_contains(polygon_coords, xs, ys):
result = contains(polygon_coords, xs, ys)
np.testing.assert_array_equal(result, [False, False, False, True])
def test_distance(polygon_coords, xs, ys):
result = distance(polygon_coords, xs, ys)
np.testing.assert_array_equal(result, [0, 0, 1.4142135623730951, 0])
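# The fixtures above (polygon_coords, xs, ys) live in the suite's conftest.py,
# which is not part of this file. A hypothetical conftest.py consistent with
# the asserted values could look like this (assumed, not from the repo):
#
# import pytest
#
# @pytest.fixture
# def polygon_coords():
#     # unit square; (2, 2) lies sqrt(2) ~= 1.4142135623730951 from corner (1, 1)
#     return [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
#
# @pytest.fixture
# def xs():
#     return [0.0, 1.0, 2.0, 0.5]
#
# @pytest.fixture
# def ys():
#     return [0.0, 0.0, 2.0, 0.5]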
| true |
| 825fc032cadd4974322b076d5c7a57a28d65eee5 | Python | yanghun-park/PowerShell-Token-Analyzer | /PSDecode.py | UTF-8 | 2,930 | 3.234375 | 3 | [] | no_license |
import subprocess, locale
# -----------------------------------------------------------------------------------------------
# 1. PSDecode (File): Decryption module using PSDecode
# Return value: 0 - when decryption fails / [String] - path of the decoded file when it succeeds
# Translation : 2021/04/29
# -----------------------------------------------------------------------------------------------
def PSDecode(File):
command = "powershell.exe Get-Content " + File + " | PSDecode;"
# Invoke command to Subprocess
ps = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
data, err = ps.communicate()
# Decode the results to fit the system
lang = locale.getdefaultlocale() # Import Operating System Language
# Use second value because the resulting value of lang is (language, encoding)
try:
data = data.decode(lang[1])
err = err.decode(lang[1])
except UnicodeDecodeError: # In case of Unicode error
try:
data = data.decode('UTF-8')
err = err.decode('UTF-8')
except:
return 0 # Returns 0 if an error still occurs
if(err != ""):
print("==========[Error]==========")
print("Either it was not run in PowerShell or the PSDecode module was not installed.")
return 0
    Check = False # True while we are inside a decoded section of the output
    Output = [] # Holds the decoded lines on success
    Del_output = 0 # Counts processed output lines; used to stop writing trailing noise
dataline = data.split('\n')
for word in dataline:
Checkline = word[31:38]
if(Check):
if(word == ""):
Check = False
            if not word[0:10] == "No actions": # skip PSDecode's "No actions identified" notice
Output.append(word) # Adding decryption data to the output file
Del_output = Del_output + 1
for N in range(3, 1, -1):
if(Checkline == "Layer " + str(N)): # On successful decryption
Check = True
continue
if "### Actions ###" in word:
Check = True
continue
elif "### Warning! ###" in word: # On alert
if (Check): # If decryption is successful and "Warning" appears,
del Output[len(Output)-1] # ### Warning! ### remove
break
return 0 # Otherwise, return as Decryption Failed
elif "### ERROR! ###" in word: # When Decryption Fails
return 0 # return Fails
Decode_File = open(File + "_decode", 'w', encoding='UTF-8') # Creating a Decoded Temporary File
Num = 1
for dataline in Output: # Write File
if(Num == Del_output):
break
Decode_File.write(dataline)
Num = Num + 1
Decode_File.close()
return File + "_decode"
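# Illustrative usage (hypothetical file name; requires Windows PowerShell with
# the PSDecode module installed):
# decoded = PSDecode("sample.ps1")
# if decoded != 0:
#     print(open(decoded, encoding="UTF-8").read())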
| true |
| 1a20789533f1e35f8e68120eaf8044c959a5f969 | Python | Aasthaengg/IBMdataset | /Python_codes/p03416/s073236091.py | UTF-8 | 154 | 3.078125 | 3 | [] | no_license |
a,b = map(int, input().split())
ans = 0
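# Count the palindromic numbers in [a, b]: the first half of the digit string
# must equal the reversed second half (a middle digit is ignored).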
for i in range(a,b+1):
s = str(i)
hl = len(s)//2
if s[:hl]== s[-1:-1-hl:-1]:
ans+=1
print(ans)
| true |
| e1081ee92a93c1c0d02369a5484c5e8604108619 | Python | kmilewczyk96/zadania_dodatkowe | /closest_power.py | UTF-8 | 427 | 3.109375 | 3 | [] | no_license |
def closest_power(base, num):
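    """Return the exponent e with base**e closest to num (for num >= 1);
    ties go to the smaller exponent, e.g. closest_power(3, 6) == 1 because
    |3**1 - 6| == |3**2 - 6| == 3."""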
result = 0
powered = 0
while powered < num:
powered = base ** result
result = result + 1
if ((base ** (result - 2)) + powered) / 2 >= num:
return result - 2
return result - 1
if __name__ == '__main__':
assert closest_power(3, 6) == 1
assert closest_power(4, 12) == 2
assert closest_power(3, 12) == 2
assert closest_power(4, 1) == 0
| true |
| db2e0abdeb0f39a35fe7afc3799610322806a52a | Python | totomz/docker-tasker | /tasker/examples/SimpleSum.py | UTF-8 | 1,166 | 3.09375 | 3 | ["MIT"] | permissive |
import logging
import sys
import os
from tasker.master.Master import Master
from dotenv import load_dotenv
load_dotenv()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stdout)])
def supply():
n = 1
while n <= 10:
task = {
"id": "test-{}".format(n),
"image": "ubuntu",
"arguments": "/bin/bash -c 'sleep 1; echo {number}'".format(number=n)
}
yield task
n += 1
def reduce(value, accumulator, bar):
# value is a dictionary similar to {'id': 'test-2', 'isSuccess': True, 'payload': '2'}
bar.text("Processing: {task}".format(task=value['id']))
if value['isSuccess']:
accumulator.append(int(value['payload']))
def termination(values):
logging.info("Termination! Values: {}".format(values))
_sum = 0
for v in values:
_sum += v
logging.info("DONE! The sum is {}".format(_sum))
logging.info(_sum)
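# With the supplier above, values should collect 1..10, so the reported sum
# is 55 (assuming all ten container tasks succeed).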
if __name__ == '__main__':
print("DIOCANE PYTHON")
master = Master(supplier=supply,
reducer=reduce,
terminate=termination)
master.start()
print("### DONE ###")
| true |
| db9ad58d9f0b19396c6d9d75c7004698881c20ff | Python | YangForever/COP514Group | /guifinal/cwgui/gui_readpic.py | UTF-8 | 423 | 2.78125 | 3 | [] | no_license |
from PIL import Image
import numpy as np
def readPic(filename):
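    # Flatten every pixel value into a single undelimited string: str() of the
    # list keeps the brackets while split/join strips the ", " separators,
    # e.g. [10, 20, 30, 40] becomes "[10203040]".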
im = Image.open(str(filename))
# im.show()
pix = im.load()
x = im.size[0]
y = im.size[1]
temp = []
    for i in range(x):
        for j in range(y):
            temp.append(pix[i, j])  # PIL pixel access is (x, y); pix[j, i] fails on non-square images
return ''.join(s for s in str(temp).split(', '))
# temp = np.asarray(temp)
# temp.resize(200, 200)
# print len(temp)
# img = Image.fromarray(temp)
# img.show()
| true |
| 2a8395863917a8f6771b5bfec569e2f10ab3f5e7 | Python | rds504/AoC-2020 | /solutions/day16.py | UTF-8 | 3,105 | 3.109375 | 3 | ["MIT"] | permissive |
import re
from functools import reduce
from operator import mul
from tools.general import load_input
class ValidValues:
def __init__(self, bounds):
self._lo1, self._hi1, self._lo2, self._hi2 = [int(bound) for bound in bounds]
def __contains__(self, value):
return self._lo1 <= value <= self._hi1 or self._lo2 <= value <= self._hi2
def parse_ticket_data(ticket_data):
rule_pattern = re.compile(r"([a-z ]+): ([0-9]+)-([0-9]+) or ([0-9]+)-([0-9]+)")
field_rules = {}
your_ticket = None
near_tickets = []
parsing_mode = "rules"
for line in ticket_data.split('\n'):
if line == "your ticket:":
parsing_mode = "yours"
elif line == "nearby tickets:":
parsing_mode = "nearby"
elif parsing_mode == "rules":
match = rule_pattern.match(line)
if match:
field, *bounds = match.groups()
field_rules[field] = ValidValues(bounds)
elif line != "":
ticket = [int(i) for i in line.split(',')]
if parsing_mode == "yours":
your_ticket = ticket
elif parsing_mode == "nearby":
near_tickets.append(ticket)
return (field_rules, your_ticket, near_tickets)
def validate_tickets(rule_list, ticket_list):
invalid_fields = []
valid_tickets = []
for ticket in ticket_list:
ticket_valid = True
for field in ticket:
for valid_values in rule_list:
if field in valid_values:
break
else:
invalid_fields.append(field)
ticket_valid = False
if ticket_valid:
valid_tickets.append(ticket)
return (valid_tickets, sum(invalid_fields))
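# The resolver below assigns greedily, most-constrained field first; day 16
# inputs are built so candidate-position sets nest (sizes 1, 2, 3, ...),
# leaving exactly one unclaimed position at each step.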
def resolve_field_positions(field_rules, valid_ticket_list):
field_positions = {}
field_valid_positions = {}
for field, valid_values in field_rules.items():
valid_positions = set(range(len(valid_ticket_list[0])))
for ticket in valid_ticket_list:
for position, value in enumerate(ticket):
                if value not in valid_values:
                    # discard, not remove: several tickets can rule out the same position
                    valid_positions.discard(position)
field_valid_positions[field] = valid_positions
for field, valid_positions in sorted(field_valid_positions.items(), key = lambda x: len(x[1])):
for position in valid_positions:
if position not in field_positions.values():
field_positions[field] = position
break
return field_positions
def check_departure_fields(field_map, ticket):
return reduce(
mul,
(ticket[pos] for fld, pos in field_map.items() if fld.startswith("departure"))
)
rules, own_ticket, nearby_tickets = parse_ticket_data(load_input("day16.txt"))
nearby_valid_tickets, invalid_fieldsum = validate_tickets(rules.values(), nearby_tickets)
print(f"Part 1 => {invalid_fieldsum}")
fld_positions = resolve_field_positions(rules, nearby_valid_tickets)
print(f"Part 2 => {check_departure_fields(fld_positions, own_ticket)}")
| true |
| 2102505bbbe24a5cd7136c4b066da59739f9889f | Python | indahwlnlstr/apd | /postest2.py | UTF-8 | 1,052 | 3.859375 | 4 | [] | no_license |
#arithmetic operations
print("Hello, " "Welcome")
def print_menu():
    print(30 * "-", "MENU", 30*"-")
    print("1. Calculate the Surface Area of a Cuboid")
    print("2. Calculate a Temperature Conversion")
    print("3. Calculate the Volume of a Cone")
print_menu()
pilihan = int(input("Enter your choice: "))
if pilihan == 1:
    p = int(input("enter length = "))
    l = int(input('enter width = '))
    t = int(input('enter height = '))
    luas = (2*p*l)+(2*p*t)+(2*l*t)
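    # worked example: p=2, l=3, t=4 -> 2*(2*3) + 2*(2*4) + 2*(3*4) = 12 + 16 + 24 = 52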
print("luas balok adalah = ", luas)
elif pilihan == 2:
Celcius = float(input("masukkan suhu = "))
Reamur = 4/5*Celcius
Farenheit = 9/5*Celcius+32
Kelvin = 273+Celcius
print("Celcius = ", Celcius)
print("Reamur = ", Reamur)
print("Farenheit = ", Farenheit)
print("Kelvin = ", Kelvin)
elif pilihan == 3:
r = int(input("Masukkan jari-jari = "))
t = int(input("Masukkan Tinggi = "))
phi = 3.14
volume = 1/3*(phi*r*r*t)
print("Volume Kerucut adalah = ", volume)
print(30* "-", "TERIMAKASIH", 30* "-")
| true |
| 8d9976b103e8fd867701953efaba9446eaad818c | Python | JHWu92/public-safety-baltimore | /src/vis/map.py | UTF-8 | 4,248 | 2.796875 | 3 | [] | no_license |
# coding=utf-8
import folium
from folium.plugins import MarkerCluster, FastMarkerCluster
def add_common_tiles(m):
folium.TileLayer('Stamen Terrain').add_to(m)
folium.TileLayer('Stamen Toner').add_to(m)
folium.TileLayer('Stamen Watercolor').add_to(m)
folium.TileLayer('CartoDB dark_matter').add_to(m)
folium.TileLayer('CartoDB positron').add_to(m) # the last one is the default tiles
def marker_cluster(named_data, lonlat=True, filename='tmp_marker_cluster.html', verbose=0):
"""
Parameters
----------
:param named_data: dict of lists of coords
points to be plot on the map
:param lonlat: boolean, default True
whether the coords are in (lon, lat) order.
coords are required in (lat, lon) order in MarkerCluster,
but usually in geopandas, coords are in (lon, lat)
:param filename: str
file name of the map visualization
:param verbose: int, default 0
verbosity
:return: the folium map object
Examples:
----------
>>> named_data = {'A': [(38.9305064,-77.116761), (38.9195066, -77.1069168)]}
>>> marker_cluster(named_data, lonlat=True, filename='tmp_marker_cluster.html')
"""
# TODO: diversify inputs, currently only dict of lists of coords is handled
# if lonlat, make it (lat, lon)
if lonlat:
if verbose > 0: print('transformed to (lat,lon)')
named_data = {name: [(c[1], c[0]) for c in coords] for name, coords in named_data.items()}
# get bounding box
lons, lats = [], []
for _, coords in named_data.items():
lats.extend([coord[0] for coord in coords])
lons.extend([coord[1] for coord in coords])
w, e, s, n = min(lons), max(lons), min(lats), max(lats)
# build map
m = folium.Map()
add_common_tiles(m)
m.fit_bounds([(s, w), (n, e)])
# bind data to map
for name, coords in named_data.items():
f = folium.FeatureGroup(name=name)
if verbose > 0: print('adding layer of', name)
# TODO: add custom popups
popups = ['group: {}<br>lon:{}<br>lat:{}'.format(name, lon, lat) for (lat, lon) in coords]
f.add_child(MarkerCluster(locations=coords, popups=popups))
m.add_child(f)
# layer control
m.add_child(folium.LayerControl())
m.save(filename)
return m
def marker_cluster_fast(named_data, lonlat=True, filename='tmp_marker_cluster.html', verbose=0):
if lonlat:
if verbose > 0: print('transformed to (lat,lon)')
named_data = {name: [(c[1], c[0]) for c in coords] for name, coords in named_data.items()}
# get bounding box
lons, lats = [], []
for _, coords in named_data.items():
lats.extend([coord[0] for coord in coords])
lons.extend([coord[1] for coord in coords])
w, e, s, n = min(lons), max(lons), min(lats), max(lats)
# build map
m = folium.Map()
add_common_tiles(m)
m.fit_bounds([(s, w), (n, e)])
# bind data to map
callback = """
function (row) {{
var icon, marker;
icon = L.AwesomeMarkers.icon({{
icon: "map-marker", markerColor: "{color}"}});
marker = L.marker(new L.LatLng(row[0], row[1]));
marker.setIcon(icon);
return marker;
}};
"""
colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
for i, (name, coords) in enumerate(named_data.items()):
if verbose > 0: print('adding layer of', name)
FastMarkerCluster(data=coords, callback=callback.format(color=colors[i % len(colors)])).add_to(m)
# layer control
m.add_child(folium.LayerControl())
    m.save(filename)
    return m
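# Illustrative call (hypothetical coordinates): two named groups rendered as
# separately coloured fast clusters.
# marker_cluster_fast({'robbery': [(-76.61, 39.29)], 'assault': [(-76.62, 39.30)]},
#                     lonlat=True, filename='tmp_fast_clusters.html')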
def main():
import geopandas as gp
from shapely.geometry import Point
gpdfs = []
gpdfs.append(gp.GeoDataFrame([Point(-77.116761, 38.9305064), Point(-77.1069168, 38.9195066)], columns=['geometry']))
gpdfs.append(
gp.GeoDataFrame([Point(-77.0908494, 38.9045525), Point(-77.0684995, 38.9000923)], columns=['geometry']))
for gpdf in gpdfs:
gpdf.crs = {'init': 'epsg:4326', 'no_defs': True}
named_coords = {'obj a': gpdfs[0].geometry.apply(lambda x: x.coords[0]).tolist()}
marker_cluster(named_coords, True, verbose=1)
return
if __name__ == '__main__':
main()
| true |
| 38e0294a57e9ff531cedc73ddaa99920b20372a1 | Python | renzildourado/Capstone_Project | /building_model.py | UTF-8 | 4,390 | 2.671875 | 3 | [] | no_license |
import pandas as pd
import scipy
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression
from sklearn import linear_model
from sklearn.neural_network import MLPClassifier
from sklearn.feature_selection import RFE
def open_file():
file_name = "cleaned_dataset_3.csv"
data_frame = pd.read_csv(file_name, low_memory=False)
return data_frame
def build_decision_tree(data_frame):
X = data_frame.drop(["MALWARE_DETECTION"], axis=1)
Y = data_frame['MALWARE_DETECTION']
train_data_x, test_data_x, train_data_y, test_data_y = train_test_split(X, Y, test_size=0.30)
##############################################################################################################
# neural_network = MLPClassifier()
# neural_network = neural_network.fit(train_data_x, train_data_y)
#
# predicted_y = neural_network.predict(test_data_x)
#
# print("FOR Neural Network")
# acc = accuracy_score(predicted_y.round(), test_data_y)
# print("ACCURACY: " + str(acc))
# print("CONFUSION MATRIX")
# print(confusion_matrix(test_data_y, predicted_y.round()))
##############################################################################################################
# linear_regression = LinearRegression()
# linear_regression = linear_regression.fit(train_data_x, train_data_y)
#
# predicted_y = linear_regression.predict(test_data_x)
#
# print("FOR Linear Regression")
# acc = accuracy_score(predicted_y.round(), test_data_y)
# print("ACCURACY: " + str(acc))
# print("CONFUSION MATRIX")
# print(confusion_matrix(test_data_y, predicted_y.round()))
##############################################################################################################
# naive_bayes = GaussianNB()
# naive_bayes = naive_bayes.fit(train_data_x, train_data_y)
#
# predicted_y = naive_bayes.predict(test_data_x)
#
# print("FOR Naive Bayes Classifier")
# acc = accuracy_score(predicted_y, test_data_y)
# print("ACCURACY: " + str(acc))
# print("CONFUSION MATRIX")
# print(confusion_matrix(test_data_y, predicted_y))
##############################################################################################################
# knn_classifier = KNeighborsClassifier()
# knn_classifier = knn_classifier.fit(train_data_x, train_data_y)
#
# predicted_y = knn_classifier.predict(test_data_x)
#
# print("FOR K NEAREST NEIGHBORS")
# acc = accuracy_score(predicted_y, test_data_y )
# print("ACCURACY: "+str(acc))
# print("CONFUSION MATRIX")
# print(confusion_matrix(test_data_y, predicted_y))
##############################################################################################################
# random_forest = RandomForestClassifier()
# random_forest = random_forest.fit(train_data_x, train_data_y)
#
# predicted_y = random_forest.predict(test_data_x)
#
# print("FOR RANDOM FOREST")
# acc = accuracy_score(predicted_y, test_data_y )
# print("ACCURACY: "+str(acc))
# print("CONFUSION MATRIX")
# print(confusion_matrix(test_data_y, predicted_y))
###############################################################################################################
    # decision tree (active; the alternative classifiers above remain commented out)
    decision_tree = tree.DecisionTreeClassifier()
    decision_tree = decision_tree.fit(train_data_x, train_data_y)

    predicted_y = decision_tree.predict(test_data_x)

    print("FOR DECISION TREE")
    acc = accuracy_score(predicted_y, test_data_y)
    print("ACCURACY: " + str(acc))
    print("CONFUSION MATRIX")
    print(confusion_matrix(test_data_y, predicted_y))
##################################################################################################################
# test_sample = list(train_data_frame.iloc[807])
# test_sample = [int(num) for num in test_sample]
# # print(test_sample)
# print(decision_tree.predict([test_sample]))
def main():
data_frame = open_file()
build_decision_tree(data_frame)
if __name__ == '__main__':
main()
| true
|
8717be7555a09846b66b0284714f50ca3294d655
|
Python
|
uuuChen/PLAYGROUND-Seq2SeqTranslation
|
/data_loader.py
|
UTF-8
| 6,436
| 2.828125
| 3
|
[] |
no_license
|
import random
import torch
from torch.autograd import Variable
class Vocabulary:
def __init__(self, text_file_path):
# initialize parameter
self.word2idx = dict()
self.sequences = list()
self.indices = list() # corresponding indices of sequences
self.max_length = 0
self.word_counts = 0
# build vocabulary
self._build(text_file_path)
def _build(self, text_file_path):
signals = ['SOS', 'EOS', 'PAD', 'UNK']
with open(text_file_path, "r", encoding="utf-8-sig") as fp:
lines = fp.readlines()
for line in lines:
sequence = line.strip().split(' ')
self.sequences.append(sequence)
if len(sequence) > self.max_length:
self.max_length = len(sequence)
flat_sequences = [word for sequence in self.sequences for word in sequence]
unique_words = signals + list(set(flat_sequences))
self.word_counts = len(unique_words)
self.word2idx = dict(zip(unique_words, range(len(unique_words))))
self.idx2word = dict(zip(range(len(unique_words)), unique_words))
def sequence_to_indices(self, sequence, add_sos=False, add_eos=False):
indices = list()
if add_sos:
indices.append(self.word2idx['SOS'])
for word in sequence:
index = self.word2idx['UNK'] if word not in self.word2idx else self.word2idx[word]
indices.append(index)
if add_eos:
indices.append(self.word2idx['EOS'])
return indices
def batch_indices_to_batch_sequences(self, batch_indices):
batch_sequences = list()
for indices in batch_indices:
sequence = list()
for index in indices:
word = self.idx2word[index]
if word == 'EOS':
break
sequence.append(word)
batch_sequences.append(sequence)
return batch_sequences
class DataLoader:
def __init__(self, train_inputs_vocab, train_targets_vocab, inputs_file_path, targets_file_path, device=None,
batch_size=1, shuffle=False, is_train=False):
self.device = device
self.num_of_batches = None
self.batch_size = batch_size
self.is_train = is_train
self.train_inputs_vocab = train_inputs_vocab
self.train_targets_vocab = train_targets_vocab
self.SOS_IDX = train_inputs_vocab.word2idx['SOS']
self.EOS_IDX = train_inputs_vocab.word2idx['EOS']
self.PAD_IDX = train_inputs_vocab.word2idx['PAD']
self.UNK_IDX = train_inputs_vocab.word2idx['UNK']
if is_train:
self.inputs_sequences = self.get_sequences(inputs_file_path)
self.targets_sequences = self.get_sequences(targets_file_path)
self.inputs = [train_inputs_vocab.sequence_to_indices(sequence, add_eos=True) for sequence in self.inputs_sequences]
self.targets = [train_targets_vocab.sequence_to_indices(sequence, add_eos=True) for sequence in self.targets_sequences]
if shuffle:
inputs_targets_list = list(zip(self.inputs, self.targets))
random.shuffle(inputs_targets_list)
self.inputs, self.targets = zip(*inputs_targets_list)
inputs_lens = [len(input) for input in self.inputs]
targets_lens = [len(target) for target in self.targets]
self.batches = [
((self.inputs[k: k + self.batch_size], max(inputs_lens[k: k + self.batch_size])),
(self.targets[k: k + self.batch_size], max(targets_lens[k: k + self.batch_size])))
for k in range(0, len(self.inputs), self.batch_size)
]
else: # no targets
self.inputs_sequences = self.get_sequences(inputs_file_path)
self.inputs = [train_inputs_vocab.sequence_to_indices(sequence, add_eos=True) for sequence in self.inputs_sequences]
if shuffle:
random.shuffle(self.inputs)
inputs_lens = [len(input) for input in self.inputs]
self.batches = [
((self.inputs[k: k + self.batch_size], max(inputs_lens[k: k + self.batch_size])),
(None, None))
for k in range(0, len(self.inputs), self.batch_size)
]
self.num_of_batches = len(self.batches)
def get_batch(self):
for batch in self.batches:
if self.is_train:
(inputs, max_input_len), (targets, max_target_len) = batch
padded_inputs = self._pad_sequences(inputs, max_input_len)
padded_targets = self._pad_sequences(targets, max_target_len)
inputs_var = Variable(torch.LongTensor(padded_inputs)).transpose(0, 1).to(self.device) # time * batch
targets_var = Variable(torch.LongTensor(padded_targets)).transpose(0, 1).to(self.device) # time * batch
yield inputs_var, targets_var
else:
(inputs, max_input_len), _ = batch
padded_inputs = self._pad_sequences(inputs, max_input_len)
inputs_var = Variable(torch.LongTensor(padded_inputs)).transpose(0, 1).to(self.device) # time * batch
yield inputs_var
@staticmethod
def get_sequences(text_file_path):
sequences = list()
with open(text_file_path, "r", encoding="utf-8-sig") as fp:
lines = fp.readlines()
for line in lines:
sequence = line.strip().split(' ')
sequences.append(sequence)
return sequences
def _pad_sequences(self, sequences, max_length):
pad_sequences = list()
for sequence in sequences:
pad_sequence = sequence + [self.PAD_IDX] * (max_length - len(sequence))
pad_sequences.append(pad_sequence)
return pad_sequences
if __name__ == '__main__':
train_inputs_path = 'data/train_en.txt'
train_targets_path = 'data/train_fr.txt'
train_inputs_vocab = Vocabulary(train_inputs_path)
train_targets_vocab = Vocabulary(train_targets_path)
train_loader = DataLoader(train_inputs_vocab, train_targets_vocab, train_inputs_path, train_targets_path, shuffle=True, batch_size=128, device='cuda', is_train=True)
for inputs, label in train_loader.get_batch():
print(inputs.shape, label.shape)
| true
|
216dcf1594bf48a9315420124c36b351ea78237a
|
Python
|
AbhiniveshP/CodeBreakersCode
|
/10 - DP/CoinChangeMinCoins.py
|
UTF-8
| 1,322
| 2.84375
| 3
|
[] |
no_license
|
from typing import List


class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
# edge cases
if (amount == 0):
return 0
if (coins == None or len(coins) == 0):
return -1
# if not possible => inf coins required => initialize with +inf
minCoins = [ [ float('inf') for c in range(amount + 1) ] for r in range(len(coins)) ]
for r in range(len(coins)):
minCoins[r][0] = 0
for coinIndex in range(len(coins)):
for currentAmount in range(1, amount + 1):
ways1, ways2 = float('inf'), float('inf')
# don't include
if (coinIndex >= 1):
ways1 = minCoins[coinIndex - 1][currentAmount]
# include (check +inf condition as well***)
if (currentAmount >= coins[coinIndex] and minCoins[coinIndex][currentAmount - coins[coinIndex]] != float('inf')):
ways2 = minCoins[coinIndex][currentAmount - coins[coinIndex]] + 1
minCoins[coinIndex][currentAmount] = min(ways1, ways2)
return minCoins[-1][amount] if minCoins[-1][amount] != float('inf') else -1
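# A more compact variant (a sketch, not from the original solution): each row of
# the 2-D table above depends only on itself and the previous row, so a 1-D DP
# array indexed by amount suffices.
def coinChange1D(coins, amount):
    INF = float('inf')
    minCoins = [0] + [INF] * amount  # minCoins[a] = fewest coins summing to a
    for coin in coins:
        for amt in range(coin, amount + 1):
            if minCoins[amt - coin] != INF:
                minCoins[amt] = min(minCoins[amt], minCoins[amt - coin] + 1)
    return minCoins[amount] if minCoins[amount] != INF else -1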
| true
|
a31fe550eefbc862b2219827d9a900a613448182
|
Python
|
nlp-study/zhihu_competition
|
/tools/word_freq_2_doc.py
|
UTF-8
| 3,242
| 2.8125
| 3
|
[] |
no_license
|
'''
Created on May 5, 2016
@author: Zhang Xiulong
'''
import codecs
from tools.list_operation import *
def trans_word_freq_2_doc(input_path,output_path):
word_freq_map = {}
processed_word_freq_map = {}
generated_str = []
max_value = 0
min_value = 0
read_file = codecs.open(input_path,'r','utf-8')
for line in read_file:
line = line.strip()
if len(line) == 0:
continue
line_list = line.split('\t')
word = line_list[0]
freq = int(line_list[1])
word_freq_map[word] = freq
print('word_freq_map size:',len(word_freq_map))
read_file.close()
sorted_freq_words = sorted(word_freq_map.items(), key=lambda d:d[1], reverse = True)
    # print(sorted_freq_words)
for i in range(len(sorted_freq_words)):
word_iterm = sorted_freq_words[i]
processed_word_freq_map[word_iterm[0]] = len(sorted_freq_words) - i
# print(processed_word_freq_map)
# temp_processed_word_freq_map = sorted(processed_word_freq_map.items(), key=lambda d:d[1], reverse = True)
# print('temp_processed_word_freq_map:',temp_processed_word_freq_map)
# value_list = list(word_freq_map.values())
# value_list.sort()
# print(value_list)
# min_value = value_list[0]
# max_value = value_list[-1]
#
# for key in word_freq_map:
# value = word_freq_map[key]
# value = value - min_value + 1
# multiple = value / 100
# processed_word_freq_map[key] += int(multiple)
write_sorted_str_int_map(processed_word_freq_map,'../../result_data/temp.txt')
temp_processed_word_freq_tulp = sorted(processed_word_freq_map.items(), key=lambda d:d[1], reverse = True)
for i in range(len(temp_processed_word_freq_tulp)):
temp_item = temp_processed_word_freq_tulp[i]
word = temp_item[0]
freq = int(temp_item[1])
if i%100 == 0:
print('index:',i)
for j in range(freq):
generated_str.append(word)
print('length of generated_str:',len(generated_str))
print('last one generated_str:',generated_str[-3:-1])
write_file = codecs.open(output_path,'w','utf-8')
line_str = ''
for i in range(len(generated_str)):
line_str += generated_str[i] + ' '
if i % 50 == 0:
write_file.write(line_str + '\n')
line_str = ''
write_file.write(line_str + '\n')
write_file.close()
def check_word_freq_correct(file_path_1,file_path_2):
check_list_1 = read_list(file_path_1)
check_list_2 = read_list(file_path_2)
if len(check_list_1) != len(check_list_2):
        print('>>>>Error: the two check lists are not the same length! Exiting.<<<<<')
exit()
for i in range(len(check_list_1)):
line_1 = check_list_1[i]
word_1 = line_1.split('\t')[0]
line_2 = check_list_2[i]
word_2 = line_2.split('\t')[0]
if word_1 != word_2:
print(word_1,word_2,' is not same!')
if __name__ == '__main__':
input_path = '../../result_data/cleaned_wrod_freq.txt'
output_path = '../../result_data/wrod_freq_2_doc.txt'
trans_word_freq_2_doc(input_path,output_path)
check_word_freq_correct(input_path,output_path)
| true
|
e94a5b20a17e8083c212340ee91f5f6863edea4a
|
Python
|
CSI-Woo-Lab/gym-rock-paper-scissors
|
/gym_rock_paper_scissors/envs/rock_paper_scissors.py
|
UTF-8
| 5,213
| 2.65625
| 3
|
[] |
no_license
|
import gym
import numpy as np
from gym import spaces
ROCK = 0
PAPER = 1
SCISSORS = 2
NULL_ACTION = 3
RENDER_MAP = {0: "@", 1: "#", 2: "%"}
class RockPaperScissorsBaseEnv(gym.Env):
optimal_winning_rate = None
def __init__(self) -> None:
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Discrete(4) # represent NULL_ACTION
def step(self, action):
info = {}
# FIXME: concat prev_obs and prev_action
env_action = self.env_policy(self.prev_obs)
if action == ROCK and env_action == SCISSORS:
reward = 1
elif action == SCISSORS and env_action == ROCK:
reward = -1
elif env_action < action:
reward = 1
elif action == env_action:
reward = 0
else:
reward = -1
self.prev_obs = env_action
self.prev_action = action
        done = reward != 0
# state = np.array([action, self.prev_obs])
obs = np.array([env_action])
return obs, reward, done, info
def reset(self):
self.prev_obs = None
return np.array([NULL_ACTION])
def render(self, mode="human"):
user = RENDER_MAP[self.prev_action]
computer = RENDER_MAP[self.prev_obs]
print(
"USER vs COMP\n",
f" {user} || {computer} ",
)
def env_policy(self, obs):
"""return env_action
"""
raise NotImplementedError
class RockPaperScissorsSequencePolicyEnv(RockPaperScissorsBaseEnv):
optimal_winning_rate = 1
def __init__(self, start_with=PAPER, other_sequence=False) -> None:
super().__init__()
self.start_with = start_with
self.other_sequence = other_sequence
def env_policy(self, obs):
        if obs is None:
env_action = self.start_with
elif obs == ROCK:
if self.other_sequence:
env_action = SCISSORS
else:
env_action = PAPER
elif obs == PAPER:
if self.other_sequence:
env_action = ROCK
else:
env_action = SCISSORS
elif obs == SCISSORS:
if self.other_sequence:
env_action = PAPER
else:
env_action = ROCK
return env_action
class RockPaperScissorsRandomPolicyEnv(RockPaperScissorsBaseEnv):
optimal_winning_rate = 1/2 # win + win after draw + ...
def env_policy(self, obs):
return np.random.choice([ROCK, PAPER, SCISSORS])
class RockPaperScissorsBiasedPolicyEnv(RockPaperScissorsBaseEnv):
optimal_winning_rate = 2/3 # win + win after draw + ...
def __init__(self, biased_by=ROCK) -> None:
super().__init__()
self.biased_by = biased_by
def env_policy(self, obs):
if self.biased_by == ROCK:
return np.random.choice([ROCK, PAPER, SCISSORS], p=[0.5, 0.25, 0.25])
if self.biased_by == PAPER:
return np.random.choice([ROCK, PAPER, SCISSORS], p=[0.25, 0.5, 0.25])
if self.biased_by == SCISSORS:
return np.random.choice([ROCK, PAPER, SCISSORS], p=[0.25, 0.25, 0.5])
class RockPaperScissorsSequencePolicy2Env(RockPaperScissorsBaseEnv):
optimal_winning_rate = 1
def __init__(self, start_with=PAPER, other_sequence=False, double_with=ROCK) -> None:
super().__init__()
self.start_with = start_with
self.other_sequence = other_sequence
self.double_with = double_with
self.double_flag = False
def reset(self):
self.double_flag = False
return super().reset()
def env_policy(self, obs):
        if obs is None:
env_action = self.start_with
elif obs == ROCK:
if self.double_with == ROCK and not self.double_flag:
env_action = ROCK
self.double_flag = True
else:
self.double_flag = False
if self.other_sequence:
env_action = SCISSORS
else:
env_action = PAPER
elif obs == PAPER:
if self.double_with == PAPER and not self.double_flag:
env_action = PAPER
self.double_flag = True
else:
self.double_flag = False
if self.other_sequence:
env_action = ROCK
else:
env_action = SCISSORS
elif obs == SCISSORS:
if self.double_with == SCISSORS and not self.double_flag:
env_action = SCISSORS
self.double_flag = True
else:
self.double_flag = False
if self.other_sequence:
env_action = PAPER
else:
env_action = ROCK
return env_action
if __name__ == "__main__":
env = RockPaperScissorsSequencePolicy2Env(other_sequence=True, double_with=SCISSORS)
env.reset()
for _ in range(10):
obs, reward, done, info = env.step(
np.random.choice([ROCK, PAPER, SCISSORS]))
env.render()
print(obs, reward, done, info)
| true
|
b67a4655ff598645cfa41ae7e3249cb26b42d682
|
Python
|
haavardtysland/IDATT2502
|
/1/b/MultipleLinearRegressionModel.py
|
UTF-8
| 540
| 2.75
| 3
|
[] |
no_license
|
import torch as to
class MultipleLinearRegressionModel:
def __init__(self):
self.W_1 = to.tensor([[0.0]], requires_grad=True)
self.W_2 = to.tensor([[0.0]], requires_grad=True)
self.b = to.tensor([[0.0]], requires_grad=True)
def f(self, length, weight):
return (length @ self.W_1) + (weight @ self.W_2) + self.b
def loss(self, x_1, x_2, y):
return to.nn.functional.mse_loss(self.f(x_1, x_2), y)
    def test(self, length, weight):
return length * self.W_1.item() + weight * self.W_2.item() + self.b.item()
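# A minimal training-loop sketch (not part of the original file): fits the model
# on made-up (length, weight) -> y observations with SGD; all values below are
# purely illustrative.
if __name__ == '__main__':
    model = MultipleLinearRegressionModel()
    x_1 = to.tensor([[1.0], [2.0], [3.0]])  # lengths
    x_2 = to.tensor([[2.0], [1.0], [3.0]])  # weights
    y = to.tensor([[5.0], [6.0], [10.0]])   # targets (y = 2*length + weight + 1)
    optimizer = to.optim.SGD([model.W_1, model.W_2, model.b], lr=0.01)
    for _ in range(5000):
        model.loss(x_1, x_2, y).backward()  # accumulate gradients
        optimizer.step()                    # update W_1, W_2 and b
        optimizer.zero_grad()               # clear gradients for the next step
    print(model.test(1.0, 2.0))             # should approach 5.0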
| true
|
928ed1df7c5e78bb955bd7bdaa47def63f27043f
|
Python
|
shivamnegi1705/Competitive-Programming
|
/Leetcode/Weekly Contest 210/1614. Maximum Nesting Depth of the Parentheses.py
|
UTF-8
| 685
| 3.484375
| 3
|
[] |
no_license
|
# Question Link:- https://leetcode.com/problems/maximum-nesting-depth-of-the-parentheses/
class Solution:
def maxDepth(self, s: str) -> int:
# stack to push (
st = []
# number of ( in stack
n = 0
# ans --> to store max depth
ans = 0
# Traverse in string from L to R
for i in s:
# Push (
if i=='(':
st.append('(')
n+=1
ans = max(ans,n)
# Pop )
elif i==')':
st.pop()
n-=1
# Update ans
ans = max(ans,n)
return ans
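# A sketch (not part of the original solution): because the input is guaranteed
# to be a valid parentheses string, the stack is redundant -- a single counter
# tracking the current nesting depth suffices.
class SolutionCounterOnly:
    def maxDepth(self, s: str) -> int:
        depth = ans = 0
        for ch in s:
            if ch == '(':
                depth += 1
                ans = max(ans, depth)  # record the deepest nesting seen
            elif ch == ')':
                depth -= 1
        return ans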
| true
|
3eccbf6381055b09e523d2a7c56ab1beb1c872b2
|
Python
|
bgoesswe/openeo-repeatability
|
/backend/openshift-old/services/user/service/api/api_validation.py
|
UTF-8
| 1,435
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
''' API Payload Validation '''
from re import match
from service.api.api_exceptions import ValidationError
def validate_user(payload):
    ''' Ensures that the structure of a user payload is correct. '''
if "username" not in payload:
raise ValidationError("'username' is missing.")
if not match(r"^\w{5,}$", payload["username"]):
raise ValidationError("Format of 'username' is wrong. (At least 5 letters/digits)")
if "email" not in payload:
raise ValidationError("'email' is missing.")
if not match(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$", payload["email"]):
raise ValidationError("Format of 'email' is wrong.")
if "admin" not in payload:
raise ValidationError("'admin' is missing.")
if not isinstance(payload["admin"], bool):
raise ValidationError("Format of 'admin' is wrong (boolean)")
if "password" not in payload:
raise ValidationError("'password' is missing.")
if not match(r"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-]).{8,}$", payload["password"]):
raise ValidationError("Format of 'password' is wrong." \
"(At least one upper case, at least one lower case, " \
"at least one digit, " \
"at least one special character (#?!@$%^&*-), " \
"minimum eight in length)")
| true
|
d267d2e400aff73d994bfc735f341074084a514c
|
Python
|
shimech/instance-builder
|
/instance_builder/setter.py
|
UTF-8
| 1,019
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
from typing import Callable, TypeVar
from .lib import find_attributes
T = TypeVar("T")
def setter(Class: type) -> type:
"""Setter method generator
Setter methods are generated by decorating class with this function.
Args:
        Class (type): Class for which you want to generate setter methods.
Returns:
type: Class having setter methods for its private fields.
Examples:
@setter
class User:
__id: int = 0
__name: str = "Shuntaro Shimizu"
user = User()
user.set_id(1) # user._User__id == 1
user.set_name("New Name") # user._User__name == "New Name"
"""
    def generate_setter(name: str) -> Callable[[T], None]:
def set_value(self, value: T) -> None:
setattr(self, name, value)
return set_value
for attribute in find_attributes(Class):
setter_name = "set_" + attribute.replace(f"_{Class.__name__}__", "")
setattr(Class, setter_name, generate_setter(attribute))
return Class
| true
|
a125d8674dec2e56ae8ace77b634713bf6062a71
|
Python
|
paulrschrater/design2align-scanner
|
/kicd/validation.py
|
UTF-8
| 8,217
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
##################################################
# MIT License
#
# Copyright (c) 2019 Learning Equality
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##################################################
import re
def assert_top_level_numbers_are_sequential_and_properly_indented(
items, exceptions=["32.2.4", "44.2.3"]
):
last_unit, last_section, last_subsection = None, None, None
for item in items:
header = re.search(r"(\d+)\.(\d+)\.(\d+)", item.bullet or "")
if not header:
continue
if item.indent != 1:
print(
"Expected indentation of 1, got {}, in line:\n{}\n\n".format(
item.indent, str(item)
)
)
continue
unit, section, subsection = (
int(header.group(1)),
int(header.group(2)),
int(header.group(3)),
)
if last_unit is not None and item.bullet not in exceptions:
if unit == last_unit:
if section == last_section:
if subsection != last_subsection + 1:
print(
"Expected {}, got {}, in line:\n{}\n\n".format(
"{}.{}.{}".format(unit, section, last_subsection + 1),
item.bullet,
str(item),
)
)
else:
if section != last_section + 1 or subsection != 0:
print(
"Expected {}, got {}, in line:\n{}\n\n".format(
"{}.{}.{}".format(unit, last_section + 1, 0),
item.bullet,
str(item),
)
)
else:
if (
(unit != 1 and unit != last_unit + 1)
or section != 0
or subsection != 0
):
print(
"Expected {}, got {}, in line:\n{}\n\n".format(
"{}.{}.{}".format(last_unit + 1, 0, 0),
item.bullet,
str(item),
)
)
last_unit, last_section, last_subsection = unit, section, subsection
def assert_all_section_headers_have_lesson_count(items, exceptions=["3.0.0", "53.0.0"]):
for i, item in enumerate(items):
if item.indent != 1:
continue
if not item.bullet.endswith(".0.0"):
continue
if item.bullet in exceptions:
continue
if not re.search(r"^(.*) \((\d+) Lessons?\)", item.text, flags=re.IGNORECASE):
print(
"Expected 'A.B.C ______ (X Lessons)', got:\n{}\n{}\n{}\n\n".format(
*items[i - 1 : i + 2]
)
)
def assert_all_top_level_bullets_are_dotted_numbers(items):
for i, item in enumerate(items):
if item.indent != 1:
continue
header = re.search(r"(\d+)\.(\d+)\.(\d+)", item.bullet or "")
if not header:
print(
"Should not be at top-level of bullets:\n{}\n{}\n{}\n\n".format(
*items[i - 1 : i + 2]
)
)
def assert_parenthetical_bullets_are_sequential(items):
roman_numerals = [
"i",
"ii",
"iii",
"iv",
"v",
"vi",
"vii",
"viii",
"ix",
"x",
"xi",
"xii",
"xiii",
"xiv",
"xv",
"xvi",
"xvii",
"xviii",
"xix",
"xx",
]
last_bullet_by_level = []
for i, item in enumerate(items):
level = item.indent
while len(last_bullet_by_level) < level:
last_bullet_by_level.append(None)
while len(last_bullet_by_level) > level:
last_bullet_by_level.pop()
if not item.bullet or ")" not in item.bullet:
last_bullet = None
last_level = None
continue
last_bullet = last_bullet_by_level[-1]
bullet = item.bullet.strip(")")
if not last_bullet:
expected = ["a", "i"]
elif last_bullet == "i":
expected = ["j", "ii"]
elif last_bullet in roman_numerals:
expected = [roman_numerals[roman_numerals.index(last_bullet) + 1]]
elif len(bullet) > 1:
print(
"We expected a single-character bullet but got '{}':\n{}\n{}\n{}\n\n".format(
bullet, *items[i - 1 : i + 2]
)
)
else:
expected = [chr(ord(last_bullet) + 1)]
if bullet not in expected:
print(
"We expected a bullet from {} but got '{}':\n{}\n{}\n{}\n\n".format(
expected, bullet, *items[i - 1 : i + 2]
)
)
last_bullet_by_level[-1] = bullet
def assert_standard_numbering_titles(items):
for i, item in enumerate(items):
header = re.search(r"(\d+)\.(\d+)\.(\d+)", item.bullet or "")
if not header:
continue
unit, section, subsection = (
int(header.group(1)),
int(header.group(2)),
int(header.group(3)),
)
if section == 1 and subsection == 0:
options = ["Specific Objectives", "Specific Objective"]
if item.text.title() not in options:
print(
"Title for {} should be one of {}:\n{}\n{}\n{}\n\n".format(
item.bullet, options, *items[i - 1 : i + 2]
)
)
next_item = items[i + 1]
next_item_text = next_item.text.strip(":").lower()
if not next_item_text.startswith(
"by the end of th"
) or not next_item_text.endswith("the learner should be able to"):
print(
"Title for {} should be 'By the end of (this|the) topic, the learner should be able to:':\n{}\n{}\n{}\n\n".format(
item.bullet, *items[i : i + 3]
)
)
if section == 2 and subsection == 0:
options = ["Contents", "Content"]
if item.text.title() not in options:
print(
"Title for {} should be one of {}:\n{}\n{}\n{}\n\n".format(
item.bullet, options, *items[i - 1 : i + 2]
)
)
if section == 3 and subsection == 0:
options = [
"Project Work",
"Projects",
"Project",
"Practical Activities",
"Excursion",
]
if item.text.title() not in options:
print(
"Title for {} should be one of {}:\n{}\n{}\n{}\n\n".format(
item.bullet, options, *items[i - 1 : i + 2]
)
)
| true
|
093cd0197d1e74a0ee20f4c6bead6b84fcce7c13
|
Python
|
tylergallen/fantasyfootballapp
|
/classes/team_class.py
|
UTF-8
| 342
| 3.453125
| 3
|
[] |
no_license
|
class Team:
number_of_teams = 0
def __init__(self, code, fullName, shortName):
self.code = code
self.fullName = fullName
self.shortName = shortName
Team.number_of_teams += 1
def add_bye_week(self, byeWeek):
self.byeWeek = byeWeek
    @classmethod
    def display_number_of_teams(cls):
        print(f'Total number of teams in the NFL: {cls.number_of_teams}')
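# A usage sketch (not part of the original file); the team data is illustrative.
if __name__ == '__main__':
    packers = Team('GB', 'Green Bay Packers', 'Packers')
    packers.add_bye_week(10)
    Team.display_number_of_teams()  # Total number of teams in the NFL: 1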
| true
|
1eabf5cf7f4781d061ce1deb14aa95a55734f12e
|
Python
|
kurff/pytorch
|
/third_party/ideep/python/ideep4py/tests/mm/test_mdarray3_mm.py
|
UTF-8
| 1,422
| 2.671875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
import numpy
import ideep4py
import testing
import unittest
@testing.parameterize(*testing.product({
'dtype': [numpy.float32],
'shape': [(3, 16, 2, 4), (2, 7, 1, 1),
(2, 7, 3, 2), (2, 2, 2, 2), (3, 4), (1, 1)],
}))
class TestMdarray3(unittest.TestCase):
def setUp(self):
self.x = numpy.ndarray(shape=self.shape, dtype=self.dtype, order='C')
self.x.fill(2.)
self.check_options = {'atol': 1e-5, 'rtol': 1e-4}
    @unittest.skip("should be caught in the future")
def test_noneInput(self):
x1 = None
x2 = numpy.ndarray(x1)
x = ideep4py.mdarray(x1)
print(x, x2)
def test_basicOp(self):
x1 = self.x
x = ideep4py.mdarray(x1)
numpy.testing.assert_allclose(1 / x1, 1 / x, **self.check_options)
numpy.testing.assert_allclose(2 * x1, 2 * x, **self.check_options)
numpy.testing.assert_allclose(1 - x1, 1 - x, **self.check_options)
numpy.testing.assert_allclose(1 + x1, 1 + x, **self.check_options)
x1 /= 3
x /= 3
numpy.testing.assert_allclose(x1, x, **self.check_options)
x1 *= 2
x *= 2
numpy.testing.assert_allclose(x1, x, **self.check_options)
x1 += 3
x += 3
numpy.testing.assert_allclose(x1, x, **self.check_options)
x1 -= 5
x -= 5
numpy.testing.assert_allclose(x1, x, **self.check_options)
| true
|
23cd688ed052680d79596f915b9e68fbb4b4f74b
|
Python
|
puyamirkarimi/quantum-walks
|
/Max2SAT_graphs/inf_time_probability_histograms.py
|
UTF-8
| 3,035
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
import numpy as np
def average_data(data):
num_repeats = len(data[:, 0])
num_x_vals = len(data[0, :])
y_av = np.zeros(num_x_vals)
y_std_error = np.zeros(num_x_vals)
for x in range(num_x_vals):
y_av[x] = np.mean(data[:, x])
y_std_error[x] = np.std(data[:, x], ddof=1) / np.sqrt(num_repeats)
return y_av, y_std_error
def mask_data(data):
num_repeats = len(data[:, 0])
num_x_vals = len(data[0, :])
out = np.zeros((num_repeats-2, num_x_vals))
for x in range(num_x_vals):
vals = data[:, x]
vals1 = np.delete(vals, vals.argmin())
vals2 = np.delete(vals1, vals1.argmax())
out[:, x] = vals2
return out
# def plot_graph(x, y, y_std_error, fit_1, fit_2):
# fig, ax = plt.subplots()
# plt.scatter(x[4:], y[4:])
# plt.scatter(x[:4], y[:4], color="gray")
# plt.plot(x, fit_1, '--', label="$y=0.0005x + 0.0012$", color="red")
# plt.plot(x, fit_2, label=r"$y=0.0036 \times 2^{0.0871x}$", color="green")
# #plt.errorbar(x, y, y_std_error)
# ax.set_xlabel("Number of variables, $n$")
# ax.set_ylabel("Average runtime ($s$)")
# ax.set_xlim([5, 20])
# ax.set_xticks(range(5, 21, 3))
# ax.set_ylim([0.004, 0.012])
# ax.set_yscale('log')
# plt.legend()
# plt.tight_layout()
# plt.show()
def zero_to_nan(array):
"""Replace every 0 with 'nan' and return a copy."""
return [float('nan') if x==0 else x for x in array]
def probs_data(n):
data = np.loadtxt("./../Max2SAT_quantum/inf_time_probs_n_" + str(n) + ".txt")
return np.reciprocal(data)
if __name__ == '__main__':
plt.rc('text', usetex=True)
plt.rc('font', size=14)
n_list = [5, 10]
probs_list = []
num_bins = 100
for n in n_list:
probs = probs_data(n)
probs_list.append(probs)
min_prob = np.min(np.array(probs_list).flatten())
max_prob = np.max(np.array(probs_list).flatten())
print(min_prob, max_prob)
x = np.linspace(min_prob, max_prob, num=num_bins)
fig, ax1 = plt.subplots()
# for i_adam, n in enumerate(n_list):
# plt.scatter(x, y_adam[i_adam], label="n="+str(n), marker='+')
# plt.errorbar(x, runtimes_average, runtimes_standard_error)
# plt.xlim([0, 0.021])
# plt.ylim([9e-5, 0.013])
print(np.shape(np.array(probs_list)))
ax1.hist(np.swapaxes(np.array(probs_list), 0, 1), x, color=('deeppink', 'seagreen'))
ax1.set_yscale('log')
ax1.set_xlim([0, 100])
# ax1.yaxis.tick_left()
# ax1.tick_params(labelright='off')
    ax1.set_ylim([1, 1e5])  # log-scaled axis needs a positive lower bound
ax1.set_xlabel(r"$P_\infty$")
# ax2.set_xlabel(r"$\overline{T}_{inst}$~/~$s$")
ax1.set_ylabel(r"$p(P_\infty)$")
# ax2.set_ylabel(r"$\overline{T}_{inst}$~/~$s$")
# plt.tight_layout()
# plt.savefig('mixsat.png', dpi=300)
# plt.show()
ax1.tick_params(direction='in', top=True, right=True, which='both')
# plt.savefig('runtimes_histograms.png', dpi=200)
plt.tight_layout()
plt.show()
| true
|
fbebd785339375c43040c0fd62be57b743812be0
|
Python
|
sourabhshete99/HackerRank-Programs
|
/diagonaldiff.py
|
UTF-8
| 890
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/python3
# Given a matrix, calculate the absolute difference between the sums of its diagonals
import math
import os
import random
import re
import sys
#
# Complete the 'diagonalDifference' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY arr as parameter.
#
def diagonalDifference(arr):
# Write your code here
l=len(arr)
d1=d2=0
for j in range (0,l):
d1=d1+arr[j][j]
d2=d2+arr[j][l-1-j]
    return abs(d1 - d2)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = []
for i in range(n):
arr.append(list(map(int, input().rstrip().split())))
result = diagonalDifference(arr)
fptr.write(str(result) + '\n')
fptr.close()
| true
|
10dd56e6b966545c6f3945c5364ef45367de1f98
|
Python
|
Duisus/allure-python
|
/allure-pytest-bdd/test/links_tests/conftest.py
|
UTF-8
| 711
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
from pytest_bdd import then, parsers
@then(parsers.parse(
"report has link type of {type_name} with url:\n{url}"))
def check_link(type_name, url, allure_report):
links = _get_links(allure_report)
desired_link = {"type": type_name, "url": url, "name": url}
assert desired_link in links
@then(parsers.parse(
"report has link type of {type_name} with \"{link_name}\" name and url:\n{url}"))
def check_link_with_custom_name(type_name, link_name, url, allure_report):
links = _get_links(allure_report)
desired_link = {"type": type_name, "url": url, "name": link_name}
assert desired_link in links
def _get_links(allure_report):
return allure_report.test_cases[0]["links"]
| true
|
5627a94057edcba6eec371ec516b19b20a7949a9
|
Python
|
raviraju/recommender_system
|
/binary_recommender/recommender/rec_popularity_based.py
|
UTF-8
| 8,659
| 2.734375
| 3
|
[] |
no_license
|
"""Module for Popularity Based Recommender"""
import os
import sys
import logging
from timeit import default_timer
from pprint import pprint
import joblib
import pandas as pd
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import utilities
from recommender.rec_interface import Recommender
from recommender.evaluation import PrecisionRecall
class PopularityBasedRecommender(Recommender):
"""Popularity based recommender system model"""
def __init__(self, results_dir, model_dir,
train_data, test_data,
user_id_col, item_id_col, **kwargs):
"""constructor"""
super().__init__(results_dir, model_dir,
train_data, test_data,
user_id_col, item_id_col, **kwargs)
self.model_file = os.path.join(self.model_dir,
'popularity_based_model.pkl')
self.user_features = kwargs['user_features']
#print(self.user_features)
self.data_groups = None
#######################################
def train(self):
"""train the popularity based recommender system model"""
super().train()
print()
print("*"*80)
print("\tPopularity Based : Recommending Trending items ...")
print("*"*80)
start_time = default_timer()
# Get a count of user_ids for each unique item as popularity score
self.data_groups = self.train_data.groupby(self.user_features + [self.item_id_col])\
.agg({self.user_id_col: 'count'})\
.reset_index()
self.data_groups.rename(columns={self.user_id_col:'no_of_users',
self.item_id_col:self.item_id_col},
inplace=True)
#print(self.data_groups.head())
end_time = default_timer()
print("{:50} {}".format("Completed. ",
utilities.convert_sec(end_time - start_time)))
joblib.dump(self.data_groups, self.model_file)
LOGGER.debug("Saved Model")
#######################################
def __get_feature_val(self, identifiers, user_feature):
"""retrieve value for user_feature using identifiers from test_data"""
for identifier, val in identifiers.items():
#print(identifier, val, type(val))
data = self.test_data[self.test_data[identifier] == val]
return data[user_feature].values[0]
def __generate_top_recommendations(self, user_id, known_interacted_items):
"""Generate top popularity recommendations"""
items_to_recommend = []
columns = [self.user_id_col, self.item_id_col, 'score', 'rank']
#print(user_id)
identifiers = {self.user_id_col:user_id}
feature_vals = dict()
for user_feature in self.user_features:
feature_vals[user_feature] = self.__get_feature_val(identifiers, user_feature)
#pprint(feature_vals)
        # filter on a local view so the trained model is not mutated across calls
        data_groups = self.data_groups
        for feature, val in feature_vals.items():
            data_groups = data_groups[data_groups[feature] == val]
        #Sort the items based upon popularity score : no_of_users
        data_groups_sort = data_groups.sort_values(['no_of_users', self.item_id_col],
                                                   ascending=[0, 1])
total_no_of_users = len(self.get_all_users(dataset='train'))
if total_no_of_users == 0:
total_no_of_users = 1#to avoid division by zero
data_groups_sort['users_percent'] = data_groups_sort['no_of_users']/total_no_of_users
data_groups_sort.reset_index(drop=True, inplace=True)
#print(data_groups_sort.head())
rank = 1
for _, reco in data_groups_sort.iterrows():
item_id = reco[self.item_id_col]
score = reco['users_percent']
if not self.allow_recommending_known_items and item_id in known_interacted_items:#to avoid items which user has already aware
continue
if rank > self.no_of_recs:#limit no of recommendations
break
item_dict = {
self.item_id_col : item_id,
'score' : round(score, 3),
'rank' : rank
}
items_to_recommend.append(item_dict)
rank += 1
if len(items_to_recommend) > 0:
items_to_recommend_df = pd.DataFrame(items_to_recommend)
else:
items_to_recommend_df = pd.DataFrame(columns = [self.item_id_col, 'score', 'rank'])
return items_to_recommend_df
def recommend_items(self, user_id):
"""recommend items for given user_id from test dataset"""
super().recommend_items(user_id)
if os.path.exists(self.model_file):
self.data_groups = joblib.load(self.model_file)
LOGGER.debug("Loaded Trained Model")
start_time = default_timer()
known_interacted_items = self.items_for_evaluation[user_id]['known_interacted_items']
items_to_recommend_df = self.__generate_top_recommendations(user_id, known_interacted_items)
end_time = default_timer()
print("{:50} {}".format("Recommendations generated. ",
utilities.convert_sec(end_time - start_time)))
return items_to_recommend_df
else:
print("Trained Model not found !!!. Failed to generate recommendations")
return None
#######################################
def __recommend_items_to_evaluate(self):
"""recommend items for all users from test dataset"""
for user_id in self.items_for_evaluation:
known_interacted_items = self.items_for_evaluation[user_id]['known_interacted_items']
items_to_recommend_df = self.__generate_top_recommendations(user_id, known_interacted_items)
recommended_items_dict = items_to_recommend_df.set_index(self.item_id_col).to_dict('index')
self.items_for_evaluation[user_id]['items_recommended'] = list(recommended_items_dict.keys())
self.items_for_evaluation[user_id]['items_recommended_score'] = recommended_items_dict
items_to_be_interacted_set = set(self.items_for_evaluation[user_id]['items_to_be_interacted'])
items_recommended_set = set(self.items_for_evaluation[user_id]['items_recommended'])
correct_recommendations = items_to_be_interacted_set.intersection(items_recommended_set)
no_of_correct_recommendations = len(correct_recommendations)
self.items_for_evaluation[user_id]['no_of_correct_recommendations'] = no_of_correct_recommendations
self.items_for_evaluation[user_id]['correct_recommendations'] = list(correct_recommendations)
return self.items_for_evaluation
def evaluate(self, no_of_recs_to_eval, eval_res_file='evaluation_results.json'):
"""Evaluate trained model for different no of ranked recommendations"""
super().evaluate(no_of_recs_to_eval, eval_res_file)
if os.path.exists(self.model_file):
self.data_groups = joblib.load(self.model_file)
LOGGER.debug("Loaded Trained Model")
start_time = default_timer()
#Generate recommendations for the users
self.items_for_evaluation = self.__recommend_items_to_evaluate()
self.save_items_for_evaluation()
precision_recall_intf = PrecisionRecall()
evaluation_results = precision_recall_intf.compute_precision_recall(
no_of_recs_to_eval, self.items_for_evaluation, self.items_all)
end_time = default_timer()
print("{:50} {}".format("Evaluation Completed. ",
utilities.convert_sec(end_time - start_time)))
results_file = os.path.join(self.model_dir, eval_res_file)
utilities.dump_json_file(evaluation_results, results_file)
return evaluation_results
else:
print("Trained Model not found !!!. Failed to evaluate")
evaluation_results = {'status' : "Trained Model not found !!!. Failed to evaluate"}
results_file = os.path.join(self.model_dir, eval_res_file)
utilities.dump_json_file(evaluation_results, results_file)
return evaluation_results
#######################################
| true
|
919a7b0f89a1ebab1c204204af6ec5fef083ce88
|
Python
|
basharSaid/Python
|
/While-Schleife.py
|
UTF-8
| 251
| 3.21875
| 3
|
[] |
no_license
|
# random number generator
import random
random.seed()
# initialization
summe = 0
# while loop
while summe < 30:
    zzahl = random.randint(1, 10)
    summe = summe + zzahl
    print("number:", zzahl, "running total:", summe)
# end
print("End")
| true
|
31f370dfecd34ce28ab7d63681dc76f2808e7af8
|
Python
|
xiaoyaoshuai/-
|
/作业/7.17/038idcard.py
|
UTF-8
| 641
| 4.09375
| 4
|
[] |
no_license
|
'''
Create a file named idcard.py, then define 3 string variables in it to record what
two programmers say. Extract the birth date from the ID card number programmer A
enters, assemble it into a "YYYY-MM-DD"-style string, join the two strings with an
escape character (a newline) in between, and finally print the extracted birth date.
'''
c = 'Programmer A says: Do you know my birthday?'
b = 'Programmer B says: Enter your ID card number'
print(c)
print(b)
a = input('Programmer A says: ')
print('Your date of birth is %s-%s-%s, so your birthday is %s-%s' % (a[6:10], a[10:12], a[12:14], a[10:12], a[12:14]))
| true
|
15e83d3c89b98172846bc716a814f2b510695797
|
Python
|
eridanletalis/Python
|
/stepik/base_course_1/file_reading/school_data/school_data.py
|
UTF-8
| 710
| 3.25
| 3
|
[] |
no_license
|
s = []  # temporary token buffer for one input line
d = {}  # maps student name -> list of grades
with open('in.txt', 'r') as file_reader:
for line in file_reader:
for i in line.strip().lower().split(';'):
s.append(i)
for i in range(len(s)):
if i == 0:
d[s[i]] = []
else:
d[s[0]].append(int(s[i]))
s.clear()
# sum0 accumulates one student's grades; sum1..sum3 accumulate each grade column
sum0 = 0
sum1 = 0
sum2 = 0
sum3 = 0
for grades in d.values():
for i in range(len(grades)):
sum0 += grades[i]
if i == 0:
sum1 += grades[i]
if i == 1:
sum2 += grades[i]
if i == 2:
sum3 += grades[i]
print(sum0 / len(grades))
sum0 = 0
print(str(sum1 / len(d)) + " " + str(sum2 / len(d)) + " " + str(sum3 / len(d)))
| true
|
96c6a1824bb4cd7d6cb752aa14b442e121dcef0b
|
Python
|
m1ghtfr3e/Competitive-Programming
|
/Leetcode/Python/April-30-day-Challenge/moveZero_2.py
|
UTF-8
| 325
| 3.3125
| 3
|
[] |
no_license
|
def moveZero(nums):
x = 0 # we use it as a pointer
for i in nums:
if i != 0:
nums[x] = i
x += 1
for i in range(x, len(nums)):
nums[i] = 0
return nums
if __name__ == '__main__':
print(moveZero([0,1,0,3,12]))
print(moveZero([0,0,1]))
| true
|
57f3a8a792694f098cac5c9d838d457690e4f58a
|
Python
|
Danucas/holbertonschool-higher_level_programming
|
/0x07-python-test_driven_development/2-matrix_divided.py
|
UTF-8
| 1,128
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/python3
"""Divides a matrix"""
def matrix_divided(matrix, div):
"""matrix division
Arguments:
arg1 (matrix): the matrix to divide
arg2 (div): number to divide by
"""
if type(div) != int and type(div) != float:
raise TypeError("div must be a number")
if div == 0:
raise ZeroDivisionError("division by zero")
if type(matrix) != list:
raise TypeError("matrix must be a matrix (list of lists) of integers/\
floats")
length = 0
new_matrix = []
overf = "Each row of the matrix must have the same size"
form = "matrix must be a matrix (list of lists) of integers/floats"
for y, row in enumerate(matrix):
new_matrix.append([])
if y > 0:
if len(row) != length:
raise TypeError(overf)
for x, col in enumerate(row):
if type(col) != int and type(col) != float or\
(float(col) == float("inf")):
raise TypeError(form)
else:
new_matrix[y].append(round(col / div, 2))
length = len(row)
return new_matrix
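# Usage sketch (not part of the original task file):
#     >>> matrix_divided([[1, 2, 3], [4, 5, 6]], 3)
#     [[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]]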
| true
|
794094ca26a13fa89a3dc766a2cf49c2e06e8a3e
|
Python
|
dradosevich/SwimScripts
|
/divegenstate.py
|
UTF-8
| 2,619
| 3.03125
| 3
|
[] |
no_license
|
#Danny Radosevich
#Dive XML Generator
import subprocess as sp
import os
import re
toOpen = input("Enter the file to read from:\n") #prompt user for input
with open(toOpen) as infile: #open file
    readFrom = infile.read() #read contents
readFrom = readFrom.split("\n") #get each individual line
lineNum = 1 #control variable
writeOut = 0 #will be used as the write out file
for line in readFrom: #go through every line
#print(line+" "+str(lineNum))
if lineNum == 6: #this is the line with the meet title
newFile = line.replace(' ','')+".xml" #remove spaces, add xml extension, to make file name
newFile = newFile.replace('/','-') #clean up bad characters for the date
#os.system("touch "+newFile)
newFile = str(newFile) #not needed at all
writeOut = open(newFile,"w")#open file to write out
writeOut.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")#header
writeOut.write("<divingevent>\n") #denote this is a diving event
writeOut.write("<eventtitle>"+newFile[:-4]+"</eventtitle>\n") #use file name to make title
elif lineNum > 13 and "</" not in line: #now to do the divers, if at rigth line
#print(line)
#re.sub('\s+', ' ',line).strip()
#line.replace("\t"," ")
if "," in line:
line = line.replace(",","")
if "FR" in line:
line = line.replace("FR","")
if "SO" in line:
line = line.replace("SO","")
if "JR" in line:
line = line.replace("JR","")
if "SR" in line:
line = line.replace("SR","")
for char in line: #for every character in the line
if char.isdigit():
line = line.replace(char, '') #if it is a number remove it
line = line.replace('.','') #remove all periods
while ' 'in line:
line = line.replace(' ',' ') #get down to single spacing
print(line) #print out line for verification, not needed
line = line.split(" ") #split out the line to individual words
if(len(line)>4): #bounds check
newDiver = "\t<diver>\n" #xml write out
newDiver+= "\t\t<lastname>"+line[2]+"</lastname>\n" #write out last name
newDiver+= "\t\t<firstname>"+line[1]+"</firstname>\n" #writeout first name
newDiver+="\t\t<team>"+line[3][:3]+"</team>\n" #write out team
newDiver+="\t</diver>\n" #end the diver
writeOut.write(newDiver) #write out to the file
lineNum+=1 #increment control
writeOut.write("</divingevent>") #end the diving event
writeOut.close() #close the file
| true
|
1fb5ea2fed2ee0c5e83c89ab199b0700c8683304
|
Python
|
zxteloiv/AdaNSP
|
/src/models/transformer/adaptive_computing.py
|
UTF-8
| 2,996
| 2.828125
| 3
|
[] |
no_license
|
from typing import List, Tuple, Dict, Mapping, Optional
import torch
class AdaptiveComputing(torch.nn.Module):
def __init__(self,
halting_fn: torch.nn.Module,
max_computing_time: int = 10,
epsilon: float = 0.1,
mode: str = "basic"
):
super(AdaptiveComputing, self).__init__()
self._halting_fn: torch.nn.Module = halting_fn
self._threshold: float = 1 - epsilon
self._max_computing_time: int = max_computing_time
self._mode: str = mode
def forward(self,
*args,
inputs: torch.nn.Module,
step_fn: torch.nn.Module, ) -> torch.Tensor:
"""
Adaptively compute the hidden states, and compute the halting probability for each token
:param inputs: (batch, seq_len, emb_dim)
:param mask: (batch, seq_len), input padding mask
:param step_fn: step function to take to compute a new recurrent state,
which accepts inputs, padding mask and timestep, then returns another state
:param halting_prob_cumulation: (batch, seq_len), the previous cumulated halting_probability
:return: halting probability: (batch, seq_len)
"""
timestep = 0
hidden = inputs
# halting_prob_cumulation: (batch, seq_len)
halting_prob_cumulation = hidden.new_zeros(hidden.size()[:-1]).float()
        # run until the step budget is spent or every token has halted
        while timestep < self._max_computing_time and bool((halting_prob_cumulation < 1.).any()):
# current all alive tokens, which need further computation
# alive_mask: (batch, seq_len)
alive_mask: torch.Tensor = halting_prob_cumulation < 1.
alive_mask = alive_mask.float()
# halting_prob: (batch, seq_len) <- (batch, seq_len, 1)
halting_prob = self._halting_fn(hidden).squeeze(-1)
# temp_cumulation: (batch, seq_len)
temp_cumulation = halting_prob * alive_mask + halting_prob_cumulation
# mask to the newly halted tokens, which is exhausted at the current timestep of computation
# new_halted: (batch, seq_len)
new_halted = (temp_cumulation > self._threshold).float()
remainder = 1. - halting_prob_cumulation + 1.e-10
# all tokens that survives from the current timestep's computation
# alive_mask: (batch, seq_len)
alive_mask = (1 - new_halted) * alive_mask
halting_prob_cumulation += halting_prob * alive_mask
# cumulations for newly halted positions will reach 1.0 after adding up remainder at the current timestep
halting_prob_cumulation += remainder * new_halted
step_out = step_fn(hidden, *args, timestep)
timestep += 1
state_update_weight = alive_mask.unsqueeze(-1)
hidden = state_update_weight * step_out + (1 - state_update_weight) * hidden
return hidden
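# A minimal usage sketch (not part of the original module); the halting and step
# modules below are made-up stand-ins just to exercise the interface.
if __name__ == '__main__':
    class ToyStep(torch.nn.Module):
        def __init__(self, dim):
            super().__init__()
            self.proj = torch.nn.Linear(dim, dim)

        def forward(self, hidden, timestep):
            # one recurrent update; the timestep argument is ignored here
            return torch.tanh(self.proj(hidden))

    dim = 8
    halting_fn = torch.nn.Sequential(torch.nn.Linear(dim, 1), torch.nn.Sigmoid())
    act = AdaptiveComputing(halting_fn=halting_fn, max_computing_time=5)
    out = act(inputs=torch.randn(2, 4, dim), step_fn=ToyStep(dim))
    print(out.shape)  # torch.Size([2, 4, 8])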
| true
|
c879c4be55fa646b6f909bb49e0cae2873efde91
|
Python
|
KristallWang/LMS8001-PLL-Sim
|
/src/vco.py
|
UTF-8
| 14,152
| 2.6875
| 3
|
[
"Apache-2.0",
"CC-BY-3.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
from header import *
from plot_funcs import searchF
import pandas as pnd
class vco(object):
"""Represents ideal VCO. F is frequency of oscillation in Hz, and KVCO is tuning sensistivity in Hz/V"""
def __init__(self, name='VCO', F=1.0e9, KVCO=20.0e6, fc=200.0e3, (pn,fpn)=(-121, 1.0e6), noise_floor=-165.0):
self.name=name
if (F<=0):
print "VCO oscillation frequency should be positive real number."
print "VCO object not created. Returning None."
return None
else:
self.F=F
self.KVCO=KVCO
if (fc>0 and fpn>0):
self.fc=fc
self.pn=pn
self.fpn=fpn
self.noise_floor=noise_floor
else:
print "Corner frequency and phase noise frequency should be positive real number."
print "VCO object not created. Returning None."
return None
def setNoise(self, fc, (pn, fpn)=(-121, 1.0e6), noise_floor=-165.0):
        if (fc>0 and fpn>0):
            self.fc=fc
            self.pn=pn
            self.fpn=fpn
        else:
            print "Corner frequency and phase noise frequency should be positive real number."
            print "VCO Noise parameters not changed."
def __str__(self):
return ("VCO class instance\n\tInst.Name= %s\n\tF=%.2f GHZ\n\tKVCO=%.2f MHz/V\n\tfc=%.2e Hz\n\tPhase Noise-Freq Pair=(%.2f dBc/Hz, %.2eHz)\n\tNoise Floor=%.2f dBc/Hz" %(self.name, self.F/1.0e9, self.KVCO/1.0e6, self.fc, self.pn, self.fpn, self.noise_floor))
def tfunc(self,f):
j=np.complex(0,1)
w=2*math.pi*f
s=j*w
Kvco=2*math.pi*self.KVCO
return Kvco/(j*w)
def setName(self, name):
self.name=name
def setF(self,F):
if (F<0):
print "VCO oscillation frequency should be positive real number."
print "VCO oscillation frequency will not be changed."
else:
self.F=F
def setKVCO(self,KVCO):
self.KVCO=KVCO
def getF(self):
return self.F
def getKVCO(self):
return self.KVCO
def calc_pnoise_pwl(self, f):
pn_vals=[]
fc=self.fc
pn=self.pn
fpn=self.fpn
ind_fst=searchF(fpn, f[0], 'greater-equal', 'first')
ind_lst=searchF(fpn, f[len(f)-1], 'lower-equal', 'last')
if (ind_lst<ind_fst):
print "Error: VCO Calculate Phase Noise Method, PieceWise Approx."
print "Unordered list of offset frequencies. Returning None."
return None
if (ind_fst>0 and ind_fst<len(fpn)):
slope_fst=(pn[ind_fst]-pn[ind_fst-1])/log10(fpn[ind_fst]/fpn[ind_fst-1])
pn_fst=pn[ind_fst-1]+slope_fst*log10(f[0]/fpn[ind_fst-1])
else:
if (fpn[ind_fst]<fc):
slope_fst=-30.0
else:
slope_fst=-20.0
if (ind_fst==0):
pn_fst=pn[0]-slope_fst*log10(fpn[0]/f[0])
else:
pn_fst=pn[len(fpn)-1]+slope_fst*log10(f[0]/fpn[len(fpn)-1])
if (ind_lst>0 and ind_lst<len(fpn)-1):
slope_lst=(pn[ind_lst]-pn[ind_lst-1])/log10(fpn[ind_lst]/fpn[ind_lst-1])
#if (fpn[ind_lst]<fc):
# slope_lst=max(-30.0, slope_lst)
#else:
# slope_lst=max(-20.0, slope_lst)
else:
if (fpn[ind_lst]<fc):
slope_lst=-30.0
else:
slope_lst=-20.0
fpn_ind=ind_fst
pn_sec=pn_fst
f_ind_sec=0
slope=slope_fst
f_ind=0
while (f_ind<len(f)):
if (f[f_ind]>fpn[fpn_ind]):
pn_sec=pn_last
f_ind_sec=f_ind-1
if (fpn_ind==ind_lst):
slope=slope_lst
else:
fpn_ind+=1
slope=(pn[fpn_ind]-pn[fpn_ind-1])/log10(fpn[fpn_ind]/fpn[fpn_ind-1])
pn_last=pn_sec+slope*log10(f[f_ind]/f[f_ind_sec])
pn_vals.append(pn_last)
f_ind+=1
pn_vco=np.array(pn_vals)
pn_vco=np.power(10.0, pn_vco/10.0)+np.power(10.0, self.noise_floor/10.0)
pn_vco=10.0*np.log10(pn_vco)
return pn_vco
def calc_pnoise_1p(self, f):
"""fc - VCO pnoise corner frequency, where slope changes from -30 dB/dec(upconv. flicker noise) to -20 dB/dec (upconv. thermal noise), (pn, fpn) - tuple which defines one (pnoise[dBc/Hz], foffset[Hz]) known pair."""
fc=self.fc
pn=self.pn
fpn=self.fpn
if (fpn<fc):
slope=-30
else:
slope=-20
pn_fc=pn-slope*log10(fpn/fc)
pn_vals=[]
for fval in f:
if (fval<fc):
slope=-30
else:
slope=-20
pn_vals.append(pn_fc+slope*log10(fval/fc))
pn_vco=np.array(pn_vals)
pn_vco=np.power(10.0, pn_vco/10.0)+np.power(10.0, self.noise_floor/10.0)
pn_vco=10.0*np.log10(pn_vco)
return pn_vco
def calc_pnoise(self,f):
if (type(self.fpn) is list) and (type(self.pn) is list):
return self.calc_pnoise_pwl(f)
else:
return self.calc_pnoise_1p(f)
class lms8001_vco(vco):
def __init__(self, name='VCO_LMS8001IC', EM_MODEL=False, MEAS_FREQ=True, SEL=3, FREQ=128, VTUNE=0.6, fc=200.0e3, (pn,fpn)=(-121, 1.0e6), noise_floor=-165.0):
self.name=name
SEL=lms8001_vco.checkCTRL(SEL, 'SEL', 0, 3)
FREQ=lms8001_vco.checkCTRL(FREQ, 'FREQ', 0, 255)
VTUNE=lms8001_vco.checkVTUNE(VTUNE, 'VTUNE', LMS8001_VTUNE_MIN, LMS8001_VTUNE_MAX)
self.SEL=SEL
self.FREQ=FREQ
self.VTUNE=VTUNE
script_dir=os.path.dirname(__file__)
if not(EM_MODEL):
VCOL_FILEPATH=os.path.join(script_dir, 'Data/VCOL_RCEXT.csv')
VCOM_FILEPATH=os.path.join(script_dir, 'Data/VCOM_RCEXT.csv')
VCOH_FILEPATH=os.path.join(script_dir, 'Data/VCOH_RCEXT.csv')
else:
if (MEAS_FREQ):
VCOL_FILEPATH=os.path.join(script_dir, 'Data/VCOL_MEAS.csv')
VCOM_FILEPATH=os.path.join(script_dir, 'Data/VCOM_MEAS.csv')
VCOH_FILEPATH=os.path.join(script_dir, 'Data/VCOH_MEAS.csv')
else:
VCOL_FILEPATH=os.path.join(script_dir, 'Data/VCOL_RLCKEXT.csv')
VCOM_FILEPATH=os.path.join(script_dir, 'Data/VCOM_RLCKEXT.csv')
VCOH_FILEPATH=os.path.join(script_dir, 'Data/VCOH_RLCKEXT.csv')
self.VCOL_DATA=(pnd.read_csv(VCOL_FILEPATH)).values
self.VCOM_DATA=(pnd.read_csv(VCOM_FILEPATH)).values
self.VCOH_DATA=(pnd.read_csv(VCOH_FILEPATH)).values
self.setF(SEL, FREQ, VTUNE)
if ((type(fc) is float) and (type(fpn) is float)) and (fc>0 and fpn>0):
self.fc=fc
self.pn=pn
self.fpn=fpn
elif (type(fc) is float) and ((type(fpn) is list) and (type(pn) is list)):
print "Warning: List of offset frequencies should contain only positive real numbers."
self.fc=fc
self.pn=pn
self.fpn=fpn
else:
print "Offset frequency and phase noise frequency should be positive real number(s) (scalar or list)."
print "If a list of offset frequencies and corresponding phase noise values is used, they should have the same length."
print "VCO object not created. Returning None."
return None
self.noise_floor=noise_floor
@staticmethod
def checkVTUNE(val, valname, val_min, val_max):
if (val<val_min):
print "Warning: VCO %s cannot go lower then %d. Minimum value will be used." % (valname, val_min)
val=val_min
elif (val>val_max):
print "Warning: VCO %s cannot go higher then %d. Maximum value will be used." % (valname, val_max)
val=val_max
return val
@staticmethod
def checkCTRL(val, valname, val_min, val_max):
val=floor(val)
if (val<val_min):
print "Warning: VCO digital control %s is considered as %d bit positive integer. Minimum value is %d. This value will be used." % (valname, math.log10(val_max-val_min+1)/math.log10(2.0), val_min)
val=val_min
elif (val>val_max):
print "Warning: VCO digital control %s is considered as %d bit positive integer. Maximum value is %d. This value will be used." % (valname, math.log10(val_max-val_min+1)/math.log10(2.0), val_max)
val=val_max
return val
def setNoise(self, fc, (pn, fpn), noise_floor=-165.0):
if ((type(fc) is float) and (type(fpn) is float)) and (fc>0 and fpn>0):
self.fc=fc
self.pn=pn
self.fpn=fpn
elif (type(fc) is float) and ((type(fpn) is list) and (type(pn) is list)) and (len(pn)==len(fpn)) and (len(pn)==len(fpn)):
print "Warning: List of offset frequencies should contain only positive real numbers."
self.fc=fc
self.pn=pn
self.fpn=fpn
else:
print "Offset frequency and phase noise frequency should be positive real number(s) (scalar or list)."
print "If a list of offset frequencies and corresponding phase noise values is used, they should have the same length."
print "VCO object not created. Returning None."
return None
self.noise_floor=noise_floor
def getVars(self, SEL,FREQ):
if (SEL==0):
self.F=0
print "Warning: External LO Mode Configured. SEL of %s set to 0." % (self.name)
return (0.0, 0.0, 0.0, 0.0, 0.0)
else:
if (SEL==1):
VCO_DATA=self.VCOL_DATA
elif (SEL==2):
VCO_DATA=self.VCOM_DATA
else:
VCO_DATA=self.VCOH_DATA
F0=VCO_DATA[FREQ,1]
P3=VCO_DATA[FREQ,2]
P2=VCO_DATA[FREQ,3]
P1=VCO_DATA[FREQ,4]
P0=VCO_DATA[FREQ,5]
return (F0,P3,P2,P1,P0)
def calcF(self,SEL,FREQ,VTUNE):
(F0,P3,P2,P1,P0)=self.getVars(SEL,FREQ)
return F0*(P3*(VTUNE**3)+P2*(VTUNE**2)+P1*VTUNE+P0)
def calcKVCO(self, SEL, FREQ, VTUNE):
(F0,P3,P2,P1,P0)=self.getVars(SEL,FREQ)
return F0*(3*P3*(VTUNE**2)+2*P2*VTUNE+P1)
def setF(self, SEL, FREQ, VTUNE):
SEL=lms8001_vco.checkCTRL(SEL, 'SEL', 0, 3)
FREQ=lms8001_vco.checkCTRL(FREQ, 'FREQ', 0, 255)
VTUNE=lms8001_vco.checkVTUNE(VTUNE, 'VTUNE', 0.0, 1.2)
self.SEL=SEL
self.FREQ=FREQ
self.VTUNE=VTUNE
self.F=self.calcF(SEL,FREQ,VTUNE)
self.KVCO=self.calcKVCO(SEL,FREQ,VTUNE)
def setKVCO(self, KVCO=0):
print "Warning: %s Tuning Sensitivity is automatically calculated when defining (SEL,FREQ,VTUNE) parameters." % (self.name)
print "Warning: KVCO attribute of %s will not be changed." % (self.name)
def AUTO_CAL(self, F_TARGET, VTUNE_FIX=0.6, VTUNE_STEP=0.01, SEL_INIT=0):
"""This method is used to find optimal VCO digital configuration (SEL and FREQ) and to estimate approx. VTUNE value for targeted VCO frequency of F_TARGET"""
DF_BEST=100.0e9
if (SEL_INIT==0):
SEL_OPT=0
SEL_LIST=range(1,4)
else:
SEL_INIT=lms8001_vco.checkCTRL(SEL_INIT, 'VCO_AUTO_CAL:SEL_INIT', 1, 3)
SEL_OPT=SEL_INIT
SEL_LIST=range(int(SEL_OPT),int(SEL_OPT)+1)
FREQ_OPT=0
VTUNE_OPT=VTUNE_FIX
VTUNE_N=int(floor((LMS8001_VTUNE_MAX-LMS8001_VTUNE_MIN)*1.0/VTUNE_STEP))
for SEL in SEL_LIST:
for FREQ in range(0,256):
F_ESTIM=self.calcF(SEL,FREQ,VTUNE_FIX)
DF=abs(F_ESTIM-F_TARGET)
if (DF<DF_BEST):
DF_BEST=DF
SEL_OPT=SEL
FREQ_OPT=FREQ
DF_BEST=100.0e9
if (SEL_OPT>0):
for i in range(0,VTUNE_N):
VTUNE_VAL=LMS8001_VTUNE_MIN+i*VTUNE_STEP
F_ESTIM=self.calcF(SEL_OPT,FREQ_OPT,VTUNE_VAL)
DF=abs(F_ESTIM-F_TARGET)
if (DF<DF_BEST):
DF_BEST=DF
VTUNE_OPT=VTUNE_VAL
VCO_CONFIG={'SEL':SEL_OPT, 'FREQ':FREQ_OPT, 'VTUNE':VTUNE_OPT}
return VCO_CONFIG
def get_TCURVE(self, SEL, FREQ, VTUNE_STEP=0.05):
SEL=lms8001_vco.checkCTRL(SEL, 'SEL', 0, 3)
FREQ=lms8001_vco.checkCTRL(FREQ, 'FREQ', 0, 255)
VTUNE_N=int(floor((LMS8001_VTUNE_MAX-LMS8001_VTUNE_MIN)*1.0/VTUNE_STEP))
VTUNE_SWEEP=[]
F_SWEEP=[]
for i in range(0,VTUNE_N):
VTUNE_VAL=LMS8001_VTUNE_MIN+i*VTUNE_STEP
F_ESTIM=self.calcF(SEL,FREQ,VTUNE_VAL)
VTUNE_SWEEP.append(VTUNE_VAL)
F_SWEEP.append(F_ESTIM)
return (np.array(VTUNE_SWEEP),np.array(F_SWEEP))
def CTUNE_SEL(self, F_TARGET, SEL_FORCE=False, SEL_INIT=2, FREQ_MIN=5, FREQ_MAX=250, VTUNE_FIX=0.6):
if (SEL_FORCE):
return SEL_INIT
else:
if (self.calcF(2, FREQ_MIN, VTUNE_FIX)>F_TARGET):
return 1
elif (self.calcF(2, FREQ_MAX, VTUNE_FIX)<F_TARGET):
return 3
else:
return 2
def CTUNE_FREQ(self, F_TARGET, SEL, FREQ_FORCE=0, FREQ_INIT=128, FREQ_INIT_POS=7, VTUNE_FIX=0.6, VTUNE_STEP=0.01):
SEL=lms8001_vco.checkCTRL(SEL, 'SEL', 1, 3)
FREQ_INIT=lms8001_vco.checkCTRL(FREQ_INIT, 'FREQ_INIT', 0, 255)
FREQ_INIT_POS=lms8001_vco.checkCTRL(FREQ_INIT_POS, 'FREQ_INIT_POS', 0, 7)
VTUNE_FIX=lms8001_vco.checkVTUNE(VTUNE_FIX, 'VTUNE_FIX', LMS8001_VTUNE_MIN, LMS8001_VTUNE_MAX)
VTUNE_N=int(floor((LMS8001_VTUNE_MAX-LMS8001_VTUNE_MIN)*1.0/VTUNE_STEP))
if not (FREQ_FORCE):
FREQ_INIT_POS=7
FREQ_POS=FREQ_INIT_POS
FREQ_CURRENT=int(pow(2,FREQ_INIT_POS))
else:
FREQ_POS=FREQ_INIT_POS
i=7
FREQ_MASK=0
            # decrement i on each pass; without it this loop never terminates
            while (i>=FREQ_POS):
                FREQ_MASK+=int(pow(2,i))
                i=i-1
            FREQ_CURRENT=FREQ_INIT&FREQ_MASK
while (FREQ_POS>=0):
F_VAL=self.calcF(SEL, FREQ_CURRENT, VTUNE_FIX)
FREQ_HIGH=(F_VAL>F_TARGET)
#print 'FREQ_CURRENT= %d , FREQ_HIGH= %d' % (FREQ_CURRENT, FREQ_HIGH)
FREQ_EQUAL=(F_VAL==F_TARGET)
FREQ_LOW=(F_VAL<F_TARGET)
FREQ_MASK=0
i=7
while (i>FREQ_POS):
FREQ_MASK+=int(pow(2,i))
i=i-1
if not (FREQ_HIGH):
FREQ_MASK+=int(pow(2,FREQ_POS))
FREQ_CURRENT=FREQ_CURRENT&FREQ_MASK
FREQ_POS-=1
if (FREQ_POS>=0):
FREQ_CURRENT+=int(pow(2,FREQ_POS))
FREQ_OPT=FREQ_CURRENT
DF_BEST=100.0e9
for i in range(0,VTUNE_N):
VTUNE_VAL=LMS8001_VTUNE_MIN+i*VTUNE_STEP
F_ESTIM=self.calcF(SEL,FREQ_OPT,VTUNE_VAL)
DF=abs(F_ESTIM-F_TARGET)
if (DF<DF_BEST):
DF_BEST=DF
VTUNE_OPT=VTUNE_VAL
return (FREQ_OPT, VTUNE_OPT)
def CTUNE(self, F_TARGET, SEL_FORCE=False, SEL_INIT=2, FREQ_FORCE=False, FREQ_INIT=128, FREQ_INIT_POS=7, FREQ_MIN=5, FREQ_MAX=250, VTUNE_FIX=0.6, VTUNE_STEP=0.01):
SEL_OPT=self.CTUNE_SEL(F_TARGET, SEL_FORCE, SEL_INIT, FREQ_MIN, FREQ_MAX, VTUNE_FIX)
(FREQ_OPT, VTUNE_OPT)=self.CTUNE_FREQ(F_TARGET, SEL_OPT, FREQ_FORCE, FREQ_INIT, FREQ_INIT_POS, VTUNE_FIX, VTUNE_STEP)
VCO_CONFIG={'SEL':SEL_OPT, 'FREQ':FREQ_OPT, 'VTUNE':VTUNE_OPT}
return VCO_CONFIG
def __str__(self):
        if ((type(self.fpn) is list) and (type(self.pn) is list)):
            pn_str='['
            for i in range(0,len(self.fpn)-1):
                pn_str+='(%.2f dBc/Hz, %.2eHz), ' % (self.pn[i], self.fpn[i])
            pn_str+='(%.2f dBc/Hz, %.2eHz)]' % (self.pn[len(self.fpn)-1], self.fpn[len(self.fpn)-1])
        else:
            pn_str='(%.2f dBc/Hz, %.2eHz)' % (self.pn, self.fpn)
#return ("VCO class instance\n\tInst.Name= %s\n\tF=%.4f GHZ (SEL= %d, FREQ= %d, VTUNE= %.2f V)\n\tKVCO=%.2f MHz/V\n\tfc=%.2e Hz\n\tPhase Noise-Freq Pair=(%.2f dBc/Hz, %.2eHz)" %(self.name, self.F/1.0e9, self.SEL, self.FREQ, self.VTUNE, self.KVCO/1.0e6, self.fc, self.pn, self.fpn))
return ("VCO class instance\n\tInst.Name= %s\n\tF=%.4f GHZ (SEL= %d, FREQ= %d, VTUNE= %.3f V)\n\tKVCO=%.2f MHz/V\n\tfc=%.2e Hz\n\tPhase Noise vs. Freq=%s\n\tNoise Floor=%.2f dBc/Hz" %(self.name, self.F/1.0e9, self.SEL, self.FREQ, self.VTUNE, self.KVCO/1.0e6, self.fc, pn_str, self.noise_floor))
| true
|
0a320278c301f722dfbc51a8ef33108428545692
|
Python
|
FlaviaGarcia/Content-Based-Artist-Classifier-using-CNN
|
/01-Development_code/CV_CNN1_.py
|
UTF-8
| 11,342
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 19 18:54:54 2018
@author: flaviagv
In this script, the training, validation and test accuracy of a CNN are computed for different inputs, in order to compare
which input performs best with this CNN. The CNN is composed of the following layers:
2D CONV(128 filters, 3x3) + MAX_POOLING(2x2) + 2D CONV(256 filters, 3x3) + MAX_POOLING + FLATTEN + DENSE(64) + DROPOUT(0.2) + DENSE(num_classes = 50)
"""
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import accuracy_score #confusion_matrix,
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
from keras.callbacks import EarlyStopping
import tensorflow as tf
import keras
from six.moves import cPickle as pickle
#from sklearn.metrics import roc_auc_score
#from sklearn.metrics import f1_score
#
import matplotlib.pyplot as plt
#import random
#import datetime
def LoadDatasets(pickle_file = "dataset.pickle"):
f = open(pickle_file, "rb")
Datasets = pickle.load(f)
return Datasets
def createConvNN(input_shape, num_classes):
model = Sequential()
    # conv: 128 filters
model.add(Conv2D(filters = 128,
kernel_size = 3,
activation = "relu",
use_bias = True,
strides = 1,
padding = "same",
input_shape = input_shape))
# max pool 2x2
model.add(MaxPooling2D(pool_size = 2, strides = None, padding = "same"))
    # conv
    # 256 filters
model.add(Conv2D(filters = 256,
kernel_size = 3,
activation = "relu",
use_bias = True,
strides = 1,
padding = "same"))
# kernel_initializer='glorot_uniform',
# bias_initializer='zeros'
# padding='valid'
# max pool 2x2
model.add(MaxPooling2D(pool_size = 2,
strides = None,
padding = "same"))
    # flatten -> put everything into a single vector (will be 256x32x32)
model.add(Flatten())
# fully connected 64
model.add(Dense(64, activation = 'relu'))
    model.add(Dropout(0.2)) # we randomly disable 20% of the neurons
    # softmax with 50 outputs
model.add(Dense(num_classes, activation='softmax'))
# Adam Optimizer, Batch size = 64, n_epochs -> early stopping
model.compile(optimizer = Adam(), #lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08, decay = 0.0)
loss = categorical_crossentropy, #"mse",
metrics = ["accuracy"])
return model
def plot_model_history(history, iteracion, tituloIm):
plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper left') # , 'test'
plt.savefig(tituloIm + "_IT_" + str(iteracion )+ '.jpg')
#plt.show()
# summarize history for loss
# plt.plot(history.history['loss'])
# # plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
def cross_validation(X, y, tituloIm):
"""
Hacemos tanto las 4 rounds como el train y la evaluation
El 75% del train será usado para el tren de CV y el 15% para la validacion
"""
# X sigue siendo una lista
# iteracion es un numero que tiene que estar en range(5), sino lanzar una excepcion
# quedarnos con 4/5(80%) para el train y 1/5 para el test(20%)
# En cada iteracion coger un trozo diferente del test, y que el resto sean train
acc_train_list = []
acc_val_list = []
num_classes = 20
    num_elems = len(y)//20 # each fold takes 3 of these twentieths (15% of the data)
sumAcc = 0
    # any remainder stays in the training set
    # first block  -> [0:num_elems]
    # second block -> [num_elems*1:num_elems*2]
    # third block  -> [num_elems*2:num_elems*3]
    # fourth block -> [num_elems*3:num_elems*4]
    # fifth block  -> [num_elems*4:num_elems*5]
    # the iteration number selects which block becomes the test block
    np.random.seed(133)
    # shuffle X and y with one shared permutation so (x, y) pairs stay aligned;
    # two independent shuffles would scramble the labels
    perm = np.random.permutation(len(y))
    X = [X[i] for i in perm]
    y = [y[i] for i in perm]
fin = 0
#np.random.permutation(len(y)).shape
#iteracion = 0
for iteracion in range(6):
X_val = X[fin: 3*num_elems*(iteracion+1)]
y_val = y[fin : 3*num_elems*(iteracion+1)]
X_train = X[0:fin] + X[3*num_elems*(iteracion+1):len(X)]
y_train = y[0:fin] + y[3*num_elems*(iteracion+1):len(y)]
print("----- iteracion %d -----"%iteracion)
print(" X_val --> %f"%(len(X_val)*100/len(X)))
print(" y_val --> %f"%(len(y_val)*100/len(X)))
print(" X_train --> %f"%(len(X_train)*100/len(X)))
print(" y_train --> %f\n"%(len(y_train)*100/len(X)))
fin = 3*num_elems*(iteracion+1)
X_train = np.asarray(X_train)
y_train = np.asarray(y_train)
X_val = np.asarray(X_val)
y_val = np.asarray(y_val)
        ### TRAIN THE NETWORK
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)
X_val = X_val.reshape(X_val.shape[0], X_val.shape[1], X_val.shape[2], 1)
y_train = keras.utils.to_categorical(y_train, num_classes)
# y_val = keras.utils.to_categorical(y_val, num_classes)
input_shape = X_train.shape[1:]
model = createConvNN(input_shape, num_classes)
# define early stopping callback
# earlystop = EarlyStopping(monitor = 'acc',
# min_delta = 0.0001,
# patience = 5,
# verbose = 1,
# mode = 'auto')
# callbacks_list = [earlystop]
keras.backend.get_session().run(tf.global_variables_initializer())
model_info = model.fit(X_train,
y_train,
# validation_split=0.33
verbose = 0,
batch_size = 16,
epochs = 100)#100, #100
# callbacks = callbacks_list) # No estoy poniendo validation split
        plot_model_history(model_info, iteracion, tituloIm) # reports the epoch where early stopping halted and shows two plots:
# - Model accuracy / epoch
# - Model loss / epoch
#score = model.evaluate(X_test, y_test, verbose = 0)
y_pred_probs = model.predict(X_val)
y_pred = np.argmax(y_pred_probs, axis = 1)
#y_pred = model.predict_classes(X_test)
#print('Test loss:', score[0])
print("Getting validation ACC ...")
acc_val = accuracy_score(y_val, y_pred)
print("Getting train ACC ...")
acc_train = model.evaluate(X_train, y_train, verbose = 0)[1]
print("Validation ACC iteration " + str(iteracion) +": " + str(acc_val))
print("Train ACC: iteration " + str(iteracion) + ": " + str(acc_train))
acc_val_list.append(acc_val)
acc_train_list.append(acc_train)
sumAcc += acc_val
# roc_auc_score(y_val, y_pred)
# sumAcc += accuracy_score(np.argmax(y_val, axis = 1), y_pred) #score[1])
# print("CONFUSION MATRIX: \n", confusion_matrix(np.argmax(y_test, axis = 1), y_pred))
k_fold = range(6)
plt.plot(k_fold, acc_train_list)
plt.plot(k_fold, acc_val_list, "g-")
#plt.plot(eje_x, f1_10_list_, "r-")
#plt.plot(eje_x, f1_20_list_, "y-")
plt.legend(["Train accuracy", "Test accuracy"])#, "Top 20 list"])
plt.title("Accurary in each cross validation fold")
plt.xlabel("Number of k-fold")
plt.ylabel("Accuracy")
# dt = datetime.datetime.now().strftime("%m_%d_%H_%M_%S")
plt.savefig(tituloIm + '.jpg')
#plt.show()
    return (sumAcc/6)# average validation accuracy over the folds
# y_pred_ran = [random.randint(0, 19) for test in y_test]
# acc_val = accuracy_score(y_test, y_pred_ran)
# ##### RANDOM BASELINE FOR THE H5 MFCCs
# Datasets = LoadDatasets("../02-Data_out/Dataset_MFCC_h5.pickle")
# X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
# y_pred_ran = [random.randint(0, 19) for test in y_test]
# accuracy_score(y_test, y_pred_ran)
if __name__ == "__main__":
    ### ITERATE OVER EACH EXTRACTED MFCC VARIANT
Datasets = LoadDatasets("../02-Data_out/Xy_7_MFCC1.pickle")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_7_MFCC1 = cross_validation(X_train, y_train, "CNN1_7_MFCC1")
print("ACC 7 SECONDS MFCC1 -->" + str(acc_7_MFCC1))
Datasets = LoadDatasets("../02-Data_out/Xy_7_MFCC2.pickle")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_7_MFCC2 = cross_validation(X_train, y_train, "CNN1_7_MFCC2")
print("ACC 7 SECONDS MFCC2 -->" + str(acc_7_MFCC2))
Datasets = LoadDatasets("../02-Data_out/Xy_14_MFCC1.pickle")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_14_MFCC1 = cross_validation(X_train, y_train, "CNN1_14_MFCC1")
print("ACC 14 SECONDS MFCC1 -->" + str(acc_14_MFCC1))
Datasets = LoadDatasets("../02-Data_out/Xy_14_MFCC2.pickle")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_14_MFCC2 = cross_validation(X_train, y_train, "CNN1_14_MFCC2")
print("ACC 14 SECONDS MFCC2 -->" + str(acc_14_MFCC2))
Datasets = LoadDatasets("../02-Data_out/Dataset_MFCC_h5_24-48_segs.pickle")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_24_48_MFCC_h5 = cross_validation(X_train, y_train, "CNN1_24-48_MFCC_h5")
print("ACC 24-48 SECONDS MFCC H5-->" + str(acc_24_48_MFCC_h5))
Datasets = LoadDatasets("../02-Data_out/Dataset_MFCC_h5_12-24_segs.pickle")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_12_24_MFCC_h5 = cross_validation(X_train, y_train, "CNN1_12-24_MFCC_h5")
print("ACC 12-24 SECONDS MFCC H5-->" + str(acc_12_24_MFCC_h5))
Datasets = LoadDatasets("../02-Data_out/ataset_MFCC_h5_6-12_segs.pickleD")
X_train, X_test, y_train, y_test = train_test_split(Datasets["X"], Datasets["y"], test_size = 0.1, random_state=42)
acc_6_12_MFCC_h5 = cross_validation(X_train, y_train, "CNN1_6-12_MFCC_h5")
print("ACC 6-12 SECONDS MFCC H5-->" + str(acc_6_12_MFCC_h5))
# model.save(model_path)
| true
|
786893c4c73aa1b6c103db42df28d7cc07a3dbb5
|
Python
|
qubvel/ttach
|
/ttach/transforms.py
|
UTF-8
| 7,977
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
from functools import partial
from typing import Optional, List, Union, Tuple
from . import functional as F
from .base import DualTransform, ImageOnlyTransform
class HorizontalFlip(DualTransform):
"""Flip images horizontally (left->right)"""
identity_param = False
def __init__(self):
super().__init__("apply", [False, True])
def apply_aug_image(self, image, apply=False, **kwargs):
if apply:
image = F.hflip(image)
return image
def apply_deaug_mask(self, mask, apply=False, **kwargs):
if apply:
mask = F.hflip(mask)
return mask
def apply_deaug_label(self, label, apply=False, **kwargs):
return label
def apply_deaug_keypoints(self, keypoints, apply=False, **kwargs):
if apply:
keypoints = F.keypoints_hflip(keypoints)
return keypoints
class VerticalFlip(DualTransform):
"""Flip images vertically (up->down)"""
identity_param = False
def __init__(self):
super().__init__("apply", [False, True])
def apply_aug_image(self, image, apply=False, **kwargs):
if apply:
image = F.vflip(image)
return image
def apply_deaug_mask(self, mask, apply=False, **kwargs):
if apply:
mask = F.vflip(mask)
return mask
def apply_deaug_label(self, label, apply=False, **kwargs):
return label
def apply_deaug_keypoints(self, keypoints, apply=False, **kwargs):
if apply:
keypoints = F.keypoints_vflip(keypoints)
return keypoints
class Rotate90(DualTransform):
"""Rotate images 0/90/180/270 degrees
Args:
angles (list): angles to rotate images
"""
identity_param = 0
def __init__(self, angles: List[int]):
if self.identity_param not in angles:
angles = [self.identity_param] + list(angles)
super().__init__("angle", angles)
def apply_aug_image(self, image, angle=0, **kwargs):
k = angle // 90 if angle >= 0 else (angle + 360) // 90
return F.rot90(image, k)
def apply_deaug_mask(self, mask, angle=0, **kwargs):
return self.apply_aug_image(mask, -angle)
def apply_deaug_label(self, label, angle=0, **kwargs):
return label
def apply_deaug_keypoints(self, keypoints, angle=0, **kwargs):
angle *= -1
k = angle // 90 if angle >= 0 else (angle + 360) // 90
return F.keypoints_rot90(keypoints, k=k)
class Scale(DualTransform):
"""Scale images
Args:
scales (List[Union[int, float]]): scale factors for spatial image dimensions
        interpolation (str): one of "nearest"/"linear" (see more in torch.nn.interpolate)
align_corners (bool): see more in torch.nn.interpolate
"""
identity_param = 1
def __init__(
self,
scales: List[Union[int, float]],
interpolation: str = "nearest",
align_corners: Optional[bool] = None,
):
if self.identity_param not in scales:
scales = [self.identity_param] + list(scales)
self.interpolation = interpolation
self.align_corners = align_corners
super().__init__("scale", scales)
def apply_aug_image(self, image, scale=1, **kwargs):
if scale != self.identity_param:
image = F.scale(
image,
scale,
interpolation=self.interpolation,
align_corners=self.align_corners,
)
return image
def apply_deaug_mask(self, mask, scale=1, **kwargs):
if scale != self.identity_param:
mask = F.scale(
mask,
1 / scale,
interpolation=self.interpolation,
align_corners=self.align_corners,
)
return mask
def apply_deaug_label(self, label, scale=1, **kwargs):
return label
def apply_deaug_keypoints(self, keypoints, scale=1, **kwargs):
return keypoints
class Resize(DualTransform):
"""Resize images
Args:
        sizes (List[Tuple[int, int]]): target sizes for spatial image dimensions
        original_size (Tuple[int, int]): optional, original image size for de-augmenting masks
        interpolation (str): one of "nearest"/"linear" (see more in torch.nn.interpolate)
align_corners (bool): see more in torch.nn.interpolate
"""
def __init__(
self,
sizes: List[Tuple[int, int]],
        original_size: Optional[Tuple[int, int]] = None,
interpolation: str = "nearest",
align_corners: Optional[bool] = None,
):
if original_size is not None and original_size not in sizes:
sizes = [original_size] + list(sizes)
self.interpolation = interpolation
self.align_corners = align_corners
self.original_size = original_size
super().__init__("size", sizes)
def apply_aug_image(self, image, size, **kwargs):
if size != self.original_size:
image = F.resize(
image,
size,
interpolation=self.interpolation,
align_corners=self.align_corners,
)
return image
def apply_deaug_mask(self, mask, size, **kwargs):
if self.original_size is None:
raise ValueError(
"Provide original image size to make mask backward transformation"
)
if size != self.original_size:
mask = F.resize(
mask,
self.original_size,
interpolation=self.interpolation,
align_corners=self.align_corners,
)
return mask
def apply_deaug_label(self, label, size=1, **kwargs):
return label
def apply_deaug_keypoints(self, keypoints, size=1, **kwargs):
return keypoints
class Add(ImageOnlyTransform):
"""Add value to images
Args:
values (List[float]): values to add to each pixel
"""
identity_param = 0
def __init__(self, values: List[float]):
if self.identity_param not in values:
values = [self.identity_param] + list(values)
super().__init__("value", values)
def apply_aug_image(self, image, value=0, **kwargs):
if value != self.identity_param:
image = F.add(image, value)
return image
class Multiply(ImageOnlyTransform):
"""Multiply images by factor
Args:
factors (List[float]): factor to multiply each pixel by
"""
identity_param = 1
def __init__(self, factors: List[float]):
if self.identity_param not in factors:
factors = [self.identity_param] + list(factors)
super().__init__("factor", factors)
def apply_aug_image(self, image, factor=1, **kwargs):
if factor != self.identity_param:
image = F.multiply(image, factor)
return image
class FiveCrops(ImageOnlyTransform):
"""Makes 4 crops for each corner + center crop
Args:
crop_height (int): crop height in pixels
crop_width (int): crop width in pixels
"""
def __init__(self, crop_height, crop_width):
crop_functions = (
partial(F.crop_lt, crop_h=crop_height, crop_w=crop_width),
partial(F.crop_lb, crop_h=crop_height, crop_w=crop_width),
partial(F.crop_rb, crop_h=crop_height, crop_w=crop_width),
partial(F.crop_rt, crop_h=crop_height, crop_w=crop_width),
partial(F.center_crop, crop_h=crop_height, crop_w=crop_width),
)
super().__init__("crop_fn", crop_functions)
def apply_aug_image(self, image, crop_fn=None, **kwargs):
return crop_fn(image)
def apply_deaug_mask(self, mask, **kwargs):
raise ValueError("`FiveCrop` augmentation is not suitable for mask!")
def apply_deaug_keypoints(self, keypoints, **kwargs):
raise ValueError("`FiveCrop` augmentation is not suitable for keypoints!")
| true
|
47877a9c168037a3c44e1a70a4af0abee53a1a62
|
Python
|
jiaoyasen/interview-witn-X.Wang
|
/data_generator.py
|
UTF-8
| 163
| 2.625
| 3
|
[] |
no_license
|
import random
import collections
def data_generator(n):  # generate a list of random numbers; the list has length n
data= [random.randint(-100,101) for j in range(n)]
return data
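if __name__ == '__main__':
    # Hedged usage sketch (not in the original): count the most frequent values
    # in a sample, which also exercises the otherwise unused collections import.
    sample = data_generator(20)
    print(collections.Counter(sample).most_common(3))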
| true
|
48703e2c0e492df310cfd9d8ac79c49d7a2ce511
|
Python
|
Success2014/Leetcode
|
/multipleStrings_2.py
|
UTF-8
| 741
| 3.53125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 06 16:26:14 2015
@author: Neo
"""
class Solution:
# @param {string} num1
# @param {string} num2
# @return {string}
def multiply(self, num1, num2):
num1 = num1[::-1]
num2 = num2[::-1]
prodt = [0 for i in range(len(num1)+len(num2))]
for i in xrange(len(num1)):
for j in xrange(len(num2)):
prodt[i+j] += int(num1[i]) * int(num2[j])
res = 0
t = 1
for i in xrange(len(prodt)):
            res += prodt[i]*t  # prodt[i] is at most 81 per digit pair, so multiplying by t won't overflow, but this is slower than the previous algorithm
t *= 10
return str(res)
sol = Solution()
print sol.multiply("25","25")
| true
|
7351306f117daf1a8ddef75b5ce9007be56829e4
|
Python
|
yochaiz/SmartHome
|
/Visualization/Microphone.py
|
UTF-8
| 1,669
| 2.859375
| 3
|
[] |
no_license
|
from Device import Device
from datetime import datetime
from Plot import Plot
class Microphone(Device):
keys = {'RmsVolume': 'go', 'MaxVolume': 'bo'}
nullValue = "null"
def __init__(self, filename):
super(Microphone, self).__init__(filename)
def collectData(self, startDate, lambdaFunc):
x = []
xByClass = {}
yByClass = {}
for key in self.keys.iterkeys():
xByClass[key] = []
yByClass[key] = []
i = self._Device__skipToDate(startDate)
while i < len(self.root):
child = self.root[i]
date = datetime.strptime(child.get('Time')[:-3], self.dateFormat)
if lambdaFunc(x, date) != date:
break
elem = {}
for k in self.keys:
val = child.findall(k)
if len(val) > 0 and val[0].text != self.nullValue:
elem[k] = float(val[0].text)
# print(elem)
if len(elem.keys()) > 0:
x.append(date)
for key in elem.keys():
xByClass[key].append(date)
yByClass[key].append(elem[key])
i += 1
return x, [xByClass, yByClass]
def _Device__plotInternal(self, ax, x, k):
ax.set_ylabel("Volume [dB]")
xByClass = k[0]
yByClass = k[1]
for key in xByClass.keys():
            ax.plot(xByClass[key], yByClass[key], self.keys[key], label=key)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
return None, None
def __plotInternal(self, ax, x, k):
self._Device__plotInternal(ax, x, k)
| true
|
cd4c9a5f4f9dc49140a0f41cf39eea29cb051247
|
Python
|
BenjaminGaymay/trade_2017
|
/test_client/data.py
|
UTF-8
| 2,668
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
##
## EPITECH PROJECT, 2018
## Sans titre(Espace de travail)
## File description:
## data
##
"""
Data class
holds data
"""
class Data:
"""Class that holds different data"""
def __init__(self):
self.avg = {
'crypto': -1,
'forex': -1,
'stock_exchange': -1,
'raw_material': -1
}
self.current = {
'crypto': -1,
'forex': -1,
'stock_exchange': -1,
'raw_material': -1
}
self.history = {
'crypto': [],
'forex': [],
'stock_exchange': [],
'raw_material': []
}
self.bought_price = {
'crypto': -1,
'forex': -1,
'stock_exchange': -1,
'raw_material': -1
}
def parse_data(self, data):
"""Store data in variables of the class"""
for elem in data:
for key in self.current:
if elem.split(':')[0] == key:
string = elem.split(':')[1].replace(',', '.')
value = float(string)
self.current[key] = value
self.history[key].append(value)
def get_bought_price(self, market):
"""
        get_bought_price
        Returns the price of the last share bought on this market
:param market: market's name
:type market: str
"""
return self.bought_price[market]
def get_prev_day(self, market):
"""
get_prev_day
Return previous day
:param market: market's name
:type market: str
:return: price of stock of previous day
:rtype: float
"""
try:
return self.history[market][-2]
except IndexError:
return -1
def get_current_day(self, market):
"""
get_current_day
Return current day
:param market: market's name
:type market: str
:return: price of current day's stock
:rtype: float
"""
return self.current[market]
def calc_avg(self):
"""
calc_avg
"""
for key in self.avg:
self.avg[key] = sum(self.history[key]) / len(self.history[key])
def __str__(self):
return 'crypto : {}\nforex : {}\n\
stock : {}\nraw : {}\n'.format(self.current['crypto'],
self.current['forex'],
self.current['stock_exchange'],
self.current['raw_material'])
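if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file); the 'market:123,45'
    # wire format is inferred from parse_data above.
    d = Data()
    d.parse_data(['crypto:103,50', 'forex:1,08'])
    print(d.get_current_day('crypto'))  # 103.5
    print(d.get_prev_day('crypto'))     # -1: only one sample recorded so far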
| true
|
d8d69f8a833bab54478c510940ab1739298a0835
|
Python
|
pombredanne/poodle-lex
|
/Test/TestLexicalAnalyzer.py
|
UTF-8
| 6,724
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
# coding=utf8
import sys
sys.path.append("..")
import unittest
from Generator import Automata
from Generator.CoverageSet import CoverageSet
from Generator.RulesFile import AST, NonDeterministicIR, DeterministicIR
def hash_ids(*ids):
rules = set()
for id in ids:
rules.add(hash((id.lower(), frozenset(), (None, None))))
return frozenset(rules)
class TestDFAEquivalency(unittest.TestCase):
def test_lexical_analyzer(self):
# Generate DFA through the LexicalAnalyzer class
rules_file = AST.Section("::main::", None, rule = [
AST.Rule("keyword", AST.Pattern(ur'k€yword', AST.PatternAttributes(True, False, False))),
AST.Rule("identifier", AST.Pattern(ur'[a-zA-Z][a-zA-Z0-9€_]*', AST.PatternAttributes(True, False, False))),
AST.Rule("float", AST.Pattern(ur'[0-9]+\.[0-9]*|[0-9]*\.[0-9]+')),
AST.Rule("integer", AST.Pattern(ur'[[:digit:]]+')),
AST.Rule("quote", AST.Pattern(ur'"([^"\\]|\\.)*"')),
AST.Rule("newline", AST.Pattern(ur'\r\n|\r|\n')),
AST.Rule("whitespace", AST.Pattern(ur'[\t\s]+'))])
nfa_ir = NonDeterministicIR(rules_file)
dfa_ir = DeterministicIR(nfa_ir)
# Compare DFA to expected minimized DFA
expected_dfa = Automata.DeterministicFinite()
dfa_s = [Automata.DeterministicState() for i in xrange(18)]
#Start state
dfa_s[0].ids = hash_ids("float", "whitespace", "keyword", "integer", "quote", "identifier", "newline")
#quote
dfa_s[0].edges[dfa_s[2]] = CoverageSet([(34, 34)])
dfa_s[2].is_final = False
dfa_s[2].ids = hash_ids("quote")
dfa_s[2].edges[dfa_s[10]] = CoverageSet([(92, 92)])
dfa_s[2].edges[dfa_s[2]] = CoverageSet([(1, 33), (35, 91), (93, 0x10FFFF)])
dfa_s[10].is_final = False
dfa_s[10].ids = hash_ids("quote")
dfa_s[10].edges[dfa_s[2]] = CoverageSet([(1, 0x10FFFF)])
dfa_s[2].edges[dfa_s[9]] = CoverageSet([(34, 34)])
dfa_s[9].is_final = True
dfa_s[9].final_ids = hash_ids("quote")
dfa_s[9].ids = hash_ids("quote")
#newline
dfa_s[0].edges[dfa_s[3]] = CoverageSet([(13, 13)])
dfa_s[0].edges[dfa_s[1]] = CoverageSet([(10, 10)])
dfa_s[3].is_final = True
dfa_s[3].ids = hash_ids("newline")
dfa_s[3].final_ids = hash_ids("newline")
dfa_s[3].edges[dfa_s[1]] = CoverageSet([(10, 10)])
dfa_s[1].is_final = True
dfa_s[1].ids = hash_ids("newline")
dfa_s[1].final_ids = hash_ids("newline")
#identifier
dfa_s[0].edges[dfa_s[5]] = CoverageSet([(65, 74), (76, 90), (97, 106), (108, 122)])
dfa_s[5].is_final = True
dfa_s[5].ids = hash_ids("identifier")
dfa_s[5].final_ids = hash_ids("identifier")
dfa_s[5].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 90), (95, 95), (97, 122), (0x20ac, 0x20ac)])
#integer
dfa_s[0].edges[dfa_s[4]] = CoverageSet([(48, 57)])
dfa_s[4].is_final = True
dfa_s[4].ids = hash_ids("integer","float")
dfa_s[4].final_ids = hash_ids("integer")
dfa_s[4].edges[dfa_s[4]] = CoverageSet([(48, 57)])
dfa_s[4].edges[dfa_s[11]] = CoverageSet([(46, 46)])
#float
dfa_s[0].edges[dfa_s[6]] = CoverageSet([(46, 46)])
dfa_s[6].is_final = False
dfa_s[6].ids = hash_ids("float")
dfa_s[6].edges[dfa_s[11]] = CoverageSet([(48, 57)])
dfa_s[11].is_final = True
dfa_s[11].ids = hash_ids("float")
dfa_s[11].final_ids = hash_ids("float")
dfa_s[11].edges[dfa_s[11]] = CoverageSet([(48, 57)])
#whitespace
dfa_s[0].edges[dfa_s[7]] = CoverageSet([(9, 9), (32, 32)])
dfa_s[7].is_final = True
dfa_s[7].ids = hash_ids("whitespace")
dfa_s[7].final_ids = hash_ids("whitespace")
dfa_s[7].edges[dfa_s[7]] = CoverageSet([(9, 9), (32, 32)])
#keyword
dfa_s[0].edges[dfa_s[8]] = CoverageSet([(75, 75), (107, 107)])
dfa_s[8].is_final = True
dfa_s[8].ids = hash_ids("identifier", "keyword")
dfa_s[8].final_ids = hash_ids("identifier")
dfa_s[8].edges[dfa_s[12]] = CoverageSet([(0x20ac, 0x20ac)])
dfa_s[8].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 90), (95, 95), (97, 122)])
dfa_s[12].is_final = True
dfa_s[12].ids = hash_ids("identifier", "keyword")
dfa_s[12].final_ids = hash_ids("identifier")
dfa_s[12].edges[dfa_s[13]] = CoverageSet([(121, 121), (89, 89)])
dfa_s[12].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 88), (90, 90), (95, 95), (97, 120), (122, 122), (0x20ac, 0x20ac)])
dfa_s[13].is_final = True
dfa_s[13].ids = hash_ids("identifier", "keyword")
dfa_s[13].final_ids = hash_ids("identifier")
dfa_s[13].edges[dfa_s[14]] = CoverageSet([(119, 119), (87, 87)])
dfa_s[13].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 86), (88, 90), (95, 95), (97, 118), (120, 122), (0x20ac, 0x20ac)])
dfa_s[14].is_final = True
dfa_s[14].ids = hash_ids("identifier", "keyword")
dfa_s[14].final_ids = hash_ids("identifier")
dfa_s[14].edges[dfa_s[15]] = CoverageSet([(111, 111), (79, 79)])
dfa_s[14].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 78), (80, 90), (95, 95), (97, 110), (112, 122), (0x20ac, 0x20ac)])
dfa_s[15].is_final = True
dfa_s[15].ids = hash_ids("identifier", "keyword")
dfa_s[15].final_ids = hash_ids("identifier")
dfa_s[15].edges[dfa_s[16]] = CoverageSet([(114, 114), (82, 82)])
dfa_s[15].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 81), (83, 90), (95, 95), (97, 113), (115, 122), (0x20ac, 0x20ac)])
dfa_s[16].is_final = True
dfa_s[16].ids = hash_ids("identifier", "keyword")
dfa_s[16].final_ids = hash_ids("identifier")
dfa_s[16].edges[dfa_s[17]] = CoverageSet([(100, 100), (68, 68)])
dfa_s[16].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 67), (69, 90), (95, 95), (97, 99), (101, 122), (0x20ac, 0x20ac)])
dfa_s[17].is_final = True
dfa_s[17].ids = hash_ids("identifier", "keyword")
dfa_s[17].final_ids = hash_ids("identifier","keyword")
dfa_s[17].edges[dfa_s[5]] = CoverageSet([(48, 57), (65, 90), (95, 95), (97, 122), (0x20ac, 0x20ac)])
expected_dfa.start_state = dfa_s[0]
self.assertEqual(dfa_ir.sections.values()[0].dfa, expected_dfa)
if __name__ == '__main__':
unittest.main()
| true
|
3674aa4be7d0d603aaf493259ee7fda75f04295a
|
Python
|
kishandongare/genetic-algorithm-tsp
|
/genetic algorithm tsp.py
|
UTF-8
| 407
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import random
# # Travelling Salesman problem
#
# # for any sample dataset, implement order encoding for TSP
# In[8]:
def tsp(n):
    p = random.sample(range(n),n)   # already a uniform random permutation
    random.shuffle(p)               # extra shuffle is redundant but harmless
return p
n_cities = int(input('No. of cities '))
p1 = tsp(n_cities)
print("Parent 1: ",p1)
p2 = tsp(n_cities)
print("Parent 2: ",p2)
# In[ ]:
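# Hedged sketch (not in the original notebook): the fitness of an order-encoded
# individual is its tour length; `dist` is an assumed symmetric distance matrix,
# not defined above.
def tour_length(perm, dist):
    return sum(dist[perm[i]][perm[(i + 1) % len(perm)]] for i in range(len(perm)))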
| true
|
d5ff7adbacdae1d6e361a37ab34129fae0e064da
|
Python
|
fabiovpcaumo/curso-de-python
|
/semana04/aula02/tests_exercicio_12.py
|
UTF-8
| 583
| 2.921875
| 3
|
[] |
no_license
|
from unittest import TestCase
from exercicio_12 import ContaInvestimento
class TestContaInvestimento(TestCase):
def setUp(self):
self.c = ContaInvestimento(100, 10)
def tearDown(self):
del(self.c)
    def test_adicionar_juros_deve_retornar_161_com_100_reais_e_5_meses_e_taxa_10(self):
esperado = (161.051)
self.assertEqual(self.c.adicionar_juros(5), esperado)
def test_adicionar_juros_deve_retornar_100_com_100_reais_e_0_meses_e_taxa_10(self):
esperado = (100.00)
self.assertEqual(self.c.adicionar_juros(0), esperado)
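# Hedged sketch (not part of this test file): one implementation consistent
# with the expectations above (compound interest, 100 * 1.1**5 == 161.051).
# The real exercicio_12 module may differ; the name is changed here to avoid
# shadowing the imported class.
class ContaInvestimentoSketch:
    def __init__(self, saldo, taxa):
        self.saldo = saldo
        self.taxa = taxa
    def adicionar_juros(self, meses):
        for _ in range(meses):
            self.saldo += self.saldo * self.taxa / 100
        return round(self.saldo, 3)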
| true
|
adf323d03d5ec4996f1f492dec2cbfb2cb5469db
|
Python
|
Gawesomer/cTest
|
/run_tests.py
|
UTF-8
| 3,468
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import argparse
import os
"""
Get command line arguments
params:
none
returns:
    target_file: filename
valgrind: boolean to indicate whether Valgrind should be used
"""
def get_args():
help_text = "Run tests"
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument("target_file", nargs='?', default=os.getcwd(), \
help="top directory from which test executables will be searched")
parser.add_argument("--valgrind", action="store_true",
help="run tests with Valgrind")
args = parser.parse_args()
return (args.target_file, args.valgrind)
"""
Search recursively from `search_dir` for test files.
Test files are files with names starting in "test_" that are executable.
params:
- search_dir: top directory from which recursive search will begin
returns:
set of test file names relative to current working directory
"""
def find_test_files(search_dir):
test_files = set()
for (root, dirs, files) in os.walk(search_dir):
for filename in (name for name in files if name.startswith("test_")):
file_rel_path = os.path.join(root, filename)
if os.access(file_rel_path, os.X_OK):
test_files.add(file_rel_path)
return test_files
"""
Determine if Valgrind is installed
params:
none
return:
True if Valgrind is installed, False otherwise
"""
def valgrind_installed():
return (os.system("which valgrind > /dev/null") == 0)
"""
Run test files in `test_files`
params:
- test_files: collection of test file names
file name must be relative to the current working directory
test files must be executable
    - use_valgrind: boolean to indicate whether tests should be run using
                    Valgrind. Valgrind will only be used if it's determined to
                    be installed
returns:
set of test file names that failed (i.e. returned a nonzero exit status)
"""
def run_tests(test_files, use_valgrind):
failed_tests = set()
print("### Running tests ////////////////////////////////////////////////")
if use_valgrind and valgrind_installed():
valgrind_cmd = "valgrind --error-exitcode=1 --leak-check=full"
else:
valgrind_cmd = ""
for test_file in test_files:
cmd = "{} {}".format(valgrind_cmd, test_file)
if os.system(cmd) != 0:
failed_tests.add(test_file)
return failed_tests
"""
Display final status
Prints OK if all tests passed, prints the filenames of the test files that
failed otherwise
params:
- test_files: collection of test files that were run
- failed_tests: collection of test file names that failed
returns:
nothing
"""
def display_status(test_files, failed_tests):
print("//////////////////////////////////////////////////////////////////")
print("")
print("Ran the following files:")
for test_file in test_files:
print("\t- {}".format(test_file))
print("")
if len(failed_tests) == 0:
print("OK - All tests passed")
return
print("FAILED - The following failed:")
for test_file in failed_tests:
print("\t- {}".format(test_file))
if __name__ == "__main__":
search_dir, use_valgrind = get_args()
test_files = find_test_files(search_dir)
failed_tests = run_tests(test_files, use_valgrind)
display_status(test_files, failed_tests)
exit(len(failed_tests))
| true
|
b1fb74f1f33b019e8bebed6f49ea803a1897d273
|
Python
|
MLDL/uninas
|
/uninas/optimization/metrics/nas/by_value.py
|
UTF-8
| 3,732
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import matplotlib.pyplot as plt
from uninas.optimization.metrics.nas.abstract import AbstractNasMetric
from uninas.register import Register
class ByXNasMetric(AbstractNasMetric):
"""
Calculate metrics based on predicted/true network performance values,
how much the network quality improves over the worst possible one,
when we consider fewer networks as selected by a NAS algorithm
"""
_short_name = None
_x_label = None
_y_label = None
@classmethod
def _get_data(cls, predictions: np.array, targets: np.array) -> {str: np.array}:
"""
:param predictions: network predictions (e.g. accuracy)
:param targets: ground truth values
"""
raise NotImplementedError
@classmethod
def _plot_to_axis(cls, ax: plt.Axes, x: np.array, data: {str: np.array}, name: str, has_multiple=True, index=0,
**_) -> dict:
"""
plots the data to an axis
:param ax: plt axis to plot to
:param data: {key: np.array(runs, data)} as returned from get_data,
but possibly containing data of multiple runs
:param name: name
:param has_multiple: whether multiple plots will be added to this axis
:return: dict of plotting state
"""
ax.set_ylabel(cls._y_label)
if data.get('min').shape[0] == 1:
mean_min = np.min(data.get('min'), axis=0)
mean_max = np.max(data.get('max'), axis=0)
ax.fill_between(x, mean_min, mean_max, alpha=0.15, color=cls._cols[index])
for i, v in enumerate(data.get('values')):
ax.scatter(x, v, label=("%s, all networks" % name) if i == 0 else None, color=cls._cols[index], s=2)
return {}
@Register.nas_metric()
class ByPredictionNasMetric(ByXNasMetric):
"""
Calculate metrics based on predicted/true network performance values,
how much the network quality improves over the worst possible one,
when we consider fewer networks as selected by a NAS algorithm
"""
_short_name = "by prediction"
_x_label = "top n best predicted networks"
_y_label = "ground truth"
@classmethod
def _get_data(cls, predictions: np.array, targets: np.array) -> {str: np.array}:
"""
:param predictions: network predictions (e.g. accuracy)
:param targets: ground truth values
"""
predictions, targets = cls._sorted_by_predictions(predictions, targets, ascending=True)
min_, max_ = [], []
for i in range(len(predictions)):
min_.append(np.min(targets[i:]))
max_.append(np.max(targets[i:]))
return dict(min=np.array(min_), max=np.array(max_), values=np.array(targets))
@Register.nas_metric()
class ByTargetsNasMetric(ByXNasMetric):
"""
Calculate metrics based on predicted/true network performance values,
how much the network quality improves over the worst possible one,
when we consider fewer networks as selected by a NAS algorithm
"""
_short_name = "by prediction"
_x_label = "top n networks"
_y_label = "predictions"
@classmethod
def _get_data(cls, predictions: np.array, targets: np.array) -> {str: np.array}:
"""
:param predictions: network predictions (e.g. accuracy)
:param targets: ground truth values
"""
predictions, targets = cls._sorted_by_targets(predictions, targets, ascending=True)
min_, max_ = [], []
for i in range(len(predictions)):
min_.append(np.min(predictions[i:]))
max_.append(np.max(predictions[i:]))
return dict(min=np.array(min_), max=np.array(max_), values=np.array(predictions))
| true
|
885f99a98201a27283f39380895a936f8b1622f3
|
Python
|
krish7891/pylearn
|
/numpy/arr_numeric_range/logspace.py
|
UTF-8
| 87
| 2.796875
| 3
|
[] |
no_license
|
import numpy as np
# default base is 10
a = np.logspace(1.0, 2.0, num = 10)
print a
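# Hedged note (not in the original): logspace is base**linspace of the exponents.
b = 10 ** np.linspace(1.0, 2.0, num = 10)
print np.allclose(a, b)  # True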
| true
|
89e4b09db4162f7f3fed05adf68062ef72a7e863
|
Python
|
pallaviibhat/hacktoberfest2021
|
/Python program/quick_number_exponentiation.py
|
UTF-8
| 725
| 3.78125
| 4
|
[] |
no_license
|
# Get_power: Returns "val" raised to the power of "power" with modulo "mod"
# Does that in O(logN), where N is power to raise number by
def Get_power(val, power, mod):
power = int(power)
val = int(val)
mod = int(mod)
    powers = [ 0 for i in range(__num_bits(power)) ]
powers[0] = val
for bit in range(1, len(powers)):
powers[bit] = ( powers[bit - 1] * powers[bit - 1] ) % mod
result = 1
for bit in range(len(powers)):
if int(2**bit) & power:
result = (result * powers[bit]) % mod
return result
def __num_bits(x):
    # number of binary digits needed to represent x (1 for x == 0);
    # the previous ceil(log2(x)) under-counted whenever x was an exact power
    # of two (e.g. x == 4 needs 3 bits, not 2), dropping the highest bit
    return max(1, int(x).bit_length())
if __name__ == "__main__":
print( Get_power(2, 5, 10000) )
print( Get_power(2, 20, 1e9+7) )
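    # Hedged check (not in the original): Python's built-in three-argument
    # pow computes the same modular exponentiation.
    print( Get_power(3, 13, 10007) == pow(3, 13, 10007) )  # True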
| true
|
8031e53cb481fdf3381c3167c6a3ea2c4713dc0b
|
Python
|
00void00/DSA
|
/List/reverse.py
|
UTF-8
| 73
| 3.328125
| 3
|
[] |
no_license
|
L = [1,2,4,2,6,5]
print(L)
print("after reversing")
L.reverse()
print(L)
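# Hedged note (not in the original): non-mutating alternatives; since L was
# reversed in place above, these print it back in its original order.
print(L[::-1])
print(list(reversed(L)))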
| true
|
eab468209a68401bd61bb9e41de177fd3cb092cc
|
Python
|
timcurrell/dev
|
/python/IdeaProjects/MyWork/Identifiers.py
|
UTF-8
| 557
| 3.546875
| 4
|
[] |
no_license
|
# Python Identifiers
# Python identifiers must start with an alpha character
# or an underscore. They are case-sensitive. Numerals
# are allowed in the identifier, but no special chars
Tim = 1
tIm = 2
tiM = 3
_tim = 4
_TIM = 5
t_1_m = 6
print(Tim, tIm, tiM, _tim, _TIM, t_1_m)
# By convention, class names start with an uppercase
# letter, private variables start with an underscore,
# and (with rare exception) everything else starts
# with a lowercase letter.
# Python has the usual boat-load of reserved words,
# look them up if you're wondering.
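# Hedged addition (not in the original): the reserved words can be listed
# directly from the standard library instead of looking them up.
import keyword
print(keyword.kwlist)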
| true
|
375af0779b68906e41910b67d47763eb4484f311
|
Python
|
Keonhong/IT_Education_Center
|
/Jumptopy/Jump_to_python/Chapter4/149.py
|
UTF-8
| 81
| 3.46875
| 3
|
[] |
no_license
|
def sum_and_mul(a,b):
return a+b,a*b
result = sum_and_mul(3,4)
print(result)
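# Hedged addition (not in the original): the returned tuple unpacks directly.
s, m = sum_and_mul(3, 4)
print(s, m)  # 7 12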
| true
|
64ec52fbfe03645548d725d097b18fcc992c5187
|
Python
|
ideoforms/pylive
|
/live/exceptions.py
|
UTF-8
| 422
| 2.515625
| 3
|
[] |
no_license
|
class LiveException(Exception):
"""
Base class for all Live-related errors
"""
pass
class LiveConnectionError(LiveException):
"""
Error establishing a connection to AbletonOSC.
"""
pass
class LiveIOError(LiveException):
"""
Error accessing a file descriptor.
"""
pass
class LiveInvalidOperationException(LiveException):
"""
Error performing an operation.
"""
| true
|
fced4fb55f654502b328f79d2972911a8a78c1a3
|
Python
|
otakbeku/elixir-bnf
|
/Contoh_Symbols.py
|
UTF-8
| 3,654
| 3.359375
| 3
|
[] |
no_license
|
contoh3_code = """
defmodule Contoh3 do
def find_fibonacci(nth) do
list = [1, 1]
fib(list, nth)
end
def fib(list, 2) do
Enum.reverse(list)
end
def fib(list, n) do
[first_elem, second_elem | _] = list
fib([first_elem + second_elem | list], n - 1)
end
end
IO.puts(inspect(Contoh3.find_fibonacci(10)))
"""
terminals3 = ["defmodule","alias_name","do","def", "funct_name", "(", ")", "param_name", ",", "var_name", "=", "[", "]", "number", "end", ".", "|", "_", "+", "-"]
symbols3 = [
"defmodule", "alias_name", "do",
"def", "funct_name", "(","param_name", ")", "do",
"var_name", "=", "[","number", ",", "number", "]",
"funct_name", "(", "var_name", ",", "var_name", ")",
"end",
"def", "funct_name", "(", "param_name", ",", "number", ")", "do",
"alias_name", ".", "funct_name", "(","var_name", ")",
"end",
"def", "funct_name", "(", "param_name", ",", "param_name", ")", "do",
"[","var_name", ",", "var_name", "|", "_", "]", "=", "var_name",
"funct_name", "(","[","var_name", "+", "var_name", "|", "var_name", "]", ",", "var_name", "-", "number", ")",
"end",
"end",
"alias_name",".","funct_name", "(", "funct_name", "(", "alias_name", ".", "funct_name", "(", "number", ")", ")", ")", "EOF"]
contoh3 = [contoh3_code, terminals3, symbols3]
# assignment, list, pattern matching, or, and empty
contoh2_code = """
defmodule Contoh2 do
def is_prime_number(a, i) when i<a/2 and rem(a, i) != 0 do
IO.puts("#{a} is prime number")
end
def is_prime_number(a, i) when i<a/2 and rem(a, i) == 0 do
IO.puts("#{a} is not prime number")
end
def find_prime(a) do
k = 1
is_prime_number(a, k+1)
end
end
Contoh2.find_prime(10)
"""
terminals2 = ["defmodule","alias_name","do","def", "funct_name", "(", ")", "parameter", ",", "when","and", "<", "/", "!=", "==", "literal",'"', "#", "{", "}", "var_name", "=", "[", "]", "number", "end", ".", "|", "_", "+", "-"]
symbols2 = [
"defmodule", "alias_name", "do",
"def", "funct_name", "(","param_name", ",", "param_name", ")",
"when", "var_name", "<","var_name","/", "number",
"and", "funct_name","(","var_name", ",", "var_name", ")", "!=", "number", "do",
"alias_name", ".", "funct_name", "(",'"',"#","{", "var_name" ,"}", "literal", "literal", "literal", '"', ")" ,
"end",
"def", "funct_name", "(","param_name", ",", "param_name", ")",
"when", "var_name", "<","var_name","/", "number",
"and", "funct_name","(","var_name", ",", "var_name", ")", "==", "number", "do",
"alias_name", ".", "funct_name", "(",'"',"#","{", "var_name" ,"}", "literal", "literal", "literal", "literal", '"', ")" ,
"end",
"def", "funct_name", "(","param_name", ")", "do",
"var_name", "=", "number",
"funct_name", "(", "var_name", ",", "var_name", "+", "number", ")",
"end",
"end",
"alias_name", ".", "funct_name", "(", "number", ")", "EOF"]
contoh2 = [contoh2_code, terminals2, symbols2]
# parameter list, binary operations (arithmetic and comparison), string interpolation, pattern matching, when in function def
contoh1_code = """
defmodule Contoh1 do
def hello do
IO.puts("Hello world!")
end
end
Contoh1.hello()
"""
# Lexers:
# code : str -> terminals : list[str]
terminals1 = ["defmodule","alias_name","do","def","funct_name",".","(",r'"',r"'", ")", "end", "literal"]
symbols1 = ["defmodule","alias_name","do","def","funct_name","do", "alias_name",".","funct_name","(",'"',"literal","literal",'"',")","end","end", "alias_name", ".", "funct_name", "(", ")", "EOF"]
contoh1 = [contoh1_code, terminals1, symbols1]
| true
|
2e337c5857c3a002e07d10c8b54b66a48d6eef47
|
Python
|
eric93/scheduler
|
/poc/solver.py
|
UTF-8
| 2,415
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/env python2
from z3 import *
num = 0
def genSym():
global num
num += 1
return 'x-' + str(num)
def getSolver(nodes, edges):
s = z3.Solver()
for node_num in nodes:
n = Int('n' + str(node_num))
s.add(n == node_num)
dependency = Function('depends-on',IntSort(),IntSort(),BoolSort());
for e in edges:
s.add(dependency(e[1],e[0]))
# (a depends on b) and (b depends on c) => a depends on c
a = Int(genSym())
b = Int(genSym())
c = Int(genSym())
s.add(ForAll([a,b,c], Implies(And(dependency(a,b),dependency(b,c)),dependency(a,c))))
result = Function('partition', IntSort(),IntSort())
h = Int(genSym())
s.add(ForAll([h], 1 <= result(h)))
d = Int(genSym())
e = Int(genSym())
s.add(ForAll([d,e],\
Implies(dependency(d,e), result(d) >= result(e))))
return (s,result,len(nodes))
def getSolution(solver):
(s, f, num_nodes) = solver
(model, num_partitions) = findMinimal(s,f,num_nodes)
traversals = []
for i in range(num_partitions):
traversals.append(set())
for i in range(1, num_nodes + 1):
cur_trav = model.evaluate(f(i)).as_long()
traversals[cur_trav - 1].add(i)
return traversals
def addConstraint(solver, sets):
(sol, f, n) = solver
for s in sets:
l = list(s)
prev = None
prevNode = None
for node in l:
if prev == None:
prev = f(IntVal(l[0])) == f(IntVal(l[-1]))
else:
prev = And(prev, f(IntVal(node)) == f(IntVal(prevNode)))
prevNode = node
sol.add(Not(prev))
def findMinimal(solver,mapping,domain):
ret = None
maxVal = 0
solver.push()
while True:
if(solver.check() == sat):
ret = solver.model()
maxVal = 0
for i in range(1,domain + 1):
maxVal=max(maxVal, ret.evaluate(mapping(i)).as_long())
print 'Found solution with at most ' + str(maxVal) + ' traversals, iterating...'
qv = Int(genSym())
solver.add(ForAll([qv], mapping(qv) < maxVal))
else:
if ret != None:
print 'Using solution with at most ' + str(maxVal) + ' traversals.'
solver.pop()
return (ret, maxVal)
else:
raise Exception('Could not find a solution')
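if __name__ == '__main__':
    # Hedged usage sketch (not in the original): schedule three nodes with a
    # chain of dependencies and print the resulting traversal partition.
    demo = getSolver([1, 2, 3], [(1, 2), (2, 3)])
    print getSolution(demo)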
| true
|
da779c99d8df716b6fc8ee95451b486092f99a41
|
Python
|
cooper-mj/tabbycat
|
/tabbycat/venues/allocator.py
|
UTF-8
| 7,998
| 2.84375
| 3
|
[] |
no_license
|
import itertools
import logging
import random
from .models import VenueConstraint
logger = logging.getLogger(__name__)
def allocate_venues(round, debates=None):
allocator = VenueAllocator()
allocator.allocate(round, debates)
class VenueAllocator:
"""Allocates venues in a draw to satisfy, as best it can, applicable venue
constraints.
The algorithm naïvely allocates from the debate with the highest-priority
constraint to the debate with the lowest-priority constraint, choosing at
random if more than one is available. This isn't guaranteed to be optimal,
    since a flexible high-priority debate might randomly choose a room demanded
    by a picky low-priority debate.
"""
def allocate(self, round, debates=None):
if debates is None:
debates = round.debate_set_with_prefetches(speakers=False)
self._all_venues = list(round.active_venues.order_by('-priority'))
self._preferred_venues = self._all_venues[:len(debates)]
# take note of how many venues we expect to be short by (for error checking)
self._venue_shortage = max(0, len(debates) - len(self._all_venues))
debate_constraints = self.collect_constraints(debates)
debate_venues = self.allocate_constrained_venues(debate_constraints)
unconstrained_debates = [d for d in debates if d not in debate_venues]
unconstrained_venues = self.allocate_unconstrained_venues(unconstrained_debates)
debate_venues.update(unconstrained_venues)
# this set is only non-empty if there were too few venues overall
debates_without_venues = [d for d in debates if d not in debate_venues]
if len(debates_without_venues) != self._venue_shortage:
logger.error("Expected venue shortage %d, but %d debates without venues",
self._venue_shortage, len(debates_without_venues))
debate_venues.update({debate: None for debate in debates_without_venues})
self.save_venues(debate_venues)
def collect_constraints(self, debates):
"""Returns a list of tuples `(debate, constraints)`, where `constraints`
is a list of constraints. Each list of constraints is sorted by
descending order of priority. Debates with no constraints are omitted
from the dict, so each list of constraints is guaranteed not to be
empty.
The constraints for each debate are just all of the venue constraints
relating to the teams, adjudicators, institutions and division of the
debate."""
all_constraints = {}
for vc in VenueConstraint.objects.filter_for_debates(debates).prefetch_related('subject'):
all_constraints.setdefault(vc.subject, []).append(vc)
debate_constraints = []
for debate in debates:
subjects = itertools.chain(
debate.teams,
debate.adjudicators.all(),
[team.institution for team in debate.teams],
[] if debate.division is None else [debate.division]
)
constraints = [vc for subject in subjects for vc in all_constraints.get(subject, [])]
if len(constraints) > 0:
constraints.sort(key=lambda x: x.priority, reverse=True)
debate_constraints.append((debate, constraints))
logger.info("Constraints on %s: %s", debate, constraints)
debate_constraints.sort(key=lambda x: x[1][0].priority, reverse=True)
return debate_constraints
def allocate_constrained_venues(self, debate_constraints):
"""Allocates venues for debates that have one or more constraints on
        them. `debate_constraints` should be a list of tuples `(debate, constraints)` as returned by `collect_constraints`.
For each debate, it finds the set of venues that meet all its
constraints, or if that set is empty, then it satisfies as many
constraints as it can, with higher-priority constraints taking absolute
precedence over lower-priority constraints. It then chooses a random
venue from the preferred venues in that set, or if there are no
preferred venues, then from all venues in that set.
It runs through debates in descending order of priority, where the
priority of a debate is the priority of its highest-priority constraint.
"""
debate_venues = dict()
while len(debate_constraints) > 0:
debate, constraints = debate_constraints.pop(0)
highest_constraint = constraints.pop(0)
eligible_venues = set(highest_constraint.category.venues.all()) & set(self._all_venues)
# If we can't fulfil the highest constraint, bump it down the list.
if len(eligible_venues) == 0:
logger.debug("Unfulfilled (highest): %s", highest_constraint)
if len(constraints) == 0:
logger.debug("%s is now unconstrained", debate)
continue # Failed all constraints, debate is now unconstrained
new_priority = constraints[0].priority
for i, dc in enumerate(debate_constraints):
if new_priority >= dc[1][0].priority:
break
else:
i = 0
debate_constraints.insert(i, (debate, constraints))
continue
# If we get this far, we have at least one eligible venue.
# Find the set of eligible venues satisfying the best set of constraints.
            satisfied_constraints = []
for constraint in constraints:
                if any(sc.subject == constraint.subject for sc in satisfied_constraints):
continue # Skip if we've already done a constraint for this subject
constraint_venues = set(constraint.category.venues.all())
if eligible_venues.isdisjoint(constraint_venues):
logger.debug("Unfilfilled: %s", constraint)
else:
eligible_venues &= constraint_venues
                    satisfied_constraints.append(constraint)
# If no eligible venues are preferred venues, drop the last preferred venue.
preferred_venues = set(self._preferred_venues)
if eligible_venues.isdisjoint(preferred_venues):
logger.debug("No preferred venues available: %s", debate)
self._preferred_venues = self._preferred_venues[:-1]
else:
eligible_venues &= preferred_venues
# Finally, assign the venue.
venue = random.choice(list(eligible_venues))
debate_venues[debate] = venue
self._all_venues.remove(venue)
if venue in self._preferred_venues:
self._preferred_venues.remove(venue)
logger.debug("Assigning %s to %s", venue, debate)
return debate_venues
def allocate_unconstrained_venues(self, debates):
"""Allocates unconstrained venues by randomly shuffling the remaining
preferred venues."""
if len(debates) - len(self._preferred_venues) != self._venue_shortage:
logger.error("preferred venues to unconstrained debates mismatch: "
"%s preferred venues, %d debates", len(self._preferred_venues), len(debates))
# we'll still keep going, since zip() stops at the end of the shorter list
elif len(debates) != len(self._preferred_venues):
logger.warning("%s preferred venues, %d debates, matches expected venue shortage %s",
len(self._preferred_venues), len(debates), self._venue_shortage)
random.shuffle(debates)
return {debate: venue for debate, venue in zip(debates, self._preferred_venues)}
def save_venues(self, debate_venues):
for debate, venue in debate_venues.items():
logger.debug("Saving %s for %s", venue, debate)
debate.venue = venue
debate.save()
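# Hedged toy sketch (not part of tabbycat): the core greedy idea above on plain
# data, ignoring preferred-venue handling. Highest-priority demands pick first
# from their eligible set; a flexible early pick can still starve a picky
# later one, which is why the class docstring calls the algorithm naive.
def _greedy_assign_demo(demands, venues):
    """demands: list of (priority, allowed_venue_set); returns {index: venue}."""
    assigned = {}
    order = sorted(range(len(demands)), key=lambda i: -demands[i][0])
    for i in order:
        free = demands[i][1] & venues
        if free:
            venue = free.pop()
            venues.discard(venue)
            assigned[i] = venue
    return assigned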
| true
|
06c02822974aa71928c08132b989967b5733c154
|
Python
|
Leedokyeong95/PythonWorks
|
/Ch09/9_2_Selector.py
|
UTF-8
| 931
| 3.359375
| 3
|
[] |
no_license
|
"""
Date    : 2021/03/08
Author  : 이도경
Content : practice Python crawling selectors
"""
import requests as req
from bs4 import BeautifulSoup as bs
# request the page
resp = req.get('http://chhak.kr/py/test1.html')
resp.encoding = 'utf-8' # set this when Korean text comes out garbled
print(resp.text)
# 파싱
dom = bs(resp.text, 'html.parser') # DOM: Document Object Model
tag_tit = dom.html.body.h1
tag_txt = dom.select_one('#txt') # prefix id selectors with '#'
tag_li1 = dom.select_one('ul > li:nth-child(1)') # = first-child
tag_li2 = dom.select_one('ul > li:nth-child(2)')
tag_li_last = dom.select_one('ul > li:last-child')
tag_lis = dom.select('ul > li')
print('tit :', tag_tit.text)
print('txt :', tag_txt.text)
print('li1 text :', tag_li1.text)
print('li2 text :', tag_li2.text)
print('li_last text :', tag_li_last.text)
print('lis :', tag_lis)
for li in tag_lis:
print('li text :', li.text)
| true
|
cd8f92dfe25384ecae34f96c43463b590244115d
|
Python
|
BuLiHanjie/Ml_Gugu
|
/ml_model/sequence_cv_model.py
|
UTF-8
| 1,455
| 2.65625
| 3
|
[] |
no_license
|
from ml_model.cv_model import CvModel
class SequenceCvModel:
def __init__(self, model_classes, params, train_x, train_y):
self.model_classes = model_classes
self.params = params
self.dtrain = (train_x, train_y)
self.models = None
pass
def train(self, train_params, metric):
self.models = list()
init_score = None
for index, (m_class, param, train_param) in enumerate(
zip(self.model_classes, self.params, train_params)):
p = {
'init_score': init_score,
}
for k, v in param.items():
p[k] = v
model = CvModel(self.dtrain[0], self.dtrain[1], m_class, **p)
train_pred = model.train(**train_param)
init_score = train_pred
print('step {} train evaluate {}'.format(index, metric(self.dtrain[1], train_pred)))
self.models.append(model)
return init_score
def predict(self, x, names=None, init_score=None):
for model in self.models:
init_score = model.predict(x, names=names, init_score=init_score)
return init_score
def get_score(self, importance_type='split'):
'''gain, split'''
res = dict()
for model in self.models:
score = model.get_score(importance_type)
for k, v in score.items():
res[k] = res.get(k, 0) + v
return res
| true
|
b9b3ce7939eb1db7ae02e1aff50dfe7db527ff53
|
Python
|
liuiuge/LeetCodeSummary
|
/242.ValidAnagram.py
|
UTF-8
| 648
| 3.40625
| 3
|
[] |
no_license
|
# -*- coding:utf8 -*-
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if len(s) == len(t):
if s == t:
return True
dicts, dictt = {}, {}
for x in s:
dicts[x] = dicts.get(x, 0) + 1
for y in t:
dictt[y] = dictt.get(y, 0) + 1
for k, v in dicts.items():
if dictt.get(k) != v:
return False
return True
return False
if __name__ == "__main__":
sol = Solution()
print(sol.isAnagram("rat", "car"))
print(sol.isAnagram("rat", "tar"))
| true
|
0624c8ab2eb8ae31588096f88a9ba35c05dbf07d
|
Python
|
likaiguo/job-salary-prediction
|
/src/main/python/jobutil.py
|
UTF-8
| 2,922
| 2.59375
| 3
|
[] |
no_license
|
from __future__ import print_function
import math
import random
from pymongo import MongoClient
from scipy.sparse import csr_matrix
from sklearn.linear_model import Lasso, LassoLars
from features import create_sparse_features
class MongoSalaryDB(object):
def __init__(self):
connection = MongoClient()
self.salary_db = connection['salary']
def __getitem__(self, key):
return self.salary_db[key]
def build_docfreqs(data_coll, words):
doc_freqs = [ ]
for word in words:
entry = data_coll.find_one({ '_id': word })
try:
doc_freqs.append(entry['value'])
except TypeError, e:
print(data_coll, word)
return doc_freqs
def create_reverse_data_index(data_coll, key=None, indices=None):
collect_values = key is not None
skip_indices = indices is None
forward_index = [ ]
original_values = { }
for i, row in enumerate(data_coll.find()):
if skip_indices or i in indices:
row_id = row[u'Id']
forward_index.append(row_id)
if collect_values:
original_values[row_id] = row[key]
forward_index.sort()
reverse_index = { }
values = [ ]
for i, row_id in enumerate(forward_index):
reverse_index[row_id] = i
if collect_values:
values.append(original_values[row_id])
return (forward_index, reverse_index, values)
def create_reverse_index(coll, offset=0):
reverse_index = { }
for i, elem in enumerate(coll):
reverse_index[elem] = offset + i
return reverse_index
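# Worked example: create_reverse_index(['a', 'b', 'c'], offset=3)
# returns {'a': 3, 'b': 4, 'c': 5}.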
def select_important_words(in_words, salary, domain, field, num_chunks, alpha):
random.shuffle(in_words)
chunks = split_in_chunks(in_words, num_chunks)
multi_reverse_words = [ ]
for chunk in chunks:
multi_reverse_words.append(create_reverse_index(chunk))
_, reverse_index, y = create_reverse_data_index(salary[domain], u'SalaryNormalized')
def salary_collection(name):
full_name = '_'.join([ domain, field, name ])
return salary[full_name]
out_words = [ ]
for i, reverse_words in enumerate(multi_reverse_words):
doc_freqs = build_docfreqs(salary_collection('docfreq'), chunks[i])
feature_maps = [ (salary_collection('counter'), reverse_words) ]
X = create_sparse_features(reverse_index, feature_maps, doc_freqs)
main_coef = select_main_coefficients(X.toarray(), y, alpha)
chunk = chunks[i]
for index in main_coef:
out_words.append(chunk[index])
print('chunk %d: %d words' % (i, len(main_coef)), map(lambda x: chunk[x], main_coef))
return out_words
def select_main_coefficients(X, values, alpha):
classifier = Lasso(alpha=alpha)
classifier.fit(X, values)
coef = classifier.coef_
main_coef = [ i for i, value in enumerate(coef) if value != 0 ]
return main_coef
def split_in_chunks(arr, num_chunks):
num_elems = len(arr)
chunk_size, r = divmod(num_elems, num_chunks)
chunks = [ ]
left = 0
for i in xrange(num_chunks):
stride = chunk_size + (1 if i < r else 0)
right = left + stride
chunk = arr[left:right]
chunks.append(chunk)
left = right
return chunks
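# Worked example: split_in_chunks(list(range(7)), 3) returns
# [[0, 1, 2], [3, 4], [5, 6]] -- the first r chunks absorb the remainder,
# one extra element each.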
| true
|
d49734d2e00443c23c40b523be33a784e6ce32b0
|
Python
|
RobertNeuzil/python-data-structures
|
/decorators.py
|
UTF-8
| 249
| 3.828125
| 4
|
[] |
no_license
|
def mydecorator(f):
    def wrapper():
        print("Inside of the decorator before calling the function")
        f()
        print("Inside of the decorator after calling the function")
    return wrapper
@mydecorator
def print_name():
    print("Robert")
print_name()
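# Expected output:
#   Inside of the decorator before calling the function
#   Robert
#   Inside of the decorator after calling the function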
| true
|
f958e1f2f99966f41d0368f7e741ceeb8c3dadd9
|
Python
|
lucasgameiroborges/Python-lista-1---CES22
|
/item6.py
|
UTF-8
| 767
| 3.578125
| 4
|
[] |
no_license
|
import sys
import math
def teste_unitario(passou):
linha = sys._getframe(1).f_lineno
if passou:
msg = "linha {0} funciona".format(linha)
else:
msg = "erro na linha {0}".format(linha)
print(msg)
def testes():
teste_unitario(is_prime(2))
teste_unitario(is_prime(3))
teste_unitario(is_prime(11))
teste_unitario(not is_prime(1))
teste_unitario(not is_prime(22))
teste_unitario(not is_prime(20000316))
def is_prime(n):
a = True
if n == 1:
a = False
else:
        for i in range(2, math.floor(n ** 0.5) + 1):  # include the square root, else perfect squares pass
if n % i == 0:
a = False
return a
testes()
""" usando o teorema do numero primo, aproximadamente 5 alunos dos 100 (?)
"""
| true
|
f12403dd791e99b1ae38667b649f775ebb7bdf60
|
Python
|
tedisfree/python
|
/compress-every-directory-into-each/compress-every-directory-into-each.py
|
UTF-8
| 769
| 2.640625
| 3
|
[] |
no_license
|
import sys
import os
import shutil
def main():
if len(sys.argv) < 3:
print('argument error')
exit(1)
root = sys.argv[-2]
hierarchy = sys.argv[-1].split(',')
if os.path.exists('tmp'):
shutil.rmtree('tmp')
os.mkdir('tmp')
cur = os.path.join('tmp')
for h in hierarchy:
cur = os.path.join(cur, h)
os.mkdir(cur)
if not os.path.exists('out'):
os.mkdir('out')
for d in [_ for _ in os.listdir(root) if os.path.isdir(os.path.join(root, _))]:
shutil.copytree(os.path.join(root, d), os.path.join(cur, d))
shutil.make_archive(os.path.join('out', d), 'zip', 'tmp')
shutil.rmtree(os.path.join(cur, d))
shutil.rmtree('tmp')
if __name__ == '__main__':
main()
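# Illustrative invocation (paths below are placeholders):
#   python compress-every-directory-into-each.py /path/to/root sub1,sub2
# Each directory under /path/to/root is zipped into out/<dir>.zip, with the
# directory nested inside the tmp/sub1/sub2 hierarchy before archiving.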
| true
|
0d82aec6c5e359408bce0c00b4024f09ae992f44
|
Python
|
NigrumAquila/py_checkio
|
/storage/mind_switcher.py
|
UTF-8
| 754
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
def mind_switcher(journal):
NIKOLA, SOPHIA = "nikola", "sophia"
mind, log = {}, []
def swap(a, b, add_to_log=True):
mind[a], mind[b] = mind.get(b, b), mind.get(a, a)
if add_to_log:
log.append({a, b})
for a, b in journal:
swap(a, b, add_to_log=False)
robots = set(mind)
while robots:
robot = robots.pop()
robot_mind = mind[robot]
if robot_mind != robot:
swap(NIKOLA, robot)
swap(SOPHIA, robot_mind)
while mind[SOPHIA] != robot:
swap(SOPHIA, mind[SOPHIA])
swap(SOPHIA, robot)
swap(NIKOLA, robot_mind)
if mind[NIKOLA] == SOPHIA:
swap(NIKOLA, SOPHIA)
return log
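# Illustrative call (journal contents are hypothetical):
#   log = mind_switcher((("ash", "gary"), ("gary", "misty")))
# The returned log lists the extra {robot_a, robot_b} switches, routed through
# the helpers "nikola" and "sophia", that put every mind back in its own body.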
| true
|
034dacb085d972648fe8605d4bacd3c037e4997a
|
Python
|
m-mcdougall/Traffic-TimeSeries-DATS6450
|
/Data Preprocessing.py
|
UTF-8
| 3,241
| 2.96875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
#To be run first.
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import datetime
#Set working directory
wd=os.path.abspath('C://Users//Mariko//Documents//GitHub//Traffic-TimeSeries-DATS6450//Data//')
os.chdir(wd)
pd.set_option('display.max_columns', None)
#%%
## Start with loading the raw data
traffic = pd.read_csv('Metro_Interstate_Traffic_Volume.csv', parse_dates=[7])
traffic=traffic.drop_duplicates()
#%%
#Preview the data
plt.figure(figsize=[11,3])
plt.plot(traffic.date_time, traffic.traffic_volume)
plt.title('Traffic Volume Visualization')
plt.ylabel('Traffic Density')
plt.xlabel('Time')
plt.show()
#%%
#Isolate after the break
d = datetime.datetime(2015, 6, 24)
print(d)
traffic_small = traffic[traffic.date_time>d].copy()
plt.figure(figsize=[11,3])
plt.plot(traffic_small.date_time, traffic_small.traffic_volume)
plt.title('Traffic Volume Visualization')
plt.ylabel('Traffic Density')
plt.xlabel('Time')
plt.show()
#%%
#OHE on the weather_main
for weather in traffic_small.weather_main.unique():
traffic_small[weather+'-OHE'] = traffic_small.weather_main
traffic_small[weather+'-OHE'] = (traffic_small[weather+'-OHE']==weather)*1
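# e.g. a row whose weather_main is 'Rain' ends up with Rain-OHE = 1 and 0 in
# every other *-OHE column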
#%%
#Make Holiday a boolean, rather than explicitly listing the holiday
traffic_small['holiday'] = traffic_small.holiday != 'None'
#%%
#Make a boolean if weekday
traffic_small['Weekday'] = traffic_small.date_time.dt.weekday
traffic_small['Weekday'] = traffic_small['Weekday'] < 5
#%%
#Now, collapse by date, since an hour with multiple weather conditions was recorded as separate rows
keepFirst=['holiday', 'temp', 'rain_1h', 'snow_1h', 'clouds_all', 'traffic_volume', 'Weekday', 'weather_main', 'weather_description']
keepSum=[ 'Clear-OHE', 'Clouds-OHE', 'Rain-OHE', 'Haze-OHE', 'Thunderstorm-OHE', 'Mist-OHE',
'Fog-OHE', 'Drizzle-OHE', 'Smoke-OHE', 'Snow-OHE', 'Squall-OHE']
#Create the agg dictionaries
keepFirst={i:'first' for i in keepFirst}
keepSum={i:'sum' for i in keepSum}
#Merge the two dictionaries
keepFirst.update(keepSum)
#Group to gather the dates and collapse the OHE into a single row
traffic_small=traffic_small.groupby('date_time').agg(keepFirst).reset_index()
#%%
#Group some of the Weather columns
traffic_small['Visibiliity-OHE'] = traffic_small['Fog-OHE'] + traffic_small['Mist-OHE'] + traffic_small['Haze-OHE'] + traffic_small['Smoke-OHE']
traffic_small['Precipitation-OHE'] = traffic_small['Thunderstorm-OHE'] + traffic_small['Mist-OHE'] + traffic_small['Drizzle-OHE'] + traffic_small['Snow-OHE'] + traffic_small['Rain-OHE'] + traffic_small['Squall-OHE']
traffic_small['Dry-OHE'] = traffic_small['Clear-OHE'] + traffic_small['Clouds-OHE']
#%%
x=traffic_small.shape[0]
print(f'\n\nDatapoints Before the fill: {traffic_small.shape[0]} ')
#Upsample the data to fill in the missing hours
#Pad to forward-fill the missing values (in a 2, NA, 5, it will fill as 2, 2, 5)
traffic_small=traffic_small.set_index('date_time').resample('H').pad()
print(f'Datapoints After the fill: {traffic_small.shape[0]} ')
print(f'Total points added: {traffic_small.shape[0]-x} ')
#%%
traffic_small.to_csv('Reduced_Data.csv')
| true
|
b0e0afe3ac87be5272f43053ef327b0fd2e629b5
|
Python
|
TorgaMnaj/collected_snipets
|
/multiprocessing+logging/multiprocessing_example.py
|
UTF-8
| 2,651
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
# Copyright 2016 rob <rob@dellix>
#
#
#
import time
import random
import os
# these imports differ between 'threading' and 'multiprocessing' gist
from Queue import Empty
from multiprocessing import Process, Lock, JoinableQueue
def worker_job(q, process, lock, io_test=False, printing=False):
'''
this function reads the queue
and then starts the job which is either an IO test
    or (default) a for loop with some simple calculations
'''
    # keep pulling from the queue while it reports non-empty (that check alone
    # can't be trusted, hence the timeout below)
while not q.empty():
try:
# set a timeout and catch it as this might otherwise block forever
data = q.get(timeout=0.1)
except Empty:
print "queue is empty, will shutdown process %s" % process
return
# lock for better printing and I/O safe
if printing:
lock.acquire()
print "process", process, data
lock.release()
# this is the place to get the work done
if io_test:
# write into a dummy file and remove it afterwards
dummyname = "tmp/" + str(process) + ".json"
with open(dummyname, "w") as f:
f.write(str(data))
os.remove(dummyname)
else:
            # just a random calculation
for i in range(1000):
float(i) + random.random() * data["number"]
# tell the queue that the task is done
q.task_done()
print "process %s finished with data: %s" % (process, data)
return
def main(test_length=100000):
# make a queue instance
q = JoinableQueue()
# define how many processes should be spawned
    num_processes = 8
# make a lock instance for better printing and to make it I/O safe
lock = Lock()
# call the worker to do a job, in this case by sending a dict
for number in range(test_length):
q.put({"number": number})
# start the processes
process_list = []
    for process_number in range(num_processes):
# this line differs compared to the 'threading' gist
multi_process = Process(target=worker_job, args=(q, process_number, lock, ))
process_list.append(multi_process)
# start the worker
[i.start() for i in process_list]
    # wait until all workers are finished
[i.join() for i in process_list if i.is_alive()]
# join the queue as well
q.join()
if __name__ == "__main__":
start = time.time()
main()
print "\n\ntime needed multiprocessing %s seconds" % (time.time() - start)
| true
|
dc0248a99ca20c45a14edbef272648bcd7f907e9
|
Python
|
Parassharmaa/machine-vision-challenge
|
/baggage-fitness-index/modules/deep_vision.py
|
UTF-8
| 4,321
| 2.75
| 3
|
[] |
no_license
|
import cv2 as cv
import numpy as np
import json
from scipy.spatial import distance as dist
class DeepVision:
def __init__(self, model_path, model_config, classes_path, config_file, image):
with open(classes_path, 'r') as f:
self.classes = [line.strip() for line in f.readlines()]
self.colors = np.random.uniform(
0, 255, size=(len(self.classes), 3))
self.model_config = model_config
self.model = model_path
self.image = cv.imread(image)
self.config_file = config_file
self.scale = 0.00392
self.conf_threshold = 0.5
self.nms_threshold = 0.4
def get_output_layers(self, net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1]
for i in net.getUnconnectedOutLayers()]
return output_layers
def detect(self):
net = cv.dnn.readNet(self.model, self.model_config)
image = self.image
Width = image.shape[1]
Height = image.shape[0]
blob = cv.dnn.blobFromImage(
image, self.scale, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(self.get_output_layers(net))
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
indices = cv.dnn.NMSBoxes(
boxes, confidences, self.conf_threshold, self.nms_threshold)
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
class_id = class_ids[i]
# get dimension of bounding box with ppm
yield self.get_dimensions(box, class_id)
def get_label(self, class_id):
return str(self.classes[class_id])
def draw_box(self, class_id, x, y, x_plus_w, y_plus_h, dim_meta):
label = self.get_label(class_id)
color = self.colors[class_id]
(dim_a, dim_b, tr, trbr_x, trbr_y) = dim_meta
cv.rectangle(self.image, (x, y), (x_plus_w, y_plus_h), color, 2)
cv.putText(self.image, "{:.1f}in".format(dim_a),
(int(tr[0] - 15), int(tr[1] - 10)
), cv.FONT_HERSHEY_SIMPLEX,
0.65, (255, 255, 255), 2)
cv.putText(self.image, "{:.1f}in".format(dim_b),
(int(trbr_x + 10), int(trbr_y)), cv.FONT_HERSHEY_SIMPLEX,
0.65, (255, 255, 255), 2)
ims = cv.resize(self.image, (800, 600))
cv.imshow("Image", ims)
cv.waitKey()
cv.destroyAllWindows()
def load_file(self):
data = json.load(open(self.config_file))
return data
def get_dimensions(self, box, class_id):
pixels_per_metric = self.load_file()["ppm"][0]
(x, y, w, h) = box
tl = (x, y)
tr = (x+w, y)
bl = (x, y+h)
br = (x+w, y+h)
(tltr_x, tltr_y) = self.midpoint(tl, tr)
(blbr_x, blbr_y) = self.midpoint(bl, br)
(tlbl_x, tlbl_y) = self.midpoint(tl, bl)
(trbr_x, trbr_y) = self.midpoint(tr, br)
d_a = dist.euclidean((tr[0], tr[1]), (bl[0], bl[1]))
d_b = dist.euclidean((tlbl_x, tlbl_y), (trbr_x, trbr_y))
dim_a = d_a / pixels_per_metric
dim_b = d_b / pixels_per_metric
dim_meta = (dim_a, dim_b, tr, trbr_x, trbr_y)
self.draw_box(class_id, round(
x), round(y), round(x+w), round(y+h), dim_meta)
return dim_a, dim_b
def midpoint(self, point_a, point_b):
return ((point_a[0] + point_b[0]) * 0.5, (point_a[1] + point_b[1]) * 0.5)
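# Illustrative usage (all file paths below are placeholders):
#   dv = DeepVision('yolov3.weights', 'yolov3.cfg', 'classes.txt',
#                   'ppm_config.json', 'bag.jpg')
#   for dim_a, dim_b in dv.detect():
#       print('object is roughly {:.1f} x {:.1f} inches'.format(dim_a, dim_b))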
| true
|
b6d089dd75a52e2cc2d4be98f97b8b18ce7379a9
|
Python
|
alyssapyon/SingHealth-SeleniumTesting
|
/monkey_loggedin_RUN.py
|
UTF-8
| 1,642
| 2.765625
| 3
|
[] |
no_license
|
import os
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import random
from CONSTANTS import *
# seconds to wait between monkey actions (configured as `waittime` below)
# file to store registered accounts
filename = registeredAccounts_filepath
# NEED TO EDIT THIS BASED ON WHERE YOUR CHROMEDRIVER IS
PATH = chromeDriver_filepath
driver = webdriver.Chrome(PATH)
# run for x cycles
x = 50
waittime = 1
driver.get("http://127.0.0.1:8000/logout/")
# username
input_username = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.NAME, "username"))
)
input_username.clear()
input_username.send_keys(admin_username)
# password
input_password = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.NAME, "password"))
)
input_password.clear()
input_password.send_keys(admin_password)
# press enter
input_password.send_keys(Keys.RETURN)
def monkey(x):
for i in range(x):
time.sleep(waittime)
links = driver.find_elements_by_tag_name("a")
link = random.choice(links).get_attribute('href')
        if link:  # get_attribute may return None for anchors without href
try:
if (link != "http://127.0.0.1:8000/logout/"):
print("visited: " + str(link))
driver.get(link)
except:
print("ERROR! Monkey found a problem!")
print("problem link: " + str(link) + "\n")
return
print("monkey has finished running \n")
monkey(x)
| true
|
496bc660814daa2b3e1a54351bab00d7adfb9afe
|
Python
|
peterJbates/euler
|
/148/pascals.py
|
UTF-8
| 356
| 3.578125
| 4
|
[] |
no_license
|
#Solution using Lucas's Theorem
total = 1
def numberToBase(n, b):
if n == 0:
return [0]
digits = []
while n:
digits.append(int(n % b)+1)
n //= b
return digits[::-1]
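# By Lucas's theorem, the number of entries in row n of Pascal's triangle that
# are NOT divisible by 7 equals the product of (d + 1) over the base-7 digits
# d of n; note numberToBase above already stores each digit plus one.
# Worked check: n = 10 is [1, 3] in base 7, so (1+1)*(3+1) = 8 entries of row
# 10 survive -- row 10 mod 7 is 1,3,3,1,0,0,0,1,3,3,1, which has 8 non-zeros.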
for n in range(1, 10**9):
product = 1
string = numberToBase(n, 7)
for digit in string:
product *= digit
total += product
print (total)
| true
|
22ff17bb33fae8a2194481849523ded66d1b5a76
|
Python
|
phobson/wqio
|
/wqio/samples.py
|
UTF-8
| 8,494
| 2.65625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
from matplotlib import pyplot
import seaborn
import pandas
from wqio import utils
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
class Parameter(object):
def __init__(self, name, units, usingTex=False):
""" Class representing a single analytical parameter (pollutant).
(Input) Parameters
------------------
name : string
Name of the parameter.
units : string
Units of measure for the parameter.
usingTex : bool, optional (default = False)
If True, all returned values will be optimized for inclusion
in LaTeX documents.
"""
self._name = name
self._units = units
self._usingTex = usingTex
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def units(self):
return self._units
@units.setter
def units(self, value):
self._units = value
@property
def usingTex(self):
return self._usingTex
@usingTex.setter
def usingTex(self, value):
if value in (True, False):
self._usingTex = value
else:
raise ValueError("`usingTex` must be of type `bool`")
def paramunit(self, usecomma=False):
""" Creates a string representation of the parameter and units.
Parameters
----------
usecomma : bool, optional (default = False)
Toggles the format of the returned string attribute. If True
the returned format is "<parameter>, <unit>". Otherwise the
format is "<parameter> (<unit>)".
"""
if usecomma:
paramunit = "{0}, {1}"
else:
paramunit = "{0} ({1})"
n = self.name
u = self.units
return paramunit.format(n, u)
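    # Example: Parameter("Copper", "ug/L").paramunit() returns "Copper (ug/L)",
    # while paramunit(usecomma=True) returns "Copper, ug/L".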
def __repr__(self):
return "<wqio Parameter object> ({})".format(self.paramunit(usecomma=False))
def __str__(self):
return "<wqio Parameter object> ({})".format(self.paramunit(usecomma=False))
class SampleMixin(object):
def __init__(
self,
dataframe,
starttime,
samplefreq=None,
endtime=None,
storm=None,
rescol="res",
qualcol="qual",
dlcol="DL",
unitscol="units",
):
self._wqdata = dataframe
        self._starttime = pandas.Timestamp(starttime)
self._endtime = pandas.Timestamp(endtime)
self._samplefreq = samplefreq
self._sample_ts = None
self._label = None
self._marker = None
self._markersize = None
self._linestyle = None
self._yfactor = None
self._season = utils.getSeason(self.starttime)
self.storm = storm
@property
def season(self):
return self._season
@season.setter
def season(self, value):
self._season = value
@property
def wqdata(self):
return self._wqdata
@wqdata.setter
def wqdata(self, value):
self._wqdata = value
@property
def starttime(self):
        return self._starttime
@starttime.setter
def starttime(self, value):
        self._starttime = value
@property
def endtime(self):
if self._endtime is None:
            self._endtime = self._starttime
return self._endtime
@endtime.setter
def endtime(self, value):
self._endtime = value
@property
def samplefreq(self):
return self._samplefreq
@samplefreq.setter
def samplefreq(self, value):
self._samplefreq = value
@property
def linestyle(self):
if self._linestyle is None:
self._linestyle = "none"
return self._linestyle
@linestyle.setter
def linestyle(self, value):
self._linestyle = value
@property
def markersize(self):
if self._markersize is None:
self._markersize = 4
return self._markersize
@markersize.setter
def markersize(self, value):
self._markersize = value
@property
def yfactor(self):
if self._yfactor is None:
self._yfactor = 0.25
return self._yfactor
@yfactor.setter
def yfactor(self, value):
self._yfactor = value
def plot_ts(self, ax, isFocus=True, asrug=False):
if self.sample_ts is not None:
if isFocus:
alpha = 0.75
else:
alpha = 0.35
ymax = ax.get_ylim()[-1]
yposition = [self.yfactor * ymax] * len(self.sample_ts)
timeseries = pandas.Series(yposition, index=self.sample_ts)
if asrug:
seaborn.rugplot(self.sample_ts, ax=ax, color="black", alpha=alpha, mew=0.75)
line = pyplot.Line2D(
[0, 0],
[0, 0],
marker="|",
mew=0.75,
color="black",
alpha=alpha,
linestyle="none",
)
else:
timeseries.plot(
ax=ax,
marker=self.marker,
markersize=4,
linestyle=self.linestyle,
color="Black",
zorder=10,
label="_nolegend",
alpha=alpha,
mew=0.75,
)
line = pyplot.Line2D(
[0, 0],
[0, 0],
marker=self.marker,
mew=0.75,
color="black",
alpha=alpha,
linestyle="none",
)
return line
class CompositeSample(SampleMixin):
""" Class for composite samples """
@property
def label(self):
if self._label is None:
self._label = "Composite Sample"
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def marker(self):
if self._marker is None:
self._marker = "x"
return self._marker
@marker.setter
def marker(self, value):
self._marker = value
@property
def sample_ts(self):
if self.starttime is not None and self.endtime is not None:
_sampfreq = self.samplefreq or self.endtime - self.starttime
self._sample_ts = pandas.date_range(
start=self.starttime, end=self.endtime, freq=_sampfreq
)
return self._sample_ts
class GrabSample(SampleMixin):
""" Class for grab (discrete) samples """
@property
def label(self):
if self._label is None:
self._label = "Grab Sample"
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def marker(self):
if self._marker is None:
self._marker = "+"
return self._marker
@marker.setter
def marker(self, value):
self._marker = value
@property
def sample_ts(self):
if self._sample_ts is None and self.starttime is not None:
if self.endtime is None:
self._sample_ts = pandas.DatetimeIndex(data=[self.starttime])
else:
self._sample_ts = pandas.date_range(
start=self.starttime,
end=self.endtime,
freq=self.endtime - self.starttime,
)
return self._sample_ts
_basic_doc = """ {} water quality sample
Container to hold water quality results from many different pollutants
collected at a single point in time.
Parameters
----------
dataframe : pandas.DataFrame
The water quality data.
starttime : datetime-like
The date and time at which the sample collection started.
samplefreq : string, optional
A valid pandas timeoffset string specifying the frequency with which
sample aliquots are collected.
endtime : datetime-like, optional
The date and time at which sample collection ended.
storm : wqio.Storm object, optional
A Storm object (or a subclass or Storm) that triggered sample
collection.
rescol, qualcol, dlcol, unitscol : string, optional
Strings that define the column labels for the results, qualifiers,
    detection limits, and units of measure, respectively.
"""
SampleMixin.__doc__ = _basic_doc.format("Basic")
CompositeSample.__doc__ = _basic_doc.format("Composite")
GrabSample.__doc__ = _basic_doc.format("Grab")
| true
|
48d5227ccf22fbfd5f3261c97713c49a604f37e3
|
Python
|
carines/openclassroom_python
|
/3_poo_dev/autres/test.py
|
UTF-8
| 269
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ClassTest import *
francis = Personne("Dupond", "Francis", 18, "Londres")
print(francis.lieu_residence)
fabien = Personne("Huitelec", "Fabien", 21)
print(fabien.lieu_residence)
help(Personne)
fabien.bla()
fabien.interne()
| true
|
39051058fc5da92c41c40a2537eca93487b9d058
|
Python
|
OpenGov/grid_walker
|
/gridwalker/grids.py
|
UTF-8
| 21,392
| 2.9375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import collections
import numpy as np
import sys
from datawrap import listwrap
class Grid(collections.MutableMapping):
DimensionParam = collections.namedtuple('DimensionParam', ['first', 'last', 'step'])
def __init__(self, grid_type, *dimensions):
'''
Args:
grid_type: Defines the numpy type associated with this grid.
dim_ranges defines the ranges of values in each dimension
for an arbitrary number of dimensions. The type can be
a slice or a tuple representing the values of a slice
(i.e. (-10, 10, 2) starts from -10 and goes to 10 inclusive
with a step size of 2). Note that slices are normally not
inclusive.
'''
self.dim_ranges = []
self.dim_restrictions = []
self.dim_lengths = []
for dim in dimensions:
dim_param = self._convert_dim_to_param(dim)
self.dim_ranges.append(dim_param)
self.dim_restrictions.append(dim_param)
self.dim_lengths.append(
(dim_param.last + dim_param.step - dim_param.first) / dim_param.step)
# Define our grid now as a numpy array
self.grid = np.zeros(self.dim_lengths, dtype=grid_type)
def _convert_dim_to_param(self, dim, default_params=DimensionParam(None,None,1)):
'''
Converts various dimension specifications into DimensionParam
named tuple for use elsewhere in Grid
'''
first = last = step = None
# Short-Circuit for DimensionParams
if isinstance(dim, self.DimensionParam):
return dim
# Check for tuple type
elif isinstance(dim, tuple):
if len(dim) > 3 or len(dim) < 1:
raise self._bad_dim_error(dim)
elif len(dim) == 1:
if dim[0] < 0:
first = dim[0]
else:
last = dim[0]
elif len(dim) == 2:
first, last = dim
elif len(dim) == 3:
first, last, step = dim
# Check for slice
elif isinstance(dim, slice):
first = dim.start
last = dim.stop
step = dim.step
else:
first = last = dim
# Ensure that we don't have None values
if first == None:
first = default_params.first
if last == None:
last = default_params.last
if step == None:
step = default_params.step
# Check for violations
if (first == None or last == None or step == None or
step < 1 or last < first):
raise self._bad_dim_error(dim)
return self.DimensionParam(first, last, step)
def _bad_dim_error(self, dim):
err = "Dimension range '%s' has an invalid or length range definition" % str(dim)
return ValueError(err)
def _check_against_limits(self, index, dimension):
'''
Checks single dimension restrictions on legal indexes.
'''
restrictions = self.dim_restrictions[dimension]
if isinstance(index, slice):
if ((index.start != None and
(index.start < restrictions.first or
index.start > restrictions.last)) or
(index.stop != None and
(index.stop < restrictions.first or
index.stop > restrictions.last)) or
(index.step and index.step % restrictions.step != 0) or
(index.start != None and index.stop != None and
index.step > index.stop - index.start)):
raise KeyError(index)
else:
if (index < restrictions.first or
index > restrictions.last):
raise KeyError(index)
def _convert_to_array_index(self, index, dimension=0, depth=0):
'''
Converts index values from true locations to internal
data structure values. It also handles slice requests
and multi-dimensional requests (including slice multi-dim).
'''
drange = self.dim_ranges[dimension]
restrictions = self.dim_restrictions[dimension]
if listwrap.non_str_len_no_throw(index) > 0:
# Don't allow searches beyond one recursion
if depth > 0:
raise KeyError(index)
rebuilt_index = []
# Rebuild the index element by element
for dim, sub_index in enumerate(index):
fixed_sub = self._convert_to_array_index(sub_index, dimension+dim, depth+1)
rebuilt_index.append(fixed_sub)
return tuple(rebuilt_index)
else:
# Will raise an exception if we're out of bounds
self._check_against_limits(index, dimension)
if isinstance(index, slice):
# Get start index
if index.start != None:
start = (index.start - drange.first) / drange.step
else:
start = 0
# Get stop index
if index.stop != None:
stop = (index.stop + drange.step - drange.first) / drange.step
else:
stop = self.dim_lengths[dimension]-1
# Get step index
if index.step != None:
step = index.step / drange.step
else:
step = None
result = slice(start, stop, step)
else:
result = (index - drange.first) / drange.step
return result
def _get_single_depth(self, multi_index):
'''
Helper method for determining how many single index entries
there are in a particular multi-index.
'''
single_depth = 0
for sub_index in multi_index:
if isinstance(sub_index, slice):
break
single_depth += 1
return single_depth
def __getitem__(self, index):
'''
Getitem is rather complicated to handle the various cases
of dimensional requests and slicing. All of the following
are legal request types:
single index: grid[0] => grid_type object
(or SubGrid if not last dimension)
compound index: grid[0][1] => grid_type object
(or SubGrid if not last dimension)
multi-index: grid[0, 1] => grid_type object
(or SubGrid if not last dimension)
slice: grid[0:1] => SubGrid
slice with step: grid[0:10:2] => SubGrid
compound slice: grid[0:10][2:8:2] => SubGrid
multi-slice: grid[0:10, 2:8:2] => SubGrid
compound mixed (index+slice): grid[0][2:8:2] => SubGrid
multi-mixed (index+slice): grid[0, 2:8:2] => SubGrid
'''
rebuilt_index = self._convert_to_array_index(index)
index_len = listwrap.non_str_len_no_throw(index)
# SubGrid request
if (len(self.dim_ranges) - index_len > 1 or
(index_len > 0 and isinstance(rebuilt_index[-1], slice)) or
isinstance(rebuilt_index, slice)):
# Multi-index request
if index_len > 0:
single_depth = self._get_single_depth(index)
# Single value indices for first single_depth values
if single_depth > 0:
resolved_dims = rebuilt_index[:single_depth]
# Cut the grid at the depth position
return SubGrid(self.grid[resolved_dims], self.dim_ranges[single_depth:],
self.dim_lengths[single_depth:],
self.dim_restrictions[single_depth:], index[single_depth:])
# First index in multi-index is a slice
else:
return SubGrid(self.grid, self.dim_ranges, self.dim_lengths,
self.dim_restrictions, index)
# Slice request
elif isinstance(rebuilt_index, slice):
return SubGrid(self.grid, self.dim_ranges, self.dim_lengths,
self.dim_restrictions, [index])
# Index request, but with remaining dimension restrictions
# Thus still a SubGrid
else:
return SubGrid(self.grid[rebuilt_index], self.dim_ranges[1:], self.dim_lengths[1:],
self.dim_restrictions[1:])
# Specific index for all dimensions
else:
return self.grid.__getitem__(rebuilt_index)
def __setitem__(self, index, value):
'''
Can set on ranges of values as well individual indices
'''
rebuilt_index = self._convert_to_array_index(index)
self.grid.__setitem__(rebuilt_index, value)
def __delitem__(self, index):
'''
Resets the index to 0 ==> same as __setitem__(self, index, 0)
'''
self.__setitem__(index, 0)
def __len__(self):
'''
Gives the length of the highest dimension
'''
restrict = self.dim_restrictions[0]
return (restrict.last - restrict.first + restrict.step) / restrict.step
def _generate_dim_range(self, dim):
'''
Generates a slice of the range for a particular dimension of
the grid. Thus range(self._generate_dim_range()) produces the
keys over a particular range.
'''
restrict = self.dim_restrictions[dim]
start = restrict.first
step = restrict.step
stop = restrict.last+1
return slice(start, stop, step)
def _generate_mapped_grid_ranges(self):
'''
Generates the slice ranges of all valid keys for the grid at
each dimension level.
'''
mapped_grid_ranges = []
for dim in xrange(len(self.dim_restrictions)):
mapped_grid_ranges.append(self._generate_dim_range(dim))
return mapped_grid_ranges
def _generate_true_grid_ranges(self):
true_grid_ranges = []
for dim in xrange(len(self.dim_restrictions)):
restrict = self.dim_restrictions[dim]
drange = self.dim_ranges[dim]
start = self._convert_to_array_index(restrict.first, dim)
step = restrict.step / drange.step
stop = self._convert_to_array_index(restrict.last, dim)+step
true_grid_ranges.append(slice(start, stop, step))
return true_grid_ranges
def get_raw_data_wrapper(self):
dim_ranges = self._generate_true_grid_ranges()
return listwrap.FixedListSubset(self.grid, *dim_ranges)
def __iter__(self):
dim_slice = self._generate_dim_range(0)
return xrange(dim_slice.start, dim_slice.stop, dim_slice.step).__iter__()
def full_iter(self):
'''
Iterates through the entire grid one cell at a time.
This will touch every dimension point in the grid
and returns the key associated with that iteration.
'''
class Link(object):
def __init__(self, cursor, next_cursor, depth, prior, next_link):
self.value = cursor
self.next_value = next_cursor
self.depth = depth
self.prior = prior
self.next = next_link
class GridIter(object):
def __init__(self, grid):
self.grid = grid
self.key_grid = []
self.cursor_links = []
self.cursor = None
self.current_key = None
prior_cursor = None
# Construct our list of cursors and slices restrictions
for dim, restrict in enumerate(grid.dim_restrictions):
restrict_slice = grid._generate_dim_range(dim)
self.key_grid.append(restrict_slice)
# Build a linked list of cursors
self.cursor = Link(restrict_slice.start,
restrict_slice.start+restrict_slice.step,
dim, prior_cursor, None)
self.cursor_links.append(self.cursor)
if prior_cursor != None:
prior_cursor.next = self.cursor
prior_cursor = self.cursor
# Set the very last cursor so that it initialized to
# the correct value for beginning iteration.
if self.cursor != None:
self.cursor.next_value = self.cursor.value
def __iter__(self):
return self
def next(self):
# If our current cursor makes it to a None, we're done
if self.cursor == None:
raise StopIteration()
# Set our cursor value
self.cursor.value = self.cursor.next_value
# Get the slice for our current dimension
cursor_slice = self.key_grid[self.cursor.depth]
# Grab our key if we're at the lowest level of the cursors
if self.cursor.next == None:
self.current_key = tuple(map(lambda c: c.value, self.cursor_links))
else:
self.current_key = None
# Update our value
self.cursor.next_value += cursor_slice.step
# Check if we've rotated through the whole dimension
if self.cursor.value >= cursor_slice.stop:
# Reset our dimension cursor and go back up the
# cursor list by one
self.cursor.next_value = cursor_slice.start
self.cursor = self.cursor.prior
return self.next()
                # Check if we have any children to iterate
elif self.cursor.next != None:
self.cursor = self.cursor.next
return self.next()
# Return the fullkey, value tuple
return self.current_key
return GridIter(self)
def full_iter_keys(self):
return self.full_iter()
def full_iter_values(self):
'''
Like full_iter, but return values instead of keys
'''
class GridValueIter(object):
def __init__(self, grid):
self.grid = grid
self.griditer = grid.full_iter_keys()
def __iter__(self):
return self
def next(self):
fullkey = self.griditer.next()
return self.grid[fullkey]
return GridValueIter(self)
def full_iter_items(self):
'''
Like full_iter, but return key, value pairs instead of just keys
'''
class GridItemIter(object):
def __init__(self, grid):
self.grid = grid
self.griditer = grid.full_iter_keys()
def __iter__(self):
return self
def next(self):
fullkey = self.griditer.next()
return (fullkey, self.grid[fullkey])
return GridItemIter(self)
def arg_max(self, grid_val_func=lambda k,v: v):
'''
Allows for argument maximization across all dimensions of the data.
Args:
grid_val_func: The evaluator for a given cell of the grid.
Evaluators should take the key tuple and value pair and
return a number which scores the given cell.
'''
max_arg = None
max_score = -sys.maxint
for key,val in self.full_iter_items():
check = grid_val_func(key, val)
if (check > max_score):
max_score = check
max_arg = key
return max_arg
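    # Example: grid.arg_max() returns the key tuple of the largest cell value;
    # grid.arg_max(lambda k, v: -abs(v)) finds the cell whose value is closest
    # to zero.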
def arg_min(self, grid_val_func=lambda k,v: v):
'''
Allows for argument minimization across all dimensions of the data.
Args:
grid_val_func: The evaluator for a given cell of the grid.
Evaluators should take the key tuple and value pair and
return a number which scores the given cell.
'''
return self.arg_max(lambda k,v: -grid_val_func(k,v))
def __str__(self):
'''
This can be expensive for high dimension grids.
'''
mapper = self._generate_dim_range(0)
if len(self) <= 100:
return repr(self)
else:
mapitems = [(key, self[key]) for key in xrange(mapper.start,
mapper.start+(100*mapper.step),
mapper.step)]
return str(mapitems)[:-1] + ", ... ]"
def __repr__(self):
'''
This can be expensive for high dimension lists.
'''
mapper = self._generate_dim_range(0)
mapitems = [(key, self[key]) for key in xrange(mapper.start,
mapper.stop,
mapper.step)]
return repr(mapitems)
class SubGrid(Grid):
'''
Wraps the Grid object with a constructor which builds a
subset of the parent grid. This should only be used by
Grid objects. Edits to this grid affect the parent grid
which constructed this SubGrid.
'''
def __init__(self, grid, dim_ranges, dim_lengths, dim_restrictions, add_restrictions=None):
self.dim_ranges = dim_ranges
self.dim_lengths = dim_lengths
self.dim_restrictions = dim_restrictions
self.grid = grid
# Do this check/assignment After the others
if add_restrictions:
            if len(add_restrictions) > len(self.dim_restrictions):
raise ValueError("Length of restrictions exceeds dimension limits of data")
add_restrictions = self._convert_restrictions(add_restrictions)
self.dim_restrictions = self._combine_restrictions(self.dim_restrictions, add_restrictions)
def _convert_restrictions(self, restrictions):
'''
Helper method to convert all restrictions in the input
list into DimensionParam objects.
'''
converted = []
for i, restrict in enumerate(restrictions):
converted.append(self._convert_dim_to_param(restrict, self.dim_restrictions[i]))
return converted
def _combine_restrictions(self, first_restrictions, second_restrictions):
'''
Combines dimensional restrictions from two sources
'''
combined_params = []
for i in xrange(max(len(first_restrictions), len(second_restrictions))):
first_param = first_restrictions[i] if i < len(first_restrictions) else None
second_param = second_restrictions[i] if i < len(second_restrictions) else None
# Check all 4 cases
if first_param == None and second_param != None:
combined_params.append(second_param)
elif first_param != None and second_param == None:
combined_params.append(first_param)
elif first_param != None and second_param != None:
first = max(first_param.first, second_param.first)
last = min(first_param.last, second_param.last)
step = max(first_param.step, second_param.step)
small_step = min(first_param.step, second_param.step)
if step % small_step != 0:
raise ValueError("Dimension restriction step sizes are not "+
"multiples of each other ("+str(first_param.step)+
", "+str(second_param.step)+")")
param = self.DimensionParam(first, last, step)
if last < first:
raise ValueError("Dimension restriction is NULL set "+str(param))
combined_params.append(param)
else:
# This should never be reached
raise ValueError("Could not combine restrictions")
return combined_params
class FloatGrid(Grid):
'''
Defines a grid with floating values at each dimension point.
'''
def __init__(self, *dimensions):
Grid.__init__(self, np.float, *dimensions)
class IntGrid(Grid):
'''
Defines a grid with integer values at each dimension point.
'''
def __init__(self, *dimensions):
Grid.__init__(self, np.int, *dimensions)
class ObjectGrid(Grid):
'''
Defines a grid with arbitrary object values at each dimension point.
'''
def __init__(self, *dimensions):
Grid.__init__(self, object, *dimensions)
# TODO add dimension mappers as optional argument to allow floating or string indexes
| true
|
018723d06f0feae38f1124b1fe71ecd35ebec4f5
|
Python
|
AHowardC/python101
|
/cChallegeP.py
|
UTF-8
| 503
| 3.796875
| 4
|
[] |
no_license
|
#is prime
def is_prime(num):
    if num < 2:
        return False
    else:
        for i in range(2, int(num**0.5) + 1):
            if num % i == 0:
                return False
        return True
#nthPrime number
def nthPrime(n):
    numberofPrimes = 0  # this keeps track of the position
    prime = 1           # this keeps track of the position's number
    while numberofPrimes < n:
        prime += 1
        if is_prime(prime):
            numberofPrimes += 1
return prime
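# Worked check: the primes run 2, 3, 5, 7, 11, 13, ... so nthPrime(6) is 13.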
print(nthPrime(6))
| true
|
39599e13627386153eb5207d19f8dac43ba77b52
|
Python
|
jamilemerlin/exercicios_python
|
/CursoemVideo/e088.py
|
UTF-8
| 676
| 3.734375
| 4
|
[] |
no_license
|
from random import randint
from time import sleep
print('-*-' *15)
print('       MEGA SENA GAME')
print('-*-' *15)
lista = list()
jogo = int(input('How many games do you want me to draw? '))
iniciador = 0
jogos = list()
while iniciador < jogo:
contador = 0
while True:
escolhas = randint(1, 60)
if escolhas not in lista:
lista.append(escolhas)
contador += 1
if contador >= 6:
break
lista.sort()
jogos.append(lista[:])
lista.clear()
iniciador += 1
for numero, lista in enumerate(jogos):
    print(f'Game {numero + 1}: {lista}')
sleep(0.5)
print('-*-'*5, ' GOOD LUCK', '-*-'*6)
| true
|
55ab9f35f18b8dc7aa5493931bfb5d5c0ff8fc44
|
Python
|
Zernach/ML_Flask_Explainer
|
/app.py
|
UTF-8
| 1,241
| 2.578125
| 3
|
[] |
no_license
|
from flask import Flask, render_template
from data import origin_dict, dest_dict, miles_dict
from joblib import load
import pandas as pd
app = Flask(__name__)
trained_machine_learning_model = load('static/assets/pipeline_w_miles.joblib')
@app.route('/')
def home():
return render_template('index.html')
# 4/2/40/1/16
@app.route('/generatepredictions')
@app.route('/generatepredictions/<Quarter>/<Origin>/<Dest>/<NumTicketsOrdered>/<AirlineCompany>')
def predict(Quarter=4, Origin=2, Dest=16, NumTicketsOrdered=1, AirlineCompany=1):
try:
df = pd.DataFrame({
'Quarter': [int(Quarter)],
'Origin': [int(Origin)],
'Dest': [int(Dest)],
'Miles': [miles_dict[f"{origin_dict[int(Origin)]}>{dest_dict[int(Dest)]}"]],
'NumTicketsOrdered': [int(NumTicketsOrdered)],
'AirlineCompany': [int(AirlineCompany)]
})
PricePerTicket = str(trained_machine_learning_model.predict(df)[0])[0:6]
PricePerTicket = f"${PricePerTicket}"
return render_template('predictions.html', pricePerTicket=PricePerTicket)
except:
return render_template('predictions.html', pricePerTicket='RETRY')
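# Illustrative request: GET /generatepredictions/4/2/16/1/1 renders
# predictions.html with a formatted price string such as "$123.45" (the value
# depends on the trained model), falling back to 'RETRY' on any error.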
if __name__ == '__main__':
app.run()
| true
|
fc1b06fdc64e426a4e414f6c6c1303ccb4f824a4
|
Python
|
MJohnHBerry/Python-MUD
|
/PythonApplication1/PythonApplication1/PythonApplication1.py
|
UTF-8
| 5,305
| 3.640625
| 4
|
[] |
no_license
|
import time, random
#---------------------------------------------------------------#
# Classes
#---------------------------------------------------------------#
class Entity:
def __init__ (self, name, attack, defense, speed, hp, max_hp, is_human, first_strike, is_dead, turn_ended):
self.name = name
self.attack = attack
self.defense = defense
self.speed = speed
self.hp = hp
self.max_hp = max_hp
self.is_human = is_human
self.first_strike = first_strike
self.is_dead = is_dead
self.turn_ended = turn_ended
#---------------------------------------------------------------#
# Globals
#---------------------------------------------------------------#
COUNT = 0
#---------------------------------------------------------------#
# Enemies
#---------------------------------------------------------------#
global npc1
npc1 = Entity("Maximus",
10, # Attack
5, # Defense
6, # Speed
100, # HP
100, # Max HP
True, # Is Human Entity bool
False, # Did he win First Strike bool
False, # Is Entity dead bool
False) # Has Turn Ended bool
#---------------------------------------------------------------#
# Character Creation
#---------------------------------------------------------------#
def game_intro():
print("Welcome to the Battle Arena.")
user_input = input("Your name, gladiator? \n")
global player1
player1 = Entity(user_input,
10, # Attack
5, # Defense
5, # Speed
100, # HP
100, # Max HP
True, # Is Human Entity bool
False, # Did he win First Strike bool
False, # Is Entity dead bool
False) # Has Turn Ended bool
print("Step forth, ", player1.name, ".")
print(player1.name, " steps forward onto the sands of blood to face his opponent, ", npc1.name,"...")
#---------------------------------------------------------------#
# Actions
#---------------------------------------------------------------#
def skill_attack(Attacker, Defender):
rand = random.randint(1, 5)
Defender.hp -= Attacker.attack + rand - Defender.defense
print(Attacker.name, "attacks ", Defender.name, " for ", Attacker.attack + rand - Defender.defense, "points of damage!")
# Could be a separate function
# def is_dead()
if(Defender.hp <= 0):
print(Attacker.name," has slain ",Defender.name)
else:
print(Defender.name, "'s HP is at ", Defender.hp)
def skill_defend(defending):
    # The original tried to expire this bonus via the COUNT turn counter, but
    # that check could never fire within a single call, so the buff is simply
    # applied here.
    defending.defense += 3
    print(defending.name, " raises his shield to defend against the oncoming blow.\nDefense increased by 3 for 1 round.")
def skill_psyche(speedup):
    # Same caveat as skill_defend: the original COUNT-based expiry never
    # triggered, so the speed buff is applied unconditionally.
    speedup.speed += 4
#---------------------------------------------------------------#
# Combat
#---------------------------------------------------------------#
def turn_counter():
global COUNT
COUNT = COUNT + 1
print("Current combat turn: ", COUNT)
def get_player_combat_choice(Attacker, Defender):
user_input = input("""
(A)ttack
(D)efend
(P)syche Up
""")
if(user_input == "A"):
skill_attack(Attacker, Defender)
Attacker.turn_ended = True
elif(user_input == "D"):
skill_defend(Attacker)
Attacker.turn_ended = True
elif(user_input == "P"):
skill_psyche(Attacker)
Attacker.turn_ended = True
else:
print("Please choose correctly.")
speed_check(Attacker, Defender)
def npc_combat_choice(Attacker, Defender):
rand = random.randint(1, 3)
if rand == 1 :
skill_attack(Defender, Attacker)
Defender.turn_ended = True
elif rand == 2 :
skill_defend(Defender)
Defender.turn_ended = True
    else:
        print(Defender.name, " psyches himself up.")
        skill_psyche(Defender)  # the original printed but never applied the buff
        Defender.turn_ended = True
def speed_check(Attacker, Defender):
    # The source was truncated mid-identifier here ("get_p"); reconstructed so
    # the faster combatant acts first and the slower one responds.
    order = [Attacker, Defender] if Attacker.speed >= Defender.speed else [Defender, Attacker]
    for entity in order:
        if not entity.turn_ended:
            if entity is Attacker:
                get_player_combat_choice(Attacker, Defender)
            else:
                npc_combat_choice(Attacker, Defender)
def is_dead(entity):
if(entity.hp <= 0):
entity.is_dead = True
print("")
#---------------------------------------------------------------#
# Game Loop
#---------------------------------------------------------------#
game_intro()
while player1.hp > 1 and npc1.hp > 1:
    turn_counter()
    player1.turn_ended = False  # reset so both sides can act every round
    npc1.turn_ended = False
    speed_check(player1, npc1)
| true
|
ce42c965200ba298556e2dfba4b1a99077cc6059
|
Python
|
vkumar19/kaggle
|
/bike_rentals/bike_rental_rforest_2.py
|
UTF-8
| 4,507
| 2.875
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
def weekend(x):
if x.weekday() == 5 or x.weekday() == 6:
return 1
else:
return 0
def year_2011(x):
if x.year == 2011:
return 1
else:
return 0
rental_df = pd.read_csv("train.csv", parse_dates = True)
test_df = pd.read_csv("test.csv", parse_dates = True)
rental_df['datetime'] = pd.to_datetime(rental_df['datetime'])
test_df['datetime'] = pd.to_datetime(test_df['datetime'])
rental_df['date'] = rental_df.datetime.apply(lambda x: x.date())
test_df['date'] = test_df.datetime.apply(lambda x: x.date())
grp_date = rental_df.groupby('date')
grp_test_date = test_df.groupby('date')
def avg_temp(x):
return grp_date.get_group(x)['temp'].mean()
def test_avg_temp(x):
return grp_test_date.get_group(x)['temp'].mean()
def avg_windspeed(x):
return grp_date.get_group(x)['windspeed'].mean()
def test_avg_windspeed(x):
return grp_test_date.get_group(x)['windspeed'].mean()
def avg_humidity(x):
return grp_date.get_group(x)['humidity'].mean()
def test_avg_humidity(x):
return grp_test_date.get_group(x)['humidity'].mean()
rental_df['hour'] = rental_df.datetime.apply(lambda x: x.hour)
test_df['hour'] = test_df.datetime.apply(lambda x: x.hour)
rental_df['avg_temp'] = rental_df['date'].apply(lambda x: avg_temp(x))
test_df['avg_temp'] = test_df['date'].apply(lambda x: test_avg_temp(x))
rental_df['avg_wspeed'] = rental_df['date'].apply(lambda x: avg_windspeed(x))
test_df['avg_wspeed'] = test_df['date'].apply(lambda x: test_avg_windspeed(x))
rental_df['avg_humidity'] = rental_df['date'].apply(lambda x: avg_humidity(x))
test_df['avg_humidity'] = test_df['date'].apply(lambda x: test_avg_humidity(x))
rental_df['2011'] = rental_df.datetime.apply(lambda x: year_2011(x))
test_df['2011'] = test_df.datetime.apply(lambda x: year_2011(x))
df_grouped = rental_df.groupby(['hour'])
df_grpd_test = test_df.groupby(['hour'])
hr_cnt_holiday = [0 for i in range(0,24)]
hr_cnt_workingday = [0 for i in range(0,24)]
for name, group in df_grouped:
norm_hr_holiday = 0
norm_hr_workingday = 0
for i in range(0,len(group.workingday)):
if group.workingday.values[i] == 0:
norm_hr_holiday += 1
hr_cnt_holiday[name] += group['count'].values[i]
else:
norm_hr_workingday += 1
hr_cnt_workingday[name] += group['count'].values[i]
hr_cnt_holiday[name] /= norm_hr_holiday
hr_cnt_workingday[name] /= norm_hr_workingday
rental_df['count_avg_1'] = rental_df[rental_df['workingday'] == 0]['hour'].apply(lambda x: hr_cnt_holiday[x])
rental_df['count_avg_2'] = rental_df[rental_df['workingday'] == 1]['hour'].apply(lambda x: hr_cnt_workingday[x])
rental_df['count_avg'] = rental_df['count_avg_1'].fillna(0) + rental_df['count_avg_2'].fillna(0)
test_df['count_avg_1'] = test_df[test_df['workingday'] == 0]['hour'].apply(lambda x:
hr_cnt_holiday[x])
test_df['count_avg_2'] = test_df[test_df['workingday'] == 1]['hour'].apply(lambda x: hr_cnt_workingday[x])
test_df['count_avg'] = test_df['count_avg_1'].fillna(0) + test_df['count_avg_2'].fillna(0)
ml_df = rental_df.drop(['datetime', 'casual', 'count', 'registered', 'date', 'holiday', 'count_avg_1', 'count_avg_2'], axis = 1)
ml_df_test = test_df.drop(['datetime', 'date', 'holiday','count_avg_1', 'count_avg_2'], axis = 1)
ml_df.info()
ml_df_test.info()
ml_df = np.array(ml_df)
ml_df_test = np.array(ml_df_test)
#enc = preprocessing.OneHotEncoder(categorical_features = [0, 3, 8, 9])
#enc.fit(ml_df)
#temp = enc.transform(ml_df).toarray()
Y = np.array(rental_df['count'])
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=500, oob_score=True)
rf.fit(ml_df, Y)
temp = np.log(rf.predict(ml_df)+1) - np.log(Y+1)
temp = temp*temp
RMSLE = np.mean(temp)
RMSLE = RMSLE**0.5
count = rf.predict(ml_df_test)
df_submission = pd.DataFrame(count, test_df.datetime, columns = ['count'])
pd.DataFrame.to_csv(df_submission ,'randomforest_predict.csv')
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(rf.predict(ml_df))
plt.plot(Y)
plt.show()
from sklearn import cross_validation
from sklearn.metrics import make_scorer
def my_custom_loss_func(ground_truth, predictions):
diff = np.log(ground_truth+1) - np.log(predictions+1)
diff = diff*diff
diff = np.mean(diff)
return diff
my_custom_scorer = make_scorer(my_custom_loss_func, greater_is_better=False)
scores = cross_validation.cross_val_score(rf, ml_df, Y, cv=5, scoring=my_custom_scorer)
| true
|
ba30cdb6983c83f3c4724079ffd66119dc49a6b4
|
Python
|
WHJR-G8/G8-C8_SA_1.1_Pair_Template
|
/Template_Pair.py
|
UTF-8
| 950
| 4.1875
| 4
|
[] |
no_license
|
import turtle
def position(x,y,c):
turtle.penup()
turtle.goto(x,y)
turtle.pendown()
turtle.fillcolor(c)
turtle.begin_fill()
def draw_rect(s1,s2):
for i in range(2):
turtle.forward(s1)
turtle.right(90)
turtle.forward(s2)
turtle.right(90)
turtle.end_fill()
#Student 1
#Call the function "position" and pass the parameters as 10, -10, "red"
for i in range(3):
turtle.forward(100)
turtle.left(120)
turtle.end_fill()
#Call the function "position" and pass the parameters as 85, -40, "blue"
#Call the function "draw_rect" and pass the parameters as 40, 150
#Student 2
#Call the function "position" and pass the parameters as 10, -10, "green"
#Call the function "draw_rect" and pass the parameters as 100, 150
#Call the function "position" and pass the parameters as -5, -40, "blue"
#Call the function "draw_rect" and pass the parameters as 40, 150
turtle.ht()
| true
|
5cfaceb7659503dd6ad2b746df883e4716634d0b
|
Python
|
ldhonorato/PPGEC_PDI_2019.2
|
/Lista06/imagem01.py
|
UTF-8
| 1,278
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 09:54:34 2019
@author: timevisao-mk2
"""
import morfologia
import numpy as np
#import PIL.Image as pil
import cv2
path_original = ["images/image_1a.png", "images/image_1b.png", "images/image_1c.png",
"images/image_1d.png", "images/image_1e.png", "images/image_1f.png"]
img_originais = []
for path in path_original:
img_originais.append(cv2.imread(path, cv2.IMREAD_GRAYSCALE))
#image_1a = 0
#image_1b = 1
#image_1c = 2
#image_1d = 3
#image_1e = 4
#image_1f = 5
res_a_and_b = morfologia.opera(img_originais[0], img_originais[1], morfologia.AND, "res1_a_and_b.png")
res_d_and_e = morfologia.opera(img_originais[3], img_originais[4], morfologia.AND, "res2_d_and_e.png")
not_c = morfologia.opera(img_originais[2], img_originais[2], morfologia.NAND, "res3_not_c.png")
res_not_c_and_res1 = morfologia.opera(res_a_and_b, not_c, morfologia.AND, "res4_not_c_and_res1.png")
res_res2_xor_f = morfologia.opera(res_d_and_e, img_originais[5], morfologia.XOR, "res5_res2_xor_f.png")
not_res4 = morfologia.opera(res_not_c_and_res1, res_not_c_and_res1, morfologia.NAND, "res6_not_res4.png")
final = morfologia.opera(not_res4, res_res2_xor_f, morfologia.NAND, "res7_not_res4_or_res5.png")
| true
|
57b0a47a737e25dcb1d3604b8fea448307a66669
|
Python
|
kukusiik/tour_seller
|
/places/models.py
|
UTF-8
| 848
| 2.796875
| 3
|
[] |
no_license
|
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class Country(models.Model):
name = models.CharField(max_length=50)
class Meta:
verbose_name_plural = 'countries'
def __str__(self):
return self.name
class City(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country)
class Meta:
verbose_name_plural = 'cities'
def __str__(self):
return u'{0},{1}'.format(self.country, self.name)
class Hotel(models.Model):
name = models.CharField(max_length=50)
city = models.ForeignKey(City)
stars = models.SmallIntegerField(
validators=(MinValueValidator(1), MaxValueValidator(5))
)
def __str__(self):
return u'{},{},({} stars)'.format(self.city, self.name, self.stars)
| true
|
1dae3865fa37f3ffce3e106ea3fde6b577cf970e
|
Python
|
OneCircle1/MengProgram2
|
/LPA/nwxlpa.py
|
UTF-8
| 728
| 2.796875
| 3
|
[] |
no_license
|
import networkx as nx
import csv
import matplotlib.pyplot as plt
from networkx.algorithms import community
import random
G = nx.Graph()
with open('/Users/cxy/Downloads/tgh/MengProgram2/pagerank/20180321.csv', 'r') as input_file:
lines = csv.reader(input_file)
for line in lines:
G.add_edge(int(line[0]), int(line[1]))
def label_propagation_community(G):
communities_generator = list(community.label_propagation_communities(G))
m = []
for i in communities_generator:
m.append(list(i))
return m
g=label_propagation_community(G)
for i in range(len(g)):
c = lambda: random.randint(0,255)
nx.draw(G, nodelist=g[i],node_color='#%02X%02X%02X'%(c(),c(),c()), node_size=1)
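# Render the colored communities; the original script built the drawing but
# never displayed it (assumes an interactive matplotlib backend).
plt.show()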
| true
|
f959f6ac061054b1c95d88f9f1ea1c9ee840f0bf
|
Python
|
MachLearn-PlayGround/ml-scikit
|
/basics.py
|
UTF-8
| 1,618
| 3.875
| 4
|
[] |
no_license
|
"""
This file is a custom implementation of the Introduction to Machine Learning
using Scikit-Learn
Scikit data:
classification:
iris
digits
regression:
boston house dataset
"""
from sklearn import datasets, svm
def load_data():
"""Use iris and digits standard datasets provided by scikit"""
iris = datasets.load_iris()
digits = datasets.load_digits()
return iris, digits
def predict_last(loaded_digits, last_predictions):
"""
    Learn from the training data except the last sample, then use that last
    sample to test the prediction
Using the digits datasets, predict given an image,
which digit it represents
Image is a 8 * 8 matrix
sample results 0 - 9 == digits.target
Use an estimator [uses fix(x, y) and predict(T)] to classify unseen samples
:return: predicted value
"""
# SVC implements Support Vector Classification
# gamma and C are parameters of the model and in this case manually set
classifier = svm.SVC(gamma=0.001, C=100.) # The estimator
# Next, this estimator should be fitted/learn to/from the model
# Let's use the whole dataset as a training data except the last which
# we'll predict
classifier.fit(loaded_digits.data[:-1], loaded_digits.target[:-1])
# predict target of last data
predicted_values = classifier.predict(loaded_digits.data[-last_predictions:])
actual_values = loaded_digits.target[-last_predictions:]
return predicted_values, actual_values
if __name__ == "__main__":
iris, digits = load_data()
predicted, actual = predict_last(digits, 10)
print(predicted, actual)
| true
|
b6e16861608e4aae255a10bde7cde28d511ca091
|
Python
|
cash2one/xai
|
/xai/brain/wordbase/nouns/_roller.py
|
UTF-8
| 572
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#class header
class _ROLLER():
def __init__(self,):
self.name = "ROLLER"
self.definitions = [u'a tube-shaped object in a machine that turns over and over in order to carry things along or press them down or together: ', u'a tube-shaped device, often heated, that women use to curl their hair: ', u'a heavy machine used to make surfaces smooth and flat: ', u'a large, long wave on the sea']
self.parents = []
self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1=None, obj2=None):  # avoid mutable default arguments
return self.jsondata
| true
|
dab729c58960a61926b9a64c4122057ca1b899e9
|
Python
|
liwenok/ipaynow_pay_python
|
/ipaynowPythonSdk/ipaynow/md5Faced.py
|
UTF-8
| 774
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:ft=python:et:sw=4:ts=4:sts=4
from ipaynowPythonSdk.ipaynow import utils
__all__ = ['md5calc']
try:
import hashlib
except ImportError:
hashlib = None
if not hashlib:
raise ImportError(
    "ipaynow sdk can't import hashlib. Please check that it is "
    "available in your Python installation."
)
def md5calc(md5source):
try:
m = hashlib.md5()
m.update(md5source)
return m.hexdigest()
except Exception as e:
print(e.args)
return ""
if __name__ == "__main__":
strout = "MD5 test string :"
strtest = ""
strout += strtest
strmd5 = md5calc(strtest.encode('utf-8'))
strout += "\nMD5 result string :" + strmd5
print(strout)
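# Related sketch (not part of the original SDK): hashing a large file in
# chunks with the same hashlib API, so the whole file never sits in memory.
# The path argument is hypothetical.
# def md5_of_file(path, chunk_size=8192):
#     m = hashlib.md5()
#     with open(path, 'rb') as f:
#         for chunk in iter(lambda: f.read(chunk_size), b''):
#             m.update(chunk)
#     return m.hexdigest()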
| true
|
f0485f73589376b7c36d5fbced0880368205d8ea
|
Python
|
storytimeworks/backend
|
/tests/users/test_update_user.py
|
UTF-8
| 5,070
| 2.625
| 3
|
[] |
no_license
|
from app import configure_test_client
from flask import Flask, session
import json, pytest, uuid
@pytest.fixture
def app():
application = Flask(__name__)
return configure_test_client(application)
def test_set_settings(app):
# Be a normal user for this test
with app.session_transaction() as session:
session["user_id"] = 2
# Generate random "names" to test with
first_name = str(uuid.uuid4())[0:8]
last_name = str(uuid.uuid4())[0:8]
data = {
"section": "profile",
"data": {
"first_name": first_name,
"last_name": last_name,
"username": "hello",
"email": "test@test.com"
}
}
# Set the first and last names in settings
res = app.put("/users/2", data=json.dumps(data), content_type="application/json")
assert res.status_code == 200
data = json.loads(res.data)
# Ensure that the names were set correctly
assert data["settings"]["profile"]["first_name"] == first_name
assert data["settings"]["profile"]["last_name"] == last_name
def test_missing_parameters(app):
# Be a normal user for this test
with app.session_transaction() as session:
session["user_id"] = 2
# Try to set settings without passing data
res = app.put("/users/2")
assert res.status_code == 400
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1114
def test_not_authenticated(app):
# Create generic profile data to test with
data = {
"section": "profile",
"data": {
"first_name": "John",
"last_name": "Smith"
}
}
# Try to set the first and last names in settings
res = app.put("/users/2", data=json.dumps(data), content_type="application/json")
assert res.status_code == 401
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1000
def test_nonexistent_user(app):
# Be an admin, who can change a different user's settings
with app.session_transaction() as session:
session["user_id"] = 1
# Create generic profile data to test with
data = {
"section": "profile",
"data": {
"first_name": "John",
"last_name": "Smith"
}
}
# Try to set the first and last names in settings
res = app.put("/users/12094", data=json.dumps(data), content_type="application/json")
assert res.status_code == 404
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1103
def test_not_authorized(app):
# Be a normal user for this test
with app.session_transaction() as session:
session["user_id"] = 2
# Create generic profile data to test with
data = {
"section": "profile",
"data": {
"first_name": "John",
"last_name": "Smith"
}
}
# Try to set the first and last names for a different user in settings
res = app.put("/users/1", data=json.dumps(data), content_type="application/json")
assert res.status_code == 403
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1115
def test_invalid_setting(app):
# Be a normal user for this test
with app.session_transaction() as session:
session["user_id"] = 2
# Create data for a nonexistent section
data = {
"section": "something",
"data": {
"key": "value"
}
}
# Try to set data for an invalid section
res = app.put("/users/2", data=json.dumps(data), content_type="application/json")
assert res.status_code == 400
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1116
def test_invalid_username(app):
# Be a normal user for this test
with app.session_transaction() as session:
session["user_id"] = 2
# Create profile data with an invalid username
data = {
"section": "profile",
"data": {
"first_name": "John",
"last_name": "Smith",
"username": "-asdf"
}
}
# Try setting the invalid username in settings
res = app.put("/users/2", data=json.dumps(data), content_type="application/json")
assert res.status_code == 400
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1107
def test_invalid_email(app):
# Be a normal user for this test
with app.session_transaction() as session:
session["user_id"] = 2
# Create profile data with an invalid email address
data = {
"section": "profile",
"data": {
"first_name": "John",
"last_name": "Smith",
"email": "asdf"
}
}
# Try setting the invalid email address in settings
res = app.put("/users/2", data=json.dumps(data), content_type="application/json")
assert res.status_code == 400
data = json.loads(res.data)
# Ensure the error is correct
assert data["code"] == 1112
| true
|
77a5c872c62edea7877a36f330b41c3d3a5ad18e
|
Python
|
RedHenDev/python
|
/ursina dev/python minecraft/meshcraft current/building_system_PREP.py
|
UTF-8
| 1,407
| 2.734375
| 3
|
[] |
no_license
|
"""
Our building system :) 2022
Happy New Year!
"""
from ursina import Vec3, floor
# *** Place this here, to avoid lazy import.
from config import six_cube_dirs
def checkBuild(_bsite,_td,_camF,_pos,_bp):
# def checkBuild(_td,_bp):
# Adjust build site, since build-tool-entity (bte) offset.
# _bsite += Vec3(0,-0.5,0)
# Store in convenient variables and floor.
# Also -- increment y by 1 - since building above!
# ***
# _bsite = _bp
# x = int(_bsite.x)
# y = int(_bsite.y)
# z = int(_bsite.z)
dist = _bsite - _pos
mouseInWorld = _pos + _camF * dist.length()
# ***
mouseInWorld -= _camF * 0.75
x = round(mouseInWorld.x)
y = floor(mouseInWorld.y)
z = round(mouseInWorld.z)
# Oh, but what if we're trying to build inside bte?
# Build 1 above current block!
if _bsite == Vec3(x,y,z):
y+=1
# ***
# _bp.position=Vec3(x,y,z)
# Make sure no block here already...
# ***
existing = _td.get((x, y, z))
if existing is not None and existing[0] != 'g':
print("Can't build here, sorry :(")
return None
# If we're here, we can build. Yessssss.
return Vec3(x,y,z)
def gapShell(_td,_bsite):
# from config import six_cube_dirs
for i in range(6):
p = _bsite + six_cube_dirs[i]
if _td.get((floor(p.x),floor(p.y),floor(p.z))) is None:
_td[(floor(p.x),floor(p.y),floor(p.z))]=['g',0]
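# For reference (an assumption, since config.py is not shown here):
# six_cube_dirs presumably holds the six axis-aligned unit offsets that
# gapShell uses to visit a block's face neighbours, e.g.:
# six_cube_dirs = [Vec3(1, 0, 0), Vec3(-1, 0, 0), Vec3(0, 1, 0),
#                  Vec3(0, -1, 0), Vec3(0, 0, 1), Vec3(0, 0, -1)]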
| true
|
ba94129cdc544cb9970245da5f7414fe81b495ba
|
Python
|
ISPritchin/Olympiad
|
/800/Оформлено/Место в таблице.py
|
UTF-8
| 200
| 2.96875
| 3
|
[] |
no_license
|
n = int(input())
ss = []
for _ in range(n):
    s = sum(map(int, input().split()))  # total score for this row
    ss.append(s)
    if _ == 0:
        t = s  # remember the first row's total
r = 1
for i in range(1, n):
    if ss[i] > t:  # every strictly larger total pushes the first row down
        r += 1
print(r)  # final place of the first row in the table
| true
|
e9e785c1e0404420aa8117faea8ef5f425fb70dc
|
Python
|
Damage-group/Max-Damage
|
/py/prob2.py
|
UTF-8
| 7,901
| 2.859375
| 3
|
[] |
no_license
|
import sys
from input import *
from algorithms import *
from measures import *
import numpy
import sequences
def print_help(argv):
print '''
TROLOLOGGER X9000 - HYPER EDITION v3.2
Usage: %s [args]
Example: %s data=./data.txt meta=./meta.txt t=0.2 c=0.3
You can either edit settings.py to set the algorithms' variables or use
command line arguments (key=value). Command line arguments override
attribute values defined in settings.py at runtime.
General command line arguments are:
data : data filepath. One row per transaction, items separated by whitespace.
meta : meta filepath. One row per meta information entry. Entries are
grouped together by id-column.
strip : Only take account transactions that have more items than this.
This transaction stripping is done after any meta-file-specific
restrictions take place.
t : Minimum support for itemsets considered frequent. ie. \'0.2\'
c : Minimum confidence of accepted rules. ie. \'0.1\'
closed : Use closed itemsets. True/False
max : Use maximal itemsets. True/False
lift : use lift(interest factor) to qualify the rule
IS : use IS measure to qualify the rule
MI : output the mutual information of the rule
CF : output the certainty factor of the rule
Meta-file-specific arguments:
\'column=restriction\'
column : Column name as specified in settings.py META_COLUMN_NAMES.
restriction : Restriction applied to this column. Only variables that
pass all the restrictions are accepted as transaction items.
At the moment there are two types of restrictions:
exact : Take only those variables which have exactly this value in them.
ie. id=100.
interval : Take values which have values in the given interval.
Interval is parsed by splitting the value from '-'. For
example year=1999-2005 would yield closed interval [1999, 2005].
''' % (argv[0], argv[0])
def validate_argv(argv):
''' Validate that command line arguments are in right format. '''
for x in range(1, len(argv)):
arg = argv[x].split("=", 2)
if arg[0] == 'help':
print_help(argv)
sys.exit(0)
if len(arg) !=2:
print "Argument \'%s\' in wrong format. Use \'key=value\'." % (arg[0])
print "For more instructions try \'%s help\'" % (argv[0])
sys.exit(0)
def main(argv):
validate_argv(argv)
# Read command line arguments to settings.
read_argv(argv)
print "\n"
# Read and transform data
all_meta = read_meta_file()
print "\n"
pruned_meta = prune_variables(all_meta)
print "%d variables after applying restrictions." % (len(pruned_meta))
#testInterestingSequences()
seqs = transactionsFromFile(settings.DATA_FILE)
fids = [var.fid for var in pruned_meta]
matrix = to01Matrix(fids, transactionsFromFile(settings.DATA_FILE))
stripped_rows = []
for index, row in enumerate(matrix):
if row.sum(0) > settings.STRIP:
stripped_rows.append(index)
matrix = matrix[stripped_rows,:]
total = numpy.sum(numpy.sum(matrix, 0), 0)  # avoid shadowing the builtin sum
print "%d transactions after removing transactions with <= %d items" % (matrix.shape[0], settings.STRIP)
print "Mean of items in transactions: %f " % (float(total) / float(matrix.shape[0]))
# Calculate frequent itemsets and generate rules.
frequent_itemsets = ap_frequent_itemsets(matrix, settings.FREQUENT_ITEMSET_THRESHOLD)
res = []
if settings.CLOSED_ITEMSETS:
frequent_itemsets = ap_closed_frequent_itemsets(frequent_itemsets)
print "\n *** Closed frequent item sets with minimum support %f **** \n" % (settings.FREQUENT_ITEMSET_THRESHOLD)
elif settings.MAXIMAL_ITEMSETS:
frequent_itemsets = ap_max_frequent_itemsets(frequent_itemsets)
print "\n *** Maximal frequent item sets with minimum support %f **** \n" % (settings.FREQUENT_ITEMSET_THRESHOLD)
else:
print "\n *** Frequent item sets with minimum support %f **** \n" % (settings.FREQUENT_ITEMSET_THRESHOLD)
for k in frequent_itemsets:
for S in frequent_itemsets[k]:
res.append( (S, frequent_itemsets[k][S].frequency) )
res.sort(cmp=lambda a,b: -1 if a[1] < b[1] else 1 if a[1] > b[1] else 0)
for S,f in res:
print "%s (%f)" % (' '.join(["%s:%s" % (pruned_meta[j].fid, pruned_meta[j].name) for j in S]), f)
#rule generation fails for closed/maximal sets due how frequency computation works.
#should we generate rules for all frequent item sets only anyway?
if not(settings.CLOSED_ITEMSETS or settings.MAXIMAL_ITEMSETS):
print "\n *** Rules with minimum confidence %f **** \n" % (settings.RULE_MIN_CONFIDENCE)
for k in xrange(2, len(frequent_itemsets)):
rules = ap_rule_generation(frequent_itemsets, k, settings.RULE_MIN_CONFIDENCE)
for rule in rules:
print "%s --> %s %f" % (" ".join([pruned_meta[i].name for i in rule[0]]), " ".join([pruned_meta[i].name for i in rule[1]]), rule[2])
if settings.Lift:
Lift_value = interest_factor(frequent_itemsets,rules)
Lift_value.sort(cmp=lambda a,b: -1 if a[2] < b[2] else 1 if a[2] > b[2] else 0)
print("******Interest Factor Measure result:******")
for item in Lift_value:
print "%s --> %s %f" % (" ".join([pruned_meta[i].name for i in item[0]]), " ".join([pruned_meta[i].name for i in item[1]]), item[2])
elif settings.IS:
IS_value = IS(frequent_itemsets,rules)
IS_value.sort(cmp=lambda a,b: -1 if a[2] < b[2] else 1 if a[2] > b[2] else 0)
print("******IS measure result:******")
for item in IS_value:
print "%s --> %s %f" % (" ".join([pruned_meta[i].name for i in item[0]]), " ".join([pruned_meta[i].name for i in item[1]]), item[2])
elif settings.MutualInfo:
MI_value = mutual_information(frequent_itemsets,rules)
MI_value.sort(cmp=lambda a,b: -1 if a[2] < b[2] else 1 if a[2] > b[2] else 0)
print("******Mutual Information Measure result:******")
for item in MI_value:
print "%s --> %s %f" % (" ".join([pruned_meta[i].name for i in item[0]]), " ".join([pruned_meta[i].name for i in item[1]]), item[2])
elif settings.CertaintyFactor:
    # NOTE: this reuses mutual_information(); it presumably should call a
    # dedicated certainty-factor measure from measures.py instead.
    CF_value = mutual_information(frequent_itemsets, rules)
CF_value.sort(cmp=lambda a,b: -1 if a[2] < b[2] else 1 if a[2] > b[2] else 0)
print("******Certainty Factor Measure result:******")
for item in CF_value:
print "%s --> %s %f" % (" ".join([pruned_meta[i].name for i in item[0]]), " ".join([pruned_meta[i].name for i in item[1]]), item[2])
def testInterestingSequences(seqs):
#interesting sequences
#s9 = [(103,), (220,), (222,)]
s9 = [(103,220), (222,)]
#s10 = [(103,), (227,), (222,)]
s10 = [(103,227), (222,)]
#s11 = [(104,), (220,)]
s11 = [(104,220)]
i, j, k = 0,0,0
s9list = []
s10list = []
s11list = []
for s in seqs:
if sequences.is_subsequence(s9, s):
i = i+ 1
s9list.append(s)
if sequences.is_subsequence(s10, s):
j = j + 1
s10list.append(s)
if sequences.is_subsequence(s11, s):
    k = k + 1
    s11list.append(s)
print "%d %d %d" % (i, j, k)
print s9list
print s10list
print s11list
if __name__ == '__main__':
main(sys.argv)
| true
|
4a81bbaf4124bc2aa83608251ae36bf8a886800d
|
Python
|
amilkh/haircolorGAN
|
/data/hair_testmode_dataset.py
|
UTF-8
| 2,515
| 2.921875
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
import torch
class HairTestModeDataset(BaseDataset):
"""A dataset class for dataset of pairs {portrait image, target hair color}.
It assumes that the directory '/path/to/data/test' contains image pairs in the form of {portrait image,color}.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_images_hair = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.image_hair_paths = sorted(make_dataset(self.dir_images_hair, opt.max_dataset_size)) # get image paths
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.transform = get_transform(opt)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary with the following entries:
A (tensor) - - an input image
B (tensor) - - a dummy tensor that is not used for test mode
orig_color_A_img (tensor) - - a dummy tensor that is not used for test mode
orig_color_B_img (tensor) - - a dummy tensor that is not used for test mode
target_hair_color_img (tensor) - - the target hair color for the input image
path (str) - - image path
"""
# read an image given a random integer index
path = self.image_hair_paths[index]
img_and_hair = Image.open(path).convert('RGB')
# split img_and_hair image into two images (one of them of the target hair color)
w, h = img_and_hair.size
w2 = int(w / 2)
img = img_and_hair.crop((0, 0, w2, h))
hair = img_and_hair.crop((w2, 0, w, h))
A = self.transform(img)
target_hair_color_img = self.transform(hair)
dummy = torch.zeros(1,1,1,1)
return {'A': A, 'B': dummy, 'orig_color_A_img': dummy, 'orig_color_B_img': dummy, 'target_hair_color_img':target_hair_color_img, 'path': path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.image_hair_paths)
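# Usage sketch (illustrative; 'opt' would come from the project's options
# parser, a BaseOptions subclass as noted in __init__ above):
# from torch.utils.data import DataLoader
# dataset = HairTestModeDataset(opt)
# loader = DataLoader(dataset, batch_size=1, shuffle=False)
# for batch in loader:
#     img, target = batch['A'], batch['target_hair_color_img']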
| true
|
8d2ae4ab4efed2c72672327390fe912c63efae41
|
Python
|
irish3725/Korby
|
/ui.py
|
UTF-8
| 757
| 3.0625
| 3
|
[] |
no_license
|
import os
import final_game as game
class ui:
## @param player - player object that the ui interacts with
def __init__(self, player):
self.p = player
## main ui loop
def run(self):
# value to read input into
val = ''
while val != 'q' and val != 'quit' and val != 'exit':
# output = self.p.location.getDirections() + '(w/a/s/d/f/r/q) > '
output = '(w/a/s/d/f/r/q) > '
val = input(output).lower()
# adding an event
if val in ('w', 'a', 's', 'd', 'f', 'r'):
os.system('cls' if os.name == 'nt' else 'clear')
print('\n\n' + self.p.action(val))
val = ''
| true
|
0087d82819e5502e0d1a966508eededa72650752
|
Python
|
jingong171/jingong-homework
|
/马一舒/第一次作业/第一次作业 金工17-1 2017310417 马一舒/第一题.py
|
UTF-8
| 376
| 2.78125
| 3
|
[] |
no_license
|
Python 3.5.3 (v3.5.3:1880cb95a742, Jan 16 2017, 15:51:26) [MSC v.1900 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> w=52
>>> h=1.68
>>> t=w/h**2
>>> t
18.42403628117914
>>> if t<18:
	print ("Underweight.")
elif t<=25:
	print ("Normal weight.")
elif t<=27:
	print("Overweight.")
elif t>27:
	print("Obese.")
Normal weight.
>>>
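# Script-form sketch of the same calculation (the session above is an
# interactive transcript; the thresholds follow its own branches):
# def bmi_category(weight_kg, height_m):
#     t = weight_kg / height_m ** 2
#     if t < 18:
#         return "Underweight."
#     elif t <= 25:
#         return "Normal weight."
#     elif t <= 27:
#         return "Overweight."
#     return "Obese."
# bmi_category(52, 1.68)  # -> "Normal weight." (t is about 18.42)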
| true
|
59debff4975793203d908fe982e84f57856143a0
|
Python
|
xiaoch2004/mandarin_tone_tracker
|
/plotresults.py
|
UTF-8
| 4,276
| 2.78125
| 3
|
[] |
no_license
|
import os
import matplotlib.pyplot as plt
import numpy as np
import crepe
from scipy.io.wavfile import read
import base64
INT16_FAC = (2**15)-1
INT32_FAC = (2**31)-1
INT64_FAC = (2**63)-1
norm_fact = {'int16': INT16_FAC, 'int32': INT32_FAC,
'int64': INT64_FAC, 'float32': 1.0, 'float64': 1.0}
def wavread(filename):
"""
Read a sound file and convert it to a normalized floating point array
filename: name of file to read
returns fs: sampling rate of file, x: floating point array
"""
if (os.path.isfile(filename) == False):
# raise error if wrong input file
raise ValueError("Input file is wrong")
fs, x = read(filename)
if (len(x.shape) != 1):
# raise error if more than one channel
raise ValueError("Audio file should be mono")
# convert audio into floating point number in range of -1 to 1
x = np.float32(x)/norm_fact[x.dtype.name]
return fs, x
def processTable(table):
# Abandon the low-confidence parts
confidenceLevel = 0.30
target_mean = 300
table[np.where(table[:, 2] < confidenceLevel), 1] = np.nan
# Abandon the low-frequency parts
table[np.where(table[:, 1] < 100.0), 1] = np.nan
table[np.where(table[:, 1] > 700.0), 1] = np.nan
# Adjust the mean
mean = np.nanmean(table[:, 1], axis=0)
std = np.nanstd(table[:, 1], axis=0)
diff = target_mean - mean
table[:, 1] = (table[:, 1] + diff)
return table
def getFrameEnergy(time, x, sr):
timestamps = time*sr
timestamps = timestamps.astype(int)
timestamps = np.append(timestamps, x.size-1)
energy = [np.mean(abs(x[timestamps[i]:timestamps[i+1]])**2)
for i in range(timestamps.size-1)]
print(energy)
return np.array(energy)
def preProcess(x, sr, time, freq, confi):
if (time.size != freq.size) or (time.size != confi.size):
raise ValueError(
'time, frequency and confidence array sizes does not match')
confidenceThreshold = 0.30
target_mean = 300
energyThreshold = 5e-4
# Abandon low energy values
E = getFrameEnergy(time, x, sr)
freq[np.where(E < energyThreshold)] = np.nan
# Abandon low confidence values
freq[np.where(confi < confidenceThreshold)] = np.nan
# Abandon low frequency values
freq[np.where(freq < 100.0)] = np.nan
freq[np.where(freq > 700.0)] = np.nan
# Adjust the mean
mean = np.nanmean(freq)
diff = target_mean - mean
freq = freq + diff
return (time, freq)
def plotFourFigures(paths):
plotnum = 0
plt.figure()
for file in paths:
print(file)
table = np.genfromtxt(file, delimiter=',')
table = table[1:]
time = table[:, 0]
table = processTable(table)
# if plotnum <= 4:
# print(table)
# print("mean:", np.nanmean(table[:, 1]))
plt.subplot(2, 2, plotnum+1)
plt.plot(time, table[:, 1])
plt.title(file[8:-7])
plt.xlim(time[0], time[-1])
plt.ylim(100, 600)
plotnum += 1
plt.show()
def wavPitchCrepeAnal(filepath):
sr, x = wavread(filepath)
time, frequency, confidence, activation = crepe.predict(
x, sr)
return (time, frequency, confidence, x, sr)
def genFig(filepath):
time, frequency, confidence, x, sr = wavPitchCrepeAnal(filepath)
filename = os.path.basename(filepath)
dirname = os.path.dirname(filepath)
time, frequency = preProcess(x, sr, time, frequency, confidence)
output = np.column_stack((time, frequency))
# np.savetxt('result/' + filename[:-3] + 'csv', output, delimiter=',')
plt.plot(time, frequency)
plt.xlim(time[0], time[-1])
plt.ylim(100, 500)
plt.axis('off')
plt.savefig('docs/resource/figure/'+filename[:-3] + 'png')
plt.close()
if __name__ == "__main__":
test = False
# dirname = 'docs/resource/audio/'
dirname = 'audio-archive/chinesewoman1/'
files = os.listdir(dirname)
paths = [dirname+file for file in files]
if test:
dirname = 'test/'
files = os.listdir(dirname)
paths = [dirname+file for file in files]
plotFourFigures(paths[0:4])
else:
filepath = paths[1]
print(filepath)
for path in paths:
genFig(path)
| true
|
d2f40281ad2e5d3dbcf60e2b656ac0a5ed19cf34
|
Python
|
MRannala/Tab-removal
|
/Remove_Tabs.py
|
UTF-8
| 1,162
| 3.5
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 20 11:32:57 2016
This script will allow input of a file. It then creates a second file where tabs have been replaced with single
spaces. The new filename will be the same as the initial but with a '+' appended.
@author: Magnus Rannala
"""
import os
import sys
from tkinter import filedialog  # Python 3 home of the old tkFileDialog module
# Open interactive file-picker window
filepath = filedialog.askopenfilename()
filename = os.path.basename(filepath)
# filename = "test1.txt"
# Test if the chosen file exists and exit if not
if not os.path.isfile(filepath):
    print(" ")
    print("No Such File in Directory! ")
    print(" ")
    sys.exit()
data_array = []
# open file for read (use the full path, not just the basename)
ifile = open(filepath, 'r')
for line in ifile:
data_array.append(line)
# close input file
ifile.close()
# Create new file name using + appended to input filename
base, ext = filename.rsplit(".", 1)  # split only on the final dot
ofilename = base + "+." + ext
# open output file
ofile = open(ofilename, 'w')
for i in range(len(data_array)):
ofile.write(data_array[i].replace("\t", " "))
ofile.close()
| true
|
6e5192545016a992d2525f9baae53d513629524b
|
Python
|
tripathyas/Online-Hackathon
|
/form_group.py
|
UTF-8
| 499
| 3.34375
| 3
|
[] |
no_license
|
def solution(A):
# write your code in Python 2.7
group = []
temp = A[0]
for i in range(1, len(A)):
if A[i] > A[i - 1]:
group.append((A[i - 1], temp))
temp = A[i]
group.append((A[-1], temp))
group.sort()
print(group)
for j in range(1, len(group)):
item = group[j]
prev = group[j - 1]
if prev[1] > item[0]:
return 1
return len(group)
print(solution([1, 5, 4, 9, 8, 7, 14, 13, 15, 20, 25, 21]))
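# Worked trace of the sample call above: each appended tuple is
# (last element, first element) of a maximal non-increasing run, i.e.
# (run minimum, run maximum). The runs are
# [1] [5,4] [9,8,7] [14,13] [15] [20] [25,21], giving
# (1,1) (4,5) (7,9) (13,14) (15,15) (20,20) (21,25) after sorting.
# No group's maximum exceeds the next group's minimum, so the answer is 7.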
| true
|
50a253a3200dd5c16f0ad68df5784cc0bf180dac
|
Python
|
michaeltnguyen/Pokedex
|
/database/scripts/create_pokedex_sql.py
|
UTF-8
| 14,584
| 2.8125
| 3
|
[] |
no_license
|
import json
import csv
import sqlite3
import os
# output database
path = '../../app/src/main/assets/databases/pokedex.db'
def create_gens(cursor):
# create generations table
cursor.execute('create table if not exists gens('
'id integer primary key, '
'name text not null unique '
');')
# add a few entries
cursor.execute('insert or ignore into gens(name)'
'values ("xy"), ("oras");')
def create_abilities(cursor):
abilities = '../data/abilities.json'
# create abilities table
cursor.execute('create table if not exists abilities('
'id integer primary key, '
'name text not null unique, '
'description text not null, '
'gen_id integer not null, '
'foreign key(gen_id) references gens(id)'
');')
with open(abilities) as f:
data = json.load(f)
for p in data['result']:
name = p['name']
description = p['description']
gen = (p['gen'],)
cursor.execute('select id from gens where name=?', gen)
gen_id = cursor.fetchone()[0]
values = (name, description, gen_id)
cursor.execute('insert or ignore into abilities(name, description, gen_id)'
'values (?,?,?);', values)
def create_types(cursor):
types = '../data/types.csv'
cursor.execute('create table if not exists types('
'id integer primary key, '
'name text not null unique, '
'grad_start_color text not null, '
'grad_end_color text not null, '
'border_color text not null'
');')
with open(types) as f:
data = csv.reader(f)
next(data) #skip header row
for t in data:
name = t[0]
grad_start_color = t[1]
grad_end_color = t[2]
border_color = t[3]
values = (name, grad_start_color, grad_end_color, border_color)
cursor.execute('insert or ignore into types(name, grad_start_color,'
'grad_end_color, border_color) '
'values (?,?,?,?);', values)
def create_items(cursor):
machines = '../data/machines.csv'
cursor.execute('create table if not exists machines('
'id integer primary key, '
'name text not null, '
'move_id integer not null, '
'location text not null, '
'gen_id integer not null, '
'youtube_id text, '
'start_time integer, '
'end_time integer, '
'foreign key(gen_id) references gens(id), '
'foreign key(move_id) references moves(id)'
');')
with open(machines) as f:
data = csv.reader(f)
next(data) #skip header row
for m in data:
name = m[0]
move = m[1]
location = m[2]
gen = m[3]
youtube_id = m[4]
start_time = m[5]
end_time = m[6]
cursor.execute('select id from moves where name=?', (move,))
move_id = cursor.fetchone()[0]
cursor.execute('select id from gens where name=?', (gen,))
gen_id = cursor.fetchone()[0]
values = (name, move_id, location, gen_id, youtube_id, start_time, end_time)
cursor.execute('insert or ignore into machines('
'name, move_id, location, gen_id, youtube_id, start_time, end_time)'
'values (?,?,?,?,?,?,?);', values)
def create_moves(cursor):
moves = '../data/moves.csv'
cursor.execute('create table if not exists move_categories('
'id integer primary key, '
'name text not null unique);')
cursor.execute('insert or ignore into move_categories(name)'
'values ("Special"), ("Physical"), ("Non-Damaging");')
cursor.execute('create table if not exists moves('
'id integer primary key, '
'name text not null unique, '
'type_id integer not null, '
'accuracy integer not null, '
'power integer not null, '
'pp integer not null, '
'move_category_id integer not null, '
'description text not null, '
'foreign key(type_id) references types(id), '
'foreign key(move_category_id) references move_categories(id)'
');')
with open(moves) as f:
data = csv.reader(f)
#no header row
for m in data:
name = m[0]
move_type = m[1]
accuracy = m[2]
power = m[3]
pp = m[4]
move_category = m[5]
description = m[6]
cursor.execute('select id from types where name=?', (move_type,))
move_type_id = cursor.fetchone()[0]
cursor.execute('select id from move_categories where name=?', (move_category,))
move_category_id = cursor.fetchone()[0]
values = (name, move_type_id, accuracy, power, pp, move_category_id, description)
cursor.execute('insert or ignore into moves('
'name, type_id, accuracy, power, pp, move_category_id, description)'
'values (?,?,?,?,?,?,?);', values)
def create_pokemon(cursor):
pokemon = '../data/pokemon.json'
oras_megas = '../data/oras_megas.csv'
pokemon_level_moves = '../data/oras_level-up_moves.txt'
cursor.execute('create table if not exists pokemon('
'id integer primary key, '
'name text unique not null, '
'national_id integer unique not null, '
'hp integer not null, '
'atk integer not null, '
'def integer not null, '
'spatk integer not null, '
'spdef integer not null, '
'speed integer not null, '
'image_resource_name text not null'
');')
cursor.execute('create table if not exists pokemon_abilities('
'id integer primary key, '
'pokemon_id integer not null, '
'ability_id integer not null, '
'foreign key(pokemon_id) references pokemon(id), '
'foreign key(ability_id) references abilities(id)'
');')
cursor.execute('create table if not exists pokemon_types('
'id integer primary key, '
'pokemon_id integer not null, '
'type_id integer not null, '
'foreign key(pokemon_id) references pokemon(id), '
'foreign key(type_id) references types(id)'
');')
cursor.execute('create table if not exists pokemon_machine_moves('
'id integer primary key, '
'pokemon_id integer not null, '
'machine_id integer not null, '
'foreign key(pokemon_id) references pokemon(id), '
'foreign key(machine_id) references machines(id)'
');')
cursor.execute('create table if not exists pokemon_level_moves('
'id integer primary key, '
'pokemon_id integer not null, '
'move_id integer not null, '
'level integer not null, '
'foreign key(pokemon_id) references pokemon(id), '
'foreign key(move_id) references moves(id)'
');')
with open(pokemon) as f:
data = json.load(f)
for p in data:
name = p['name'].title()
hp = p['hp']
national_id = p['national_id']
patk = p['attack']
pdef = p['defense']
spatk = p['sp_atk']
spdef = p['sp_def']
spe = p['speed']
if "Farfetch'D" == name:
name = "Farfetch'd"
#Hard-code resources for Pumpkaboo and Gourgeist sizes.
if "Pumpkaboo" in name:
res = 'ic_pokemon_710'
elif "Gourgeist" in name:
res = 'ic_pokemon_711'
else:
res = 'ic_pokemon_' + str(national_id)
values = (name, national_id, hp, patk, pdef, spatk, spdef, spe, res)
sprite_file = os.path.join('../../app/src/main/res/drawable/', res + '.png')
if not os.path.isfile(sprite_file):
print('WARNING: sprite does not exist for', name, national_id)
cursor.execute('insert into pokemon('
'name, national_id, hp, atk, def, spatk, spdef, speed, image_resource_name)'
'values (?,?,?,?,?,?,?,?,?);', values)
cursor.execute('select id from pokemon where name=?', (name,))
pokemon_id = cursor.fetchone()[0]
for a in p['abilities']:
ability = (a['name'].replace('-',' ').title(),)
cursor.execute('select id from abilities where name=?', ability)
ability_id = cursor.fetchone()[0]
values = (pokemon_id, ability_id)
cursor.execute('insert or ignore into pokemon_abilities('
'pokemon_id, ability_id)'
'values (?,?);', values)
for t in p['types']:
poke_type = (t['name'].title(),)
cursor.execute('select id from types where name=?', poke_type)
type_id = cursor.fetchone()[0]
values = (pokemon_id, type_id)
cursor.execute('insert or ignore into pokemon_types('
'pokemon_id, type_id)'
'values (?,?);', values)
# read in pokemon that pokeapi is missing.
with open(oras_megas) as f:
data = csv.reader(f)
next(data) #skip header row
for m in data:
name = m[0]
national_id = m[1]
ability = m[2]
type1 = m[3]
type2 = m[4]
hp = m[5]
patk = m[6]
pdef = m[7]
spatk = m[8]
spdef = m[9]
speed = m[10]
res = 'ic_pokemon_' + str(national_id)
values = (name, national_id, hp, patk, pdef, spatk, spdef, speed, res)
sprite_file = os.path.join('../../app/src/main/res/drawable/', res + '.png')
if not os.path.isfile(sprite_file):
print('WARNING: sprite does not exist for', name, national_id)
cursor.execute('insert or ignore into pokemon('
'name, national_id, hp, atk, def, spatk, spdef, speed, image_resource_name)'
'values (?,?,?,?,?,?,?,?,?);', values)
cursor.execute('select id from pokemon where name=?', (name,))
pokemon_id = cursor.fetchone()[0]
cursor.execute('select id from abilities where name=?', (ability,))
ability_id = cursor.fetchone()[0]
values = (pokemon_id, ability_id)
cursor.execute('insert or ignore into pokemon_abilities('
'pokemon_id, ability_id)'
'values (?,?);', values)
cursor.execute('select id from types where name=?', (type1,))
type_id = cursor.fetchone()[0]
values = (pokemon_id, type_id)
cursor.execute('insert or ignore into pokemon_types('
'pokemon_id, type_id)'
'values (?,?);', values)
if type2 is not "":
cursor.execute('select id from types where name=?', (type2,))
type_id = cursor.fetchone()[0]
values = (pokemon_id, type_id)
cursor.execute('insert or ignore into pokemon_types('
'pokemon_id, type_id)'
'values (?,?);', values)
with open(pokemon_level_moves) as f:
delim = '========'
lines = f.readlines()
i = 0
#really fragile parsing
while i < len(lines):
line = lines[i].rstrip('\n')
if line == delim:
#if the current line is a delimiter, the next lines are:
# 1. pokemon name
# 2. delimiter
# 3-N. list of moves
# N+1. delimiter
poke_name = lines[i+1].rstrip('\n').split(' - ')[0]
cursor.execute('select id from pokemon where name=?', (poke_name,))
poke_id = cursor.fetchone()[0]
# Ignore 4 lines (delimiter, poke name, delimiter, count).
j = i + 4
while j < len(lines) and lines[j].rstrip('\n') != delim:
move_line = lines[j].rstrip('\n')
split = move_line.split(':')
level = split[0].split(' ')[1]
move_name = split[1].lstrip()
cursor.execute('select id from moves where name=?', (move_name,))
move_id = cursor.fetchone()[0]
values = (poke_id, move_id, level)
cursor.execute('insert into pokemon_level_moves('
'pokemon_id, move_id, level) '
'values(?,?,?)', values)
j = j + 1
i = j
else:
i = i + 1
# Warn for missing movesets
cursor.execute('select pokemon.name from pokemon where pokemon.id not in '
'(select pokemon_id from pokemon_level_moves);')
missing = cursor.fetchall()
if len(missing) > 0:
    print('WARNING: missing level-up sets:', missing)
os.remove(path)
conn = sqlite3.connect(path)
c = conn.cursor()
create_gens(c)
create_abilities(c)
create_types(c)
create_moves(c)
create_items(c)
create_pokemon(c)
conn.commit()
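# Post-build sanity-check sketch (illustrative; table and column names come
# from the schema created above, and conn is still open at this point):
# check = conn.cursor()
# check.execute('select pokemon.name, types.name from pokemon '
#               'join pokemon_types on pokemon_types.pokemon_id = pokemon.id '
#               'join types on types.id = pokemon_types.type_id limit 5;')
# print(check.fetchall())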
| true
|
a6421d6ebfa6010fabc8ecf6bd673012b9dcac24
|
Python
|
Dhanooja/SentimentalAnalysis
|
/CheckingSentiment.py
|
UTF-8
| 191
| 2.875
| 3
|
[] |
no_license
|
from textblob import TextBlob
with open('Adverb.txt', 'r') as f:
    for i in f.readlines():
        word = i.strip('\n')  # one word per line
        text = TextBlob(word)
        print(word, text.sentiment.polarity)
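# For orientation (standard TextBlob API; the word below is a made-up
# example, not necessarily present in Adverb.txt):
# blob = TextBlob("quickly")
# blob.sentiment            # Sentiment(polarity=..., subjectivity=...)
# blob.sentiment.polarity   # float in [-1.0, 1.0]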
| true
|
2af2f46a9f90e62b1be609136089f322b84c1f6e
|
Python
|
louis172000/PYTHON_2
|
/TP11/ex4.py
|
UTF-8
| 943
| 3.921875
| 4
|
[] |
no_license
|
class duree:
def __init__(self, heure, minute, seconde):
self.__heure = heure
self.__minute = minute
self.__seconde = seconde
def __str__(self):
return str(self.__heure)+"h"+str(self.__minute)+"m"+str(self.__seconde)+"s"
def __add__(self, autre):
    if isinstance(autre, duree):
        minutes = 0  # renamed from 'min' to avoid shadowing the builtin
        h = 0
        seconde = self.__seconde + autre.__seconde
        if seconde >= 60:
            minutes += 1
            seconde -= 60
        minutes += self.__minute + autre.__minute
        if minutes >= 60:
            h += 1
            minutes -= 60
        h += self.__heure + autre.__heure
        return duree(h, minutes, seconde)
    else:
        print("The two objects must be of the same class")
if __name__== '__main__':
c1 = duree(5, 54, 30)
c2 = duree(5, 6, 34)
c3 = c1 + c2
print(c3)
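# Alternative sketch of the same carry logic (illustrative): convert both
# durations to total seconds, add, then redistribute with divmod.
# def add_durations(a_hms, b_hms):
#     total = sum(h * 3600 + m * 60 + s for h, m, s in (a_hms, b_hms))
#     h, rem = divmod(total, 3600)
#     m, s = divmod(rem, 60)
#     return h, m, s
# add_durations((5, 54, 30), (5, 6, 34))  # -> (11, 1, 4), matching c3 above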
| true
|