blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
81bcfc83e897b71f7bd1a8268d44ebdfc6cf87a4 | Python | seanrobinson1114/IBM_Data_Science_Specialization | /Data_Visulization_with_Python/crime_rate.py | UTF-8 | 1,066 | 2.765625 | 3 | [] | no_license | # Imports
import pandas as pd
import folium
# load data into dataframe (hard-coded local path; adjust for your machine)
crime_rates_df = pd.read_csv(
    "/home/sean/software/coursera/IBM_Data_Science/Data_Visualization_with_Python/final_project/IBM_Data_Science_Specialization/Police_Department_Incidents_-_Previous_Year__2016_.csv"
)
print(crime_rates_df)
# initialize data: (neighborhood, incident count) pairs
data = [['CENTRAL',17666],['NORTHERN',20100],['PARK',8699],['SOUTHERN',28445],['MISSION',19503],['TENDERLOIN',9942],['RICHMOND',8922],['TARAVAL',11325],['INGLESIDE',11594],['BAYVIEW',14303]]
# Create the pandas DataFrame
df = pd.DataFrame(data, columns=['Neighborhood', 'Count'])
print(df)
world_map = folium.Map(location=[0, 0], zoom_start=2, tiles='Mapbox Bright')
world_map.choropleth(
    geo_data="/home/sean/software/coursera/IBM_Data_Science/Data_Visualization_with_Python/final_project/IBM_Data_Science_Specialization/san-francisco.geojson",
    # Bug fix: choropleth expects the DataFrame here -- `columns=` names columns
    # of this object; the raw list-of-lists `data` has no columns to look up.
    data=df,
    columns=['Neighborhood', 'Count'],
    # NOTE(review): folium's choropleth normally also requires key_on= pointing
    # at the matching GeoJSON property (e.g. 'feature.properties.DISTRICT') --
    # confirm against san-francisco.geojson.
    fill_color='YlOrRd',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Crime Rates'
)
print(world_map)
| true |
9a2b809a86df401bc6d7850671fb90c39359f6f9 | Python | vinny0965/phyton | /1P/1_VA_VINICIUS_ONORATO - Copia/util.py | UTF-8 | 1,079 | 2.84375 | 3 | [] | no_license | from datetime import datetime
import bancodeDados
def imprimir_clientes():
    """Print every stored client to stdout, numbered sequentially from 1."""
    print('Clientes')
    for numero, cliente in enumerate(bancodeDados.get_clientes(), start=1):
        print('Cliente:', numero)
        print('CPF:', cliente[0])
        print('Nome:', cliente[1])
        print('Sexo:', cliente[2])
        print('Telefone:', cliente[3])
        print('-------*--------')
def imprimir_contratos():
    """Print every stored contract to stdout, numbered sequentially from 1."""
    print('Contratos>>>')
    for numero, contrato in enumerate(bancodeDados.get_contratos(), start=1):
        print('Contrato:' , numero)
        print('Codigo:', contrato[0])
        print('Valor total Contrato', contrato[1])
        print('Quantidade Parcelas', contrato[2])
        print('---------*----------')
def imprimir_boletos():
    """Print every stored payment slip (boleto) to stdout, numbered from 1."""
    print('Boletos>>>')
    for numero, boleto in enumerate(bancodeDados.get_boletos(), start=1):
        print('Boleto:' , numero)
        print('Codigo:', boleto[0])
        print('Valor Parcela', boleto[1])
        print('Situaรงรฃo', boleto[2])
        print('Vencimento', boleto[3])
        print('---------*----------')
| true |
562049a0218a28c777fbe54b0bdb4e1e3b938a28 | Python | GTSupplyChainAR/RFID-Study-Task-Generation | /main.py | UTF-8 | 7,751 | 3.03125 | 3 | [] | no_license | import numpy
import json
import os
# This is the version number of the JSON file.
# Use this number to keep all copies of the tasks in sync across the study methods.
VERSION = "1.2"
# The Google Glass HUD client can only fit these number of orders on the screen at once.
MAX_ORDERS_PER_RACK = 6
# These are the names of all of the methods in this study.
# These names should be clean to use as file names in any OS
STUDY_METHODS = [
'pick-by-light_button',
'pick-by-hud_rfid',
'pick-by-paper_none',
'pick-by-paper_barcode',
]
# This is the number of tasks to include in the training files
NUM_TRAINING_TASKS = 5
# This is the number of tasks to include in testing files
NUM_TESTING_TASKS = 10
class Bin(object):
    """ A simple structure to hold information about a source bin. """
    def __init__(self, rack, row_number, column_number):
        """
        :param rack: A or B
        :param row_number: 1 to 4
        :param column_number: 1 to 3
        """
        self.rack = rack
        self.row_number = row_number
        self.column_number = column_number

    @property
    def tag(self):
        # e.g. rack 'A', row 3, column 2 -> 'A32'
        return '{}{}{}'.format(self.rack, self.row_number, self.column_number)

    def __str__(self):
        return self.tag

    def __eq__(self, other):
        # Two bins are equal iff they occupy the same rack slot.
        return self.tag == other.tag
def generate_bins():
    """ Generates all bins in our layout """
    # Rack letter varies slowest, then row, then column -- the same ordering
    # the original nested append-loops produced.
    return [
        Bin(rack, row, col)
        for rack in ('A', 'B')
        for row in range(1, 5)
        for col in range(1, 4)
    ]


BINS = generate_bins()
def get_source_bins_for_order(racks_and_num_source_bins):
    """
    Randomly selects source bins that subjects will pick from in an order.

    Parameters
    ----------
    racks_and_num_source_bins: dict
        Maps each rack letter ('A' or 'B') to the number of source bins to
        draw from that rack.  (The previous docstring documented a
        nonexistent ``num_source_bins`` int parameter.)

    Returns
    -------
    A list of dictionaries with keys 'binTag' and 'numItems'.
    Entries are sorted alphabetically by each 'binTag'.

    Examples
    --------
    >>> get_source_bins_for_order({'A': 2, 'B': 1})
    [
        {
            'binTag': 'A32',
            'numItems': 2
        },
        {
            'binTag': 'B12',
            'numItems': 1
        }
    ]
    """
    source_bins = []
    # Bug fix: dict.iteritems() is Python-2-only; items() behaves the same and
    # also works on Python 3.
    for rack, num_source_bins in racks_and_num_source_bins.items():
        # Get bins that are in this rack
        bins_in_rack = [bin for bin in BINS if bin.rack == rack]
        # Then, randomly select num_source_bins from that rack
        randomly_selected_bins = numpy.random.choice(
            a=bins_in_rack,
            size=num_source_bins,
            replace=False,  # select a unique set of bins
        )
        for bin in randomly_selected_bins:
            source_bins.append({
                'binTag': bin.tag,
                'numItems': numpy.random.choice(
                    a=[1, 2, 3],  # the number of items in this bin
                    size=None,  # select one value
                    replace=False,
                    p=[0.87, 0.08, 0.05],  # with this probability distribution
                )
            })
    # Sort (in-place) the source_bins by their tags
    source_bins.sort(key=lambda sb: sb['binTag'])
    assert len(set([sb['binTag'] for sb in source_bins])) == len(source_bins), \
        "There is a duplicated source bin tag which shouldn't happened!"
    # The HUD client can only display MAX_ORDERS_PER_RACK bins per rack.
    for rack in racks_and_num_source_bins:
        bins_in_rack = [bin for bin in source_bins if bin['binTag'][0] == rack]
        assert len(bins_in_rack) <= MAX_ORDERS_PER_RACK, bins_in_rack
    return source_bins
def get_orders_for_task():
    """Build the three orders (one per receiving bin C11-C13) for a task."""
    receiving_bin_tags = ['C11', 'C12', 'C13']
    return [
        {
            'orderId': idx + 1,
            'sourceBins': get_source_bins_for_order({
                'A': numpy.random.choice(
                    a=[4, 5, 6],
                    p=[0.90, 0.05, 0.05]
                ),
                'B': numpy.random.choice(
                    a=[4, 5, 6],
                    p=[0.90, 0.05, 0.05]
                ),
            }),
            'receivingBinTag': receiving_bin_tag,
        }
        for idx, receiving_bin_tag in enumerate(receiving_bin_tags)
    ]
def get_tasks_for_method(num_training_tasks, num_testing_tasks):
    """ Returns a tuple of training tasks and testing tasks with the specified counts. """
    # Task IDs run 1..num_training_tasks for training and continue from there
    # for testing, exactly as the original incrementing while-loops did.
    training_tasks = [
        {'taskId': task_id, 'orders': get_orders_for_task()}
        for task_id in range(1, num_training_tasks + 1)
    ]
    testing_tasks = [
        {'taskId': task_id, 'orders': get_orders_for_task()}
        for task_id in range(num_training_tasks + 1,
                             num_training_tasks + num_testing_tasks + 1)
    ]
    # Ensure the lengths of the tasks lists are as expected. Very important!
    assert len(training_tasks) == num_training_tasks
    assert len(testing_tasks) == num_testing_tasks
    return training_tasks, testing_tasks
def write_tasks_to_output_file(tasks, is_training_task_list, study_method):
    """ Writes the given tasks to the given output file """
    suffix = 'training' if is_training_task_list else 'testing'
    file_name = "%s-%s-%s.json" % ('tasks', study_method, suffix)
    # Create the output directory if it doesn't already exist
    target_dir = os.path.join('output', study_method)
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)
    # Print out the task IDs
    print_task_ordering(tasks, is_training_task_list=is_training_task_list)
    # Write to the output file
    full_path = os.path.join(target_dir, file_name)
    with open(full_path, mode='w+') as f:
        json.dump({'version': VERSION, 'tasks': tasks}, f, indent=4)
def print_task_ordering(tasks, is_training_task_list):
    """ Simply prints out all task IDs"""
    ids = [task['taskId'] for task in tasks]
    label = "Training: " if is_training_task_list else "Testing: "
    print(label + str(ids))
if __name__ == '__main__':
    # Change this seed to alter what pick paths are generated.
    # A fixed seed keeps the generated tasks reproducible across runs.
    numpy.random.seed(1)
    # Generate all the tasks, separated into training and testing tasks
    training_tasks, testing_tasks = get_tasks_for_method(
        num_training_tasks=NUM_TRAINING_TASKS,
        num_testing_tasks=NUM_TESTING_TASKS,
    )
    # Create an output directory, if it doesn't already exist
    if not os.path.isdir('output'):
        os.mkdir('output')
    # Write the "master" list of tasks
    write_tasks_to_output_file(training_tasks, is_training_task_list=True, study_method='MASTER')
    write_tasks_to_output_file(testing_tasks, is_training_task_list=False, study_method='MASTER')
    # For each method, shuffle the tasks and write them to an output file.
    # Shuffling changes the presentation ORDER only; task contents and IDs
    # stay in sync with the MASTER lists written above.
    for method_name in STUDY_METHODS:
        # Create a folder for each method
        method_dir_name = os.path.join('output', method_name)
        if not os.path.isdir(method_dir_name):
            os.mkdir(method_dir_name)
        # Shuffle the tasks, in-place
        numpy.random.shuffle(training_tasks)
        numpy.random.shuffle(testing_tasks)
        # Write the new tasks
        write_tasks_to_output_file(training_tasks, is_training_task_list=True, study_method=method_name)
        write_tasks_to_output_file(testing_tasks, is_training_task_list=False, study_method=method_name)
| true |
3d19c7ac08e0a84f6c4d610bf6b3085837803867 | Python | Aasthaengg/IBMdataset | /Python_codes/p02693/s801267442.py | UTF-8 | 193 | 3.203125 | 3 | [] | no_license | k= int(input())
a, b = map(int, input().split())
# Walk the positive multiples of k; succeed on the first one inside [a, b].
multiple = k
while multiple <= b:
    if multiple >= a:
        print("OK")
        break
    multiple += k
else:
    # No multiple of k fell inside the interval.
    print("NG")
4b8cb17e17df31731db0f2233d7d48e862c67b54 | Python | YooTimmy/promo_code_scraper | /promo_code_scraper.py | UTF-8 | 1,972 | 2.796875 | 3 | [] | no_license | from flask import *
import pandas as pd
import requests
import bs4
app = Flask(__name__)
# Target page listing GrabFood promo codes.
# NOTE(review): this network request runs at import time (on every import of
# this module) -- consider moving the fetch into a function.
website = "https://www.sgdtips.com/grabfood-promo-codes"
res = requests.get(website)
soup = bs4.BeautifulSoup(res.text, "lxml")
def promo_code_scraper(soup):
    """
    The web scraper code is very specific for each individual website, so likely it won't work if you change the
    website link to another one, or the website changes its format subsequently.
    :param soup: the output from beautifulsoup module
    :return: the voucher_df in pandas df format.
    """
    descriptions = []
    codes = []
    details_col = []
    for coupon in soup.select(".item.procoupon_item--voucher"):
        for content in coupon.select(".sgdtpro_voucher-content"):
            for heading in content.select('.sgdt-brief-promo.promo-code-h3'):
                descriptions.append(heading.text)
            code_tags = content.select('.sgdt_code-value')
            if code_tags:
                # The code lives in the hidden tag's value attribute
                # (.text is empty for these elements).
                for tag in code_tags:
                    codes.append(tag.get('value'))
            else:
                codes.append('NA')
        for detail in coupon.select(".sgdtpro_content-detail"):
            cleaned = detail.text.replace('\n', '')
            details_col.append(cleaned if cleaned else 'NA')
    return pd.DataFrame(
        list(zip(descriptions, codes, details_col)),
        columns=['Description', 'Code', 'Details'],
    )
# Scraped once at startup; the table is static for the life of the process.
voucher_tbl = promo_code_scraper(soup)
@app.route('/')
def show_tables():
    # Render the scraped vouchers as an HTML table (template iterates the records).
    return render_template('basic_table.html', data = voucher_tbl.to_dict(orient='records'), title = 'Grab Voucher Code')
if __name__ == '__main__':
    app.run(debug=True)
1f006b6cd0da4f49144b40994ca08b09e91db3ff | Python | CodecoolBP20172/pbwp-3rd-si-game-statistics-BenceLovas | /reports.py | UTF-8 | 2,778 | 3.703125 | 4 | [] | no_license | # Report functions
def separate_data(file_name):
    '''
    In the data file every line contains properties of a game.
    Properties are separated by a tab character and
    lines are separated by line break characters.
    The order of properties:
        0 = Title (str)
        1 = Total copies sold (million) (float)
        2 = Release date (year) (int)
        3 = Genre (str)
        4 = Publisher (str)
    '''
    with open(file_name, "r") as handle:
        rows = [line.strip().split("\t") for line in handle]
    # Convert the numeric columns in place, failing loudly on bad data.
    for row in rows:
        try:
            row[1] = float(row[1])
        except ValueError:
            raise ValueError("Total copies sold value: {}, is not a float.".format(row[1]))
        try:
            row[2] = int(row[2])
        except ValueError:
            raise ValueError("Release date value: {}, is not an intiger.".format(row[2]))
    return rows
def count_games(file_name):
    """Return the number of lines (games) in the data file."""
    with open(file_name, "r") as handle:
        return sum(1 for _ in handle)
def decide(file_name, year):
    """True if any game in the file was released in the given year."""
    return any(game[2] == year for game in separate_data(file_name))
def get_latest(file_name):
    """Return the title of the most recently released game.

    Ties on release year resolve to the first occurrence in the file
    (same as the original stable reverse sort).
    """
    return max(separate_data(file_name), key=lambda row: row[2])[0]
def count_by_genre(file_name, genre):
    """Count games whose genre column equals `genre`.

    Bug fix: the previous version summed ``game.count(genre)`` over whole
    rows, which also counted the genre string appearing in OTHER columns
    (e.g. a title or publisher equal to the genre name). Only the genre
    field (index 3) should be compared.
    """
    game_list = separate_data(file_name)
    return sum(1 for game in game_list if game[3] == genre)
def get_line_number_by_title(file_name, title):
    """Return the 1-based line number of the first row containing `title`."""
    for line_number, game in enumerate(separate_data(file_name), start=1):
        if title in game:
            return line_number
    raise ValueError("Title not found.")
def alphabetical_order(list_of_strings):
    '''Sort the strings in place, case-insensitively, and return the list.

    Replaces the hand-rolled O(n^2) bubble sort with the built-in stable
    Timsort; comparisons use the lowercased value exactly as before, and the
    sort remains stable and in-place, so results and side effects match the
    original implementation.
    '''
    list_of_strings.sort(key=str.lower)
    return list_of_strings
def sort_abc(file_name):
    """Return all game titles sorted case-insensitively."""
    titles = [game[0] for game in separate_data(file_name)]
    return alphabetical_order(titles)
def get_genres(file_name):
    """Return the unique genres, sorted case-insensitively."""
    unique_genres = {game[3] for game in separate_data(file_name)}
    return alphabetical_order(list(unique_genres))
def when_was_top_sold_fps(file_name):
    """Return the release year of the best-selling first-person shooter."""
    sales_by_year = {
        game[2]: game[1]
        for game in separate_data(file_name)
        if game[3] == "First-person shooter"
    }
    if not sales_by_year:
        raise ValueError("There's no FPS game.")
    return max(sales_by_year, key=sales_by_year.get)
| true |
ec091f556974437742226f8e97ef307f3a11a778 | Python | dheerajgm/daily-coding-problem | /solutions/problem_350.py | UTF-8 | 811 | 3.375 | 3 | [
"MIT"
] | permissive | import sys
def get_sum_sq(target, squares):
    """Minimum count of values from `squares` (sorted ascending) summing to
    `target`, branching on "use the largest square as many times as possible"
    vs "skip the largest square entirely". Returns sys.maxsize when no
    combination exists. Note: pops from the caller's `squares` list, exactly
    like the original.
    """
    if target == 0:
        return 0
    if not squares:
        return sys.maxsize
    largest = squares.pop()
    count, leftover = divmod(target, largest)
    if count:
        return min(
            count + get_sum_sq(leftover, squares.copy()),
            get_sum_sq(target, squares.copy()),
        )
    return get_sum_sq(target, squares.copy())
def get_min_squares(target):
    """Minimum number of perfect squares that sum to `target`."""
    squares = []
    root = 1
    while root * root <= target:
        squares.append(root * root)
        root += 1
    return get_sum_sq(target, squares)
# Tests
# Sanity checks run at import time: 4 = 2^2, 17 = 16 + 1, 18 = 9 + 9.
assert get_min_squares(4) == 1
assert get_min_squares(17) == 2
assert get_min_squares(18) == 2
| true |
bf646c10c1e2e21972f8bba0bcc63b4fa1c55c46 | Python | jose-gilberto/mitx-python | /lect-01/variables.py | UTF-8 | 167 | 2.625 | 3 | [] | no_license | # Variables
pi = 3.14159  # fixed-precision approximation of the constant pi
pi # 3.14159 -- a bare name as a statement just evaluates it (echoed in a REPL)
pi_approx = 22/7  # classic rational approximation (true division -> ~3.142857)
# reusing variables
radius = 2.2
area = pi*(radius**2)  # circle area: pi * r^2, using the current radius
# updating radius
radius = radius + 1  # note: `area` is NOT recomputed after this rebinding
# radius += 1  -- augmented-assignment shorthand for the line above
| true |
697373c5d2865f324b1aa3312865a0290a6e30cc | Python | akihanari/AtCoder | /ABC166/C.py | UTF-8 | 280 | 2.8125 | 3 | [] | no_license | # coding: utf-8
N, M = map(int, input().split())
H = list(map(int, input().split()))
# good[i] stays 1 while observatory i is strictly taller than every
# neighbour seen so far; any tie or shorter comparison disqualifies it.
good = [1] * N
for _ in range(M):
    A, B = map(int, input().split())
    if H[A - 1] <= H[B - 1]:
        good[A - 1] = 0
    if H[B - 1] <= H[A - 1]:
        good[B - 1] = 0
print(sum(good))
| true |
ff23e601a54f2e5493f2aa3968ae4a2f2dec953a | Python | Harsh-2420/airbnb | /main.py | UTF-8 | 1,246 | 2.875 | 3 | [] | no_license | import pandas as pd
import numpy as np
# Hard-coded local paths to the Kaggle Airbnb new-user-bookings CSVs.
tr_filepath = "/Users/harshjhunjhunwala/Desktop/github_datasets/airbnb_data/train_users_2.csv"
df_train = pd.read_csv(tr_filepath, header=0, index_col=None)
te_filepath = "/Users/harshjhunjhunwala/Desktop/github_datasets/airbnb_data/test_users.csv"
df_test = pd.read_csv(te_filepath, header=0, index_col=None)
# Stack train and test so cleaning is applied uniformly to both.
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
# Fix the datetime formats in the date column
df_all['date_account_created'] = pd.to_datetime(df_all['date_account_created'], format='%Y-%m-%d')
df_all['timestamp_first_active'] = pd.to_datetime(df_all['timestamp_first_active'], format='%Y%m%d%H%M%S')
# Missing account-creation dates fall back to the first-activity timestamp.
df_all['date_account_created'].fillna(df_all.timestamp_first_active, inplace=True)
# Drop date_first_booking to avoid creating an incorrect model
# (it is absent for all test users, so it would leak the target).
df_all.drop('date_first_booking', axis=1, inplace=True)
def remove_outliers(df, column, min_val, max_val):
    """Replace values of `column` outside (min_val, max_val) with NaN, in place.

    Values equal to the bounds are also removed (<= / >= comparisons).
    Returns the same DataFrame for convenience.

    Bug fix: the original indexed ``df[column.values]``, but `column` is a
    string and ``str`` has no ``.values`` attribute (AttributeError at
    runtime); the intended expression is ``df[column].values``.
    Also uses ``np.nan`` instead of ``np.NaN`` (the alias was removed in
    NumPy 2.0; ``np.nan`` works on all versions).
    """
    col_values = df[column].values
    df[column] = np.where(np.logical_or(col_values <= min_val, col_values >= max_val), np.nan, col_values)
    return df
# Ages outside (15, 90) are treated as data-entry errors and nulled out.
df_all = remove_outliers(df_all, 'age', 15, 90)
# Sentinel -1 marks "unknown" for downstream encoders.
df_all['age'].fillna(-1, inplace=True)
df_all['first_affiliate_tracked'].fillna(-1, inplace=True)
3b450a31d1aa9dede545daf25d14ed02e03afc54 | Python | marcoguastalli/my_python | /app_prices/app_prices.py | UTF-8 | 6,223 | 2.640625 | 3 | [
"MIT"
] | permissive | import asyncio
import os
import sys
import time
from datetime import datetime
import aioschedule as schedule
from colorama import Fore, Style
from bnc_api_client.get_ticker import GetTicker as BncGetTicker
from cdc_api_client.get_ticker import GetTicker as CdcGetTicker
from db_client.create_connection import create_connection
from db_client.execute_query import execute_query
from db_client.select_query import select_query
from model.price import Price
from utils.variation_utils import calculate_variation_amount
from utils.variation_utils import print_variation_with_colorama
# Toggle which exchange APIs are polled on each scheduler tick.
USE_CDC = True
USE_BNC = False
async def main():
    """One polling cycle: load cached prices from SQLite, refresh them from the
    enabled exchange APIs, and log the elapsed time."""
    start = time.time()
    # Hard-coded local SQLite database path.
    database = "/Users/marcoguastalli/opt/sqlite/prices.sqlite"
    conn = create_connection(database)
    try:
        if conn is not None:
            prices_dict = {}
            # read SQLite table 'prices' and create a dictionary with Price object
            price_table_rows = select_query(conn, "SELECT source, instrument, price_from, price_to, variation, created, updated"
                                                  " FROM prices"
                                                  " ORDER BY source, instrument, created ASC")
            if price_table_rows is not None:
                for row in price_table_rows:
                    price = Price(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
                    # Keyed by "<source>_<instrument>" (presumably what
                    # Price.get_key() returns -- see the insert logic below).
                    prices_dict[price.get_key()] = price
            if USE_BNC:
                # call BNC API and update the dictionary with Price object
                await create_prices_from_bnc_api(conn, prices_dict)
            if USE_CDC:
                # call CDC API and update the dictionary with Price object
                await create_prices_from_cdc_api(conn, prices_dict)
            # log time
            print(Style.RESET_ALL + "At " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f") + " the process end in: ", time.time() - start, "seconds")
        else:
            print(Fore.RED + "Error Connection to DDBB:" + database)
    finally:
        # Always release the connection, even if an API call raised.
        if conn is not None:
            conn.close()
async def create_prices_from_bnc_api(conn, prices_dict):
    """Fetch Binance 24h tickers and INSERT new / UPDATE known instruments.

    `prices_dict` maps "<source>_<instrument>" keys to previously loaded
    Price rows; instruments not in it get an INSERT, the rest an UPDATE.

    NOTE(review): the SQL below is built with f-string interpolation from
    API-supplied values -- switch to parameterized queries if execute_query
    supports bound parameters.
    """
    source = "BNC"
    # call BCN API
    tickers_list = await get_prices_from_bnc_api()
    # insert the instrument-name and the price of the latest trade
    for ticker in tickers_list:
        instrument = ticker['symbol']
        price_key = source + "_" + instrument
        price_object_from_dict = prices_dict.get(price_key)
        if price_object_from_dict is None:
            # first loop: instrument not seen before -> seed both prices
            # with the latest trade and a zero variation.
            price_from = float(ticker['lastPrice'])
            price_to = float(ticker['lastPrice'])
            variation = 0
            created = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
            query_insert = f"INSERT INTO prices (source, instrument, price_from, price_to, variation, created) " \
                           f"VALUES ('{source}', '{instrument}', {price_from}, {price_to}, {variation}, '{created}')"
            execute_query(conn, query_insert)
        else:
            # Known instrument: variation is measured against the price
            # loaded at startup, not against the previous tick.
            price_from = price_object_from_dict.get_price_from()
            price_to = float(ticker['lastPrice'])
            variation = calculate_variation_amount(price_from, price_to)
            if variation is None:
                variation = 0
            updated = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
            query_update = f"UPDATE prices SET price_from={price_from}, price_to={price_to}, variation={variation}, updated='{updated}'" \
                           f" WHERE source='{source}' AND instrument='{instrument}'"
            execute_query(conn, query_update)
            # log variation
            print_variation_with_colorama(updated, instrument, variation)
    # commit once after processing every ticker
    conn.commit()
    pass
async def create_prices_from_cdc_api(conn, prices_dict):
    """Fetch Crypto.com tickers and INSERT new / UPDATE known instruments.

    Mirrors create_prices_from_bnc_api but reads the CDC payload keys:
    'i' = instrument name, 'a' = latest trade price.

    NOTE(review): the SQL below is built with f-string interpolation from
    API-supplied values -- switch to parameterized queries if execute_query
    supports bound parameters.
    """
    source = "CDC"
    # call CDC API
    tickers_list = await get_prices_from_cdc_api()
    # insert the instrument-name and the price of the latest trade
    for ticker in tickers_list:
        instrument = ticker['i']
        price_key = source + "_" + instrument
        price_object_from_dict = prices_dict.get(price_key)
        if price_object_from_dict is None:
            # first loop: instrument not seen before -> seed both prices
            # with the latest trade and a zero variation.
            price_from = ticker['a']
            price_to = ticker['a']
            variation = 0
            created = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
            query_insert = f"INSERT INTO prices (source, instrument, price_from, price_to, variation, created) " \
                           f"VALUES ('{source}', '{instrument}', {price_from}, {price_to}, {variation}, '{created}')"
            execute_query(conn, query_insert)
        else:
            # Known instrument: variation is measured against the price
            # loaded at startup, not against the previous tick.
            price_from = price_object_from_dict.get_price_from()
            price_to = ticker['a']
            variation = calculate_variation_amount(price_from, price_to)
            if variation is None:
                variation = 0
            updated = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
            query_update = f"UPDATE prices SET price_from={price_from}, price_to={price_to}, variation={variation}, updated='{updated}'" \
                           f" WHERE source='{source}' AND instrument='{instrument}'"
            execute_query(conn, query_update)
            # log variation
            print_variation_with_colorama(updated, instrument, variation)
    # commit once after processing every ticker
    conn.commit()
    pass
async def get_prices_from_bnc_api():
    """Fetch 24h ticker statistics for every Binance symbol as a list of dicts."""
    client = BncGetTicker('https://api.binance.com/api/v3/ticker/24hr')
    return client.do_get().json()
async def get_prices_from_cdc_api():
    """Fetch ticker data for every Crypto.com instrument as a list of dicts."""
    client = CdcGetTicker('https://api.crypto.com/v2/public/get-ticker')
    payload = client.do_get().json()
    return payload['result']['data']
if __name__ == '__main__':
    try:
        # Re-run main() every second via the aioschedule event loop.
        schedule.every(1).seconds.do(main)
        loop = asyncio.get_event_loop()
        while True:
            loop.run_until_complete(schedule.run_pending())
            time.sleep(1)
    except KeyboardInterrupt:
        print('Process Interrupted!')
        try:
            sys.exit(0)
        except SystemExit:
            # Bug fix: the original called os.error(), which merely creates an
            # OSError instance and discards it; os._exit() actually terminates
            # the process immediately, which is the intent of this fallback.
            os._exit(0)
| true |
706ea0454e479715615d66febf2c0b53064ea913 | Python | Kenjart/Phonebook-Program | /phone_module.py | UTF-8 | 569 | 2.6875 | 3 | [] | no_license | """
Module containing phone numbers
"""
# Hard-coded phone-book entries; each variable holds one display-ready line.
a = ("Joe's Mining Company main contact hotline (mincrft) : 1-800-4206-9133")
b = ("Sydney's Beauty Inc customer support hotline (not an MLM we swear) : 1-800-1234-5678")
c = ("Dave's Gas Company secondary contact hotline (ruining the enviroment one day at a time) : 1-800-3928-8738")
d = ("Steve Woodworking internet and cable hotline (my wife filed for divorce and now I'm sad :( ) : 1-800-8939-7193")
# NOTE(review): the entry below contains mojibake (mis-decoded characters); restore the intended text.
e = ("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ : 1-800-8219-2898")
7e5c4df9e52aa06b68ec309343aac3a504375f4b | Python | aliash98/CS229-Stanford-Problems | /Problem-Set-1/Problem-1-b/main.py | UTF-8 | 2,851 | 3.140625 | 3 | [] | no_license | import utility
import numpy as np
def main():
    """Train logistic regression with Newton's method on ds1_train.csv, plot the
    decision boundary, then score accuracy on ds1_valid.csv (results.txt)."""
    # Convergence tolerance for the Newton update.
    epsilon = 0.00000001
    directory = './data/ds1_train.csv'
    x_1, x_2, y = utility.load_csv(directory)
    x_inputs = np.array([x_1, x_2])  # NOTE(review): unused after this point
    x_1 = np.array(x_1)
    x_2 = np.array(x_2)
    y = np.array(y)
    # -------- Data set is ready in array --------
    theta_0 = 0.01
    theta_1 = 0.01
    theta_2 = 0.01
    while True:
        gradient = gradient_calculator(x_1, x_2, y, theta_0, theta_1, theta_2)
        hessian = hessian_calculator(x_1, x_2, theta_0, theta_1, theta_2)
        hessian_inv = inverse_calculator(hessian)
        # Newton step: H^-1 * gradient (ordered as [theta_2, theta_1, theta_0]).
        result = np.matmul(hessian_inv, gradient)
        # NOTE(review): this stops on the SIGNED sum of the step, not its norm;
        # a negative sum would stop immediately -- consider np.linalg.norm.
        if np.sum(result) < epsilon:
            break
        theta_2 += result[0]
        theta_1 += result[1]
        theta_0 += result[2]
        print(theta_2, " ", theta_1, " ", theta_0, "\n")
    utility.show_plot(x_1, x_2, y, theta_0, theta_1, theta_2, './plot.png')
    # ------ Training is finished -----------
    test_directory = './data/ds1_valid.csv'
    x_1, x_2, y = utility.load_csv(test_directory)
    x_1 = np.array(x_1)
    x_2 = np.array(x_2)
    correct_prediction = 0
    valid_results = sigmoid_function(theta_0, theta_1, theta_2, x_1, x_2)
    # Write per-sample probability and 0/1 prediction; count correct labels.
    f = open("results.txt", "w")
    for i in range(len(valid_results)):
        f.write(str(valid_results[i]))
        f.write(" ----- ")
        if valid_results[i] > 0.5:
            f.write("1\n")
            if y[i] == 1:
                correct_prediction += 1
        else:
            f.write("0\n")
            if y[i] == 0:
                correct_prediction += 1
    f.close()
    print(correct_prediction)
def sigmoid_function(theta_0, theta_1, theta_2, x_1, x_2):
    """Elementwise 1 / (1 + e^z) for z = theta_0 + theta_1*x_1 + theta_2*x_2.

    NOTE(review): this equals sigma(-z), i.e. the sign convention is flipped
    relative to the usual sigma(z) = 1/(1+e^-z). It matches the gradient and
    Hessian routines in this module, so do not "fix" the sign in isolation.
    """
    linear = theta_0 + theta_1 * x_1 + theta_2 * x_2
    return 1.0 / (1.0 + np.exp(linear))
def gradient_calculator(x_1, x_2, y, theta_0, theta_1, theta_2):
    """Gradient of the log-likelihood, ordered as [d/dtheta_2, d/dtheta_1, d/dtheta_0]."""
    predictions = sigmoid_function(theta_0, theta_1, theta_2, x_1, x_2)
    residuals = y - predictions
    return np.array([
        np.sum(residuals * x_2),
        np.sum(residuals * x_1),
        np.sum(residuals),
    ])
def hessian_calculator(x_1, x_2, theta_0, theta_1, theta_2):
    """Hessian of the log-likelihood in the same [theta_2, theta_1, theta_0]
    ordering as gradient_calculator."""
    s = sigmoid_function(theta_0, theta_1, theta_2, x_1, x_2)
    weights = s * (1 - s)  # elementwise derivative of the sigmoid
    h_x2x2 = -np.sum(weights * x_2 * x_2)
    h_x2x1 = -np.sum(weights * x_2 * x_1)
    h_x1x1 = -np.sum(weights * x_1 * x_1)
    h_x2 = -np.sum(weights * x_2)
    h_x1 = -np.sum(weights * x_1)
    h_const = -np.sum(weights)
    return np.array([
        [h_x2x2, h_x2x1, h_x2],
        [h_x2x1, h_x1x1, h_x1],
        [h_x2, h_x1, h_const],
    ])
def inverse_calculator(matrix):
    # Thin wrapper over NumPy's matrix inverse; raises
    # numpy.linalg.LinAlgError for a singular input.
    return np.linalg.inv(matrix)
# Run the Newton's-method training/validation pipeline when executed as a script.
if __name__ == '__main__':
    main()
| true |
c21c9afb55f3a90383ff0364c9277a96d61e95df | Python | tarikaltuncu/distanceclosure | /distanceclosure/dijkstra.py | UTF-8 | 17,299 | 2.765625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Dijkstra algorithm
===================
Implementation of a generalized version of the Dijkstra algorithm using Heap Queue and multiprocessing to compute transitive closure.
This algorithm uses a modification to the path length to compute both `metric` and `ultrametric` closure.
Warning:
There is no need for this class to be called directly, since it will be called from :meth:`~distanceclosure.closure`.
This algoritm currentrly only works on undirected networks.
Note:
A very good introductory tutorial of how the Djikstra algorithm works can be seen `here
<https://www.youtube.com/watch?v=U9Raj6rAqqs>`_.
"""
# Copyright (C) 2015 by
# Rion Brattig Correia <rionbr@gmail.com>
# Luis Rocha <rocha@indiana.edu>
# Thiago Simas <@.>
# All rights reserved.
# MIT license.
from distanceclosure.utils import dict2sparse
import numpy as np
import heapq
from joblib import Parallel, delayed
#import dill
from distanceclosure._dijkstra import _py_single_source_shortest_distances, _py_single_source_complete_paths
from distanceclosure.cython._dijkstra import _cy_single_source_shortest_distances, _cy_single_source_complete_paths
# NOTE(review): reassigning the module-level __name__ is unusual -- it breaks
# `if __name__ == '__main__'` checks and any tooling that relies on the real
# module name.
__name__ = 'distanceclosure'
__author__ = """\n""".join(['Rion Brattig Correia <rionbr@gmail.com>',
	'Luis Rocha <rocha@indiana.com>',
	'Thiago Simas <@.>'])
__all__ = ['Dijkstra']
# Operator pairs per closure kind: paths are combined along edges with the
# second operator (sum -> 'metric', max -> 'ultrametric') and competing paths
# are reduced with the first (min).
__operators__ = {
	'metric': (min,sum),
	'ultrametric':(min,max)
}
class Dijkstra(object):
"""
This is the class that handles the computation of the Distance Closure using a generalization of the Djikstra Algorithm.
Under the hood it has two implementations, in Cython and Python, both using a priority heap queue.
"""
def __init__(self, N=list(), E=dict(), neighbours=dict(), node_names=list(), directed=False, verbose=0):
self.N = N # nodes
self.E = E # edges
self.node_names = node_names # node names
self.neighbours = neighbours # dict of neighbouring edges for every node
self.directed = directed # is graph directed?
#
self.verbose = verbose
#
self.shortest_distances = {k:None for k in N} # will be populated by `all_pairs_shortest_paths` or `single_source_shortest_paths`
self.local_paths = {k:None for k in N}
self.shortest_paths = {k:None for k in N}
def __str__(self):
return "<Dijkstra Format Network(n_nodes=%d n_edges=%d, directed=%s, verbose=%s)>" % ( len(self.N) , len(self.E) , self.directed , self.verbose )
def all_pairs_shortest_distances(self, kind='metric', n_jobs=1, engine='cython', verbose=0):
"""
Computes All Pairs Shortest Distances (APSD).
Args:
kind (string): The metric type. 'metric' or 'ultrametric' are currently accepted.
n_jobs (int, optional): The number of CPUs to use to do the computation. ``-1`` means 'all CPUs'.
Only available for 'python' engine.
engine (string): The implementation to use. Either ``cython`` or ``python``.
verbose (int, optional): The verbosity level: if non zero, progress messages are printed.
Above 50, the output is sent to stdout. The frequency of the messages increases with the verbosity level.
If it more than 10, all iterations are reported.
Returns:
shortest_distances (dict): A dict-of-dicts of distances between all pair of nodes.
Ex: ``{'a':{'c':0.1,'b':0.3}, ...}``.
local_paths (dict): A dict-of-dicts-of-list of the shortest local path between all pair of nodes.
Ex: ``{'a': {'b': [0.1, 'a'], 'c': [0.3, 'b']} , ...}``.
Note:
The same as calling :func:`single_source_shortest_distances` for every node in `Parallel`.
"""
if self.verbose:
print('Calculating APSD - All Pairs Shortest Distances')
try:
operators = __operators__[kind]
except Exception as e:
raise AttributeError("kind parameter must be either 'metric' or 'ultrametric'")
# Shortest Distances in Parallel
if engine == 'python':
poolresults = Parallel(n_jobs=n_jobs,verbose=verbose)(delayed(_py_single_source_shortest_distances)(node, self.N, self.E, self.neighbours, operators, verbose) for node in self.N)
elif engine == 'cython':
# cython uses its own sum and max functions. So let's just pass their names.
operators = (operators[0].__name__ , operators[1].__name__)
#
poolresults = range(len(self.N))
for node in self.N:
poolresults[node] = _cy_single_source_shortest_distances(node, self.N, self.E, self.neighbours, operators, verbose)
# PoolResults returns a tuple, separate the two variables
shortest_distances, local_paths = map(list, zip(*poolresults))
# Then turn them into dict-of-dicts
self.shortest_distances = dict(zip(self.N, shortest_distances))
self.local_paths = dict(zip(self.N, local_paths))
return self.shortest_distances, self.local_paths
def all_pairs_shortest_paths(self, n_jobs=1, engine='cython', verbose=0, *args, **kwargs):
"""
Computes All Pair Shortest Paths (APSP)
Args:
n_jobs (int, optional): The number of CPUs to use to do the computation. ``-1`` means 'all CPUs'.
engine (string): The implementation to use. Either ``cython`` or ``python``.
verbose (int, optional): The verbosity level: if non zero, progress messages are printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level. If it more than 10, all iterations are reported.
Returns:
shortest_paths (dict): A dict-of-dicts-of-list of the shortest path between all pair of nodes.
Ex: ``{'a':{'c':['a','b','c']}}``.
"""
if self.verbose:
print('Calculating APSP - All Pairs Shortest Paths')
for path in self.local_paths:
if path is None:
raise Exception("Shortest distances and local paths must be calculated first. Run `all_pairs_shortest_distances`.")
if engine == 'python':
poolresults = Parallel(n_jobs=n_jobs,verbose=verbose)(delayed(_py_single_source_complete_paths)(node, self.N, self.local_paths[node]) for node in self.N)
elif engine == 'cython':
#
poolresults = range(len(self.N))
for node in self.N:
poolresults[node] = _cy_single_source_complete_paths(node, self.N, self.local_paths[node])
# PoolResults returns a list, map into a dict of nodes
self.shortest_paths = dict( zip( self.N , poolresults ) )
return self.shortest_paths
def single_source_shortest_distances(self, source, kind='metric', engine='cython'):
"""
Computes Single Source Shortest Distances (SSSD)
Args:
source (int or string): the source node to compute shortest distances to every other node.
kind (string): The metric type. 'metric' or 'ultrametric' are currently accepted.
engine (string): The implementation to use. Either ``cython`` or ``python``.
Returns:
shortest_distances (dict): A dict of distances between the source all other nodes.
Ex: ``{'c':0.1,'b':0.3}``.
shortest_paths (dict): A dict-of-list of the shortest distance path between the source and all other nodes.
Ex: ``{'b': [0.1, 'a'], 'c': [0.3, 'b']}``.
"""
if not isinstance(source, int):
source = self.node_names.index(source)
try:
operators = __operators__[kind]
except Exception as e:
raise AttributeError("kind parameter must be either 'metric' or 'ultrametric'")
# Shortest Distances
if engine == 'python':
shortest_distances, local_paths = _py_single_source_shortest_distances(source, self.N, self.E, self.neighbours, operators)
elif engine == 'cython':
operators = (operators[0].__name__ , operators[1].__name__)
shortest_distances, local_paths = _cy_single_source_shortest_distances(source, self.N, self.E, self.neighbours, operators)
# Save to object
self.shortest_distances[source] = shortest_distances
self.local_paths[source] = local_paths
return shortest_distances, local_paths
def single_source_shortest_paths(self, source, engine='cython', *args, **kwargs):
"""
Computes Single Source Shortest Paths (SSSP)
Args:
source (int or string): the source node to compute the shortest paths to every other node.
engine (string): The implementation to use. Either ``cython`` or ``python``.
Returns:
shortest_paths (dict): A dict-of-list of the shortest distance path between the source and all other nodes. Ex: ``{'c':['a','b','c']}}``
"""
if not isinstance(source, int):
source = self.node_names.index(source)
if self.local_paths[source] is None:
# Calculates the local paths in case it hasn't been calculated.
raise Exception ("Shortest distances and local paths must be calculated first. Run `single_source_shortest_distances` or `all_pairs_shortest_distances`.")
# Shortest Paths
if engine == 'python':
shortest_paths = _py_single_source_complete_paths(source, self.N, self.local_paths[source])
elif engine == 'cython':
shortest_paths = _cy_single_source_complete_paths(source, self.N, self.local_paths[source])
# Save to object
self.shortest_paths[source] = shortest_paths
return shortest_paths
@classmethod
def from_edgelist(self, edgelist, directed=False, *args, **kwargs):
"""
Instanciantes the class from a edgelist dictionary.
Args:
edgelist (dict): Distance graph edgelist adjacency matrix.
Examples:
>>> edgelist_luis = {
('s','b'):.9,
('s','c'):.1,
('b','c'):.8,
('c','d'):.6,
}
>>> dij = Dijkstra.from_edgelist(edgelist_luis,verbose=True)
"""
N = list()
E = dict()
v1i = None; v2i = None; i = 0
node_names = list()
neighbours = dict()
# Create dictionaries to be used to compute single-source-shortest-paths
for (v1,v2),d in edgelist.items():
try:
d = float(d)
except:
raise TypeError('Edge weights must numeric (int or float).')
# Node 1
if v1 not in node_names:
v1i = i
node_names.append(v1)
neighbours[i] = list()
i += 1
else:
v1i = node_names.index(v1)
# Node 2
if v2 not in node_names:
v2i = i
node_names.append(v2)
neighbours[i] = list()
i += 1
else:
v2i = node_names.index(v2)
# Edges
if not directed:
# If indirected graph, include both directions
E[ (v2i,v1i) ] = d
neighbours[v2i].append(v1i)
E[ (v1i,v2i) ] = d
neighbours[v1i].append(v2i)
N = range(len(node_names))
return Dijkstra(N, E, neighbours, node_names, directed, *args, **kwargs)
@classmethod
def from_numpy_matrix(self, matrix, node_names=None, directed=False, *args, **kwargs):
"""
Instanciantes the class from a Numpy adjacency matrix.
Args:
matrix (matrix): Distance graph Numpy adjacency matrix.
Examples:
>>> P = np.array([
[1.,.9,.1,0.],
[.9,1.,.8,0.],
[.1,.8,1.,.6],
[0.,0.,.6,1.],
], dtype=float)
>>> D = prox2dist(P)
>>> dij = Dijkstra.from_numpy_matrix(D)
"""
N = list()
E = dict()
neighbours = dict()
# Assert Square Adjacency Matrix
if matrix.shape[0] != matrix.shape[1]:
raise ValueError('Adjacency Matrix not square')
#matrix = matrix.A
N = list( np.arange(matrix.shape[0]) )
for i, row in enumerate(matrix,start=0):
neighbours[i] = []
for j, value in enumerate(row,start=0):
# the diagonal is (must be) always zero (distance = 0)
if i==j:
continue
# infinite distance doesn't have to be calculated
elif value == np.inf:
continue
else:
E[ (i,j) ] = float(value)
neighbours[i].append(j)
return Dijkstra(N, E, neighbours, node_names, directed, *args, **kwargs)
    @classmethod
    def from_sparse_matrix(self, matrix, node_names=None, directed=False, *args, **kwargs):
        """
        Instanciantes the algorithm from a Scipy sparse adjacency matrix.
        Args:
            matrix (sparse matrix) : Distance graph Scipy sparse adjacency matrix.
        Examples:
            >>> Dsp = csr_matrix(D)
            >>> dij = Dijkstra.from_sparse_matrix(Dsp,verbose=True)
        """
        N = list()
        E = dict()
        neighbours = dict()
        # Assert Square Adjacency Matrix
        if matrix.shape[0] != matrix.shape[1]:
            raise ValueError('Adjacency Matrix not square')
        N = list( np.arange(matrix.shape[0]) )
        # Pre-create an (empty) adjacency list for every node.
        neighbours = {i:[] for i in np.arange(matrix.shape[0])}
        #
        # NOTE(review): .nonzero() only yields stored non-zero entries, so
        # zero-distance edges are dropped here, while from_numpy_matrix keeps
        # them -- confirm whether that asymmetry is intended.
        rows,cols = matrix.nonzero()
        for i,j in zip(rows,cols):
            # the diagonal is (must be) always zero (distance = 0)
            if i==j:
                continue
            # infinite distance doesn't have to be calculated
            elif matrix[i,j] == np.inf:
                continue
            else:
                E[ (i,j) ] = float(matrix[i,j])
                neighbours[i].append(j)
        return Dijkstra(N, E, neighbours, node_names, directed, *args, **kwargs)
def get_shortest_distances(self, source=None, translate=False, format='dict'):
"""
After the computation of APSD, returns the shortest distances.
Args:
source (int/name, optional): Return distances only from a specific source.
translate (bool, optional): Translate node indices into the specified node names.
This translation can ge quite expensive.
format (string, optional): The returning format. Default is ``dict``.
Returns:
M (dict/matrix) : Returns the format specified on ``format`` arg.
"""
if source is not None:
if not isinstance(source, int):
source = self.node_names.index(source)
sd = self.shortest_distances[source]
else:
sd = self.shortest_distances
# Translate indices into node names
if (self.node_names is not None) and (translate == True):
sd = self._translate_indices_to_node_names(sd, self.node_names)
# Format Conversion
if format == 'sparse':
return dict2sparse(sd)
else:
return sd
def get_shortest_paths(self, source=None, translate=False):
"""
After the computation of APSP, returns the shortest distances.
Args:
source (int/name, optional): Return paths only from a specific source.
translate (bool, optional): Translate node indices into the specified node names.
This translation can ge quite expensive.
Returns:
M (dict/matrix) : Returns the format specified on ``format`` arg.
"""
if source is not None:
if not isinstance(source,int):
source = self.node_names.index(source)
sp = self.shortest_paths[source]
else:
sp = self.shortest_paths
# Translate indices into node names
if (self.node_names is not None) and (translate == True):
sp = self._translate_indices_to_node_names(sp, self.node_names)
return sp
def _translate_indices_to_node_names(self, d, names):
"""
Translates a dict-of-dict, from keys of numeric indices to keys of name strings.
Args:
d (dict): a dict of dicts.
names (list): a list of strings with the names to be translated.
Returns:
d (dict): a translated dict of dicts
"""
def __translate(obj, names):
""" Recursive translate indices into node names """
if isinstance(obj, int):
return names[obj]
elif isinstance(obj, list):
return [__translate(x, names) for x in obj]
elif isinstance(obj, dict):
new_obj = {}
for k,v in obj.items():
new_obj[__translate(k, names)] = __translate(v, names)
return new_obj
else:
return obj
new_dict = __translate(d, names)
return new_dict
# Manual smoke test: compares this implementation's SSSP/APSP output against
# networkx's Dijkstra on a small worked example.
if __name__ == '__main__':
    import networkx as nx
    from distanceclosure.utils import prox2dist
    from scipy.sparse import csr_matrix
    # edge list
    #https://www.youtube.com/watch?v=U9Raj6rAqqs
    edgelist = {
        ('s','a'):8.,
        ('s','c'):6.,
        ('s','d'):5.,
        ('a','d'):2.,
        ('a','e'):1.,
        ('b','e'):6.,
        ('c','d'):3.,
        ('c','f'):9.,
        ('d','f'):4.,
        ('e','g'):4.,
        ('f','g'):0.,
    }
    """
    edgelist = {
        ('s','b'):.9,
        ('s','c'):.1,
        ('b','c'):.8,
        ('c','d'):.6,
    }
    """
    # Dense proximity matrix for the alternative constructors below.
    matrix = np.array([
        [1.,.9,.1,0.],
        [.9,1.,.8,0.],
        [.1,.8,1.,.6],
        [0.,0.,.6,1.],
        ], dtype=float)
    matrix = prox2dist(matrix)
    sparse = csr_matrix(matrix)
    source = 's'
    # NX
    G = nx.from_edgelist(edgelist)
    # NOTE(review): networkx >= 2.0 expects set_edge_attributes(G, values, name);
    # this (G, name, values) order is the 1.x signature -- confirm the pinned
    # networkx version.
    nx.set_edge_attributes(G, 'weight', edgelist)
    #G = nx.from_numpy_matrix(matrix)
    nx_lenghts = nx.single_source_dijkstra_path_length(G, source=source, weight='weight')
    nx_paths = nx.single_source_dijkstra_path(G, source=source, weight='weight')
    d = Dijkstra.from_edgelist(edgelist, directed=False, verbose=True)
    #d = Dijkstra.from_numpy_matrix(matrix, directed=False, verbose=True)
    #d = Dijkstra.from_sparse_matrix(sparse, directed=False, verbose=True)
    print( '=== SSSP ===')
    print( '> Source:',source)
    print( '---')
    dc_lenghts, dc_paths = d.single_source_shortest_distances(source=source, kind='metric', engine='python')
    dc_paths = d.single_source_shortest_paths(source=source, engine='python')
    print( '-- NX Results: --')
    print( '> Lenghts:',nx_lenghts)
    print( '> Paths:',nx_paths)
    print( '--- DC Results ---')
    print( '> Lenghts:',d.get_shortest_distances(source=source, translate=True))
    print( '> Paths:',d.get_shortest_paths(source=source, translate=True))
    assert (nx_lenghts == d.get_shortest_distances(source=source, translate=True))
    #assert (nx_paths == dc_paths)
    print( '=== APSP ===' )
    # NOTE(review): in networkx >= 2.0 all_pairs_dijkstra_path returns an
    # iterator, so the dict comparison at the bottom would always be False.
    nx_all_complete_paths = nx.all_pairs_dijkstra_path(G, 'weight')
    dc_all_lenghts, dc_all_local_paths = d.all_pairs_shortest_distances(n_jobs=2, engine='python')
    dc_all_complete_paths = d.all_pairs_shortest_paths(n_jobs=2, engine='python')
    print( '-- NX Results: --')
    print( '> Paths:',nx_all_complete_paths)
    print( '-- DC Results: --')
    print( '> Lenghts;',dc_all_lenghts)
    print( '> Paths:',dc_all_complete_paths)
    print( '===')
    print( nx_all_complete_paths['s'])
    #print( dc_all_complete_paths[0])
    print( d.get_shortest_paths(translate=True)['s'])
    assert (nx_all_complete_paths == d.get_shortest_paths(translate=True))
| true |
7e8d11a5d1107a7ac16885ec7979663be8b3df1c | Python | shenbakeshk/parking-lot | /parking_lot/parking_lot_builder.py | UTF-8 | 2,158 | 2.953125 | 3 | [
"MIT"
] | permissive | from abc import ABC, abstractmethod
from parking_lot.parking_lot import FourWheelerParkingLot, ParkingLot
from parking_lot.parking_spot import FourWheelerSpot
class ParkingLotBuilder(ABC):
    """Abstract builder interface for assembling a parking lot step by step."""
    @abstractmethod
    def add_four_wheeler_parking_spots(self):
        # Create the lot's four-wheeler spots.
        # NOTE(review): the concrete builder adds a `max_four_wheeler_spots`
        # argument, so this abstract signature does not match -- confirm the
        # intended contract.
        pass
    @abstractmethod
    def init_parking_lot_data_store(self):
        # Prepare the lot's internal lookup structures.
        pass
    @abstractmethod
    def get_parking_lot(self):
        # Return the assembled parking-lot instance.
        pass
class FourWheelerParkingLotBuilder(ParkingLotBuilder):
    """Concrete builder that assembles a FourWheelerParkingLot."""
    def __init__(self):
        self._parking_lot = FourWheelerParkingLot()
    def add_four_wheeler_parking_spots(
        self, max_four_wheeler_spots: int
    ) -> None:
        """
        Add four wheeler spots to parking_lot.
        """
        # Idiomatic construction: build the spot list in one comprehension
        # instead of pre-allocating [None]*n and index-assigning.
        four_wheeler_spots = [FourWheelerSpot() for _ in range(max_four_wheeler_spots)]
        # initialize four wheeler parking-spots config
        self._parking_lot._four_wheeler_spots = four_wheeler_spots
        self._parking_lot._max_four_wheeler_spots = max_four_wheeler_spots
        self._parking_lot._curr_four_wheelers_parked = 0
        self._parking_lot._next_four_wheeler_spot = 0
    def init_parking_lot_data_store(self) -> None:
        """
        Initialize local data store.
        """
        self._parking_lot.initialize_color_vehicles_map()
        self._parking_lot.initialize_parked_vehicles()
    def get_parking_lot(self):
        """Return the lot built so far."""
        return self._parking_lot
class ParkingLotDirector:
    """Director that drives a ParkingLotBuilder through the build steps."""
    def __init__(self, parking_lot_builder: ParkingLotBuilder):
        self.parking_lot_builder = parking_lot_builder
        self.parking_lot = None
    def build_parking_lot(self, max_four_wheeler_spots: int) -> ParkingLot:
        """
        Build parking-lot and return it.
        """
        self.parking_lot_builder.add_four_wheeler_parking_spots(max_four_wheeler_spots)
        self.parking_lot_builder.init_parking_lot_data_store()
        self.parking_lot = self.parking_lot_builder.get_parking_lot()
        # BUGFIX: the annotation promised a ParkingLot but nothing was
        # returned; callers can still ignore the return value.
        return self.parking_lot
    def get_parking_lot(self) -> ParkingLot:
        """
        Return built parking-lot.
        """
        return self.parking_lot
| true |
3c239dc4dc321b7a4ba96b1b84a1bb890a6ea523 | Python | aleksanb/MastersThesis | /a.py | UTF-8 | 1,277 | 3.328125 | 3 | [] | no_license | optimumsP3 = [(5, 10), (10, 20), (15, 30), (15, 40), (30, 50), (30, 60), (40, 70), (50, 80), (50, 90), (55, 100)]
optimumsP5 = [(15, 30), (20, 40), (20, 50), (30, 60), (30, 70), (40, 80), (45, 90), (50, 100), (55, 110), (60, 120), (70, 130), (70, 140)]
optimumsD3 = [(2, 5), (5, 10), (5, 15), (10, 20), (10, 25), (15, 30)]
optimumsD5 = [(5, 10), (5, 15), (10, 20), (10, 25), (15, 30), (15, 35), (15, 40), (20, 45), (25, 50), (25, 55), (25, 60), (30, 65)]
def average(optimums):
    """Return the mean a/b ratio over a list of (a, b) pairs.

    BUGFIX: the rest of this file is Python 2 (print statements, tuple-param
    lambdas), where the original ``a/b`` on int pairs performed *integer*
    division and truncated almost every ratio to 0.  ``float(a) / b`` gives
    the intended true-division result on both Python 2 and 3.
    """
    percentages = [float(a) / b for a, b in optimums]
    return sum(percentages) / len(percentages)
#print("OptimumP3", average(optimumsP3))
#print("OptimumP5", average(optimumsP5))
#print("OptimumD3", average(optimumsD3))
#print("OptimumD5", average(optimumsD5))
#for (a,b) in optimumsP5:
# print "% {} of {}".format(a,b)
#
#for a in map(lambda (x,y): (x/5, y), optimumsP5):
# print a
# Emit one LaTeX table row per count value (5..140 step 5): the first matching
# (value, count) entry from each series, or "-" if the series has no entry.
# Rewritten with list comprehensions instead of py2-only
# `filter(lambda (x,y): ...)` so it runs under both Python 2 and 3
# (under py3 `filter(...)` is a lazy object: always truthy, not indexable).
for i in range(5, 145, 5):
    p3 = [t for t in optimumsP3 if t[1] == i]
    p5 = [t for t in optimumsP5 if t[1] == i]
    d3 = [t for t in optimumsD3 if t[1] == i]
    d5 = [t for t in optimumsD5 if t[1] == i]
    nums = [str(i)]
    for arr in [p3, p5, d3, d5]:
        if arr:
            nums.append(str(arr[0][0]))
        else:
            nums.append("-")
    print(" & ".join(nums) + "\\\\")
4ec504497373c60c2a7f1a82a175460c4c56926b | Python | FWSquatch/tk-mosquitto | /mq1.py | UTF-8 | 641 | 2.796875 | 3 | [] | no_license | #!/usr/bin/python3
# Simple MQTT client that subscribes to a topic and prints messages
import paho.mqtt.client as mqtt
myBroker = 'XXX.XXX.XXX.XXX' # Your MQTT Server address goes here
def on_message(myClient, obj, msg):
    """paho-mqtt message callback: print the topic and the UTF-8 payload.

    Parameters follow the paho callback signature (client, userdata, message);
    only `msg` is used here.
    """
    print('Message in topic:',msg.topic)
    print('Message:',msg.payload.decode('UTF-8'))
# Wire up the client: connect to the broker, subscribe, and block forever
# dispatching incoming messages to on_message.
myClient = mqtt.Client()
# Connect to a broker
print('Connecting to:', myBroker)
myClient.connect(myBroker)
# Subscribe to kitchen lights
myClient.subscribe('house/kitchen/lights')
#myClient.subscribe('+/kitchen/+') # Uncomment to subscribe to all kitchen topics
myClient.on_message = on_message
# Blocking network loop; never returns under normal operation.
myClient.loop_forever()
| true |
82550cb7970b1107b09bbd650ff339f9d84c28af | Python | Aryia-Behroziuan/optmization | /lambda.py | UTF-8 | 134 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon May 6 06:02:48 2019
@author: Faradars-pc2
"""
def x(a):
    """Return *a* increased by 10.

    PEP 8 (E731): a def is preferred over assigning a lambda to a name --
    it gets a proper __name__ and clearer tracebacks.
    """
    return a + 10


print(x(50))
1234acf6de539d852af172af681c3d9b4f57ada7 | Python | stoltzmaniac/LegLobber | /bill_grabber.py | UTF-8 | 1,855 | 2.859375 | 3 | [
"MIT"
] | permissive | import json
from urllib.parse import urlencode
from urllib.request import urlopen
import pandas as pd
# using the legiscan api: https://legiscan.com/legiscan (free tier)
# Load the LegiScan API key once at import time.  The `with` block closes
# the file handle; the original `json.load(open(...))` leaked it.
with open('api_credentials.json') as _creds_file:
    api_creds = json.load(_creds_file)
legiscan_token = api_creds['LEGISCAN_TOKEN']
base_url = "https://api.legiscan.com/?"
def get_bills_data(list_of_tuples):
    """Query the LegiScan API and return the decoded JSON payload.

    Args:
        list_of_tuples: (key, value) pairs appended as query parameters.
            Example: https://api.com/?happy=new_year&merry=christmas
            would need [('happy', 'new_year'), ('merry', 'christmas')].
    Returns:
        dict: the parsed JSON response.
    """
    parameter_dict = {"key": legiscan_token}
    # dict.update accepts an iterable of key/value pairs directly,
    # replacing the manual element-by-element loop.
    parameter_dict.update(list_of_tuples)
    parameters = urlencode(parameter_dict)
    request = "{}{}".format(base_url, parameters)
    # Context manager closes the HTTP response even on error; the
    # original never closed it.
    with urlopen(request) as response:
        raw_data = response.read()
    return json.loads(raw_data.decode('utf-8'))
def get_session_list(state):
    """Return the legislative session list for *state* as a DataFrame.

    On a non-OK API status an error string is returned instead (kept for
    consistency with the sibling helpers).
    """
    payload = get_bills_data([('op', 'getSessionList'), ('state', state)])
    if payload['status'] != 'OK':
        return "Error with API call"
    return pd.DataFrame(payload['sessions'])
def get_master_list(state, session_id=None):
    """Return the bill master list for *state* as a DataFrame.

    Args:
        state: two-letter state code.
        session_id: optional LegiScan session id; the default (None) gives
            the most recent year.
    Returns:
        pandas.DataFrame on success, or an error string on a non-OK status
        (kept for consistency with the sibling helpers).
    """
    parameter_list = [('op','getMasterList'), ('state', state)]
    if session_id:
        parameter_list.append(('id', session_id))
    bills = get_bills_data(parameter_list)
    if bills['status'] != 'OK':
        return "Error with API call"
    masterlist = bills['masterlist']
    # The 'session' entry is metadata, not a bill row.
    del masterlist['session']
    # list(values()) replaces the redundant `[bill for bill in ...]` copy.
    return pd.DataFrame(list(masterlist.values()))
def get_bill(bill_id):
    """Return the full record for one bill, or an error string.

    On success the API's 'bill' payload (a dict) is returned directly.
    """
    payload = get_bills_data([('op', 'getBill'), ('id', bill_id)])
    if payload['status'] != 'OK':
        return "Error with API call"
    return payload['bill']
| true |
9d9545044966e3692b6d73cedf68611a63ba1967 | Python | mcxu/code-sandbox | /PythonSandbox/src/leetcode/lc443_string_compression.py | UTF-8 | 2,771 | 3.421875 | 3 | [] | no_license | # https://leetcode.com/problems/string-compression/
class Solution:
    """First (draft) attempt at LeetCode 443 string compression.

    Uses a lo/hi window over each run of equal characters and rewrites the
    run in place.  Superseded by Solution2 below; kept with its debug
    prints intact.
    """
    def compress(self, chars) -> int:
        """Run-length compress `chars` and return the compressed length.

        NOTE(review): `chars = chars[:lo]` below rebinds the *local* name,
        so the caller's list is not actually left in compressed form --
        only the returned length is meaningful.  Also note the leftover
        debug `print("chars final: ...")`.
        """
        charsOrigLen = len(chars)
        count = 1
        lo = 0      # start of the current run's window
        hi = 0      # end of the current run's window
        i = 0
        while i < len(chars)-1:
            #print("--- i=",i)
            char = chars[i]
            nextchar = chars[i+1]
            if char == nextchar:
                # Same run continues: widen the window.
                count += 1
                hi += 1
                #print("hi updated: ", hi)
            else:
                # create comperessed substring
                compressedSubstr = chars[hi] + str(count)
                #print("compressedSubstr: ", compressedSubstr)
                #print("lo: {}, hi: {}".format(lo, hi))
                # Overwrite the run window with the compressed form.
                j = lo
                while j <= hi:
                    chars[j] = compressedSubstr[j-lo]
                    #print("A j: ",j)
                    j += 1
                i = j-1
                #print("* i=", i)
                # reset count
                count = 1
                lo = j
                hi = lo
                #print("lo updated: {} and hi updated: {}".format(lo, hi))
            #print("count: ", count)
            i += 1
        # print("chars: ", chars)
        # print("count after while: ", count)
        # print("i after while= ",i)
        # print("lo: {}, hi: {}".format(lo, hi))
        # Flush the final run; note this rebinds `chars` locally (see above).
        compressedSubstr = chars[hi] + str(count)
        chars = chars[:lo]
        for j in range(len(compressedSubstr)):
            chars.append(compressedSubstr[j])
        print("chars final: ", chars)
        # Per the problem, never report a length longer than the input.
        if len(chars) < charsOrigLen:
            return len(chars)
        return charsOrigLen
    def test1(self):
        """Ad-hoc manual check; prints the result instead of asserting."""
        arr = ["a","a","b","b","c","c","c"]
        #arr= ["a","a","b","c","c","c"]
        #arr = ["a","b","c"]
        arr = ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
        res=self.compress(arr)
        print("test1 res: ", res)
class Solution2:
    """Clean single-pass solution to LeetCode 443 (string compression)."""
    def compress(self, chars: [str]) -> int:
        """Run-length compress `chars` in place and return the new length.

        Example: ["a","a","b"] becomes ["a","2","b"] -> 3.  Runs of length 1
        are written without a digit, per the problem statement; counts >= 10
        are written digit by digit.
        """
        # Guard: the original indexed chars[0] and crashed on empty input.
        if not chars:
            return 0
        count = 0
        out = []
        currChar = chars[0]
        for ch in chars:
            if ch == currChar:
                count += 1
            else:
                # Flush the finished run: the character, then its count digits.
                out.append(currChar)
                if count > 1:
                    out.extend(str(count))
                currChar = ch
                count = 1
        # Flush the last run.
        out.append(currChar)
        if count > 1:
            out.extend(str(count))
        # Rewrite the caller's list in place, as the problem requires.
        chars[:] = out
        return len(chars)
# Ad-hoc driver: runs the draft Solution's manual check on import/execution.
s = Solution()
s.test1()
| true |
5bad1def000baf31bcec4099c2bd915e1758a67b | Python | misha-sauchuk/python130218-HW1 | /homework_11/ex11_02.py | UTF-8 | 7,789 | 3.9375 | 4 | [] | no_license | """ะ ะตะฐะปะธะทะพะฒะฐัั ะฟัะพะณัะฐะผะผั ะฟะพะดััะตัะฐ ะฟะปะพัะฐะดะธ, ะฟะตัะธะผะตััะฐ, ะพะฑัะตะผะฐ ะณะตะพะผะตััะธัะตัะบะธั
ัะธะณัั
(ััะตัะณะพะปัะฝะธะบ, ะฟััะผะพัะณะพะปัะฝะธะบ, ะบะฒะฐะดัะฐั, ััะฐะฟะตัะธั, ะพะบััะถะฝะพััั).
ะัะปะธ ะพะดะฝะฐ ะธะท ัะธะณัั ะฝะต ะฟะพะดะดะตัะถะธะฒะฐะตั ะฒััะธัะปะตะฝะธะต ะพะดะฝะพะณะพ ะธะท ัะฒะพะนััะฒ, ะฒ ะฟัะพะณัะฐะผะผะต ะดะพะปะถะฝะพ ะฑััั ะฒัะทะฒะฐะฝะพ ะธัะบะปััะตะฝะธะต
ั ัะตะปะพะฒะตะบะพ-ัะธัะฐะฑะตะปัะฝัะผ ัะพะพะฑัะตะฝะธะตะผ ะธ ะบะพััะตะบัะฝะพ ะพะฑัะฐะฑะพัะฐะฝะพ."""
# import modules for use math methods and decorators
import math
import functools
# create decorator to catch exception in some functions
# create decorator to catch unsupported-property exceptions in figure methods
def try_deco(func):
    """Decorator: run *func*, reporting NotImplementedError/TypeError as a
    friendly message instead of crashing.

    Returns the wrapped call's result, or None when the figure does not
    support the requested property.  BUGFIX: the original wrapper returned
    the raw function object, silently discarding the call's result; the
    message typo ("does't") is fixed as well.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (NotImplementedError, TypeError) as er:
            print('This figure doesn\'t have such property\n ERROR: ', er)
            return None
    return inner
# create abstract class
class Figure:
def square(self): # define the function that should be complemented in subclass
raise NotImplementedError # raise the Exception if the function was not complemented in subclass
def perimeter(self):
raise NotImplementedError
def volume(self):
raise NotImplementedError
@try_deco
def print_square(self):
print('The square of {fig} is {result:.2f}'.format(fig=self.name, result=self.square()))
@try_deco
def print_perimeter(self):
print('The perimeter of {fig} is {result:.2f}'.format(fig=self.name, result=self.perimeter()))
@try_deco
def print_volume(self):
print('The volume of {fig} is {result:.2f}'.format(fig=self.name, result=self.volume()))
# create subclass of class Figure
class Circle(Figure):
    """Circle figure; the user is prompted for a *diameter*."""
    def __init__(self, name):
        self.name = name
        self.diameter = float(input('Please, enter the diameter: '))
    def square(self):
        # NOTE(review): the area of a circle with diameter d is pi*d**2/4;
        # this computes pi*d**2 (4x too large) -- correct only if the entered
        # value is actually a radius.  The self-test (test_circle) at the
        # bottom of this file asserts this same pi*d**2 value, so both would
        # need to change together.
        square = math.pi * self.diameter ** 2
        print()  # stray blank-line print, presumably cosmetic
        return square
    def perimeter(self):
        # Circumference: pi * d (consistent with the diameter prompt).
        perimeter = math.pi * self.diameter
        return perimeter
# create subclass of class Figure
class SquareFig(Figure):
    """Square: a single side determines both area and perimeter."""
    def __init__(self, name):
        self.name = name
        self.side_a = float(input('Please, enter the side "a": '))
    def square(self):
        """Area of the square: a**2."""
        return self.side_a ** 2
    def perimeter(self):
        """Perimeter of the square: 4*a."""
        return 4 * self.side_a
# create subclass of class SquareFig
class Rectangle(SquareFig):
    """Rectangle: reuses SquareFig's side "a" prompt and adds side "b"."""
    def __init__(self, name):
        super().__init__(name)
        self.name = name
        self.side_b = float(input('Please, enter the side "b": '))
    def square(self):
        """Area: a * b."""
        return self.side_a * self.side_b
    def perimeter(self):
        """Perimeter: 2 * (a + b)."""
        return 2 * (self.side_a + self.side_b)
# create subclass of class Rectangle
class Trapeze(Rectangle):
    """Trapezoid: bases a/b from Rectangle's prompts, legs c/d and height h."""
    def __init__(self, name):
        super().__init__(name)
        self.name = name
        self.side_c = float(input('Please, enter the side "c": '))
        self.side_d = float(input('Please, enter the side "d": '))
        self.height = float(input('Please, enter the height "h": '))
        self.check_trapeze()
    def check_trapeze(self): # check if there is a trapeze with input values
        # Quadrilateral inequality: the longest side must be shorter than
        # the sum of the other three.  On failure the user is re-prompted by
        # recursively re-running __init__ (unbounded recursion on repeated
        # bad input).
        trap_sides = [self.side_a, self.side_b, self.side_c, self.side_d]
        max_side = max(trap_sides)
        trap_sides.pop(trap_sides.index(max_side))
        if max_side >= sum(trap_sides):
            print('There is no figure with input sizes. Please, try again.')
            self.__init__(self.name)
    def square(self):
        # Area: mean of the two bases times the height.
        square = (self.side_a + self.side_b) / 2 * self.height
        return square
    def perimeter(self):
        perimeter = self.side_a + self.side_b + self.side_c + self.side_d
        return perimeter
# create subclass of class Figure
class Triangle(Figure):
    """Triangle given by three sides; area via Heron's formula."""
    def __init__(self, name):
        self.name = name
        self.side_a = float(input('Please, enter the side "a": '))
        self.side_b = float(input('Please, enter the side "b": '))
        self.side_c = float(input('Please, enter the side "c": '))
        self.check_triangle()
    def check_triangle(self): # check if there is a triangle with input values
        """Re-prompt (recursively) until the sides satisfy the triangle inequality."""
        sides = sorted([self.side_a, self.side_b, self.side_c])
        # Degenerate/invalid when the longest side reaches the sum of the others.
        if sides[2] >= sides[0] + sides[1]:
            print('There is no figure with input sizes. Please, try again.')
            self.__init__(self.name)
    def square(self):
        """Area by Heron's formula using the semi-perimeter."""
        half = self.perimeter() / 2
        return math.sqrt(half * (half - self.side_a) * (half - self.side_b) * (half - self.side_c))
    def perimeter(self):
        """Perimeter: a + b + c."""
        return self.side_a + self.side_b + self.side_c
# make request to user to input data or to exit
# Interactive menu: pick a figure (prompting for its dimensions), then pick
# which property to compute; '6'/'EXIT' leaves the loop.
while True:
    figure = input('Please, choose figure:\n' # request to input type of figure or to exit
                   '1. Circle\n'
                   '2. Square\n'
                   '3. Rectangle\n'
                   '4. Trapeze\n'
                   '5. Triangle\n'
                   '6. Exit\n')
    if figure == '1' or figure.upper() == 'CIRCLE':
        figure = Circle('Circle')
    elif figure == '2' or figure.upper() == 'SQUARE':
        figure = SquareFig('Square')
    elif figure == '3' or figure.upper() == 'RECTANGLE':
        figure = Rectangle('Rectangle')
    elif figure == '4' or figure.upper() == 'TRAPEZE':
        figure = Trapeze('Trapeze')
    elif figure == '5' or figure.upper() == 'TRIANGLE':
        figure = Triangle('Triangle')
    elif figure == '6' or figure.upper() == 'EXIT':
        break
    else:
        # Unknown choice: re-prompt without asking for an operation.
        print('Invalid input')
        continue
    to_do = input('Please, choose what would you like to calculate:\n' # request about operation to do with figure
                  '1. Square\n'
                  '2. Perimeter\n'
                  '3. Volume\n'
                  '4. Square & Perimeter\n')
    if to_do == '1' or to_do.upper() == 'SQUARE':
        figure.print_square()
    elif to_do == '2' or to_do.upper() == 'PERIMETER':
        figure.print_perimeter()
    elif to_do == '3' or to_do.upper() == 'VOLUME':
        # No figure implements volume, so try_deco prints the error message.
        figure.print_volume()
    elif to_do == '4' or to_do.upper() == 'SQUARE & PERIMETER':
        figure.print_square()
        figure.print_perimeter()
if __name__ == '__main__':
    # Lightweight self-checks.  NOTE: each helper instantiates a figure,
    # which prompts on stdin via input() *before* the attributes are
    # overwritten below, so these only run interactively.
    def test_circle(name):
        fig = Circle(name)
        fig.diameter = 10
        # NOTE(review): 314.16 matches Circle.square()'s pi*d**2 formula;
        # the true area for diameter 10 would be 78.54.
        if '{:.2f}'.format(fig.square()) == '314.16':
            print('--test_circle Ok--')
    def test_square(name):
        fig = SquareFig(name)
        fig.side_a = 10
        if '{:.2f}'.format(fig.square()) == '100.00':
            print('--test_square Ok--')
    def test_rectangle(name):
        fig = Rectangle(name)
        fig.side_a = 10
        fig.side_b = 13
        if '{:.2f}'.format(fig.perimeter()) == '46.00':
            print('--test_rectangle Ok--')
    def test_trapeze(name):
        fig = Trapeze(name)
        fig.side_a = 4
        fig.side_b = 6
        fig.side_c = 4
        fig.side_d = 3
        fig.height = 2.828
        if '{:.2f}'.format(fig.square()) == '14.14':
            # BUGFIX: this previously printed '--test_rectangle Ok--'
            # (copy/paste from test_rectangle).
            print('--test_trapeze Ok--')
    def test_triangle(name):
        fig = Triangle(name)
        fig.side_a = 3
        fig.side_b = 4
        fig.side_c = 5
        if '{:.2f}'.format(fig.square()) == '6.00':
            print('--test_triangle Ok--')
    test_circle('circle_test')
    test_triangle('triangle_test')
    test_square('test_square')
    test_rectangle('test_rectangle')
    test_trapeze('test_trapeze')
| true |
80e85f71529219b1dcf3cc01f69f8cb64898e96a | Python | zointblackbriar/QuestionAnswering | /AlgorithmQuestionAnswering/ParseTree/matcher.py | UTF-8 | 7,787 | 3.109375 | 3 | [
"MIT"
] | permissive | #Reference: https://github.com/ayoungprogrammer/Lango
from nltk import Tree
import logging
logger = logging.getLogger(__name__)
class MatcherContext():
    """Template matcher for NLTK parse trees (after ayoungprogrammer/Lango).

    A template like ``( S ( NP:np ) ( VP ) )`` is tokenized into nested
    lists and matched structurally against a Tree; named captures
    (``label:name`` / ``label:name-type``) are collected into a context
    dict that can be returned or passed to a callback.
    """
    def __init__(self):
        # Stateless matcher; nothing to initialize.
        pass
    def match_rules(self, tree, rules, functionCalls=None, allMatchedContext=False):
        """Matches a Tree Structure with the given query rules."""
        """tree (Tree): Parsed tree structure """
        """rules (dict): A dictionary of query rules"""
        """functionCalls (function): Function to call with context {set to None if you want to return context"""
        """allMatchedContext(Bool): IF True, returns all matched contexts, else returns first matched context"""
        if allMatchedContext:
            logger.info("allMatchedContext")
            context = self.match_rules_context_multi(tree, rules)
        else:
            logger.info("allMatchedContext is None")
            context = self.match_rules_context(tree, rules)
        if not context:
            return None
        if functionCalls:
            # Pass only the context keys that match the callback's
            # positional variable names (introspected via __code__).
            args = functionCalls.__code__.co_varnames
            if allMatchedContext:
                res = []
                for c in context:
                    action_context = {}
                    for arg in args:
                        if arg in c:
                            action_context[arg] = c[arg]
                    res.append(functionCalls(**action_context))
                return res
            else:
                action_context = {}
                for arg in args:
                    if arg in context:
                        action_context[arg] = context[arg]
                return functionCalls(**action_context)
        else:
            return context
    #send some rules and get back the results if there is a match
    def match_rules_context(self, tree, rules, parent_context={}):
        """Return the first context matching *rules* against *tree*, or None.

        NOTE(review): the mutable default ``parent_context={}`` is only read
        (always ``.copy()``-ed), so it is benign here, but a ``None``
        sentinel would be the safer idiom.
        """
        for template, match_rules in rules.items():
            context = parent_context.copy()
            if self.match_template(tree, template, context):
                # Recurse into each captured subtree with its child rules;
                # all children must match for this rule to succeed.
                for key, child_rules in match_rules.items():
                    child_context = self.match_rules_context(context[key], child_rules, context)
                    if child_context:
                        for k, v in child_context.items():
                            context[k] = v
                    else:
                        return None
                return context
        return None
    def match_rules_context_multi(self, tree, rules, parent_context={}):
        """Recursively matches a Tree structure with rules and returns context"""
        """parent_context (dict) : Context of parent call"""
        # NOTE(review): same benign mutable-default pattern as above.
        all_contexts = []
        for template, match_rules in rules.items():
            context = parent_context.copy()
            if self.match_template(tree, template, context):
                parsed_contexts = []
                if not match_rules:
                    # Leaf rule: the template match alone is a result.
                    all_contexts += [context]
                else:
                    # Combine the child matches via their cross product.
                    for key, child_rules in match_rules.items():
                        parsed_contexts.append(self.match_rules_context_multi(context[key], child_rules, context))
                    all_contexts += self.cross_context(parsed_contexts)
        return all_contexts
    def cross_context(self, context):
        """Cross product of all contexts"""
        if not context:
            return[]
        # Start from one empty dict and merge in every combination of the
        # per-child context lists.
        product = [{}]
        for contexts in context:
            temp_product = []
            for item in contexts:
                for iteminnerloop in product:
                    copyItem = item.copy()
                    copyItem.update(iteminnerloop)
                    temp_product.append(copyItem)
            product = temp_product
        return product
    def match_template(self, tree, template, args=None):
        """Match string matches Tree Structure or not"""
        """tree (Tree): Parsed Tree structure of a sentence"""
        """template (str): String template to match. Example: (S (NP) ) """
        tokens = self.get_tokens(template.split())
        cur_args = {}
        if self.match_tokens(tree, tokens, cur_args):
            # Copy captures into the caller-supplied dict only on success.
            if args is not None:
                for k ,v in cur_args.items():
                    args[k] = v
            logger.debug('MATCHED : {0}'.format(template))
            return True
        else:
            return False
    def match_tokens(self, tree, tokens, args):
        """Recursively match a nested token list against *tree*, filling *args*."""
        """tree : Parsed tree structure"""
        """tokens: Stack of tokens"""
        # Capture-type suffixes: r/R = raw text (lower/original case),
        # o/O = "object" text with determiners and possessives stripped.
        arg_type_to_func = {
            'r': self.get_raw_lower,
            'R': self.get_raw,
            'o': self.get_object_lower,
            'O': self.get_object,
        }
        if len(tokens) == 0:
            return True
        if not isinstance(tree, Tree):
            return False
        root_token = tokens[0]
        # Equality
        if root_token.find('=') >= 0:
            # label=word1|word2 : the subtree's raw text must equal one option.
            eq_tokens = root_token.split('=')[1].lower().split('|')
            root_token = root_token.split('=')[0]
            word = self.get_raw_lower(tree)
            if word not in eq_tokens:
                return False
        # Get arg
        if root_token.find(':') >= 0:
            # label:name captures the subtree; label:name-type captures a
            # string extracted per arg_type_to_func.
            arg_tokens = root_token.split(':')[1].split('-')
            if len(arg_tokens) == 1:
                arg_name = arg_tokens[0]
                args[arg_name] = tree
            else:
                arg_name = arg_tokens[0]
                arg_type = arg_tokens[1]
                args[arg_name] = arg_type_to_func[arg_type](tree)
            root_token = root_token.split(':')[0]
        # Does not match wild card and label does not match
        if root_token != '.' and tree.label() not in root_token.split('/'):
            return False
        # Check end symbol
        if tokens[-1] == '$':
            # '$' demands an exact child count instead of a prefix match.
            if len(tree) != len(tokens[:-1]) - 1:
                return False
            else:
                tokens = tokens[:-1]
        # Check # of tokens
        if len(tree) < len(tokens) - 1:
            return False
        for i in range(len(tokens) - 1):
            if not self.match_tokens(tree[i], tokens[i + 1], args):
                return False
        return True
    def get_object_lower(self, tree):
        # Lower-cased variant of get_object.
        return self.get_object(tree).lower()
    def get_object(self, tree):
        """Get the object in the tree object"""
        """tree : parsed tree structure"""
        if isinstance(tree, Tree):
            #POS Tagger condition
            if tree.label() == 'DT' or tree.label() =='POS':
                # Skip determiners and possessive markers entirely.
                return ''
            words = []
            for child in tree:
                words.append(self.get_object(child))
            # Join non-empty pieces without separators.
            return ''.join([_f for _f in words if _f])
        else:
            return tree
    def get_raw(self, tree):
        # Flatten the subtree's leaves into a space-separated string.
        if isinstance(tree, Tree):
            words = []
            for child in tree:
                words.append(self.get_raw(child))
            return ' '.join(words)
        else:
            return tree
    def get_raw_lower(self, tree):
        # Lower-cased variant of get_raw.
        return self.get_raw(tree).lower()
    def get_tokens(self, tokens):
        """Turn a flat, whitespace-split template into a nested token list.

        The surrounding '(' ')' pair is stripped; each balanced inner
        parenthesis group becomes a nested list via recursion.
        """
        tokens = tokens[1:-1]
        ret = []
        start = 0
        stack = 0
        for i in range(len(tokens)):
            if tokens[i] == '(':
                if stack == 0:
                    start = i
                stack += 1
            elif tokens[i] == ')':
                stack -= 1
                if stack < 0:
                    raise Exception('Bracket mismatch: ' + str(tokens))
                if stack == 0:
                    ret.append(self.get_tokens(tokens[start:i + 1]))
            else:
                if stack == 0:
                    ret.append(tokens[i])
        if stack != 0:
            raise Exception('Bracket mismatch: ' + str(tokens))
        return ret
| true |
3a330602dba98d746300ab16b66b5db00842483e | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_74/1265.py | UTF-8 | 2,815 | 2.671875 | 3 | [] | no_license | import math
import sys
class node():
    """Holds the pending actions for the two robots (O and B).

    (Currently unused by the solution below.)
    """
    def __init__(self):
        # BUGFIX: the original assigned plain locals (`b_action = None`),
        # which never created instance attributes.
        self.b_action = None
        self.o_action = None
def main():
    """Read Code Jam input from stdin (Python 2) and print one answer per case.

    Each input line (after the count on the first line) lists alternating
    robot/button tokens; the answer is the number of time steps the longer
    robot schedule takes, i.e. len(o_final).
    """
    case = 1
    passed_first = False
    for l in sys.stdin:
        # Skip the first line (the number of test cases).
        if passed_first == False:
            passed_first = True
            continue
        l = l.strip()
        l2 = l.split(' ')
        # Drop the leading count token and rebuild "ROBOT POS, ROBOT POS, ..."
        line = ""
        for i in range(1, len(l2), 2):
            line += ' '.join(l2[i: i+2]) + ", "
        line = line[0:-2]
        string = line.strip()
        buttons = string.split(',')
        # Parse into (robot, button_position) tuples.
        buttons = [(b.strip().split(' ')[0], int(b.strip().split(' ')[1])) for b in buttons]
        all_actions = expandActions(buttons)
        # Split the expanded actions per robot.
        o_arr = [b for b in all_actions if b[0] == 'O']
        b_arr = [b for b in all_actions if b[0] == 'B']
        o, b = processActions(buttons, o_arr, b_arr)
        # Both schedules have equal length; report it (Python 2 print).
        print "Case #" + str(case) + ":", len(o)
        case += 1
def processActions(buttons, o_acts, b_acts):
    """Simulate the two robots in lock-step, one time unit per iteration.

    buttons: the ordered list of (robot, position) button presses.
    o_acts / b_acts: each robot's expanded MOVE/PUSH action queues.
    Returns a (o_final, b_final) tuple of per-tick action lists; buttons
    must be pushed in global order, so a robot whose PUSH is not next in
    line STAYs instead.
    """
    button_index = 0
    # NOTE(review): p and m are unused shorthands.
    p = "PUSH"
    m = "MOVE"
    o_final = []
    b_final = []
    o_len = len(o_acts)
    b_len = len(b_acts)
    o = 0
    b = 0
    while o < o_len or b < b_len:
        pushed = 0
        if o >= o_len:
            # O's queue is exhausted: pad with STAY at its last position.
            if len(o_acts) == 0:
                o_final.append(("STAY", 1))
            else:
                o_final.append(("STAY", o_acts[-1][-1]))
        else:
            #Moving
            if o_acts[o][1] == "MOVE":
                o_final.append(("MOVE", o_acts[o][2]))
                o += 1
            #Pushing buttons or staying
            elif buttons[button_index][0] == "B" and o_acts[o][1] == "PUSH":
                # Not O's turn in the global button order: wait in place.
                o_final.append(("STAY", o_acts[o][2]))
            elif buttons[button_index][0] == "O" and o_acts[o][1] == "PUSH":
                o_final.append(("PUSH", o_acts[o][2]))
                o += 1
                pushed = 1
        if b >= b_len:
            # B's queue is exhausted: pad with STAY at its last position.
            if len(b_acts) == 0:
                b_final.append(("STAY", 1))
            else:
                b_final.append(("STAY", b_acts[-1][-1]))
        else:
            #Moving
            if b_acts[b][1] == "MOVE":
                b_final.append(("MOVE", b_acts[b][2]))
                b += 1
            #Pushing buttons or staying
            elif buttons[button_index][0] == "O" and b_acts[b][1] == "PUSH":
                b_final.append(("STAY", b_acts[b][2]))
            elif buttons[button_index][0] == "B" and b_acts[b][1] == "PUSH":
                b_final.append(("PUSH", b_acts[b][2]))
                b += 1
                pushed = 1
        # Advance the global button order if either robot pushed this tick.
        button_index += pushed
    return(o_final, b_final)
def expandActions(buttons):
    """Expand a button sequence into per-robot atomic actions.

    Each entry of *buttons* is ``(robot, position)`` with robot 'O' or 'B'.
    For every target this emits one ``(robot, "MOVE", pos)`` tuple per step
    from the robot's current position toward the target (positions are the
    squares actually visited), followed by ``(robot, "PUSH", target)``.
    Both robots start at position 1.

    Bug fix: when moving toward a *lower* position the original emitted the
    wrong intermediate positions (``last + d*i + 1`` instead of
    ``last + d*(i + 1)``) -- e.g. moving 3 -> 1 recorded positions 4, 3
    instead of 2, 1.  The number of moves (and hence the puzzle answer,
    which only counts actions) was unaffected.
    """
    last = {'O': 1, 'B': 1}
    expanded = []
    for robot, target in buttons:
        current = last[robot]
        step = 1 if target >= current else -1
        for i in range(abs(target - current)):
            expanded.append((robot, "MOVE", current + step * (i + 1)))
        expanded.append((robot, "PUSH", target))
        last[robot] = target
    return expanded
# Script entry point.
if __name__ == '__main__':
    main()
| true |
99ead79577d0951db4787aaf5653dae911dc967b | Python | LiorAvrahami/surface-tension-measurement-project | /cropped photos/draw_all.py | UTF-8 | 735 | 2.609375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import os
import addcopyfighandler
# For every PNG in the current directory, show the image together with the
# manually recorded edge line stored in "<name>edge_point_data.txt".
for fn in sorted(os.listdir()):
    if os.path.splitext(fn)[1] != ".png":
        continue
    data = plt.imread(fn)
    # The companion text file holds two "x,y" lines: the edge's end points.
    point_f_name = os.path.join(os.path.dirname(fn), os.path.splitext(os.path.split(fn)[1])[0] + "edge_point_data.txt")
    with open(point_f_name) as point_f:
        lines = point_f.readlines()
    (x0,y0),(x1,y1) = [[float(v) for v in line.split(",")] for line in lines]
    plt.figure(figsize=(12,7))
    plt.imshow(data)
    # Draw the recorded edge as a dashed red line with end-point markers.
    plt.plot([x0,x1],[y0,y1],"r--o")
    # Close the current figure when Escape is pressed.
    def on_escape(event):
        if event.key == "escape":
            plt.close()
    plt.gcf().canvas.mpl_connect("key_press_event", on_escape)
    plt.title(fn)
    plt.show()
6654f7447789b6a364b81c137b73de87585acaaa | Python | liyingxuan89/whetherCollect | /whetherCollect.py | UTF-8 | 1,313 | 2.703125 | 3 | [] | no_license | #!/bin/env python
import urllib
import urllib2
import sys
import os
from lxml import etree
import pandas as pd
import numpy as np
def parser(html):
    """Extract the observation-table header text from a weather page.

    Parses *html* with lxml and returns the string content of the first
    '//div[@class="observation-table"]//thead' node, or None (implicitly)
    when the table is absent.

    Cleanup: the original also built an unused "//tbody" XPath expression;
    that dead local has been removed.
    """
    content = etree.HTML(html)
    pattern = '//div[@class="observation-table"]'
    head = pattern + "//thead"
    data = content.xpath(head)
    if data:
        return data[0].xpath('string()')
class whetherCollect(object):
def __init__(self, city, timeList, parser):
self.city = city
self.time = timeList
self.parser = parser
self.url = "https://www.wunderground.com/history/monthly/cn/" + self.city + "/date/"
self.headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
}
def dataSaver(self, savePath):
data = 0
with open(savePath, 'w') as f:
f.write(data)
def download(self):
for year in self.time:
realUrls = [self.url + str(year) + "-" + str(x) for x in range(1,2)]
for url in realUrls:
request = urllib2.Request(url, headers=self.headers)
response = urllib2.urlopen(request)
html = response.read()
data = self.parser(html)
print data
def main():
    # Scrape Chengdu's 2018 monthly pages using the module-level parser().
    whether = whetherCollect('chengdu', [2018], parser)
    whether.download()

# Script entry point.
if __name__ == "__main__":
    main()
| true |
424a86a16cf59c3b572bd4ea33f8fd70cd9e19ac | Python | Sebuliba-Adrian/SelfLearningClinic-Day3 | /WordCountLab/app/wordcount.py | UTF-8 | 903 | 4.40625 | 4 | [] | no_license | def words(text):
"""Check for a string in the text sentence"""
if isinstance(text, str):
#Split the sentence by the spaces assign each string to variable words
words = text.split()
#Create a dictionary word_dicts that will store the word count
wordsDict = dict()
#Loops through each word in words list
for word in words:
#Checks to see if the string is a number and converts to an integer
if word.isdigit():
word = int(word)
#Check to see if word is a key in words_dict and increase it's value by 1
if word in wordsDict.keys():
wordsDict[word] += 1
#Add word as a key in wordsDict and it holds a value 1
else:
wordsDict[word] = 1
#Returns a words dictionary
return wordsDict
else:
return TypeError | true |
fdb63b65ee401c1fad9a6f321f3b9ba4a8f254f4 | Python | louisza/CarND-Behavioral-Cloning-P3 | /model.py | UTF-8 | 13,897 | 2.734375 | 3 | [] | no_license | from keras.models import Sequential, Model
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, Conv2D
from keras.regularizers import l2
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import numpy as np
import cv2
import matplotlib.pyplot as plt
from os import getcwd
import csv
from keras.layers.merge import Concatenate
from keras.engine import Input
def displayCV2(img):
    '''
    Display a CV2 Image

    Opens an OpenCV window, blocks until any key is pressed, then tears
    the window down.
    '''
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def process_img_for_visualization(image, angle, pred_angle, frame):
    '''
    Used by visualize_dataset method to format image prior to displaying. Converts colorspace back to original BGR,
    applies text to display steering angle and frame number (within batch to be visualized),
    and applies lines representing steering angle and model-predicted steering angle (if available) to image.

    image: YUV image as produced by preprocess_image (assumed 66x200x3 -- TODO confirm)
    angle: ground-truth steering angle; pred_angle: model prediction or None
    frame: index of the frame within the visualized batch
    '''
    font = cv2.FONT_HERSHEY_SIMPLEX
    img = cv2.cvtColor(image, cv2.COLOR_YUV2BGR)
    # Upscale 3x so the overlay text is readable.
    img = cv2.resize(img, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
    h, w = img.shape[0:2]
    # apply text for frame number and steering angle
    cv2.putText(img, 'frame: ' + str(frame), org=(2, 18), fontFace=font, fontScale=.5, color=(200, 100, 100),
                thickness=1)
    cv2.putText(img, 'angle: ' + str(angle), org=(2, 33), fontFace=font, fontScale=.5, color=(200, 100, 100),
                thickness=1)
    # apply a line representing the steering angle (green = ground truth, red = prediction)
    cv2.line(img, (int(w / 2), int(h)), (int(w / 2 + angle * w / 4), int(h / 2)), (0, 255, 0), thickness=4)
    if pred_angle is not None:
        cv2.line(img, (int(w / 2), int(h)), (int(w / 2 + pred_angle * w / 4), int(h / 2)), (0, 0, 255), thickness=4)
    return img
def visualize_dataset(X, y, y_pred=None):
    '''
    Render every image of the batch with its steering angle (and the model
    prediction, when supplied) burnt in, and display them one at a time.
    '''
    for idx, image in enumerate(X):
        prediction = y_pred[idx] if y_pred is not None else None
        annotated = process_img_for_visualization(image, y[idx], prediction, idx)
        displayCV2(annotated)
def preprocess_image(img):
    '''
    Method for preprocessing images: this method is the same used in drive.py, except this version uses
    BGR to YUV and drive.py uses RGB to YUV (due to using cv2 to read the image here, where drive.py images are
    received in RGB)

    Returns a 66x200x3 YUV image ready for the network input.
    '''
    # Crop to the region of interest (drops sky above row 50 and hood
    # below row 140; assumes the usual 160-px-tall simulator frame -- TODO confirm).
    new_img = img[50:140, :, :]
    # apply subtle blur
    new_img = cv2.GaussianBlur(new_img, (3, 3), 0)
    # scale to 66x200x3 (same as nVidia)
    new_img = cv2.resize(new_img, (200, 66), interpolation=cv2.INTER_AREA)
    # convert to YUV color space (as nVidia paper suggests)
    new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2YUV)
    return new_img
def random_distort(img, angle):
    '''
    method for adding random distortion to dataset images, including random brightness adjust, and a random
    vertical shift of the horizon position

    Returns (distorted_image_uint8, angle); the steering angle itself is
    passed through unchanged.
    '''
    new_img = img.astype(float)
    # random brightness - the mask bit keeps values from going beyond (0,255)
    value = np.random.randint(-28, 28)
    if value > 0:
        mask = (new_img[:, :, 0] + value) > 255
    if value <= 0:
        mask = (new_img[:, :, 0] + value) < 0
    # Pixels that would overflow/underflow keep their value (shift of 0).
    new_img[:, :, 0] += np.where(mask, 0, value)
    # random shadow - full height, random left/right side, random darkening
    h, w = new_img.shape[0:2]
    mid = np.random.randint(0, w)
    factor = np.random.uniform(0.6, 0.8)
    if np.random.rand() > .5:
        new_img[:, 0:mid, 0] *= factor
    else:
        new_img[:, mid:w, 0] *= factor
    # randomly shift horizon
    h, w, _ = new_img.shape
    horizon = 2 * h / 5
    # NOTE(review): float bounds for np.random.randint are rejected by
    # newer numpy versions -- consider int(h / 8).
    v_shift = np.random.randint(-h / 8, h / 8)
    # Warp only the part above the horizon by moving the horizon line up or
    # down while keeping the bottom corners fixed.
    pts1 = np.float32([[0, horizon], [w, horizon], [0, h], [w, h]])
    pts2 = np.float32([[0, horizon + v_shift], [w, horizon + v_shift], [0, h], [w, h]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    new_img = cv2.warpPerspective(new_img, M, (w, h), borderMode=cv2.BORDER_REPLICATE)
    return (new_img.astype(np.uint8), angle)
def generate_training_data(image_paths, angles, batch_size=128, validation_flag=False):
    '''
    method for the model training data generator to load, process, and distort images, then yield them to the
    model. if 'validation_flag' is true the image is not distorted. also flips images with turning angle magnitudes
    of greater than 0.33, as to give more weight to them and mitigate bias toward low and zero turning angles

    Yields (X, y) batches forever: X is a numpy array of preprocessed (and,
    for training, randomly distorted) images, y the matching steering angles.
    '''
    image_paths, angles = shuffle(image_paths, angles)
    X, y = ([], [])
    while True:
        for i in range(len(angles)):
            img = cv2.imread(image_paths[i])
            angle = angles[i]
            img = preprocess_image(img)
            if not validation_flag:
                img, angle = random_distort(img, angle)
            X.append(img)
            y.append(angle)
            if len(X) == batch_size:
                yield (np.array(X), np.array(y))
                X, y = ([], [])
                image_paths, angles = shuffle(image_paths, angles)
            # flip horizontally and invert steer angle, if magnitude is > 0.33
            # NOTE(review): the condition below tests > 0, not > 0.33 as the
            # comment says, so every non-zero-angle frame gets mirrored.
            if abs(angle) > 0:
                img = cv2.flip(img, 1)
                angle *= -1
                X.append(img)
                y.append(angle)
                if len(X) == batch_size:
                    yield (np.array(X), np.array(y))
                    X, y = ([], [])
                    image_paths, angles = shuffle(image_paths, angles)
            # extra flip for adverse steering angles
            # (flips back to the original orientation, oversampling frames
            # with large steering magnitudes)
            if abs(angle) > 0.33:
                img = cv2.flip(img, 1)
                angle *= -1
                X.append(img)
                y.append(angle)
                if len(X) == batch_size:
                    yield (np.array(X), np.array(y))
                    X, y = ([], [])
                    image_paths, angles = shuffle(image_paths, angles)
def generate_training_data_for_visualization(image_paths, angles, batch_size=20, validation_flag=False):
    '''
    method for loading, processing, and distorting images
    if 'validation_flag' is true the image is not distorted

    Unlike generate_training_data this is not a generator: it returns one
    (X, y) batch of *batch_size* shuffled samples as numpy arrays.
    '''
    X = []
    y = []
    image_paths, angles = shuffle(image_paths, angles)
    for i in range(batch_size):
        img = cv2.imread(image_paths[i])
        angle = angles[i]
        img = preprocess_image(img)
        if not validation_flag:
            img, angle = random_distort(img, angle)
        X.append(img)
        y.append(angle)
    return (np.array(X), np.array(y))
# ---- configuration: which datasets to load and whether to train ----
RunModel = True
My_Data = True
U_Data = True
data_to_use = [My_Data, U_Data]
img_path_prepend = ['', getcwd() + '/data/']
csv_path = ['./data/Recordings/driving_log.csv', './data/driving_log.csv']
image_paths = []
angles = []
for j in range(2):
    if not data_to_use[j]:
        # 0 = my own data, 1 = Udacity supplied data
        print('not using dataset ', j)
        continue
    # Import driving data from csv
    with open(csv_path[j], newline='') as f:
        driving_data = list(csv.reader(f, skipinitialspace=True, delimiter=',', quoting=csv.QUOTE_NONE))
    # Gather data - image paths and angles for center, left, right cameras in each row
    for row in driving_data[1:]:
        # skip it if ~0 speed - not representative of driving behavior
        if float(row[6]) < 0.1 :
            continue
        # get center image path and angle
        image_paths.append(img_path_prepend[j] + row[0])
        angles.append(float(row[3]))
        # get left image path and angle (steering corrected by +0.15)
        image_paths.append(img_path_prepend[j] + row[1])
        angles.append(float(row[3])+0.15)
        # get right image path and angle (steering corrected by -0.15)
        image_paths.append(img_path_prepend[j] + row[2])
        angles.append(float(row[3])-0.15)
image_paths = np.array(image_paths)
angles = np.array(angles)
print('Before:', image_paths.shape, angles.shape)
# print a histogram to see which steering angle ranges are most overrepresented
num_bins = 23
avg_samples_per_bin = len(angles)/num_bins
hist, bins = np.histogram(angles, num_bins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
#plt.bar(center, hist, align='center', width=width)
#plt.plot((np.min(angles), np.max(angles)), (avg_samples_per_bin, avg_samples_per_bin), 'k-')
#plt.show()
# determine keep probability for each bin: if below avg_samples_per_bin, keep all; otherwise keep prob is proportional
# to number of samples above the average, so as to bring the number of samples for that bin down to the average
keep_probs = []
target = avg_samples_per_bin * .5
for i in range(num_bins):
    if hist[i] < target:
        keep_probs.append(1.)
    else:
        keep_probs.append(1./(hist[i]/target))
remove_list = []
for i in range(len(angles)):
    for j in range(num_bins):
        if angles[i] > bins[j] and angles[i] <= bins[j+1]:
            # delete from X and y with probability 1 - keep_probs[j]
            if np.random.rand() > keep_probs[j]:
                remove_list.append(i)
image_paths = np.delete(image_paths, remove_list, axis=0)
angles = np.delete(angles, remove_list)
# print histogram again to show more even distribution of steering angles
hist, bins = np.histogram(angles, num_bins)
plt.bar(center, hist, align='center', width=width)
plt.plot((np.min(angles), np.max(angles)), (avg_samples_per_bin, avg_samples_per_bin), 'k-')
plt.show()
print('After:', image_paths.shape, angles.shape)
# visualize a single batch of the data
X,y = generate_training_data_for_visualization(image_paths, angles)
visualize_dataset(X,y)
# split into train/test sets
image_paths_train, image_paths_test, angles_train, angles_test = train_test_split(image_paths, angles,
                                                                                  test_size=0.05, random_state=42)
print('Train:', image_paths_train.shape, angles_train.shape)
print('Test:', image_paths_test.shape, angles_test.shape)
if RunModel:
    # Build a two-branch regression network (NVIDIA pipeline + comma.ai
    # pipeline over the same normalised input), train it on the generators
    # above, and save the weights/model/architecture to disk.
    # setup model input and the hard coded normalisation
    inputs = Input(shape=(66,200,3))
    nimg = Lambda(lambda x: x / 127.5 - 1.0)(inputs)
    # -- Start of the NVIDIA portion of the model
    # First 3 convolutional layers with 5x5 kernel each and filter size 24,36 and 48, stride (2,2)
    # ELU activation layers and l2 kernel_regularizers
    x = Conv2D(filters=24,kernel_size=(5,5),strides=(2,2),padding='valid',activation='elu',
               kernel_regularizer=l2(0.001))(nimg)
    x = Conv2D(filters=36, kernel_size=(5, 5), strides=(2, 2), padding='valid', activation='elu',
               kernel_regularizer=l2(0.001))(x)
    x = Conv2D(filters=48, kernel_size=(5, 5), strides=(2, 2), padding='valid', activation='elu',
               kernel_regularizer=l2(0.001))(x)
    # End first three layers with a dropout layer
    x = Dropout(0.5)(x)
    # Two more convolutional layers with kernel size (3,3) and filter 64
    x = Conv2D(filters=64, kernel_size=(3, 3), padding='valid', activation='elu',
               kernel_regularizer=l2(0.001))(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), padding='valid', activation='elu',
               kernel_regularizer=l2(0.001))(x)
    x = Dropout(0.5)(x)
    # Add a flatten layer
    x = Flatten()(x)
    # -- Start of the comma.ai model
    # three convolutional layers (note that this branch also starts with the normalised image)
    y = Conv2D(filters=16, kernel_size=(8, 8),strides=(4,4), padding='same', activation='elu')(nimg)
    y = Conv2D(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same', activation='elu')(y)
    y = Conv2D(filters=64, kernel_size=(5, 5), strides=(2, 2), padding='same', activation='elu')(y)
    # Add Dropout layer
    y = Dropout(0.5)(y)
    # Flatten the output
    y = Flatten()(y)
    # Concatenate two models
    z = Concatenate(axis=-1)([x,y])
    # Add three fully connected layers (depth 512, 100, 50, 10), elu activation (and dropouts?)
    z = Dense(512,activation='elu',kernel_regularizer=l2(0.001))(z)
    # add a dropout layer
    z = Dropout(0.5)(z)
    z = Dense(100, activation='elu', kernel_regularizer=l2(0.001))(z)
    z = Dense(50, activation='elu', kernel_regularizer=l2(0.001))(z)
    z = Dense(10, activation='elu', kernel_regularizer=l2(0.001))(z)
    # Add a fully connected output layer
    SteerAngle = Dense(1, activation='elu')(z)
    # Compile and train the model,
    # model.compile
    #print(SteerAngle)
    model = Model(inputs=inputs,outputs=SteerAngle)
    #model.summary()
    model.compile(optimizer=Adam(lr=1e-4), loss='mse')
    # initialize generators
    # NOTE(review): the validation generator is drawn from the *training*
    # split (image_paths_train), so validation loss is not independent.
    train_gen = generate_training_data(image_paths_train, angles_train, validation_flag=False, batch_size=64)
    val_gen = generate_training_data(image_paths_train, angles_train, validation_flag=True, batch_size=64)
    test_gen = generate_training_data(image_paths_test, angles_test, validation_flag=True, batch_size=64)
    # Save a checkpoint after every epoch.
    checkpoint = ModelCheckpoint('model{epoch:02d}.h5')
    history = model.fit_generator(train_gen,epochs=8,steps_per_epoch=2000,verbose=2,callbacks=[checkpoint],
                                  validation_data=val_gen,validation_steps=25)
    print(model.summary())
    # visualize some predictions
    n = 12
    X_test, y_test = generate_training_data_for_visualization(image_paths_test[:n], angles_test[:n], batch_size=n,
                                                              validation_flag=True)
    y_pred = model.predict(X_test, n, verbose=2)
    #visualize_dataset(X_test, y_test, y_pred)
    # Save model data
    model.save_weights('./model_w.h5')
    model.save('model_clone_combo.h5')
    json_string = model.to_json()
    with open('./model.json', 'w') as f:
        f.write(json_string)
| true |
e59317e64e8cf270c6c28d59ee52874ecf83dedd | Python | abusamrah2005/Python | /Week-5/Day-29.py | UTF-8 | 584 | 4.4375 | 4 | [
"Unlicense"
] | permissive | # # Python Week-5 Day-29
# Python Loops 2
print("---- The For Loops ----")
# Iterate over the items of a list.
fruits = ["apple", "banana", "cherry"]
for x in fruits:
    print(x)
print("--- Looping Through a String ----")
# A string is iterable too: the loop runs once per character.
for x in "cherry":
    print(x)
print("--- The break Statement ---")
fruits = ["apple", "banana", "cherry"]
# break *before* printing: neither "banana" nor "cherry" is printed.
for x in fruits:
    if x == "banana":
        break
    print(x)
print("---")
# break *after* printing: "apple" and "banana" are printed.
for x in fruits:
    print(x)
    if x == "banana":
        break
print("--- The continue Statement ---")
fruits = ["apple", "banana", "cherry"]
# continue skips only "banana"; the loop carries on with "cherry".
for x in fruits:
    if x == "banana":
        continue
    print(x)
| true |
efc24a5dbaa9c91ab42e59e907a2a2b0643bb57e | Python | mengqi0315/predict_car_sales_price | /workspace_for_python/Day04/2.็ปงๆฟ.py | UTF-8 | 563 | 4.5 | 4 | [] | no_license | # ็ปงๆฟ๏ผๅญ็ฑป็ปงๆฟ็ถ็ฑป๏ผๅ
# (continued) the subclass shares the parent class's attributes and methods, and can add its own extensions
class Dog:
    """Base class: stores a name and provides a generic eat() method."""
    def __init__(self,name):
        self.name = name
        print('ๆ็็ฑป็ๆ้ ๆนๆณ')  # "Dog class constructor" (string garbled by an encoding error)

    def eat(self):
        print('%s็ๅไธ่ฅฟ'%(self.name))  # "<name> is eating" (string garbled by an encoding error)
class XTQ(Dog):  # XTQ inherits from Dog
    def eat(self):  # method override: replaces Dog.eat
        print("%s็ๅ่ ๆก"%(self.name))  # "<name> eats sausages" (string garbled by an encoding error)
xtq=XTQ("ๅฎๅคฉ็ฌ")
print(xtq.name)
xtq.eat() # ๅญ็ฑป่ฐ็จๅฑๆงๆๆนๆณๆถๅ
่ฆไปๅญ็ฑปๆฌ่บซๅปๅฏปๆพ๏ผ่ฅๅญ็ฑปๆฒกๆๅฐฑไผไป็ถ็ฑปไธญๅฏปๆพ
| true |
4a58005e8f6a81e98f01fdaee5556eae41f037fc | Python | DemiFr/SD201 | /TP/TP1102_YantingLI/kFoldCross.py | UTF-8 | 2,268 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 2 09:25:59 2016
@author: yali
"""
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn import datasets
from sklearn import svm
import random
import sklearn as sk
# Load the 20 labelled documents (files "b0".."b19") into memory, together
# with their category labels.
dataFiles = []
#Categories = ['company','company','company','company','company','fruit','fruit','fruit','fruit','fruit']
Categories = ['fruit','fruit','company','company','company',
              'company','company','company','fruit','fruit',
              'company','company','company','fruit','company',
              'company','fruit','company','fruit','fruit']
for i in range(20):
    # NOTE(review): file handles are never closed; a with-statement would be safer.
    f = open('b'+str(i))
    lines = f.readlines()
    doc = ''
    for line in lines:
        doc = doc + line
    dataFiles.append(doc)
# Turn the raw documents into a bag-of-words count matrix.
#Transfer the Array into Matrix
count_vect = CountVectorizer()
vectors = count_vect.fit_transform(dataFiles)
X=vectors.toarray()
print X
#print vectors
#K-fold
# Etape1 split
# Shuffle the 20 document indices, then distribute them into n_splits folds
# of equal size (temp documents per fold).
a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]
a=np.array(a)
a=np.random.permutation(a)
#print a
n_splits=10
temp = int(len(dataFiles)/n_splits)
K_array = []
Cat_array = []
K = []
Cat = []
t = 0
for i in range(n_splits):
    for j in range(temp):
        K_array.append(dataFiles[a[t]])
        Cat_array.append(Categories[a[t]])
        t = t+1
#print K_array
#print Cat_array
# Reshape the shuffled documents/labels into (n_splits, temp) fold matrices.
K = np.reshape(K_array, (n_splits, temp))
Cat = np.reshape(Cat_array, (n_splits, temp))
#print K
#print Cat
# Etape 2
# Cross-validation loop: fold i is the test set, the remaining folds make
# up the training set.  The classifier step (Etape 3) is still commented out.
for i in range(n_splits):
    TrainingSet = []
    TestSet = []
    TrainingCat = []
    TestCat = []
    Score_ary = []
    for k in range(temp):
        TestSet.append(K[i][k])
    for j in range(n_splits):
        if i != j:
            for k in range(temp):
                TrainingSet.append(K[j][k])
                TrainingCat.append(Cat[j][k])
    #print TrainingSet
    #print TestSet
    # Re-chunk the flat training list into groups of `temp` documents.
    # NOTE(review): the comprehension variable shadows the outer loop
    # variable `i` (which leaks in Python 2) -- rename to avoid confusion.
    TrainingSet1 = [TrainingSet[i:i+temp]for i in range(0, len(TrainingSet), temp)]
    print TrainingSet1
    #Etape 3
    # clf = MultinomialNB()
    # clf.fit(TrainingSet1, TrainingCat)
    # print(clf.predict(TestSet))
    # for x in range(len(TrainingSet[t])):
    # Score_ary.append(clf.score(TestSet[x], TestCat[x]))
    # print Score_ary
| true |
15aeadc7c95e1b8a9bda474da4e5cff0c00a2e82 | Python | songlin1994/my-edu | /day02/list_demo.py | UTF-8 | 1,856 | 4.28125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# Demo of basic list operations: element access/slicing, deleting, adding,
# updating, sorting and de-duplication.  Comments translated to English;
# the original Chinese text was garbled by an encoding error.
alist = [ 'ๆต่ฏ',2,'ไฝ ๅฅฝ',6,'test',1,3.2 ]

# Reading and slicing a list.
def list_sel():
    # Access elements via index/subscript.
    # Positive indices count from zero.
    print(alist[0])
    # Negative indices count from -1 (the last element).
    print(alist[-1])
    # Slicing.
    print(alist[1:5])
    # Reverse the list with a step of -1.
    print(alist[::-1])

# Deleting elements.
def list_del():
    # list.pop(): removes the last element by default and returns it.
    ele = alist.pop()
    print(alist)
    print(ele)
    # With an index argument, pop() removes the element at that position.
    alist.pop(2)
    print(alist)

# Adding elements.
def list_add():
    # list.append(value) appends a single element.
    alist.append("ysl")
    print(alist)
    # extend() merges another list's elements into this one.
    blist = [8,8.8]
    alist.extend(blist)
    print(alist)
    # append() with a list adds the whole list as one nested element.
    alist.append(blist)
    print(alist)

def list_update():
    qlist = [1,2,6,4,5]
    # Update an element by assigning to its index.
    qlist[0] = 100
    print(qlist)
    # Update the third element to 200.
    qlist[2] = 200
    print(qlist)

def list_order_by():
    qlist = [1, 2, 6, 4, 5,5]
    # Sort ascending.
    qlist.sort()
    print(qlist)
    # Sort descending by passing reverse=True.
    qlist.sort(reverse=True)
    print(qlist)

def list_distinct():
    vlist = [1,2,2,6,6,4,5]
    # set(): removes duplicates.
    s = set(vlist)
    print(s)
    print( type( s ) )
    l = list(s)
    print(type(l))
    print(l)
    # len(): number of elements.
    print(len(l))
    # # set(vlist): de-duplicates the list; the result is not a list, so
    # # list() converts it back into a list.
    # print(type(set(vlist)))
    # vlist = list(set(vlist))
    # print(vlist)
    #
    # # len(): returns how many elements the list holds.
    # print(len(vlist))

if __name__ == '__main__':
    list_distinct()
f1800374a377ddeac58775ec269657755ce2efc2 | Python | boffomarco/InternshipCode | /RapidMinerCode/DL2Alloy/DL2Alloy3.py | UTF-8 | 15,783 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# [TODO] Fix the encoding issues
# Based on:
# https://www.researchgate.net/publication/272763162_A_Non-Standard_Approach_for_the_OWL_Ontologies_Checking_and_Reasoning
# https://www.researchgate.net/publication/252772655_Model_Generation_in_Description_Logics_What_Can_We_Learn_From_Software_Engineering
import os
import pandas as pd
import ontospy
import re
def nameOf(text):
    """Return the local name of a URI: the fragment after the last '#',
    or, when no fragment exists, the last path segment."""
    last_segment = str(text).split("/")[-1]
    return last_segment.split("#")[-1]
def domains(property_):
    # Collect the domain URIs of an ontology property; when the property
    # declares no explicit domains, fall back (recursively) to the domains
    # of its parent properties.  Returns a list of URI strings.
    property_domains = ""
    if(property_.domains):
        for domain_ in property_.domains:
            property_domains = property_domains + str(domain_.uri) + " "
    elif(property_.parents()):
        for property_parent in property_.parents():
            property_domains = property_domains + " " + " ".join(domains(property_parent))
    # NOTE(review): URIs are accumulated in one space-separated string and
    # split again, so a URI containing spaces would be broken apart.
    return property_domains.split()
def ranges(property_):
    # Collect the range URIs of an ontology property; when the property
    # declares no explicit ranges, fall back (recursively) to the ranges
    # of its parent properties.  Returns a list of URI strings.
    # (Mirror image of domains() above.)
    property_ranges = ""
    if(property_.ranges):
        for range_ in property_.ranges:
            property_ranges = property_ranges + str(range_.uri) + " "
    elif(property_.parents()):
        for property_parent in property_.parents():
            property_ranges = property_ranges + " " + " ".join(ranges(property_parent))
    return property_ranges.split()
def brackets(complete):
    """Return the contents of the first balanced '(...)' group in *complete*.

    Opening brackets are re-emitted as ' ( '; closing brackets that do not
    terminate the outermost group are kept verbatim.  When the input runs
    out before the group is closed, the missing closers are appended as
    ' ) '.
    """
    segments = [piece for piece in complete.split("(") if piece]
    out = ""
    depth = 0  # current bracket nesting level
    for segment in segments:
        if depth:
            out += " ( "
        depth += 1
        for ch in segment:
            if ch == ')':
                depth -= 1
                if depth < 1:
                    # Outermost group closed: everything collected so far
                    # is its content.
                    return out
            out += ch
    # Input exhausted with open groups remaining: close them explicitly.
    while depth > 1:
        out += " ) "
        depth -= 1
    return out
# Return next expression inside brackets if the first string open a bracket
def nextBrackets(next, complete):
    """Balance the bracketed expression starting at *next*.

    When *next* opens a bracket, the whole *complete* token sequence is
    joined and balanced; otherwise *next* alone is balanced.
    """
    target = " ".join(complete) if "(" in next else next
    return brackets(target)
def DLAxiomtoAlloy(axiom, level):
    # Recursively translate one Description Logic axiom (given as text; the
    # operator characters matched below are the garbled renderings of the
    # DL symbols -- see the "[TODO] Fix the encoding issues" note at the
    # top of the file) into an Alloy fact or sub-expression.
    # level == 0 means a complete axiom; level > 0 a nested sub-concept.
    # TBOX
    if("\xe2\x89\xa1" in str(axiom)):
        # Concept equivalence (UTF-8 bytes of the symbol): C = D as a fact.
        tmp = axiom.split("\xe2\x89\xa1")
        #tmp = axiom.split("โก")
        print(axiom)
        print(tmp)
        return "fact { " + DLAxiomtoAlloy( tmp[0] , level + 1 ) + " = " + DLAxiomtoAlloy( tmp[1] , level + 1 ) + " }"
    elif("=" in axiom):
        # Equality: with a number on the right-hand side this is an exact
        # cardinality restriction, otherwise a plain equivalence fact.
        # NOTE(review): because this branch matches any "=", the later
        # 'elif("=" in axiom and level > 0)' branch is unreachable.
        tmp = axiom.split("=")
        if(len(re.findall('\d+', tmp[1]))):
            tmp = tmp[1]
            n = re.findall('\d+', tmp)[0]
            tmp = tmp.replace(str(n),"")
            if("." in tmp):
                tmp = tmp.split(".")
                #print(n)
                #print(tmp)
                # NOTE(review): "=> " here looks copied from the >= branch;
                # an exact cardinality would be expected to emit "= ".
                return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp[0].replace("(","").replace(")","") , level + 1) + " :> " + DLAxiomtoAlloy(tmp[1].replace("(","").replace(")","") , level + 1) + " ) ) => " + n + "}"
            else:
                return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp.replace("(","").replace(")","") , level + 1) + " ) ) = " + n + "}"
        #print(tmp)
        return "fact { " + DLAxiomtoAlloy( tmp[0] , level + 1) + " = " + DLAxiomtoAlloy( tmp[1] , level + 1 ) + " }"
    elif("โ" in axiom):
        # Concept subsumption: C in ( D ).
        tmp = axiom.split("โ")
        #print(tmp)
        return "fact { " + DLAxiomtoAlloy( tmp[0] , level + 1 ) + " in ( " + DLAxiomtoAlloy( tmp[1] , level + 1 ) + " ) }"
    # (ALC) concept
    elif("โ" in axiom):
        # Union of concepts: joined with Alloy '+'.
        tmps = axiom.split("โ")
        #print(tmps)
        final = " "
        for tmp in tmps:
            final = final + DLAxiomtoAlloy(tmp , level + 1) + " + "
        final = final[0:-2]
        return final
    elif("โ" in axiom):
        # Intersection of concepts: joined with Alloy '&'.
        tmps = axiom.split("โ")
        #print(tmps)
        final = " "
        for tmp in tmps:
            final = final + DLAxiomtoAlloy(tmp , level + 1) + " & "
        final = final[0:-2]
        return final
    elif("โ" in axiom):
        # Universal restriction R.C encoded as univ - ( R.( univ - C ) ).
        tmp = axiom.replace('โ', '').split(".")
        #print(tmp)
        return " ( univ - ( " + DLAxiomtoAlloy(tmp[0] , level + 1) + ".( univ - " + DLAxiomtoAlloy(nextBrackets(".".join(tmp[1:]).split()[0], ".".join(tmp[1:]).split()) , level + 1) + " ) ) ) "
    elif("โ" in axiom):
        # Existential restriction marker is simply stripped and the rest
        # translated recursively.
        tmp = axiom.replace('โ', '')
        #print(tmp)
        return DLAxiomtoAlloy(tmp , level + 1)
    elif("ยฌ" in axiom):
        # Concept negation: ( univ - C ).
        tmp = axiom.split("ยฌ")
        #print(tmp)
        return "( univ - " + DLAxiomtoAlloy(nextBrackets(tmp[1].split()[0], tmp[1].split()) , level + 1) + " ) "
    elif("โป" in axiom):
        # Inverse-role marker: complement with respect to TOP -> TOP.
        #print(axiom)
        for tmp in axiom.split():
            if("โป" in tmp):
                return "( ( TOP -> TOP ) - " + DLAxiomtoAlloy(tmp.replace("โป", "") , level + 1) + " ) "
    # (SHIQ) concept
    elif("โค" in axiom):
        # Max cardinality restriction: counting constraint with '=<'.
        tmp = axiom.split("โค")
        #print(tmp)
        tmp = tmp[1]
        n = re.findall('\d+', tmp)[0]
        tmp = tmp.replace(str(n),"")
        if("." in tmp):
            tmp = tmp.split(".")
            #print(n)
            #print(tmp)
            return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp[0].replace("(","").replace(")","") , level + 1) + " :> " + DLAxiomtoAlloy(tmp[1].replace("(","").replace(")","") , level + 1) + " ) ) =< " + n + "}"
        else:
            return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp.replace("(","").replace(")","") , level + 1) + " ) ) =< " + n + "}"
    elif("โฅ" in axiom):
        # Min cardinality restriction: counting constraint with '=>'.
        tmp = axiom.split("โฅ")
        #print(tmp)
        tmp = tmp[1]
        n = re.findall('\d+', tmp)[0]
        tmp = tmp.replace(str(n),"")
        if("." in tmp):
            tmp = tmp.split(".")
            #print(n)
            #print(tmp)
            return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp[0].replace("(","").replace(")","") , level + 1) + " :> " + DLAxiomtoAlloy(tmp[1].replace("(","").replace(")","") , level + 1) + " ) ) => " + n + "}"
        else:
            return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp.replace("(","").replace(")","") , level + 1) + " ) ) => " + n + "}"
    elif("=" in axiom and level > 0):
        # Exact cardinality inside a sub-expression.
        # NOTE(review): dead code -- the 'elif("=" in axiom)' branch above
        # already captures every axiom containing "=".
        tmp = axiom.split("=")
        #print(tmp)
        tmp = tmp[1]
        n = re.findall('\d+', tmp)[0]
        tmp = tmp.replace(str(n),"")
        if("." in tmp):
            tmp = tmp.split(".")
            #print(n)
            #print(tmp)
            return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp[0].replace("(","").replace(")","") , level + 1) + " :> " + DLAxiomtoAlloy(tmp[1].replace("(","").replace(")","") , level + 1) + " ) ) => " + n + "}"
        else:
            return "{ a : univ | #( a.( " + DLAxiomtoAlloy(tmp.replace("(","").replace(")","") , level + 1) + " ) ) = " + n + "}"
    elif("INV." in axiom):
        # Textual inverse-role prefix: INV.R becomes Alloy transpose ~R.
        tmp = axiom.replace("INV.", "~")
        #print(tmp)
        return DLAxiomtoAlloy(tmp , level + 1)
    elif("(" in axiom and ")" in axiom and level == 0):
        # ABOX assertion: concept assertion C(a) or role assertion R(a,b).
        tmp = axiom.split("(")
        C = tmp[0].split()[-1]
        tmp = tmp[1].split(")")[0]
        tmp = tmp.split(",")
        if(len(tmp)==1):
            return "fact { " + DLAxiomtoAlloy(tmp[0] , level + 1) + " in " + C + " }"
        elif(len(tmp)==2):
            return "fact { " + DLAxiomtoAlloy(tmp[0] , level + 1) + " -> " + DLAxiomtoAlloy(tmp[1] , level + 1) + " in " + C + " }"
        return axiom#"fact { " + DLAxiomtoAlloy(tmp[0] , level + 1) + " -> " + DLAxiomtoAlloy(tmp[1] , level + 1) + " in " + C + " }"
    # Atomic concept/role name: strip any remaining brackets.
    return axiom.replace("(","").replace(")","")
def rm_main(dataDL):
# Create the directory in which store the new vocabulary
#outputDirectory = "%{outputDirectory}"
if not os.path.isdir(outputDirectory):
os.makedirs(outputDirectory)
# Define Ontology Analyser
o = ontospy.Ontospy()
# Load Ontology
#inputFile = "%{inputFile}" #, people.owl, Animal.owl, schema_2020-03-10.n3
o.load_rdf(inputFile)
o.build_all()
moduleName = ((str(inputFile).split("/")[-1]).split("."))[-2] + "DL"
fileName = outputDirectory + moduleName + ".als"
AlloyModel = "module " + moduleName + "\n\n"
usedProperties = set()
AlloySignatures = "\n// Signatures\n"
# Add Classes & Properties to Alloy
for class_ in o.all_classes:
#print("Class: " + str(class_.uri))
className = nameOf(class_.uri)
AlloyClass = "sig " + className + " in TOP "
"""
for subClassOf in class_.parents():
subClassOfName = nameOf(subClassOf.uri)
AlloyClass = AlloyClass + " extends " + subClassOfName
"""
AlloyClass = AlloyClass + " { \n\t"
for property_ in o.all_properties:
#print("Property: " + str(property_.uri))
domains_ = domains(property_)
property_Name = nameOf(property_.uri)
for domain_ in domains_:
if(domain_ == str(class_.uri)):
#print("Domain: " + str(domain_))
ranges_ = ranges(property_)
for range_ in ranges_:
#print("Range: " + str(range_))
AlloyClass = AlloyClass + property_Name + ": " + nameOf(range_) + ",\n\t"
usedProperties.add(property_Name)
AlloyClass = AlloyClass[0:-3] + "} \n"
AlloySignatures = AlloySignatures + AlloyClass
#print()
AlloyModel = AlloyModel + "abstract sig TOP { \n"
for property_ in o.all_properties:
property_Name = nameOf(property_.uri)
if(property_Name not in usedProperties):
#print(property_Name)
AlloyModel = AlloyModel + property_Name + " : set TOP,\n"
AlloyModel = AlloyModel[0:-2] + "}\n"
AlloyModel = AlloyModel + "sig BOTTOM in TOP {} fact { #BOTTOM = 0 } \n\n"
AlloyAxioms = "\n// Axioms\n"
# Iterate for every DL Axioms
for index, row in dataDL.iterrows():
if (row["DLAxioms"]):
axiom = row["DLAxioms"].encode('utf-8').strip()
AlloyAxiom = DLAxiomtoAlloy(str(axiom).replace("โค", "TOP").replace(",", ""), 0)
if (AlloyAxiom[0] == "{"):
AlloyAxiom = "fact " + AlloyAxiom
#print(AlloyAxiom)
if("fact {" in AlloyAxiom[0:6]):
AlloyAxioms = AlloyAxioms + AlloyAxiom + "\n"
#print("")
AlloyPredicates = "\n// Predicates\n"
for subject, predicate, object_ in o.rdflib_graph:
predicateName = nameOf(predicate.encode('utf-8').strip())
if(predicateName != "type"):
#print(subject, predicate, object_)
#print()
subj = o.get_any_entity(uri=subject.encode('utf-8').strip())
pred = o.get_any_entity(uri=predicate.encode('utf-8').strip())
obj = o.get_any_entity(uri=object_.encode('utf-8').strip())
if(subj and obj):
if predicateName == "subPropertyOf":
subj_range = ""
if("Property" == str(subj)[1:9] and subj.ranges):
#print(len(subj.ranges))
subj_range = subj.ranges[0].uri
elif("Class" == str(subj)[1:6] and subj.range_of):
#print(len(subj.range_of))
subj_range = subj.range_of[0].uri
AlloyModel = AlloyModel + "// subPropertyOf as Figure4\n"
if(nameOf(subj_range) and nameOf(subj.uri) and nameOf(obj.uri)):
AlloyModel = AlloyModel + "pred subPropertyOf{all a:" + nameOf(subj_range) + " | a." + nameOf(subj.uri) + " in a." + nameOf(obj.uri) + "}" + "\n"
obj_range = ""
if("Property" == str(obj)[1:9] and obj.ranges):
#print(len(obj.ranges))
obj_range = obj.ranges[0].uri
elif("Class" == str(obj)[1:6] and obj.range_of):
#print(len(obj.range_of))
obj_range = obj.range_of[0].uri
if(nameOf(subj_range) and nameOf(obj_range)):
AlloyModel = AlloyModel + "// subPropertyOf as TABLE I\n"
AlloyModel = AlloyModel + "pred subPropertyOf{all r:" + nameOf(subj_range) + " | r in " + nameOf(obj_range) + "}" + "\n"
elif predicateName == "inverseOf":
AlloyModel = AlloyModel + "pred inverseOf{" + nameOf(subj.uri) + " = ~" + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "disjointWith":
if(subj.parents() and obj.parents() and subj.parents()[0] != obj.parents()[0]):
AlloyModel = AlloyModel + "pred { no c1:" + nameOf(subj.uri) + ", c2:" + nameOf(obj.uri) + "| c1 = c2}" + "\n"
elif predicateName == "complementOf":
C = "{"
for class_ in o.all_classes:
if(nameOf(obj.uri) != nameOf(class_.uri)):
C = C + nameOf(class_.uri)
C = C + "}"
AlloyModel = AlloyModel + "pred { " + nameOf(subj.uri) + " = " + str(C) + "}" + "\n"
elif predicateName == "equivalentClass":
AlloyModel = AlloyModel + "pred equivalentClass{ " + nameOf(subj.uri) + " = " + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "equivalentProperty":
AlloyModel = AlloyModel + "pred equivalentProperty{ " + nameOf(subj.uri) + " = " + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "TransitiveProperty":
AlloyModel = AlloyModel + "pred TransitiveProperty{ a,b,c โ " + nameOf(subj.uri) + " / a.(" + nameOf(predicate) + ") = b && b.(" + nameOf(predicate) + ") = c โ a.(" + nameOf(predicate) + ") = c }" + "\n"
elif predicateName == "hasValue":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred hasValue{ #( " + pred.ranges[0] + " ) = 1}" + "\n"
elif predicateName == "cardinality":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred cardinality{ #( " + pred.ranges[0] + " ) = " + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "maxCardinality":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred maxCardinality{ #( " + pred.ranges[0] + " ) <= " + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "minCardinality":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred minCardinality{ #( " + pred.ranges[0] + " ) >= " + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "SymmetricProperty":
if((("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)) and (("Property" == str(pred)[1:9] and pred.domains) or ("Class" == str(pred)[1:6] and pred.domain_of))):
AlloyModel = AlloyModel + "pred SymmetricProperty{ a โ " + pred.domains[0] + " && b โ " + pred.ranges[0] + " / a.(" + nameOf(predicate) + ") = b โ b.(" + nameOf(predicate) + ") }" + "\n"
elif predicateName == "FunctionalProperty":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred FunctionalProperty{ #(" + pred.ranges[0] + ") = 1}" + "\n"
elif predicateName == "InverseFunctionalProperty":
if(("Property" == str(pred)[1:9] and pred.domains) or ("Class" == str(pred)[1:6] and pred.domain_of)):
AlloyModel = AlloyModel + "pred InverseFunctionalProperty{ #(" + pred.domains[0] + ") = 1}" + "\n"
elif predicateName == "allValuesFrom":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred allValuesFrom{ " + nameOf(pred.ranges[0]) + " in " + nameOf(obj.uri) + "}" + "\n"
elif predicateName == "someValuesFrom":
if(("Property" == str(pred)[1:9] and pred.ranges) or ("Class" == str(pred)[1:6] and pred.range_of)):
AlloyModel = AlloyModel + "pred allValuesFrom{ some r: " + nameOf(pred.ranges[0]) + " | r in " + nameOf(obj.uri) + "}" + "\n"
with open(fileName, "w+") as Alloy:
Alloy.write(AlloyModel)
Alloy.write(AlloySignatures)
Alloy.write(AlloyAxioms)
Alloy.write(AlloyPredicates)
AlloyUtils = ""
#AlloyUtilsFile = "%{AlloyUtilsFile}"
with open(AlloyUtilsFile, "r") as AlloyUtilsFileRead:
AlloyUtils = AlloyUtilsFileRead.read()
with open(fileName, "a+") as Alloy:
Alloy.write("\n")
Alloy.write(AlloyUtils)
#print(AlloyModel)
# --- Script entry: hard-coded absolute paths for a local run ---
AlloyUtilsFile = "/home/marco/Desktop/Alloy/AlloyUtils.als"
fileName = "gufo" #, people.owl, Animal.owl, schema_2020-03-10.n3
inputFile = "/home/marco/Desktop/Alloy/" + fileName + ".owl"
outputDirectory = "/home/marco/Desktop/Alloy/results/"
# Spreadsheet of DL axioms fed to the translator.
# NOTE(review): inputFile/outputDirectory are assigned but not used here —
# presumably consumed inside rm_main (defined earlier in this file); confirm.
test = pd.read_excel("/home/marco/Desktop/Alloy/peopleDL.xlsx")
#test = pd.read_excel("/home/marco/Desktop/Alloy/gufoDL.xlsx")
rm_main(test)
#print(DLAxiomtoAlloy("โ partitions.โค โ (Type โ (ยฌAbstractIndividualType) โ (ยฌConcreteIndividualType))",0))
04f05c52318f97199e22f3c90ae3a0692e21220c | Python | lishuhuakai/demo | /key_arg.py | UTF-8 | 509 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python3
def person(name, age, **kw):
    """Print a person's name, age, and any extra keyword attributes.

    Args:
        name: The person's name.
        age: The person's age.
        **kw: Arbitrary extra attributes (e.g. city=..., job=...); the whole
            mapping is printed as-is.
    """
    # The original `if 'city' in kw: pass` / `if 'job' in kw: pass` probes were
    # dead code (empty branches with no effect) and have been removed.
    print('name', name, 'age', age, 'other', kw)
# Demo calls for the **kw variant of person() defined above.
person('Mick', 30)
person('Jack', 40, city='Beijing')
person('Adam', 45, gender='M', job='Engineer')
# NOTE(review): the literals below look mojibake-encoded (UTF-8 text decoded
# with the wrong codec) — confirm the intended text before relying on output.
extra = {'city':'ไธๆตท', 'job':'็จๅบ็ฟ'}
person('Tiger', 29, **extra)
person('Rose', 90, city='NewYork', addr='ๅคฉๅ ', zipcode=12345)
def person(name, age, *, city, job):
    """Print the person's details; *city* and *job* must be passed by keyword."""
    fields = (name, age, city, job)
    print(*fields)
# Demo call for the keyword-only variant of person() defined above.
person("jack", 24, city='Beijing', job="assistance")
| true |
23c2577ab37b2e649a1d649bbc5e25279d43ecc0 | Python | Brockfrancom/pythonProjects | /src/pythonBasics/lotteryGame.py | UTF-8 | 1,043 | 4.21875 | 4 | [] | no_license | """
Brock Francom
A02052161
CS-1400-001
Douglas Galarus
2/15/2018
hw6 - Exercise 5.34
"""
def run():
    """Play one round: draw a two-digit lottery number and score the user's guess.

    Prizes: $10,000 for an exact match, $3,000 when both digits match in
    reversed order, $1,000 when any single digit matches, otherwise nothing.
    """
    import random

    # Draw until the two digits differ (the matching rules below assume
    # distinct digits, as in the original exercise).
    draw_tens = draw_ones = 0
    while draw_tens == draw_ones:
        lottery = random.randint(0, 99)
        draw_tens, draw_ones = divmod(lottery, 10)

    # Read and split the user's two-digit guess.
    guess = int(input("Enter a number (2 digits): "))
    guess_tens, guess_ones = divmod(guess, 10)

    # Report the prize tier.
    if guess == lottery:
        print("Exact match! You win $10,000!")
    elif guess_tens == draw_ones and guess_ones == draw_tens:
        print("Matched all digits! You win $3,000!")
    elif guess_tens in (draw_tens, draw_ones) or guess_ones in (draw_tens, draw_ones):
        print("Matched 1 digit. You win $1,000!")
    else:
        print("Sorry no match.")
| true |
ae529cf4dea7a39bc7612fd47bc13a021950ec9e | Python | rharshith2410/filename-extension | /Filename.py | UTF-8 | 281 | 3.203125 | 3 | [] | no_license | filename = input("enter the filename")
# Split on '.'; the extension (if any) is the last element. `filename` comes
# from the input() prompt above.
ext = filename.split('.')
def checkKey(dict, key):
    """Print the file type mapped to extension *key*, or a not-found message.

    Args:
        dict: Mapping of file extensions to type descriptions. (The parameter
            name shadows the builtin `dict`; kept for caller compatibility.)
        key: The extension to look up.
    """
    # Fix: the original `def` line was missing its trailing colon.
    if key in dict:
        print("extension of file is:", dict[key])
    else:
        print("no such file exists!")
# Known extensions -> file type. Fixes two syntax errors in the original
# literal (unterminated string "python and a ')' closing a '{'), and renames
# the variable so the builtin `dict` is no longer shadowed.
ext_map = {"py": "python", "txt": "text", "zip": "zipfile"}
# Use the last dot-separated component: correct for multi-dot names and, unlike
# the original ext[1], does not raise IndexError when the filename has no dot.
key = ext[-1]
checkKey(ext_map, key)
| true |
0f44eef2c0c42b24cd1c7d0e7326f3fc394e3f21 | Python | ryu577/survival | /survival/distributions/weibull.py | UTF-8 | 18,401 | 2.859375 | 3 | [
"MIT"
] | permissive | import numpy as np
from scipy.stats import exponweib
from survival.misc.sigmoid import *
from survival.distributions.basemodel import *
from survival.optimization.optimizn import *
from survival.misc.sigmoid import *
from survival.distributions.basemodel import *
from survival.optimization.optimizn import *
from scipy.special import gamma
class Weibull(Base):
    '''
    Methods pertaining to the Weibull distribution.
    This is currently the only distribution that supports
    regressing features.
    '''
    def __init__(self, ti=None, xi=None, k=None, lmb=None):
        '''
        Initializes an instance of the Weibull distribution.
        args:
            ti: Observed (uncensored) arrival times; when given, the
                parameters are fit to the data by gradient descent.
            xi: Censored observations paired with ti.
            k: Shape parameter (used only when ti is None).
            lmb: Scale parameter (used only when ti is None).
        '''
        if ti is not None:
            self.train_org = ti
            self.train_inorg = xi
            self.t = ti
            self.x = xi
            self.x_samples = None
            self.x_censored = None
            # NOTE(review): gradient_descent only appears as commented-out code
            # further down in this class; presumably it is inherited from Base
            # (basemodel) or provided by optimizn — confirm.
            [self.k, self.lmb] = self.gradient_descent(
                params=np.array([.5, .3]))
        else: ##This path should seldom be used.
            # Demo path: fabricate synthetic feature data, censoring every
            # observation above t = 1.5.
            self.train = []
            self.test = []
            self.k = k
            self.lmb = lmb
            self.params = []
            x_samples = generate_features(100)
            t = generate_weibull(100)
            self.x_censored = x_samples[t > 1.5, ]
            self.x_samples = x_samples[t < 1.5, ]
            self.x = np.ones(sum(t > 1.5)) * 1.5
            self.t = t[t < 1.5]
            self.train_org = np.copy(self.t)
            self.train_inorg = np.copy(self.x)
    def determine_params(self, k, lmb, params):
        '''
        Sets the parameters. Inherited from basemodel
        args:
            k: The shape parameter.
            lmb: The scale parameter.
            params: An array of shape and scale parameters.
        '''
        return super(Weibull, self).determine_params(k, lmb, params)
    def logpdf(self, x, k, lmb):
        '''
        The logarithm of the PDF function.
        args:
            x: Value at which to evaluate.
            k: Shape parameter.
            lmb: Scale parameter.
        '''
        # errstate suppresses log(0)/divide warnings for boundary inputs.
        with np.errstate(all='ignore'):
            return np.log(k) - k * np.log(lmb) + (k - 1) * np.log(x) - (x / lmb)**k
    def pdf(self, x, k=-1, lmb=-1, params=None):
        '''
        The probability density function of the Weibull.
        args:
            x: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return k / lmb * (x / lmb)**(k - 1) * np.exp(-(x / lmb)**k)
    def pdf_grad(self, x, k, lmb):
        '''
        The gradient of the PDF.
        args:
            x: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        delWeibullDelLmb = (1 - (x / lmb)**k) * \
            (-k / lmb) * self.pdf(x, k, lmb)
        delWeibullDelK = self.pdf(
            x, k, lmb) * ((-(x / lmb)**k + 1) * np.log(x / lmb) + 1 / k)
        return np.array([delWeibullDelK, delWeibullDelLmb])
    def cdf(self, t, k=-1, lmb=-1, params=None):
        '''
        The cumulative distribution function.
        args:
            t: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        return 1 - self.survival(t, k, lmb)
    def survival(self, t, k=-1, lmb=-1, params=None):
        '''
        The survival function of the distribution (1-cdf)
        args:
            t: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return np.exp(-(t / lmb)**k)
    def survival_grad(self, x, k, lmb):
        '''
        The gradient of the survival function.
        args:
            x: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        survive = self.survival(x, k, lmb)
        delk = -survive * (x / lmb)**k * np.log(x / lmb)
        dellmb = survive * (x / lmb)**k * (k / lmb)
        return np.array([delk, dellmb])
    def logsurvival(self, t, k=-1, lmb=-1, params=None):
        '''
        The logarithm of the survival function.
        args:
            t: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
            params: An array of shape and scale parameters.
        '''
        with np.errstate(all='ignore'):
            return -(t / lmb)**k
    def hazard(self, x, k=None, lmb=None, params=None):
        '''
        The hazard rate of the Weibull (probability of seeing
        a sample in next time unit conditional on not having
        seen a sample until current time).
        args:
            x: The value at which to evaluate.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return self.pdf(x, k, lmb) / self.survival(x, k, lmb)
    def loglik(self, t, x=np.array([0]), k=0.5, lmb=0.3, W=None, x_samples=None, x_censored=None):
        '''
        The loglikelihood function for some observed data. Used to fit parameters to the data.
        Ability to regress against features optional.
        args:
            t: The observed samples.
            x: The censored samples.
            k: The starting value of the shape parameter.
            lmb: The starting value of the scale parameter.
            W: In case of regressing with features, this
               is a matrix of the parameters. It transforms
               the feature space to the shape and scale
               parameters of the Weibull.
            x_samples: The matrix of features associated
               with the sampled observations (t).
               Should have the same rows as size of t.
            x_censored: The matrix of features associated
               with the censored observations (x).
               Should have the same rows as size of x.
        '''
        #
        # If there are features, calculate gradient of features.
        #
        if W is not None and len(W.shape) == 2 and x_samples is not None and x_censored is not None:
            lik = 0
            # Sigmoids squash the linear predictor into valid (positive,
            # bounded) shape and scale parameters.
            s1 = Sigmoid(6.0)
            s2 = Sigmoid(1000.0)
            for i in range(len(x_samples)):
                theta = np.dot(W.T, x_samples[i])
                [k, lmb] = [s1.transformed(theta[0]), s2.transformed(theta[1])]
                lik += self.logpdf(t[i], k, lmb)
            for i in range(len(x_censored)):
                theta = np.dot(W.T, x_censored[i])
                [k, lmb] = [s1.transformed(theta[0]), s2.transformed(theta[1])]
                lik += self.logsurvival(x[i], k, lmb)
            return lik
        #
        # If there are no features, calculate feature-less gradients.
        #
        else:
            return sum(self.logpdf(t, k, lmb)) + sum(self.logsurvival(x, k, lmb))
    def grad(self, t, x=np.array([1e-3]), k=0.5, lmb=0.3, W=None, x_samples=None, x_censored=None):
        '''
        The gradient of the loglikelihood function for some observed data.
        Used to fit parameters to the data.
        Ability to regress against features optional.
        args:
            t: The observed samples.
            x: The censored samples.
            k: The starting value of the shape parameter.
            lmb: The starting value of the scale parameter.
            W: In case of regressing with features, this
               is a matrix of the parameters. It transforms
               the feature space to the shape and scale
               parameters of the Weibull.
            x_samples: The matrix of features associated
               with the sampled observations (t).
               Should have the same rows as size of t.
            x_censored: The matrix of features associated
               with the censored observations (x).
               Should have the same rows as size of x.
        '''
        #
        # If there are features, calculate likelihood with the help of features.
        #
        if W is not None and len(W.shape) == 2 and x_samples is not None and x_censored is not None:
            delW = np.zeros(W.shape)
            s1 = Sigmoid(6.0)
            s2 = Sigmoid(1000.0)
            for i in range(len(x_samples)):
                theta = np.dot(W.T, x_samples[i])
                [k, lmb] = [s1.transformed(theta[0]), s2.transformed(theta[1])]
                # Chain rule: d(sigmoid)/d(theta) times d(pdf)/d(k, lmb).
                deltheta = np.array([s1.grad(theta[0]), s2.grad(
                    theta[1])]) * self.pdf_grad(t[i], k, lmb)
                pdf = self.pdf(t[i], k, lmb)
                # If the pdf is zero, we need to switch to survival.
                if pdf > 1e-15:
                    delW += 1 / pdf * np.outer(x_samples[i], deltheta)
                else: # Now, all we will say is that recovery took more than 10 seconds.
                    deltheta = np.array([s1.grad(theta[0]), s2.grad(
                        theta[1])]) * self.survival_grad(10.0, k, lmb)
                    delW += 1 / \
                        self.survival(10.0, k, lmb) * \
                        np.outer(x_samples[i], deltheta)
            for i in range(len(x_censored)):
                theta = np.dot(W.T, x_censored[i])
                [k, lmb] = [s1.transformed(theta[0]), s2.transformed(theta[1])]
                deltheta = np.array([s1.grad(theta[0]), s2.grad(
                    theta[1])]) * self.survival_grad(x[i], k, lmb)
                sur = self.survival(x[i], k, lmb)
                if sur > 1e-15:
                    delW += 1 / sur * np.outer(x_censored[i], deltheta)
                else:
                    deltheta = np.array([s1.grad(theta[0]), s2.grad(
                        theta[1])]) * self.survival_grad(10.0, k, lmb)
                    delW += 1 / \
                        self.survival(10.0, k, lmb) * \
                        np.outer(x_censored[i], deltheta)
            return delW
        #
        # If there are no features, calculate feature-less likelihood.
        #
        else:
            n = len(t)
            delk = n / k - n * np.log(lmb) + sum(np.log(t)) - sum(
                (t / lmb)**k * np.log(t / lmb)) - sum((x / lmb)**k * np.log(x / lmb))
            dellmb = -n * k / lmb + k / \
                (lmb**(k + 1)) * (sum(t**k) + sum(x**k))
            return np.array([delk, dellmb])
    @staticmethod
    def mean_s(k,lmb):
        '''
        Closed-form mean of a Weibull(k, lmb): lmb * Gamma(1 + 1/k).
        '''
        return lmb*gamma(1+1/k)
    def mean(self):
        '''
        The mean of this fitted distribution.
        '''
        return Weibull.mean_s(self.k,self.lmb)
    @staticmethod
    def kappa_fn_(t, k):
        '''
        The kappa function. Setting this to zero gives us
        the kappa parameter assuming no censoring of the data.
        https://en.wikipedia.org/wiki/Weibull_distribution
        '''
        n = len(t)
        return sum(t**k*np.log(t))/sum(t**k) - 1/k -1/n*sum(np.log(t))
    @staticmethod
    def kappa_fn_wcensoring(t, x, k):
        '''
        The kappa estimating equation including censored observations x.
        '''
        n = len(t)
        return n / k + sum(np.log(t)) - n * (sum(t**k * np.log(t)) \
            + sum(x**k * np.log(x))) / (sum(x**k) + sum(t**k))
    def kappa_fn(self, k):
        '''
        The kappa estimating equation evaluated on this instance's samples.
        '''
        return self.kappa_fn_(self.t, k)
    @staticmethod
    def lmbd_fn(t, k):
        '''
        MLE of the scale parameter given shape k (no censoring).
        '''
        n = len(t)
        return (sum(t**k) / n)**(1 / k)
    @staticmethod
    def lmbd_fn_wcensoring(t, x, k):
        '''
        MLE of the scale parameter given shape k, with censored observations x.
        '''
        n = len(t)
        return ((sum(t**k) + sum(x**k)) / n)**(1 / k)
    @staticmethod
    def est_params(t):
        '''
        Estimate (k, lmb) for uncensored data t by solving the kappa
        equation via bisection, then plugging k into the scale MLE.
        '''
        # NOTE(review): `bisection` presumably comes from
        # survival.optimization.optimizn (imported with *) — confirm.
        fn = lambda k: Weibull.kappa_fn_(t, k)
        k = bisection(fn, 0.1, 5.0)
        lmb = Weibull.lmbd_fn(t, k)
        return k, lmb
    def get_params(self, W, i):
        '''
        In the case of regressing against features,
        we want to ensure that the final features
        are always positive. For this, we apply
        sigmoid functions to the output of what we
        get from multiplying the parameter matrix
        with the feature vector.
        args:
            W: The parameter matrix.
            i: Row index into self.x selecting the feature vector.
        '''
        theta = np.dot(W.T,self.x[i])
        kappa = Sigmoid.transform(theta[0],6.0)
        lmb = Sigmoid.transform(theta[1],1000.0)
        return np.array([kappa,lmb])
    def numerical_grad(self, t, x, k=0.5, lmb=0.3, W=None, x_samples=None, x_censored=None):
        '''
        The numericalgradient of the loglikelihood function for some observed data.
        Used to validate the analytic gradient.
        args:
            t: The observed samples.
            x: The censored samples.
            k: The starting value of the shape parameter.
            lmb: The starting value of the scale parameter.
            W: In case of regressing with features, this
               is a matrix of the parameters. It transforms
               the feature space to the shape and scale
               parameters of the Weibull.
            x_samples: The matrix of features associated
               with the sampled observations (t).
               Should have the same rows as size of t.
            x_censored: The matrix of features associated
               with the censored observations (x).
               Should have the same rows as size of x.
        '''
        # Step size for central finite differences.
        eps = 1e-5
        #
        # If there are features, calculate likelihood with the help of features.
        #
        if W is not None and len(W.shape) == 2 and x_samples is not None and x_censored is not None:
            delW = np.zeros(W.shape)
            for i in range(W.shape[0]):
                for j in range(W.shape[1]):
                    # Perturb W[i, j] in both directions, then restore it.
                    W[i, j] = W[i, j] + eps
                    hi = self.loglik(t, x, k, lmb, W, x_samples, x_censored)
                    W[i, j] = W[i, j] - 2 * eps
                    lo = self.loglik(t, x, k, lmb, W, x_samples, x_censored)
                    delW[i, j] = (hi - lo) / 2 / eps
                    W[i, j] = W[i, j] + eps
            return delW
        #
        # If there are no features, calculate feature-less likelihood.
        #
        else:
            delk = (self.loglik(t, x, k + eps, lmb) -
                    self.loglik(t, x, k - eps, lmb)) / 2 / eps
            dellmb = (self.loglik(t, x, k, lmb + eps) -
                      self.loglik(t, x, k, lmb - eps)) / 2 / eps
            return np.array([delk, dellmb])
    # Dead code kept for reference: a line-search gradient descent fitter.
    # NOTE(review): __init__ still calls self.gradient_descent — presumably a
    # live version exists in Base/optimizn; confirm before deleting this block.
    '''
    def gradient_descent(self, numIter=2001, params = np.array([.5,.3])):
        for i in range(numIter):
            #lik = self.loglik(self.t, self.x, params[0], params[1], params, self.x_samples, self.x_censored)
            directn = self.grad(self.t, self.x, params[0], params[1], params, self.x_samples, self.x_censored)
            params2 = params + 1e-9*directn
            lik = self.loglik(self.t, self.x, params[0], params[1], params2, self.x_samples, self.x_censored)
            for alp1 in [1e-8,1e-7,1e-5,1e-3,1e-2,.1]:
                params1 = params + alp1 * directn
                if len(params1.shape) == 2 or min(params1) > 0:
                    lik1 = self.loglik(self.t, self.x, params1[0], params1[1], params1, self.x_samples, self.x_censored)
                    if(lik1 > lik and np.isfinite(lik1)):
                        lik = lik1
                        params2 = params1
            params = params2
            if i%25 == 0:
                print("Iteration " + str(i) + " ,objective function: " + str(lik) + " \nparams = " + str(params) + " \nGradient = " + str(directn))
                print("\n########\n")
        return params
    '''
    def hessian(self, t, x, k=0.5, lmb=0.3):
        '''
        The hessian of the log likelihood function.
        Used by the Newton Raphson method.
        args:
            t: The array of observations of arrival times.
            x: The array of censored observations.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        n = len(t)
        delksq = -n / k**2 - \
            sum((t / lmb)**k * np.log(t / lmb)**2) - \
            sum((x / lmb)**k * np.log(x / lmb)**2)
        dellmbsq = n * k / lmb**2 + \
            (sum(t**k) + sum(x**k)) * (- k * (k + 1) / lmb**(k + 2))
        dellmbk = -n / lmb + 1 / lmb * (sum(k * (t / lmb)**k * np.log(t / lmb) + (
            t / lmb)**k) + sum(k * (x / lmb)**k * np.log(x / lmb) + (x / lmb)**k))
        hess = np.zeros([2, 2])
        hess[0, 0] = delksq
        hess[1, 1] = dellmbsq
        hess[0, 1] = hess[1, 0] = dellmbk
        return hess
    def optimal_wait_threshold(self, intervention_cost):
        '''
        Given the cost of plan B (intervention_cost), what
        is the optimal time we should wait for this distribution?
        args:
            intervention_cost: The cost of giving up on this distribution.
        '''
        return self.lmb ** (self.k / (self.k - 1)) / \
            (intervention_cost * self.k) ** (1 / (self.k - 1))
    def samples(self, size=1000):
        '''
        Generates samples from current Weibull distribution.
        args:
            size: The number of samples to be generated.
        '''
        return exponweib.rvs(a=1, c=self.k, scale=self.lmb, size=size)
    @staticmethod
    def samples_(k, lmb, size=1000):
        '''
        Generates samples from a Weibull distribution with the given
        shape (k) and scale (lmb) parameters.
        '''
        return exponweib.rvs(a=1, c=k, scale=lmb, size=size)
def generate_features(size):
    """Build a (2*size, 3) integer feature matrix.

    The first `size` rows are [1, 1, 0] and the next `size` rows are
    [1, 0, 1], produced by repeating each two-row template into `size` rows.
    """
    group_a = np.repeat(np.array([[1, 1, 0], [1, 1, 0]]), [2, (size - 2)], axis=0)
    group_b = np.repeat(np.array([[1, 0, 1], [1, 0, 1]]), [2, (size - 2)], axis=0)
    return np.concatenate([group_a, group_b], axis=0)
def generate_weibull(size):
    """Draw `size` samples from each of two Weibull populations and stack them.

    The first population has shape 0.5 / scale 0.3, the second shape 1.1 /
    scale 0.1; the result is a 1-D array of length 2*size.
    """
    first = exponweib.rvs(a=1, c=0.5, scale=0.3, size=size)
    second = exponweib.rvs(a=1, c=1.1, scale=0.1, size=size)
    return np.concatenate([first, second])
if __name__ == '__main__':
    # Smoke test: build synthetic features, censor observations above 1.5,
    # evaluate the feature-regression likelihood/gradient, and fit W.
    w = Weibull()
    x_samples = generate_features(100)
    t = generate_weibull(100)
    x_censored = x_samples[t > 1.5,]
    x_samples = x_samples[t < 1.5,]
    x = np.ones(sum(t > 1.5)) * 1.5
    t = t[t < 1.5]
    # Initial parameter matrix mapping the 3 features to (shape, scale).
    W = np.array([[0.1, 0.4], [0.5, 0.3], [0.2, 0.7]])
    print(str(w.loglik(t, x, W=W, x_censored=x_censored, x_samples=x_samples)))
    print(str(w.grad(t, x, W=W, x_censored=x_censored, x_samples=x_samples)))
    w.gradient_descent(params=W)
| true |
5625d31ee3043ab332ee572d705d8f09c2291897 | Python | LuoBingjun/LeetCode-Solutions | /5.longest-palindromic-substring.py | UTF-8 | 929 | 3.5625 | 4 | [] | no_license | #
# @lc app=leetcode id=5 lang=python3
#
# [5] Longest Palindromic Substring
#
# @lc code=start
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of s.

        Expands around every possible center (odd and even) and keeps the
        first longest candidate found, matching left-to-right scan order.
        """
        length = len(s)
        if length == 0:
            return ""

        def grow(lo: int, hi: int) -> str:
            # Widen [lo, hi] while it remains a palindrome inside s.
            while lo >= 0 and hi < length and s[lo] == s[hi]:
                lo -= 1
                hi += 1
            return s[lo + 1:hi]

        best = s[0]
        for center in range(length - 1):
            odd = grow(center, center)
            even = grow(center, center + 1)
            candidate = odd if len(odd) > len(even) else even
            if len(candidate) > len(best):
                best = candidate
        return best
# @lc code=end
# Ad-hoc smoke tests; expected output: bab, bb, a, b, bb, bab, bab.
sol = Solution()
print(sol.longestPalindrome("babad"))
print(sol.longestPalindrome("cbbd"))
print(sol.longestPalindrome("a"))
print(sol.longestPalindrome("ba"))
print(sol.longestPalindrome("bb"))
print(sol.longestPalindrome("bab"))
print(sol.longestPalindrome("baba"))
d92c8b0e4f4790b18876e2393b44a5c838024014 | Python | keshav143420/AllCodeRhythms | /test/codejam/gcj2020/qual_round/test_vestigium.py | UTF-8 | 864 | 2.8125 | 3 | [] | no_license | import unittest
import codejam.gcj2020.qual_round.vestigium as vestigium
class VestigiumTestCase(unittest.TestCase):
    """Tests for Vestigium.get_result on the Code Jam sample matrices."""

    def setUp(self) -> None:
        self.vestigum = vestigium.Vestigium()

    def test_get_result_sample_data_4d(self):
        # 4x4 Latin square: trace 4, no duplicated rows or columns.
        matrix = [[1, 2, 3, 4],
                  [2, 1, 4, 3],
                  [3, 4, 1, 2],
                  [4, 3, 2, 1]]
        self.assertEqual((4, 0, 0), self.vestigum.get_result(4, matrix))

    def test_get_result_sample_data_3d(self):
        # Trace 8; every row is a permutation but two columns repeat a value.
        matrix = [[2, 1, 3],
                  [1, 3, 2],
                  [1, 2, 3]]
        self.assertEqual((8, 0, 2), self.vestigum.get_result(3, matrix))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true |
006e84a338d3021ea2338ab41a4441cf340a03ab | Python | wangyy161/DDPG_CNN_Pendulum_practice | /7_CartPole_IQN.py | UTF-8 | 10,268 | 2.6875 | 3 | [] | no_license | # Cartpole
# State -> x, x_dot, theta, theta_dot
# Action -> force (+1, -1)
import datetime
import random
import gym
import matplotlib.pyplot as plt
import numpy as np
# Import modules
import tensorflow as tf
env = gym.make('CartPole-v0')
game_name = 'CartPole'
algorithm = 'IQN'
# Parameter setting
Num_action = 2            # CartPole's two discrete actions (push left / right)
Gamma = 0.99              # discount factor
Learning_rate = 0.00025   # Adam learning rate
Epsilon = 1               # initial exploration rate (decayed during training)
Final_epsilon = 0.01      # exploration floor
# Parameter for IQN
Num_quantile = 32         # quantile samples (taus) per forward pass
embedding_dim = 64        # cosine-embedding dimension for the sampled taus
# Parameter for risk sensitive policy
sample_min = 0.0          # lower bound of the tau sampling range
sample_max = 1.0          # upper bound of the tau sampling range
# Parameter for training
Num_replay_memory = 10000    # replay buffer capacity (transitions)
Num_start_training = 10000   # pure-exploration steps before training begins
Num_training = 15000         # training steps
Num_testing = 10000          # greedy evaluation steps
Num_update = 150             # target-network sync period (in steps)
Num_batch = 32               # minibatch size
Num_episode_plot = 20        # episodes averaged per plotted point
# Fully-connected layer sizes, each as [fan_in, fan_out].
first_fc = [4, 512]
second_fc = [512, 128]
third_fc = [128, Num_action]
embedding_fc = [embedding_dim, 512]
Is_render = False            # set True to render the gym environment
# Initialize weights and bias
def weight_variable(name, shape):
    """Create (or fetch) a Xavier-initialized trainable weight variable."""
    xavier = tf.contrib.layers.xavier_initializer()
    return tf.get_variable(name, shape=shape, initializer=xavier)
def bias_variable(name, shape):
    """Create (or fetch) a Xavier-initialized trainable bias variable."""
    xavier = tf.contrib.layers.xavier_initializer()
    return tf.get_variable(name, shape=shape, initializer=xavier)
# Assigning network variables to target network variables
def assign_network_to_target():
    """Copy every trainable variable in the 'network' scope into the
    positionally-corresponding variable of the 'target' scope."""
    all_vars = tf.trainable_variables()
    online_vars = [var for var in all_vars if var.name.startswith('network')]
    target_vars = [var for var in all_vars if var.name.startswith('target')]
    for online_var, target_var in zip(online_vars, target_vars):
        sess.run(tf.assign(target_var, online_var))
# Input: CartPole observation (x, x_dot, theta, theta_dot).
x = tf.placeholder(tf.float32, shape=[None, 4])
# Embedding weight and bias
w_embedding = weight_variable('w_embedding', embedding_fc)
b_embedding = bias_variable('b_embedding', embedding_fc[1])
# Embedding: sample Num_quantile taus per state and embed each with a
# cosine basis of size embedding_dim (IQN quantile embedding).
batch_size = tf.shape(x)[0]
sample = tf.random_uniform([batch_size * Num_quantile, 1], minval=sample_min, maxval=sample_max, dtype=tf.float32)
sample_tile = tf.tile(sample, [1, embedding_dim])
embedding = tf.cos(tf.cast(tf.range(0, embedding_dim, 1), tf.float32) * np.pi * sample_tile)
embedding_out = tf.nn.relu(tf.matmul(embedding, w_embedding) + b_embedding)
# Densely connect layer variables
with tf.variable_scope('network'):
    w_fc1 = weight_variable('_w_fc1', first_fc)
    b_fc1 = bias_variable('_b_fc1', [first_fc[1]])
    w_fc2 = weight_variable('_w_fc2', second_fc)
    b_fc2 = bias_variable('_b_fc2', [second_fc[1]])
    w_fc3 = weight_variable('_w_fc3', third_fc)
    b_fc3 = bias_variable('_b_fc3', [third_fc[1]])
h_fc1 = tf.nn.relu(tf.matmul(x, w_fc1) + b_fc1)
# Embedding: modulate the state features with the tau embedding
# (element-wise product), one copy per quantile sample.
h_fc1_tile = tf.tile(h_fc1, [Num_quantile, 1])
h_fc1_embedding = tf.multiply(h_fc1_tile, embedding_out)
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_embedding, w_fc2) + b_fc2)
logits = tf.matmul(h_fc2, w_fc3) + b_fc3
logits_reshape = tf.reshape(logits, [Num_quantile, batch_size, Num_action])
# Q-values are the mean over the quantile axis.
Q_network = tf.reduce_mean(logits_reshape, axis=0)
# Densely connect layer variables target
with tf.variable_scope('target'):
    w_fc1_target = weight_variable('_w_fc1', first_fc)
    b_fc1_target = bias_variable('_b_fc1', [first_fc[1]])
    w_fc2_target = weight_variable('_w_fc2', second_fc)
    b_fc2_target = bias_variable('_b_fc2', [second_fc[1]])
    w_fc3_target = weight_variable('_w_fc3', third_fc)
    b_fc3_target = bias_variable('_b_fc3', [third_fc[1]])
h_fc1_target = tf.nn.relu(tf.matmul(x, w_fc1_target) + b_fc1_target)
# Embedding (target)
h_fc1_tile_target = tf.tile(h_fc1_target, [Num_quantile, 1])
h_fc1_embedding_target = tf.multiply(h_fc1_tile_target, embedding_out)
h_fc2_target = tf.nn.relu(tf.matmul(h_fc1_embedding_target, w_fc2_target) + b_fc2_target)
logits_target = tf.matmul(h_fc2_target, w_fc3_target) + b_fc3_target
logits_target_reshape = tf.reshape(logits_target, [Num_quantile, batch_size, Num_action])
# Loss function and Train
theta_target_loss = tf.placeholder(tf.float32, shape=[None, Num_quantile])
action_binary_loss = tf.placeholder(tf.float32, shape=[Num_quantile, None, Num_action])
# Get valid logits (extracting output with respect to action batch)
theta_pred = tf.reduce_sum(tf.multiply(logits_reshape, action_binary_loss), axis=2)
theta_target_tile = tf.tile(tf.expand_dims(theta_target_loss, axis=0), [Num_quantile, 1, 1])
theta_pred_tile = tf.tile(tf.expand_dims(theta_pred, axis=2), [1, 1, Num_quantile])
# Get Huber loss
Huber_loss = tf.losses.huber_loss(theta_target_tile, theta_pred_tile, reduction=tf.losses.Reduction.NONE)
# Get tau
tau = tf.reshape(sample, [Num_quantile, -1, 1])
inv_tau = 1.0 - tau
# Get Loss: asymmetric (quantile-weighted) Huber loss, i.e. the quantile
# regression loss — underestimates weighted by tau, overestimates by 1-tau.
error_loss = theta_target_tile - theta_pred_tile
Loss = tf.where(tf.less(error_loss, 0.0), inv_tau * Huber_loss, tau * Huber_loss)
Loss = tf.reduce_mean(tf.reduce_sum(tf.reduce_mean(Loss, axis=0), axis=1))
# Train step
train_step = tf.train.AdamOptimizer(learning_rate=Learning_rate, epsilon=0.01 / 32).minimize(Loss)
# Initialize variables
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
init = tf.global_variables_initializer()
sess.run(init)
# Initial parameters
Replay_memory = []
step = 1
score = 0
episode = 0
plot_y_loss = []
plot_y_maxQ = []
loss_list = []
maxQ_list = []
data_time = str(datetime.date.today()) + '_' + str(datetime.datetime.now().hour) + '_' + str(
    datetime.datetime.now().minute)
state = env.reset()
# Figure and figure data setting
plot_x = []
plot_y = []
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
# Making replay memory
# Main loop: Exploring -> Training -> Testing, one environment step per pass.
while True:
    if Is_render:
        # Rendering
        env.render()
    if step <= Num_start_training:
        progress = 'Exploring'
    elif step <= Num_start_training + Num_training:
        progress = 'Training'
    elif step < Num_start_training + Num_training + Num_testing:
        progress = 'Testing'
    else:
        # Test is finished
        print('Test is finished!!')
        plt.savefig('./Plot/' + data_time + '_' + algorithm + '_' + game_name + '.png')
        break
    # Select Action (Epsilon Greedy)
    if random.random() < Epsilon:
        action = np.zeros([Num_action])
        action[random.randint(0, Num_action - 1)] = 1.0
        action_step = np.argmax(action)
    else:
        Q_value = Q_network.eval(feed_dict={x: [state]})[0]
        action = np.zeros([Num_action])
        action[np.argmax(Q_value)] = 1
        action_step = np.argmax(action)
    state_next, reward, terminal, info = env.step(action_step)
    if progress != 'Testing':
        # Training to stay at the center
        reward -= 5 * abs(state_next[0])
    # Save experience to the Replay memory
    if len(Replay_memory) > Num_replay_memory:
        del Replay_memory[0]
    Replay_memory.append([state, action, reward, state_next, terminal])
    if progress == 'Training':
        minibatch = random.sample(Replay_memory, Num_batch)
        # Save the each batch data
        state_batch = [batch[0] for batch in minibatch]
        action_batch = [batch[1] for batch in minibatch]
        reward_batch = [batch[2] for batch in minibatch]
        state_next_batch = [batch[3] for batch in minibatch]
        terminal_batch = [batch[4] for batch in minibatch]
        # Update target network according to the Num_update value
        if step % Num_update == 0:
            assign_network_to_target()
        # Get target supports: per-quantile Bellman targets using the target
        # network's quantiles at the online network's greedy next action.
        theta_target = []
        Q_batch = Q_network.eval(feed_dict={x: state_next_batch})
        theta_batch = logits_target_reshape.eval(feed_dict={x: state_next_batch})
        for i in range(len(minibatch)):
            theta_target.append([])
            for j in range(Num_quantile):
                if terminal_batch[i] == True:
                    theta_target[i].append(reward_batch[i])
                else:
                    theta_target[i].append(reward_batch[i] + Gamma * theta_batch[j, i, np.argmax(Q_batch[i])])
        # Calculate action binary: one-hot mask selecting each sample's action.
        action_binary = np.zeros([Num_quantile, Num_batch, Num_action])
        for i in range(len(action_batch)):
            action_batch_max = np.argmax(action_batch[i])
            action_binary[:, i, action_batch_max] = 1
        loss, _ = sess.run([Loss, train_step], feed_dict={action_binary_loss: action_binary,
                                                          theta_target_loss: theta_target,
                                                          x: state_batch})
        loss_list.append(loss)
        maxQ_list.append(np.max(Q_batch))
        # Reduce epsilon at training mode
        if Epsilon > Final_epsilon:
            Epsilon -= 1.0 / Num_training
    if progress == 'Testing':
        Epsilon = 0
    # Update parameters at every iteration
    step += 1
    score += reward
    state = state_next
    # Plot average score
    # NOTE(review): Axes.hold() was removed in modern matplotlib — this
    # script presumably targets matplotlib < 3.0; confirm before upgrading.
    if len(plot_x) % Num_episode_plot == 0 and len(plot_x) != 0 and progress != 'Exploring':
        ax1.plot(np.average(plot_x), np.average(plot_y_loss), '*')
        ax1.set_title('Mean Loss')
        ax1.set_ylabel('Mean Loss')
        ax1.hold(True)
        ax2.plot(np.average(plot_x), np.average(plot_y), '*')
        ax2.set_title('Mean score')
        ax2.set_ylabel('Mean score')
        ax2.hold(True)
        ax3.plot(np.average(plot_x), np.average(plot_y_maxQ), '*')
        ax3.set_title('Mean Max Q')
        ax3.set_ylabel('Mean Max Q')
        ax3.set_xlabel('Episode')
        ax3.hold(True)
        plt.draw()
        plt.pause(0.000001)
        plot_x = []
        plot_y = []
        plot_y_loss = []
        plot_y_maxQ = []
    # Terminal
    if terminal == True:
        print('step: ' + str(step) + ' / ' +
              'episode: ' + str(episode) + ' / ' +
              'progess: ' + progress + ' / ' +
              'epsilon: ' + str(Epsilon) + ' / ' +
              'score: ' + str(score))
        if progress != 'Exploring':
            # add data for plotting
            plot_x.append(episode)
            plot_y.append(score)
            plot_y_loss.append(np.mean(loss_list))
            plot_y_maxQ.append(np.mean(maxQ_list))
        score = 0
        loss_list = []
        maxQ_list = []
        episode += 1
        state = env.reset()
| true |
6575b2789a09bc59f0c072557b05c70fac838474 | Python | AlexisDongMariano/leetcode | /1678 - Goal Parser Interpretation.py | UTF-8 | 1,162 | 4.15625 | 4 | [] | no_license | # ==============================
# Information
# ==============================
# Title: 1678 - Goal Parser Interpretation
# Link: https://leetcode.com/problems/goal-parser-interpretation/
# Difficulty: Easy
# Language: Python
# Problem:
# You own a Goal Parser that can interpret a string command. The command consists of an alphabet of
# "G", "()" and/or "(al)" in some order. The Goal Parser will interpret "G" as the string "G", "()"
# as the string "o", and "(al)" as the string "al". The interpreted strings are then concatenated
# in the original order.
# Given the string command, return the Goal Parser's interpretation of command.
# Example
# Input: command = "G()(al)"
# Output: "Goal"
# Explanation: The Goal Parser interprets the command as follows:
# G -> G
# () -> o
# (al) -> al
# The final concatenated result is "Goal".
# ==============================
# Solution
# ==============================
def parse_goal(command):
    """Interpret a Goal Parser command: 'G' -> 'G', '()' -> 'o', '(al)' -> 'al'."""
    # The two substitutions never overlap or create each other's pattern,
    # so the replacement order does not matter.
    return command.replace('(al)', 'al').replace('()', 'o')
# Ad-hoc smoke test: print the interpretation of two sample commands.
command = 'G()()()()(al)'
command2 = '(al)G(al)()()G'
for sample in (command, command2):
    print(parse_goal(sample))
def scoobydoo(villian, villians):
    """Decode a scrambled villain name and match it against *villians*.

    Decoding steps: lowercase, rotate right by 5 (via the external ``shift``
    helper), reverse, then Caesar-shift every second character forward by 5.
    Returns the entry of *villians* whose lowercased, space-stripped form
    equals the decoded name (implicitly None when nothing matches).
    """
    villian = villian.lower()
    # Step 1 - Rotate Right by 5
    villian = ''.join(shift(villian, 5))
    # Step 2 - Reverse
    villian = villian[::-1]
    # Step 3 - Change even letters by 5.
    # Build the translation table once instead of once per character.
    shift5 = str.maketrans('abcdefghijklmnopqrstuvwxyz', 'fghijklmnopqrstuvwxyzabcde')
    badguy = ""
    for num, le in enumerate(villian, start=1):
        if num % 2 == 0:
            le = le.translate(shift5)
        badguy += le
    for bg in villians:
        if ''.join(bg.lower().split(' ')) == badguy:
            return bg
| true |
a804ed4b6ef38c0fcb871bcd749ce2af86cabcf9 | Python | beasonshu/XX-Net | /code/default/lib/tests/test_utils.py | UTF-8 | 597 | 2.796875 | 3 | [
"BSD-2-Clause"
] | permissive | import unittest
import utils
class TestIP(unittest.TestCase):
    """Sanity checks for helpers in the project's ``utils`` module."""

    def test_check_ipv4(self):
        # A hostname is not a valid dotted-quad IPv4 address.
        hostname = 'bat-bing-com.a-0001.a-msedge.net.'
        self.assertFalse(utils.check_ip_valid4(hostname))

    def test_private_ip(self):
        # A hostname cannot be classified as a private IP either.
        hostname = 'bat-bing-com.a-0001.a-msedge.net.'
        self.assertFalse(utils.is_private_ip(hostname))

    def test_merge_dict(self):
        # On key collisions the second dict's value wins ('b' -> 3).
        first = {'a': 1, 'b': 2}
        second = {'b': 3, 'c': 4}
        merged = utils.merge_two_dict(first, second)
        self.assertEqual(merged, {'a': 1, 'b': 3, 'c': 4})
| true |
3b0813a6dca995d3841b310b01b88b464e33c125 | Python | aidsfintech/Algorithm-and-Query | /Algorithm/python/algorithmjobs/thursdaytest/210401/04(veryimportant)baseballgame.py | UTF-8 | 2,307 | 3.046875 | 3 | [] | no_license | import sys
# int 1, str '1' ๋ชจ๋ ์ถ๋ ฅ๋๋ฉด 1์ด๋๋ผ....
# ์ ์ ํ ์ด๊ธฐํ ์์น์ ๊ธฐ๋ณธ๋ฌธ๋ฒ์ธ ์๋ฃํ....
# ํ์ด์ฌ ํ๋ฉด ๋ณ์์ ๋ํ ๊ฐ๊ฐ์ด ๋ฌด๋์ง๋๋ฐ, ๊ทธ๋ฅ C ํธํ์ํ๋ก ์๊ฐํ์
def makeallcases(depth, case, limit):
    """Backtracking enumeration of every `limit`-digit candidate using
    distinct digits 1..9; completed candidates are appended (as copies)
    to the module-level list ``allcases``."""
    global allcases
    if depth >= limit:
        # Append a copy: `case` is mutated in place by the loop below,
        # so storing the list itself would leave only [0, 0, 0] entries.
        allcases.append(list(case))
        return
    for digit in range(1, 10):
        if digit in case:
            continue
        case[depth] = digit
        makeallcases(depth + 1, case, limit)
        case[depth] = 0
if __name__=="__main__":
N=int(sys.stdin.readline().strip())
answers=[]
for _ in range(N):
answer=sys.stdin.readline().split()
answers.append(answer)
# print(answers)
global allcases
allcases=[]
depth=0
case=[0]*3
limit=3
makeallcases(depth,case,limit)
# print(len(allcases))
satisfyingcases=[]
for case in allcases:
#token per case
token_allpass=True
# print(case)
#init strike, ball per case, NO!!! per answer!!
# cnt_strike=0
# cnt_ball=0
for answer in answers:
#init strike, ball per case, NO!!! per answer!!
cnt_strike=0
cnt_ball=0
std_trial=list(map(int,answer[0]))
std_strike=int(answer[1])
std_ball=int(answer[2])
# print(std_trial,std_strike,std_ball,end='/')
for idx_std, val_std in enumerate(std_trial):
for idx_case, val_case in enumerate(case):
if(val_std==val_case):
#at least ball or strike
if(idx_std==idx_case):
cnt_strike+=1
else:
cnt_ball+=1
else:
pass
# print(case, cnt_strike,cnt_ball)
# checking one answer
if(cnt_strike==std_strike and cnt_ball==std_ball):
#maintain
# print(case)
token_allpass=True
else:
token_allpass=False
break
#after checking answers per case ex) 4times
if(token_allpass):
satisfyingcases.append(case)
print(len(satisfyingcases)) | true |
34799bc29d6d81d47f9b21a93f9155b597f6587c | Python | johnqh/Sudoku-Solver | /codes/solver.py | UTF-8 | 1,426 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 09:40:39 2020
@author: omer.eker
"""
from single_candidate import single_cand
from hidden_singles import hidden_singles
from hidden_pairs_triples_quads import hidden_pairs_triples, hidden_quads
from naked_pairs_triples_quads import naked_pairs_triples, naked_quads
from pointing_pairs import pointing_pairs
from box_line import box_line
from x_wing import x_wing
from y_wing import y_wing
from singles_chains import singles_chains
from xyz_wing import xyz_wing
from swordfish import swordfish
import sys
#%% SOLVER FUNCTION
def solver(board, cands, square_pos):
    """Apply one pass of every solving strategy, in fixed order, while the
    board still has empty cells ("."); otherwise report completion and exit.

    NOTE: calls sys.exit(0) on completion, terminating the whole process.
    """
    if (board == ".").any().any():
        # The order of this pipeline is significant: cheap strategies run
        # first, progressively more expensive ones later.
        strategies = (
            single_cand,
            hidden_singles,
            naked_pairs_triples,
            hidden_pairs_triples,
            pointing_pairs,
            box_line,
            naked_quads,
            hidden_quads,
            x_wing,
            y_wing,
            singles_chains,
            xyz_wing,
            swordfish,
        )
        for strategy in strategies:
            strategy(board, cands, square_pos)
    else:
        print("COMPLETE!!!!!")
        sys.exit(0)
a7a0b378a75f63df2e844a97b819c5365a8b586d | Python | chaobiubiu/RMAX-KNN | /rmax_knn_td_test_pendulum.py | UTF-8 | 2,136 | 2.546875 | 3 | [] | no_license | #different k in pendulum
from rmax_knn_td import *
from rmax_knn_td_pendulum_part import *
from rmax_knn_td_part2 import *
import gym
import matplotlib.pyplot as plt
# Per-k experiment traces: each x* list collects episode indices and the
# matching y* list collects total rewards.
x1, y1 = [], []
x2, y2 = [], []
x3, y3 = [], []
x4, y4 = [], []
x5, y5 = [], []
def BenchmarkExperiment(Episodes, k, x, y):
    """Run one SARSA benchmark on Pendulum-v0 for the given k.

    Appends every 12th episode's index and total reward to the caller's
    lists *x* and *y* in place.
    """
    print()
    print('- - - - - -')
    print('INIT EXPERIMENT','k='+str(k))
    env = gym.make('Pendulum-v0')
    env = env.unwrapped
    knn_q = IncrementKNNQ(n_max=10000, n_actions=5, n_features=3, k=k, d_target=0.05, d_point=0.09,
                          input_ranges=[[-1, 1], [-1, 1], [-8, 8]], RMAX=0, alpha=0.9, lr=0.95, gamma=0.95)
    selector = Action_selector()
    agent = Base(knn_q, env, selector, gamma=0.95)
    for episode in range(Episodes):
        result = agent.SarsaEpisode(500)
        # Size of the KNN representation set so growth can be monitored.
        print(len(knn_q.representations))
        print('Episodes:',episode,'Total_reward:',result[0])
        if episode % 12 == 0:
            x.append(episode)
            y.append(result[0])
# Run the five k settings, persist every trace to disk, then plot them all.
_runs = [(1, x1, y1), (3, x2, y2), (5, x3, y3), (7, x4, y4), (9, x5, y5)]
for _k, _xs, _ys in _runs:
    BenchmarkExperiment(Episodes=100, k=_k, x=_xs, y=_ys)
plt.figure(num=7, figsize=(10, 8))
for _idx, (_k, _xs, _ys) in enumerate(_runs, start=1):
    np.savetxt('pendulumk_x%d.txt' % _idx, _xs)
    np.savetxt('pendulumk_y%d.txt' % _idx, _ys)
_styles = [('red', '-.', 'x'), ('black', '-', 'o'), ('blue', '--', '^'),
           ('green', ':', 's'), ('magenta', '-.', 'd')]
for (_k, _xs, _ys), (_color, _ls, _marker) in zip(_runs, _styles):
    plt.plot(_xs, _ys, color=_color, label='k=%d' % _k, linestyle=_ls, marker=_marker)
plt.legend(loc='lower right')
plt.xlabel('Episodes')
plt.ylabel('Total_reward')
plt.show()
| true |
# Linear-insert into a hash table of size p and report the (1-based) index
# of the first value whose slot is already taken, or -1 if none collides.
p, n = [int(tok) for tok in input().split()]
occupied = {}
collision_at = 0
for i in range(int(n)):
    value = int(input())
    slot = value % p
    if slot in occupied:
        print(i + 1)
        collision_at = i
        break
    occupied[slot] = value
# collision_at stays 0 only when no collision occurred (the first insert
# can never collide, so a real collision always has i >= 1).
if collision_at == 0:
    print(-1)
| true |
3da7eb145850222a1bd85c438219433845ae485f | Python | zhouziling/sign_language_robot | /dual_ur5_control/script/pyswarms_example.py | UTF-8 | 921 | 2.71875 | 3 | [] | no_license | # Import modules
import numpy as np
import matplotlib.pyplot as plt
import math
# Import PySwarms
import pyswarms as ps
from pyswarms.utils.functions import single_obj as fx
# create a parameterized version of the classic Rosenbrock unconstrained optimzation function
def get_a_cost_val(x):
    """Objective callback for the optimizer.

    Ignores the particle positions *x* and instead asks the user to type
    the cost value on stdin.
    """
    return float(input("Input a cost val"))
# Set-up hyperparameters as dict
# c1/c2 are the cognitive/social coefficients, w the inertia weight.
options = {'c1': 0.5, 'c2': 0.3, 'w':0.9}
# set bounds
g_max = np.array([0.6, 0.6, 0.6, math.pi, math.pi, math.pi])
g_min = np.array([-0.6, 0.3, 0.25, -math.pi, -math.pi, -math.pi])
bounds = (g_min, g_max)
# NOTE(review): `bounds` is constructed here but never handed to the
# optimizer below -- confirm whether it was meant to be passed in.
# Create an instance of PSO optimizer
optimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=6, options=options) # global best PSO
# Perform optimization: call the optimize() and store the optimal cost as well as positions
cost, pos = optimizer.optimize(get_a_cost_val, iters=10)
| true |
8a8f23d60cc0b833c0250eb8122f6fdc14e95279 | Python | mark-styx/development_management_platform | /classes/gui/mk_btn.py | UTF-8 | 4,207 | 2.703125 | 3 | [] | no_license | from tkinter import Button,Label
class Btn():
    """Wrapper around a tkinter Button with an optional side label and an
    optional toggle (on/off) behaviour."""

    def __init__(
        self,parent,loc,size=(100,20),txt=None,img=None,
        cmd=lambda:print('undefined'),
        deact_cmd=lambda:print('undefined'),
        label=False,label_loc='left',label_txt=None,toggle=False,
        border=True,alt_clr=False
    ):
        # Two colour schemes, selected by alt_clr; text is white in both.
        if not alt_clr:
            self.bg = '#1c1c1f';self.fg='white'
        else:
            self.bg = '#292e30';self.fg='white'
        # Store the raw callbacks first; in toggle mode the widget invokes
        # self.toggle, which dispatches to cmd / deact_cmd itself.
        self.cmd,self.deact_cmd = cmd,deact_cmd
        if toggle: cmd = self.toggle
        self.button = Button(
            parent,image=img,bg=self.bg,fg=self.fg,command=cmd,text=txt,
            highlightthickness=0,borderwidth=0)
        self.button.place(x=loc[0],y=loc[1],width=size[0],height=size[1])
        if label:
            # BUG FIX: the original passed the undefined local names
            # `bg`/`fg` here, which raised NameError whenever label=True.
            self.label = Label(parent,bg=self.bg,fg=self.fg,text=label_txt)
            # Placement of the label relative to the button.
            location = {
                'left':lambda x:(x[0]-size[0],x[1]),
                'right':lambda x:(x[0]+size[0],x[1]),
                'below':lambda x:(x[0],x[1]+size[1]),
                'above':lambda x:(x[0],x[1]-size[1])
            }
            x,y = location[label_loc](loc)
            if img:
                # Image buttons scale the label width with the text length.
                w = 50 + (len(label_txt)*3)
            else: w = size[0]
            self.label.place(x=x,y=y,width=w,height=size[1])
        self.active = False
        if not border: self.button['border'] = '0'

    def toggle(self):
        """Flip between active (sunken, gold) and inactive (raised) looks
        and fire the matching callback."""
        if not self.active:
            self.active = True
            self.button.config(relief='sunken',bg='#856c14',fg='black')
            self.cmd()
        else:
            self.active = False
            self.button.config(relief='raised',bg=self.bg,fg=self.fg)
            self.deact_cmd()

    def deactivate(self):
        """Force the inactive look without firing deact_cmd."""
        self.active = False
        self.button.config(relief='raised',bg=self.bg,fg=self.fg)

    def destroy(self):
        """Destroy the underlying tkinter widget."""
        self.button.destroy()
'''
class fBtn(tk.Canvas):
def __init__(self,
parent, width=100, height=20, cornerradius=2, padding=2,
color='#1c1c1f',bg='#292e30', command=lambda: print('unbound')
):
tk.Canvas.__init__(self, parent, borderwidth=0,
relief="raised", highlightthickness=0, bg=bg)
self.parent = parent
self.command = command
self.color = color
if cornerradius > 0.5*width:
print("Error: cornerradius is greater than width.")
return None
if cornerradius > 0.5*height:
print("Error: cornerradius is greater than height.")
return None
rad = 2*cornerradius
self.ids = [
self.create_polygon((padding,height-cornerradius-padding,padding,cornerradius+padding,padding +cornerradius,padding,width-padding-cornerradius,padding,width-padding,cornerradius+padding, width-padding,height-cornerradius-padding,width-padding-cornerradius,height-padding,padding +cornerradius,height-padding), fill=color, outline=color),
self.create_arc((padding,padding+rad,padding+rad,padding), start=90, extent=90, fill=color, outline=color),
self.create_arc((width-padding-rad,padding,width-padding,padding+rad), start=0, extent=90, fill=color, outline=color),
self.create_arc((width-padding,height-rad-padding,width-padding-rad,height-padding), start=270, extent=90, fill=color, outline=color),
self.create_arc((padding,height-padding-rad,padding+rad,height-padding), start=180, extent=90, fill=color, outline=color)
]
(x0,y0,x1,y1) = self.bbox("all")
width = (x1-x0)
height = (y1-y0)
self.configure(width=width, height=height)
self.bind("<ButtonPress-1>", self._on_press)
self.bind("<ButtonRelease-1>", self._on_release)
self.active=False
def _on_press(self, event):
self.configure(relief="sunken")
for i in self.ids:
self.itemconfig(i,fill='#856c14')
self.command()
def _on_release(self, event):
self.configure(relief="raised")
if self.command is not None:
for i in self.ids:
self.itemconfig(i,fill=self.color)
self.command()''' | true |
09082c005808b70e56e1cfa9120f6f696c1bc425 | Python | michaelstchen/machinelearn-course | /hw4 - Ridge and Kernel Logistic Regression/code/prob3_5.py | UTF-8 | 1,938 | 2.765625 | 3 | [] | no_license | import numpy as np
import math
import csv
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.preprocessing import scale, binarize
def kernel_mat(X1, X2, deg, rho):
    """Polynomial kernel matrix: (X1 @ X2.T + rho) ** deg."""
    gram = np.dot(X1, X2.T)
    return (gram + rho) ** deg
def sig(x):
    """Elementwise logistic sigmoid, 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))
def risk(K, a, y):
    """Negative log-likelihood of the logistic model with weights *a*."""
    p = sig(np.dot(K, a))
    # 1e-100 keeps log() finite when predictions saturate at 0 or 1.
    log_pos = np.multiply(y, np.log(p + 1e-100))
    log_neg = np.multiply(1 - y, np.log(1 - p + 1e-100))
    return -np.sum(log_pos + log_neg)
def grad_desc_stoch(i, y_i, Ka_i, a, eps, lam):
    """One stochastic gradient step on sample *i*; returns a NEW vector
    (the weight-decay term produces a fresh array, *a* is untouched)."""
    updated = a - (eps * lam) * a
    updated[i] = updated[i] + eps * (y_i - sig(Ka_i))
    return updated
def log_regr_kernel(X, y, K, a, eps, lam, lim):
    """Run `lim` SGD steps of kernel logistic regression.

    Returns the final weights and the risk recorded every 100 steps.
    """
    risks = []
    for step in range(lim):
        # Pick one training sample uniformly at random.
        j = np.random.randint(0, len(X))
        Ka_j = np.dot(K[j], a)
        a = grad_desc_stoch(j, y[j], Ka_j, a, eps, lam)
        if step % 100 == 0:
            risks.append(risk(K, a, y))
    return a, risks
''' LOADING DATA '''
spam_data = loadmat(file_name="spam_data.mat", mat_dtype=True)
train_data = np.array(spam_data['training_data'])
train_labels = np.transpose(np.array(spam_data['training_labels']))
train_labels = train_labels[:, 0]
# Standardize features, then keep only the first 3000 rows for speed.
train_data = scale(train_data)
train_data = train_data[:3000]
train_labels = train_labels[:3000]
num_train = len(train_data)
''' LINEAR KERNEL LOGISTIC REGRESSION'''
lim = 15000
lam = 0.001
a0 = np.zeros(len(train_data))
eps = 1e-5
rho = 100
K_train = kernel_mat(train_data, train_data, 2, rho)
a, risks = log_regr_kernel(train_data, train_labels, K_train, a0, eps, lam, lim)
''' PREDICTING '''
test_data = np.array(spam_data['test_data'])
# NOTE(review): scale() here standardizes the test set with its OWN
# statistics, not the training set's -- confirm this is intended.
test_data = scale(test_data)
# NOTE(review): training used degree 2 but prediction uses degree 1 --
# likely a bug; the kernels should match.
K_test = kernel_mat(test_data, train_data, 1, rho)
pred_labels = np.rint(sig(np.dot(K_test, a)))
# NOTE(review): opening in 'wb' with csv.writer is Python-2 style; on
# Python 3 this raises TypeError (csv writes str, use 'w', newline='').
with open('test_labels.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerow(['Id'] + ['Category'])
    for i in range(0, len(pred_labels)):
        writer.writerow([i+1] + [int(pred_labels[i])])
| true |
def display(fun):
    """Invoke the callable *fun* and prefix its result with "hello "."""
    return "hello " + fun()
def name():
    """Return the sample name used by the demo below."""
    return "Smith"
print(display(name))  # pass the function `name` itself as the argument
eb702ddbe25b9042ded6d109681a393c652c1c90 | Python | smjedael/python-challenge | /PyParagraph/main.py | UTF-8 | 1,848 | 3.6875 | 4 | [] | no_license | #PyParagraph Script
#Import modules
import os
import re
# Ask for an input text file, compute word/sentence statistics, then emit
# a summary both to the terminal and to a user-named report file.
filepath = input('Please enter path and filename of data file (e.g. "datafolder/datafile.txt"): ')

with open(filepath, newline="") as text:
    paragraphs = text.read()

# Tokenize: sentences end at . ? or ! followed by whitespace; words are
# whatever sits between punctuation/non-word runs.
sentences = re.split('[\.\?\!]\W+', paragraphs)
words = re.split('[\.\?\!\W]+', paragraphs)
words.remove('')  # the regex split leaves one empty token to discard

total_sentences = len(sentences)
total_words = len(words)
letter_counts = [len(word) for word in words]
total_letters = sum(letter_counts)

# Summary lines (kept byte-identical to the original report format).
text_summary = [f'',
                f'Paragraph Analysis',
                f'-----------------------------------',
                f'Approximate Word Count: {total_words}',
                f'Approximate Sentence Count: {total_sentences}',
                f'Average Letter Count: {round(total_letters/total_words, 2)}',
                f'Average Sentence Length: {round(total_words/total_sentences, 2)}',
                f''
                ]

for item in text_summary:
    print(item)

outputfile = input('Please type filename of new report file (e.g. "text_summary.txt"): ')

with open(outputfile, 'w', newline = "") as report:
    for item in text_summary:
        report.write(item + "\r")
| true |
02f25e0e8cf290b59768ccc6312eb260b0ec9e6f | Python | t8toel00/Valvo | /Raspi/Valvo/detect_faces.py | UTF-8 | 4,521 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# This script will stay running and snap a picture when needed.
import cv2
from cv2 import *
import os
import datetime
if not os.path.exists('snapshots'):
os.mkdir('snapshots')
class cvCam():
    # Thin wrapper around an OpenCV VideoCapture plus two Haar cascades
    # (frontal face and upper body) for snapshot-and-detect workflows.
    def __init__(self):
        # Create the haar cascade
        self.faceCascPath = "haarcascade_frontalface_default.xml"
        #self.cascPath = "haarcascade_fullbody.xml"
        self.bodyCascPath = "haarcascade_upperbody.xml"
        self.faceCascade = cv2.CascadeClassifier(self.faceCascPath)
        self.bodyCascade = cv2.CascadeClassifier(self.bodyCascPath)
        # Camera 0; the success message is printed but failure is not fatal.
        self.cam = VideoCapture(0)
        if self.cam.isOpened() == False:
            print("Camera not opened.")
        else:
            print("Camera feed opened.")
        #self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        #self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1600)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 896)
        self.cam.grab()
    def snapAndDetect(self):
        # Snaps a picture when called and tries to detect faces.
        # Returns the amount of faces and possibly the coordinates.
        self.s, self.img = self.cam.read()
        self.dt = datetime.datetime.now()
        self.filename = "snapshot-" + self.dt.strftime('%Y-%m-%d-%H%M%S') + "-detected.jpg"
        if self.s:
            # NOTE(review): this writes the literal file "filename.jpg";
            # the freshly built self.filename above is probably what was
            # intended here -- confirm.
            imwrite("snapshots/filename.jpg",self.img)
        self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.faces = self.faceCascade.detectMultiScale(
            self.gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            #flags = cv2.CV_HAAR_SCALE_IMAGE
        )
        #print("Found {0} faces!".format(len(self.faces)))
        # Draw a box around each detected face, then save the annotated
        # frame under both the timestamped name and "lastshot.jpg".
        for (x, y, w, h) in self.faces:
            cv2.rectangle(self.img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        imwrite("snapshots/" + self.filename,self.img)
        imwrite("snapshots/lastshot.jpg",self.img)
        # Finally, return the amount of faces, timestamp and the trigger source:
        return self.faces, self.dt
    def Snap(self):
        """
        Returns a list of three images taken sequentially.
        Status is true if image was captured succesfully.
        """
        # NOTE(review): despite the docstring this captures a SINGLE frame
        # and returns (status, image, datetime); returns None (implicitly)
        # when the read fails.
        self.cam.grab()
        self.s, self.img = self.cam.read()
        self.dt = datetime.datetime.now()
        if self.s:
            return self.s, self.img, self.dt
    def SnapThree(self):
        """
        Returns a list of three images taken sequentially:
        ((status,img,date))
        Status is true if image was captured succesfully.
        """
        self.imgList = []
        picIndex = 0
        # Keep grabbing until three successful reads have been collected.
        while picIndex < 3:
            self.cam.grab()
            self.s, self.img = self.cam.read()
            self.dt = datetime.datetime.now()
            if self.s:
                self.imgList.append ((self.s, self.img, self.dt))
                picIndex = picIndex + 1
        return self.s, self.imgList, self.dt
    def Detect(self, date, photo):
        """
        Detects faces AND upper bodies.
        Returns face and bodies and the image in form:
        (faces, bodies, photo)
        """
        # NOTE(review): both try blocks reset self.faces; the second one was
        # probably meant to reset self.bodies.  The bare excepts can never
        # fire for a plain assignment.
        try:
            self.faces = []
        except:
            pass
        try:
            self.faces = []
        except:
            pass
        self.gray = cv2.cvtColor(photo, cv2.COLOR_BGR2GRAY)
        self.faces = self.faceCascade.detectMultiScale(
            self.gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30,30),
        )
        #faces are colored green:
        for (x, y, w, h) in self.faces:
            cv2.rectangle(photo, (x, y), (x+w, y+h), (0, 255, 0), 2)
        self.bodies = self.bodyCascade.detectMultiScale(
            self.gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30,30),
        )
        #Bodies are colored red:
        for (x, y, w, h) in self.bodies:
            cv2.rectangle(photo, (x, y), (x+w, y+h), (255, 0, 0), 2)
        #self.filename = "snapshot-" + date.strftime('%Y-%m-%d-%H%M%S') + "-detected.jpg"
        #imwrite("snapshots/" + self.filename,photo)
        # imwrite("snapshots/lastshot.jpg",photo)
        return self.faces, self.bodies, photo
    def drawBox(self, pic, x, y, w, h):
        # Draw a (0, 0, 255) rectangle of the given size onto pic in place.
        cv2.rectangle(pic, (x, y), (x+w, y+h), (0, 0, 255), 2)
| true |
1534e4f903f08f3e62ea03a0f68f1400cddfe271 | Python | MihailoJoksimovic/cs6006-implementations | /ctci/3_4.py | UTF-8 | 1,226 | 4.28125 | 4 | [] | no_license | # Task: Implement Queue using Two stacks
# Idea: Move one stack to another and pop from another -- that's how we get a queue.
# Optimal solution: don't move back until needed
from ctci.common.classes import *
class QueueWithStacks:
    """FIFO queue built from two LIFO stacks (lazy-transfer variant)."""

    def __init__(self):
        # New items are pushed here in O(1)...
        self.insert_stack = Stack()
        # ...and served from here, where a refill reverses them into
        # FIFO order.
        self.remove_stack = Stack()

    def add(self, value):
        """Enqueue *value*; O(1)."""
        self.insert_stack.push(value)

    def get(self):
        """Return next item in queue"""
        # Refill only when the serving stack is empty: amortized O(1),
        # worst case O(n) for a single call.
        if self.remove_stack.empty() and not self.insert_stack.empty():
            self.__from_a_to_b(self.insert_stack, self.remove_stack)
        return self.remove_stack.pop()

    def __from_a_to_b(self, stack_a: Stack, stack_b: Stack):
        """Pop every element off *stack_a* onto *stack_b* (reverses order)."""
        while not stack_a.empty():
            stack_b.push(stack_a.pop())
# Demo: interleave adds and gets to show FIFO ordering across refills.
q = QueueWithStacks()
for value in range(1, 5):
    q.add(value)
for _ in range(3):
    print(q.get())
for value in range(5, 10):
    q.add(value)
# Drain the rest; relies on Stack.pop() returning None when empty.
while True:
    el = q.get()
    if el is None:
        break
    print(el)
def gcd(a, b):
    """Euclidean algorithm that also sums the quotients along the way.

    Returns [g, q_sum] where g == gcd(a, b) and q_sum is the sum of all
    integer quotients produced by the recursion.
    """
    if b == 0:
        return [a, 0]
    g, q_sum = gcd(b, a % b)
    # a // b instead of int(a / b): exact for arbitrarily large ints
    # (float division silently loses precision beyond 2**53).
    return [g, a // b + q_sum]
# Read n and track the minimum quotient-sum minus one over every i that
# is coprime with n.
n = int(input())
output = 2 ** 20
for i in range(1, n + 1):
    g, steps = gcd(n, i)
    if g == 1:
        output = min(output, steps - 1)
print(output, end='')
4ef8e6e2a095d8858a47cf46de5f69bc609581ad | Python | SKTPausanias/machine_learning_bootcamp-42AI | /day04/ex05/linear_cost_reg.py | UTF-8 | 1,162 | 3.484375 | 3 | [] | no_license | import numpy as np
def reg_cost_(y, y_hat, theta, lambda_):
"""Computes the regularized cost of a linear regression model from two non-empty
,โ numpy.ndarray, without any for loop. The two arrays must have the same dimensions.
Args:
y: has to be an numpy.ndarray, a vector of dimension m * 1.
y_hat: has to be an numpy.ndarray, a vector of dimension m * 1.
theta: has to be a numpy.ndarray, a vector of dimension n * 1.
lambda_: has to be a float.
Returns:
The regularized cost as a float.
None if y, y_hat, or theta are empty numpy.ndarray.
None if y and y_hat do not share the same dimensions.
Raises:
This function should not raise any Exception.
"""
theta[0] = 0
if y.size == 0 or y_hat.size == 0 or theta.size == 0 or y.shape != y_hat.shape:
return None
return (np.dot((y_hat - y), (y_hat - y)) + (lambda_ * (np.dot(theta, theta)))) / float(y.shape[0] * 2)
if __name__ == "__main__":
y = np.array([2, 14, -13, 5, 12, 4, -19])
y_hat = np.array([3, 13, -11.5, 5, 11, 5, -20])
theta = np.array([1, 2.5, 1.5, -0.9])
print(reg_cost_(y, y_hat, theta, .5))
print(reg_cost_(y, y_hat, theta, .05))
print(reg_cost_(y, y_hat, theta, .9)) | true |
8d40ad392d2d2b68e06f02f9209c9bbaa2dada57 | Python | QuantLet/MVA | /QID-1530-MVAscabank56/MVAscabank56.py | UTF-8 | 632 | 2.890625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Scatter columns 5 and 6 of the Swiss bank notes data; rows 0-99 are one
# group (black circles) and rows 100-199 the other (red triangles).
x = pd.read_csv("bank2.dat", sep = "\s+", header=None)
x56 = x.iloc[:, 4:]
x1 = [1] * 100
x2 = [2] * 100
xx = x56.copy()
x1.extend(x2)
xx["x1x2"] = x1  # group marker column
fig, ax = plt.subplots(figsize=(10, 10))
ax.scatter(xx.iloc[:100, 0], xx.iloc[:100, 1], c="w", edgecolors="black")
ax.scatter(xx.iloc[100:, 0], xx.iloc[100:, 1], c="w", edgecolors="r", marker="^")
plt.xlim(7, 13)
plt.ylim(137.5, 142.5)
plt.yticks(list(np.arange(137.5, 143, 1)))
plt.title("Swiss bank notes")
plt.savefig('MVAscabank56_python.png', format='png', dpi=600, transparent=True)
plt.show()
ef85a2bcc0206d09c0ddb6e04128cc0f6d31e806 | Python | pillumina/pset4_RRT | /2a.py | UTF-8 | 1,270 | 3.703125 | 4 | [] | no_license | import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# function to find the distance between 2 points
def get_distance(point1, point2):
    """Euclidean distance between two (x, y) points.

    Keeps the original contract of returning an error STRING (not raising)
    when either argument is not a pair.
    """
    if len(point1) != 2 or len(point2) != 2:
        return 'please provide x,y coordinates for each point'
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return np.sqrt(dx ** 2 + dy ** 2)
if __name__ == '__main__':
    # Goal plus 100 random sample points in a 500x500 field.
    goal = (250,250)
    p_list = [(random.randint(0,500), random.randint(0,500)) for _ in range(100)]
    # Distance from the goal to every sample, in the same order.
    distance = [get_distance(goal, point) for point in p_list]
    closest_point = p_list[distance.index(min(distance))]
    print('the closest point is : ', closest_point)
    # Blue dots for samples, black star for the winner, red diamond goal.
    for point in p_list:
        plt.scatter(point[0], point[1], marker = '.', c='b')
    plt.scatter(closest_point[0], closest_point[1], marker='*', c='k')
    plt.scatter(goal[0], goal[1], marker='D', c='r')
    plt.show()
52d07b34c0729e322a6441358c67d27ca074f541 | Python | SNJIAWEI/elastic | /cn/Test.py | UTF-8 | 217 | 2.859375 | 3 | [] | no_license | import datetime
# Print yesterday's date formatted as YYYY-MM-DD.
TIME_FMT_YMD = '%Y-%m-%d'
_one_day = datetime.timedelta(days=1)
yestoday = (datetime.date.today() - _one_day).strftime(TIME_FMT_YMD)
print(yestoday)
c572cf19444d2c75ecfd29ebc05e50b2cf469177 | Python | almazgf/parser | /2_add_data_bd.py | UTF-8 | 446 | 2.703125 | 3 | [] | no_license |
from pymongo import MongoClient
import json
# Read the goods data from the JSON file
filename = 'goods.json'
# Context manager replaces the explicit open()/close() pair.
with open(filename, mode='r', encoding='UTF-8') as rite_file:
    list_goods = json.load(rite_file)
# Create the database handle and insert the data
# Connect to the local MongoDB and bulk-insert the parsed goods into
# the barcode.goods collection.
client = MongoClient('localhost', 27017)
db = client.barcode
col = db.goods
result = col.insert_many(list_goods)
# result is the InsertManyResult returned by insert_many
print(result)
| true |
7652fdcef23173ba4d57ee37393a0b632c36c61e | Python | CarlosFdez/pytmi | /pytmi/message.py | UTF-8 | 818 | 3.015625 | 3 | [
"MIT"
] | permissive | from .user import User
class Message:
    """A single chat message received over the twitch message interface.

    Attributes
    -------------
    channel: :class:`str`
        Name of the channel the message came from.
    content: :class:`str`
        Raw text content of the message.
    author: :class:`User`
        The :class:`User` that sent the message.
    badges: :class:`list`
        Badge data attached to the message (TODO: change badges to
        objects, then document).
    bits: :class:`int`
        Number of bits sent with this message; 0 when none were sent.
    """
    def __init__(self, channel : str, content : str, author : User, badges, bits : int):
        self.channel = channel
        self.content = content
        self.author = author
        self.badges = badges
        self.bits = bits
| true |
ab155c1923babc240108d0b4e9ce85aa9dd51b04 | Python | barium-project/barium | /lib/clients/gui/q_custom_text_changing_button.py | UTF-8 | 2,240 | 3.109375 | 3 | [] | no_license | from PyQt4 import QtGui, QtCore
class TextChangingButton(QtGui.QPushButton):
    """Button that changes its text to ON or OFF and colors when it's pressed.
    """
    def __init__(self, button_text, parent=None):
        """
        NOTE: when both labels and addtext are not None, labels take
        precedence.
        Parameters
        ----------
        button_text: could be a 2-tuple of string, a string, or None.
            When it's a 2-tuple, the first entry corresponds to text when the
            button is "ON", and the second entry corresponds to text when the
            button is "OFF".
            When it's a string, it is the text that gets added before "ON" or
            "OFF".
            When it's None, then the text gets displayed are "On" or "Off".
        """
        super(TextChangingButton, self).__init__(parent)
        self.button_text = button_text
        self.setCheckable(True)
        self.setFont(QtGui.QFont('MS Shell Dlg 2', pointSize=10))
        self.setSizePolicy(QtGui.QSizePolicy.Minimum,
                           QtGui.QSizePolicy.Minimum)
        # connect signal for appearance changing
        self.toggled.connect(self.setAppearance)
        self.setAppearance(self.isDown())
    def setAppearance(self, down):
        """Update the button text and palette to match the checked state."""
        on_text, off_text = self._set_button_texts()
        if down:
            self.setText(on_text)
            self.setPalette(QtGui.QPalette(QtCore.Qt.darkGreen))
        else:
            self.setText(off_text)
            self.setPalette(QtGui.QPalette(QtCore.Qt.black))
    def _set_button_texts(self):
        """Return button texts when they are on or off."""
        # isinstance() instead of `type(x) == T`: identical for plain
        # str/tuple values and also accepts subclasses.
        if isinstance(self.button_text, str):
            on_text = self.button_text + " On"
            off_text = self.button_text + " Off"
        elif isinstance(self.button_text, tuple):
            on_text = self.button_text[0]
            off_text = self.button_text[1]
        elif self.button_text is None:
            on_text = "On"
            off_text = "Off"
        else:
            error_msg = "Text gets displayed on a button needs to be a string"
            raise TypeError(error_msg)
        return on_text, off_text
    def sizeHint(self):
        """Preferred compact size for the button."""
        return QtCore.QSize(37, 26)
| true |
b454e7d70f05b7837c2e3a6babb7f6f49885d471 | Python | qiaokangqi/wifi_robot | /cmd_receiver.py | UTF-8 | 3,222 | 2.75 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import threading
import struct
import os
import time
import types
class CmdReceiver(object):
    # UDP command receiver (Python 2): binds a datagram socket and decodes
    # two-character op-codes on a background thread.
    cmd_reception_socket = None
    host_addr = None
    def __init__(self, host_addr = ('', 7997)):
        # Bind a UDP socket on all interfaces, default port 7997.
        self.host_addr = host_addr
        self.cmd_reception_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.cmd_reception_socket.bind(self.host_addr)
    def parse_receive_op_code(self, op_code):
        # Decode a 2-char op-code: first char selects the subsystem
        # ('0' = motion, '1' = arm), second char the action.  Returns
        # False for malformed codes, True otherwise (even for unknown
        # second characters, which are silently ignored).
        if len(op_code) != 2:
            return False
        if op_code[0] == '0':
            print 'Motion operation๏ผš'
            if op_code[1] == '0':
                print 'Front'
            elif op_code[1] == '1':
                print 'Back'
            elif op_code[1] == '2':
                print 'Left'
            elif op_code[1] == '3':
                print 'Right'
            elif op_code[1] == '4':
                print 'Left-front'
            elif op_code[1] == '5':
                print 'Left-back'
            elif op_code[1] == '6':
                print 'Right-front'
            elif op_code[1] == '7':
                print 'Right-back'
        elif op_code[0] == '1':
            print 'Arm opration'
            if op_code[1] == '0':
                print 'Front'
            elif op_code[1] == '1':
                print 'Back'
            elif op_code[1] == '2':
                print 'Left'
            elif op_code[1] == '3':
                print 'Right'
            elif op_code[1] == '4':
                print 'Left-front'
            elif op_code[1] == '5':
                print 'Left-back'
            elif op_code[1] == '6':
                print 'Right-front'
            elif op_code[1] == '7':
                print 'Right-back'
            elif op_code[1] == '8':
                print 'Up'
            elif op_code[1] == '9':
                print 'Down'
            elif op_code[1] == 'a':
                print 'Auto recycle'
        return True
    def run_thread(self):
        # Blocking receive loop; exits when an empty datagram arrives.
        while True:
            data, addr = self.cmd_reception_socket.recvfrom(2048)
            if not data:
                print "client has exist"
                break
            print "received:", data, "from", addr
            #print type(data)
            self.parse_receive_op_code(data)
    def run(self):
        # Start the receive loop on a background (non-daemon) thread.
        reception_thread = threading.Thread(target = self.run_thread)
        reception_thread.start()
    '''
    def connect_robot(self):
        pass
    '''
    def __del__(self):
        # Best-effort cleanup of the socket on garbage collection.
        if self.cmd_reception_socket != None:
            self.cmd_reception_socket.close()
if __name__ == "__main__":
cmd_receiver = CmdReceiver()
cmd_receiver.run()
while True:
pass
'''
address = ('192.168.5.175', 7998)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(address)
while True:
data, addr = s.recvfrom(2048)
if not data:
print "client has exist"
break
print "received:", data, "from", addr
s.close()
''' | true |
25f4f9f6d41b8c0498bbb557c2de7025b296aa52 | Python | jmalisano/Using_Python_for_Research | /Case Studies/GPS Tracking/GPS Tracking.py | UTF-8 | 2,671 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 16:01:01 2020
@author: Flat J
"""
import pandas as pd
birddata = pd.read_csv("bird_tracking.csv")
import matplotlib.pyplot as plt
import numpy as np
# --- Trajectory of a single bird ("Eric") ---
ix = birddata.bird_name == "Eric"
x, y = birddata.longitude[ix],birddata.latitude[ix] #indexing using a boolean
plt.plot(x,y, "-")
# --- Trajectories of all birds on one labelled figure ---
bird_names = pd.unique(birddata.bird_name)
for name in bird_names:
    ix = birddata.bird_name == name
    x, y = birddata.longitude[ix],birddata.latitude[ix] #indexing using a boolean
    plt.plot(x,y, "-", label=name)
plt.xlabel("Longitude")
# NOTE(review): "Lattitude" is a typo in this axis label (runtime string,
# left unchanged here).
plt.ylabel("Lattitude")
plt.legend(loc="lower right")
plt.savefig("3traj.pdf")
plt.clf()
# --- Histogram of Eric's 2D speed, NaNs filtered manually ---
ix = birddata.bird_name == "Eric"
speed = birddata.speed_2d[ix]
ind = np.isnan(speed)
plt.hist(speed[~ind], bins=np.linspace(0, 30, 20), density=True)
plt.xlabel("2D speed")
plt.savefig("speedhist.pdf")
plt.clf()
# Same histogram drawn via pandas' plotting API.
birddata.speed_2d.plot(kind='hist', range=[0,30])
plt.xlabel("2D speed")
plt.savefig("pd_speedhist.pdf")
plt.clf()
#using date-time
import datetime
#this code strips the date_time column of bird data and converts it to a time object
timestamps = []
for k in birddata.date_time:
    date_str = k
    # [:-3] trims the last three characters (presumably a UTC offset
    # suffix) before parsing -- confirm against the CSV format.
    time_obj = datetime.datetime.strptime(date_str[:-3], "%Y-%m-%d %H:%M:%S")
    timestamps.append(time_obj)
birddata["timestamp"] = pd.Series(timestamps, index=birddata.index) #initialises a new column to the birddata DF
# --- Mean speed per elapsed day for Eric ---
times = birddata.timestamp[birddata.bird_name == "Eric"]
elapsed_time = [time - times[0] for time in times]
elapsed_days = np.array(elapsed_time) / datetime.timedelta(days=1)
next_day = 1
inds = [] #indicies for a given day
daily_mean_speed = []
for i, t in enumerate(elapsed_days):
    if t < next_day:
        inds.append(i) #indicies for current day are collected
    else:
        # NOTE(review): the sample that triggers this branch is added to
        # neither the finished day nor the new one, so one observation is
        # dropped at every day boundary -- confirm intended.
        daily_mean_speed.append(np.mean(birddata.speed_2d[inds])) #computes daily mean speed
        next_day += 1 # then updates the day
        inds = [] #then clears the inds for the new day
plt.plot(daily_mean_speed)
plt.xlabel("day")
plt.ylabel("mean speed")
plt.savefig("dms.pdf")
plt.clf()
# --- Map of all trajectories on a Mercator projection (cartopy) ---
import cartopy.crs as ccrs
import cartopy.feature as cfeature
plt.figure(figsize=(10,10))
proj = ccrs.Mercator()
ax = plt.axes(projection=proj)
ax.set_extent((-25.0, 20.0, 52.0,10.0))
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linestyle=':')
for name in bird_names:
    ix = birddata['bird_name'] == name
    x,y = birddata.longitude[ix], birddata.latitude[ix]
    ax.plot(x, y, '.', transform=ccrs.Geodetic(), label=name)
plt.legend(loc="upper left")
plt.savefig("map.pdf")
plt.clf() | true |
ffcdef98cf92b608ac8fbd14b188712de0375a96 | Python | izeus1/python-practice | /test.py | UTF-8 | 112 | 3.03125 | 3 | [] | no_license | s = '/usr/local/bin/python'
# Split the path on '/'; the leading separator produces an empty first
# element, which the slice below skips.
l = s.split("/")
for component in l[1:]:
    print("'{0}'".format(component), end=" ")
print(" ")
| true |
5e02316b9ae10f2c130fe1c653f3c16863e5bdc3 | Python | jkamuda/protomonk | /src/game.py | UTF-8 | 2,413 | 2.8125 | 3 | [] | no_license | __author__ = 'jkamuda'
import os
import pygame
import constants
from src.sound import SoundManager
from menu import Menu
from overhead import Overhead
from game_state import GameState
from src.load_screen import LoadScreen
from game_info import GameInfo
from src.mario_game import MarioGame
from game_over_screen import GameOverScreen
class Game():
    """Top-level pygame application: owns the window, the shared GameInfo,
    and the state machine switching between menu/load/game/game-over screens.
    """

    def __init__(self):
        # Center window on screen
        os.environ['SDL_VIDEO_CENTERED'] = '1'
        pygame.init()

        self.caption = 'NES Mario'
        size = [constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT]
        self.screen = pygame.display.set_mode(size)

        self.game_info = GameInfo()
        self.sound_manager = SoundManager()

    def get_game_state(self, game_state):
        """Build and return the screen object for the given GameState value.

        NOTE(review): silently returns None for any state not listed below.
        """
        if game_state == GameState.STATE_MENU:
            # TODO kind of an ugly place to put this reset...
            self.game_info.reset()
            return Menu()
        elif game_state == GameState.STATE_LOAD:
            return LoadScreen(self.game_info)
        elif game_state == GameState.STATE_GAME:
            return MarioGame(self.game_info, self.sound_manager)
        elif game_state == GameState.STATE_GAME_OVER:
            return GameOverScreen(self.game_info, self.sound_manager)

    def run(self):
        """Main loop: pump events, update and draw the current state at
        60 FPS until the window is closed or ESC/Q is pressed.
        """
        pygame.display.set_caption(self.caption)

        running = True
        clock = pygame.time.Clock()
        overhead_info = Overhead(self.game_info)
        game_state = self.get_game_state(GameState.STATE_MENU)

        while running:
            game_time = pygame.time.get_ticks()

            # Let the current state request a transition to its `next` state.
            if game_state.switch_state():
                game_state = self.get_game_state(game_state.next)

            events = pygame.event.get()
            for event in events:
                if event.type == pygame.QUIT:
                    running = False
                if event.type == pygame.KEYDOWN:
                    key = event.key
                    if key == pygame.K_ESCAPE or key == pygame.K_q:
                        running = False

            game_state.process_events(events)

            overhead_info.update(game_time)
            game_state.update(game_time)

            game_state.draw(self.screen)
            overhead_info.draw(self.screen)

            # Limit to 60 frames per second
            clock.tick(60)

            # Update screen
            pygame.display.flip()

        pygame.quit()
| true |
d79866cf3848800f3d07aab109fc0603f981d824 | Python | johannbzh/Cracking-The-Coding-Interview-Python-Solutions | /recursion_and_dp/stackOfBoxes.py | UTF-8 | 825 | 3.171875 | 3 | [] | no_license | from collections import namedtuple
def stackOfBoxes(boxes):
    """Return the tallest stack height achievable from `boxes`.

    A box may sit on another only when the lower box is strictly larger in
    width, height AND depth (no rotation allowed).  Each box exposes .w,
    .h and .d attributes.  Returns 0 for an empty sequence.

    Replaces the original exponential backtracking search with an O(n^2)
    longest-chain DP over boxes sorted by base area; the answer is identical.
    """
    if not boxes:
        return 0
    # Any box that can sit below another has strictly larger w and d, hence a
    # strictly larger base area, so after this sort every valid "lower" box
    # precedes every box that could rest on it.
    order = sorted(boxes, key=lambda b: b.w * b.d, reverse=True)
    # best[i]: tallest stack whose topmost box is order[i].
    best = [b.h for b in order]
    for i in range(len(order)):
        upper = order[i]
        for j in range(i):
            lower = order[j]
            if lower.w > upper.w and lower.h > upper.h and lower.d > upper.d:
                best[i] = max(best[i], best[j] + upper.h)
    return max(best)
if __name__ == "__main__":
    # Demo: no box here fits strictly inside another, so the answer is the
    # tallest single box (25).
    Box = namedtuple('box', ('w', 'h', 'd'))
    demo_boxes = [Box(14, 6, 7), Box(15, 22, 3), Box(14, 25, 6), Box(10, 12, 32)]
    print(stackOfBoxes(demo_boxes))
| true |
2cc17b94ccd762106c5551b5f48e2d30f04bcca8 | Python | thehardwareguy2000/Weather-Analysis- | /BDA_Weather_analysis.py | UTF-8 | 3,955 | 3 | 3 | [
"MIT"
] | permissive | from google.colab import drive
drive.mount('/content/drive')
from pyspark import SparkContext
from google.colab import drive
from pyspark.sql import SQLContext, Row
import pyspark.sql.functions as sqlf
# Create a Spark Context
sc = SparkContext.getOrCreate()
# Create a sql context
sqlc = SQLContext(sc)
# Analysis window: 2015-2019 (range upper bound is exclusive).
years = range(2015, 2020)
# Yearly stats
for year in years:
    # get data as raw text
    txtfile = sc.textFile('/content/drive/My Drive/data/%s.csv' % year)
    # split attribute values using commas
    data = txtfile.map(lambda x: x.split(','))
    # create table (GHCN-style rows: station, date, element, value, flags, obs time)
    table = data.map(lambda r: Row(station=r[0], date=r[1], ele=r[2], val=int(r[3]), m_flag=r[4], q_flag=r[5], s_flag=r[6], obs_time=r[7]))
    # create dataframe
    df = sqlc.createDataFrame(table)
    # Handle anomalies and missing data: keep only rows whose quality flag is empty.
    clean_df = df.filter(df['q_flag'] == '')
    print("\nYear %s Stats:\n" % year)
    # 1. Average min (raw values are divided by 10 to obtain degrees Celsius)
    res = clean_df.filter(clean_df['ele'] == 'TMIN').groupby().avg('val').first()
    print('Avg. Min Temp = %.2f degrees Celsius' % (res['avg(val)'] / 10.0))
    # 1. Average max
    res = clean_df.filter(clean_df['ele'] == 'TMAX').groupby().avg('val').first()
    print('Avg. Max Temp = %.2f degrees Celsius' % (res['avg(val)'] / 10.0))
    # 2. Max TMAX
    res = clean_df.filter(clean_df['ele'] == 'TMAX').groupby().max('val').first()
    print('Max TMAX value = %.2f degrees Celsius' % (res['max(val)'] / 10.0))
    # 2. Min TMIN
    res = clean_df.filter(clean_df['ele'] == 'TMIN').groupby().min('val').first()
    print('Min TMIN value = %.2f degrees Celsius' % (res['min(val)'] / 10.0))
    # 3. Five distinct hottest weather stations (one record per station)
    res = clean_df.filter(clean_df['ele'] == 'TMAX').sort(sqlf.desc('val')).groupBy(clean_df['station']).agg(sqlf.max('val')).sort(sqlf.desc('max(val)')).limit(5).collect()
    print("Top 5 distinct hottest stations")
    for i in res:
        print('Station:%s\tTemperature:%.2f degrees Celsius' % (i.station, float(i['max(val)']) / 10.0))
    # 3. Five hottest weather stations only by temperature (stations may repeat)
    res = clean_df.filter(clean_df['ele'] == 'TMAX').sort(sqlf.desc('val')).limit(5).collect()
    print("Top 5 hottest weather stations only by temperature")
    for i in res:
        print('Station:%s\tTemperature:%.2f degrees Celsius' % (i.station, float(i['val']) / 10.0))
    # 3. Five distinct coldest weather stations
    res = clean_df.filter(clean_df['ele'] == 'TMIN').sort(sqlf.asc('val')).groupBy(clean_df['station']).agg(sqlf.min('val')).sort(sqlf.asc('min(val)')).limit(5).collect()
    print("Top 5 distinct coldest stations")
    for i in res:
        print('Station:%s\tTemperature:%.2f degrees Celsius' % (i.station, float(i['min(val)']) /10.0))
    # 3. Five coldest weather stations only by temperature
    res = clean_df.filter(clean_df['ele'] == 'TMIN').sort(sqlf.asc('val')).limit(5).collect()
    print("Top 5 coldest stations only by temperature")
    for i in res:
        print('Station:%s\tTemperature:%.2f degrees Celsius' % (i.station, float(i['val']) / 10.0))
# Aggregate statistics
# 4. Hottest and coldest weather stations on entire data
# NOTE(review): this glob path ('../data/20??.csv') differs from the Google
# Drive path used above -- confirm which data directory is intended.
txtfile1 = sc.textFile('../data/20??.csv')
data = txtfile1.map(lambda x: x.split(','))
table = data.map(lambda r: Row(station=r[0], date=r[1], ele=r[2], val=int(r[3]), m_flag=r[4], q_flag=r[5], s_flag=r[6], obs_time=r[7]))
df = sqlc.createDataFrame(table)
clean_df = df.filter(df['q_flag'] == '')
# hottest day and weather station
res = clean_df.filter(clean_df['ele'] == 'TMAX').sort(sqlf.desc('val')).first()
print("Hottest station: %s on %s with temperature:%.2f degrees Celsius" % (res.station, res.date, float(res['val']) / 10.0))
# coldest day and weather station
res = clean_df.filter(clean_df['ele'] == 'TMIN').sort(sqlf.asc('val')).first()
print("Coldest Station: %s on %s with temperature:%.2f degrees Celsius" % (res.station, res.date, float(res['val']) / 10.0))
61f555d1f4ceb4ddd942489c86675994d7fdc305 | Python | Abhijeet198/Python-Program | /45.Check whether a string contains all letters of the alphabet.py | UTF-8 | 342 | 4 | 4 | [] | no_license | # 45.Check whether a string contains all letters of the alphabet
import string
alphabet = set(string.ascii_lowercase)
input_string = 'The quick brown fox jumps over the lazy dog.'
print(set(input_string.lower()) >=alphabet)
input_string = 'The quick brown fox jumos over the lazy cat.'
print(set(input_string.lower()) >= alphabet)
| true |
4513d66dd95e7562f206159d51d65573620f0d08 | Python | areshta/python-edu | /misc/by_ref.py | UTF-8 | 103 | 3.265625 | 3 | [] | no_license | def fun1():
i = 1
fun2(i)
print ("i=", i)
def fun2(i):
    """Print the argument plus one; the rebinding stays local to this call."""
    incremented = i + 1
    print("ii=", incremented)
# Entry point: demonstrates that i is unchanged after fun2's local increment.
fun1()
| true |
60f946df1ae8191961fecf00c6d07bdf437dd15f | Python | learningpeople/PythonTest | /python_test/tcp_udp/udp.py | UTF-8 | 639 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python
#-*- coding: utf-8 -*-
import socket
# Create the socket (UDP uses SOCK_DGRAM).
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Bind the port; unlike TCP there is no listen() call -- datagrams from any
# client are received directly.
s.bind(('127.0.0.1',9998))
print 'Bind UDP on 9998 ...'
while True:
    # NOTE(review): recvfrom() and recv() require a buffer-size argument;
    # the two debug lines below raise TypeError if executed.
    print s.recvfrom(1024),s.recvfrom()
    print s.recv()
    data,addr = s.recvfrom(1024)
    print 'Received from %s:%s..'%addr
    # NOTE(review): `date` is undefined (typo for `data`) -> NameError.
    s.sendto('Hello,%s'%date,addr)
# recvfrom() returns both the payload and the client's (address, port), so
# after receiving, the server can reply over UDP by calling sendto() with
# that address.
| true |
d7ea825537dfa9516945d7b97fda8007defb703f | Python | Nadunnissanka/day-4-Random-List-in-python | /coin-toss.py | UTF-8 | 122 | 3.453125 | 3 | [] | no_license | import random
# randint is inclusive on both ends, so 0 and 1 are equally likely;
# 1 maps to heads and 0 to tails.
coin_random = random.randint(0, 1)
print("It's Heads" if coin_random == 1 else "It's Tails")
| true |
cdc5a67c66cb9f4727aac16d9f4cdda610c89691 | Python | jcchurch/PythonTutorial | /TurtleLesson2/ucodeshapes.py | UTF-8 | 687 | 4.03125 | 4 | [] | no_license | import turtle
def drawSquare(t, size):
    """Draw a square of side length `size` with turtle `t`.

    Lowers the pen first; four forward/left-90 moves return the turtle to
    its starting position and heading.  (The original spelled out the four
    side/turn pairs by hand; a loop issues the identical call sequence.)
    """
    t.down()
    for _ in range(4):
        t.forward(size)
        t.left(90)
def moveUnderSquare(t):
    """Lift the pen and reposition turtle `t` 110 units below the square it
    just drew, restoring the original heading afterwards.
    """
    t.up()
    for command, amount in (("right", 90), ("forward", 110), ("left", 90)):
        getattr(t, command)(amount)
# Drawing window; mainloop() at the end of the script keeps it open.
window = turtle.Screen()
def makeTurtle(color1, color2):
    """Create a turtle-shaped Turtle with pen color `color1` and fill `color2`."""
    fresh = turtle.Turtle()
    fresh.shape('turtle')
    fresh.color(color1, color2)
    return fresh
# Green/yellow turtle draws two stacked 100-unit squares on the left.
t = makeTurtle('green', 'yellow')
r = turtle.Turtle()
r.shape('turtle')
r.color('red', 'red')
drawSquare(t, 100)
moveUnderSquare(t)
drawSquare(t, 100)
# Move the red turtle right, then draw a column of four 80-unit squares.
r.up()
r.forward(150)
for i in range(4):
    drawSquare(r, 80)
    moveUnderSquare(r)
window.mainloop()
| true |
5b9ab918b7c4a32b6341cfb4c001b1875ad945ae | Python | twangcode/eebb3 | /bb3_scanner.py | UTF-8 | 3,230 | 2.515625 | 3 | [] | no_license | import data_parser as dp
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import csv
import tuotools as tt
import time
# Absolute data directory.  NOTE(review): not referenced anywhere in this module.
FILEPATH = '/var/opt/lufgroup/apps/nova_lufcomp/novaStats_ma/data'
def generate_factor_dict(dict_location):
    """Load a space-separated product/factor file into a {product: factor} dict."""
    table = pd.read_csv(dict_location, header=None, sep=' ',
                        names=['product', 'factor'], index_col=['product'])
    return table.to_dict()['factor']
def look_up(prod, prod_dict):
    """Return the factor for product `prod` as a float, or 0 when unknown.

    Any leading non-alphabetic multiplier prefix (e.g. the '2.0' in
    '2.0US5Y') is stripped before the dictionary lookup.

    Fixes over the original: the two identical if/else return branches are
    collapsed, the unused `factor` local is removed, and an all-numeric or
    empty `prod` now returns 0 instead of raising IndexError.
    """
    prefix = ''
    # Peel off any leading non-alphabetic characters (a numeric multiplier).
    while prod and not prod[0].isalpha():
        prefix += prod[0]
        prod = prod[1:]
    # NOTE(review): the stripped prefix is parsed but deliberately unused --
    # the original code returned the bare product's factor in both cases.
    if prod in prod_dict:
        return float(prod_dict[prod])
    return 0
def normalize(ratio_list):
    """Rescale so the smallest ratio becomes 1; values rounded to 3 decimals."""
    smallest = min(ratio_list)
    return [round(value / smallest, 3) for value in ratio_list]
def get_ratio(filename, prod_dict):
    """Parse the spread name embedded in `filename` (third ':'-separated
    field minus the '.data' suffix) and return its normalized leg ratios.

    Legs without a known factor are skipped; an empty list is returned
    when nothing matches.
    """
    # extract spread name
    spread_name = filename.split(':')[2][:-5]
    ratios = []
    for leg in spread_name.replace('+', ' ').replace('-', ' ').split(' '):
        factor = look_up(leg, prod_dict)
        if factor:
            ratios.append(factor)
    return normalize(ratios) if ratios else ratios
def scanner(input_filename, output_filename, entries, exits):
    """Scan every BB3 spread listed in `input_filename`, evaluate the
    entry/exit Sharpe-ratio grid, and write rows with sr > 2 to
    `output_filename` as CSV.

    `entries`/`exits` are the grids of thresholds handed to EE_Sharpe_Ratio.
    """
    # generate factor dictionary for factor ratio calculation
    prod_dict = generate_factor_dict('output/symbol_list.data')
    # read bb3 names and slippage into dataframe:
    bb3_list = pd.read_csv(input_filename, names=['name', 'slippage'])
    total_files = float(len(bb3_list.index))
    count = 0
    with open(output_filename, 'wb') as fout:
        writer = csv.writer(fout)
        for index, row in bb3_list.iterrows():
            count += 1
            test_spread = dp.spread(row['name'])
            ratio_list = get_ratio(row['name'], prod_dict)
            for entry in entries:
                for exit in exits:
                    try:
                        (sr, profit, num_trades) = test_spread.EE_Sharpe_Ratio(entry, exit, float(row['slippage']))
                        if sr > 2:
                            # Progress percentage is printed alongside each hit.
                            print row['name'], (entry, exit), (sr, profit, num_trades), "%6.2f" % (count/total_files * 100) + '%'
                            writer.writerow([row['name'][2:-5], entry, entry*exit, '%6.2f' % sr, profit, num_trades, profit/num_trades*2., row['slippage'], ratio_list])
                    # NOTE(review): the bare except silently swallows *all*
                    # errors (even KeyboardInterrupt); narrowing to
                    # Exception and logging would aid debugging.
                    except:
                        pass
    # NOTE(review): redundant -- the with statement above already closed fout.
    fout.close()
def main():
    """Run the scanner over the four BB3 input lists, timing each stage."""
    start_time = time.time()
    scanner('input/list_BB3_TEN.csv', 'output/BB3_filtered_TEN.csv', [1, 1.5, 2, 2.5, 3, 3.5, 4], [0.5, 0.75, 1])
    stop_point_1 = time.time()
    print 'BB3_TEN time is: ', '%6.2f seconds.' % (stop_point_1 - start_time)
    scanner('input/list_BB3_FIX.csv', 'output/BB3_filtered_FIX.csv', [1, 1.5, 2, 2.5, 3, 3.5, 4], [0.5, 0.75, 1])
    stop_point_2 = time.time()
    print 'BB3_FIX time is: ', '%6.2f seconds.' % (stop_point_2 - stop_point_1)
    scanner('input/list_BB3_FLY.csv', 'output/BB3_filtered_FLY.csv', [1, 1.5, 2, 2.5, 3, 3.5, 4], [0.5, 0.75, 1])
    stop_point_3 = time.time()
    print 'BB3_FLY time is: ', '%6.2f seconds.' % (stop_point_3 - stop_point_2)
    scanner('input/list_BB3.csv', 'output/BB3_filtered.csv', [1, 1.5, 2, 2.5, 3, 3.5, 4], [0.5, 0.75, 1])
    print 'BB3 time is: ', '%6.2f seconds.' % (time.time() - stop_point_3)
def main_2():
    """Ad-hoc manual check of get_ratio.

    NOTE(review): get_ratio takes (filename, prod_dict); this call passes
    only the filename and would raise TypeError if executed.
    """
    print get_ratio('S:BB3_FIX:2.0US5Y-US3Y-GE18.data')

if __name__ == '__main__':
    main()
| true |
e8abb1536d0ef097ccc01ddaef0bc44db385af5f | Python | akhilabattula/Hotel-Review-Classification | /nbclassify.py | UTF-8 | 2,599 | 2.625 | 3 | [] | no_license | '''
Created on Jan 31, 2016
@author: akhila
'''
import glob
import os
import sys
import re
from fileinput import FileInput
from collections import OrderedDict
# Directory of review .txt files to classify, supplied on the command line.
filedir=sys.argv[1]
#dp=open("","r")
# word_dict maps each model token to its list of per-class values, read
# from the tab-separated model file (token followed by its values).
word_dict=OrderedDict()
with open('nbmodel.txt', 'r') as f:
    for line in f:
        values=line.split("\t")
        key=values[0]
        word_dict[key]=[]
        for val in values:
            if val!=key and val!='\n':
                word_dict[key].append(val)
print word_dict
# Output file is opened in the current directory *before* the chdir below.
pk=open("nboutput.txt","w+")
os.chdir(filedir)
for f in glob.glob("*.txt"):
    print f
    fp=open(f,"r")
    mydata=fp.read()
    # Keep only letters, digits, hyphens and apostrophes.
    letters_only = re.sub("[^a-zA-Z0-9-']", " ", mydata)
    words=[]
    lower_case = letters_only.lower() # Convert to lower case
    words = lower_case.split()
    temporary=[]
    # Running products of the four per-class scores; presumably
    # negative/positive x deceptive/truthful -- confirm against the
    # column order of nbmodel.txt.
    nd=float(1)
    nt=float(1)
    pd=float(1)
    pt=float(1)
    for akhi in words:
        if akhi[0]=="'":
            akhi=akhi[1:]
        if akhi.endswith("'"):
            # NOTE(review): [:-2] strips two characters; [:-1] would strip
            # just the trailing apostrophe.
            akhi=akhi[:-2]
        if akhi in word_dict.keys():
            #print f,akhi,"found"
            temporary=word_dict[akhi]
            if len(temporary)==4:
                nd=nd*float(temporary[0])
                nt=nt*float(temporary[1])
                pd=pd*float(temporary[2])
                pt=pt*float(temporary[3])
        else:
            print "akhi is",akhi
    # NOTE(review): `d` is unused by the live branch below (only by the
    # dead string-literal block that follows).
    d = {'nd': nd, 'nt': nt, 'pd': pd,'pt':pt}
    """ maxval=max(d, key=d.get)
    if maxval=='nd':
        pk.write("deceptive negative ")
        pk.write(filedir+"\\"+f)
        pk.write("\n")
    elif maxval=='nt':
        pk.write("truthful negative ")
        pk.write(filedir+"\\"+f)
        pk.write("\n")
    elif maxval=='pd':
        pk.write("deceptive positive ")
        pk.write(filedir+"\\"+f)
        pk.write("\n")
    else:
        pk.write("truthful positive ")
        pk.write(filedir+"\\"+f)
        pk.write("\n")"""
    # Classify by pairwise comparison of the accumulated scores.
    if nd>nt:
        if pd>pt:
            pk.write("deceptive negative ")
            pk.write(filedir+"\\"+f)
            pk.write("\n")
        else:
            pk.write("truthful negative ")
            pk.write(filedir+"\\"+f)
            pk.write("\n")
    else:
        if pd>pt:
            pk.write("deceptive positive ")
            pk.write(filedir+"\\"+f)
            pk.write("\n")
        else:
            pk.write("truthful positive ")
            pk.write(filedir+"\\"+f)
            pk.write("\n")
| true |
fa4f6732e69f6bef03003daaa235529819faf260 | Python | christopher-a-johnson/Got-It | /libs/mailgun.py | UTF-8 | 1,446 | 2.625 | 3 | [] | no_license | import os
from typing import List
from requests import Response, post
class MailgunException(Exception):
    """Raised when Mailgun configuration is missing or a send fails.

    NOTE(review): this exception was referenced but never defined or
    imported in the original module, so every failure path raised
    NameError instead.
    """


class Mailgun:
    """Minimal wrapper around the Mailgun HTTP messages API."""

    FROM_TITLE = 'Pricing Service'
    FROM_EMAIL = 'do-not-reply@sandbox29e1ff01c0be4282a0cfa1c0986a9fc7.mailgun.org'

    @classmethod
    def send_mail(cls, email: List[str], subject: str, text: str, html: str) -> Response:
        """POST an e-mail through Mailgun.

        Reads MAILGUN_API_KEY and MAILGUN_DOMAIN from the environment;
        raises MailgunException when either is missing or the API does not
        answer HTTP 200.  Returns the requests Response on success.
        """
        api_key = os.environ.get('MAILGUN_API_KEY', None)
        domain = os.environ.get('MAILGUN_DOMAIN', None)

        if api_key is None:
            # Fixed message typo ('ley' -> 'key').
            raise MailgunException('Failed to load Mailgun API key')
        if domain is None:
            raise MailgunException("Failed to load Mailgun domain.")

        response = post(f"{domain}/messages",
                        auth=("api", api_key),
                        data={"from": f"{cls.FROM_TITLE}<{cls.FROM_EMAIL}>",
                              "to": email,
                              "subject": subject,
                              "text": text,
                              "html": html})

        if response.status_code != 200:
            print(response.json())
            raise MailgunException('An error occurred while sending e-mail')
        return response
# def send_simple_message():
# return requests.post(
# "https://api.mailgun.net/v3/sandbox29e1ff01c0be4282a0cfa1c0986a9fc7.mailgun.org/messages",
# auth=("api", "key-50ec7f3d57be9c0bc17569d6ef806ca4"),
# data={
# "from": "Excited User <do-not-reply@sandbox29e1ff01c0be4282a0cfa1c0986a9fc7.mailgun.org>",
# "to": ["christopher.a.johnson14@gmail.com"],
# "subject": "hello",
# "text": "Testing some Mailgun awesomenesss!"})
#
# print(send_simple_message())
| true |
c49c15863e5c28f5b9842c47a1b409babac8d4f7 | Python | shants/LeetCodePy | /785.py | UTF-8 | 1,068 | 3.21875 | 3 | [] | no_license | class Solution(object):
def bfs(self, graph, color, s):
color[s]=0
q = []
q.append(s)
while(len(q)>0):
i = q.pop()
for j in graph[i]:
if color[j]==-1:
color[j]=1-color[i]
q.append(j)
else:
if color[j]==color[i]:
return False
return True
def isBipartite(self, graph):
"""
:type graph: List[List[int]]
:rtype: bool
"""
if len(graph)<=1:
return True
v = [-1]*len(graph)
for i in range(len(graph)):
if v[i]==-1:
if self.bfs(graph, v, i) == False:
return False
return True
if __name__ == "__main__":
    # Ten-vertex example containing an odd cycle -> prints False.
    solver = Solution()
    print(solver.isBipartite([[],[2,4,6],[1,4,8,9],[7,8],[1,2,8,9],[6,9],[1,5,7,8,9],[3,6,9],[2,3,4,6,9],[2,4,5,6,7,8]]))
1c99446177919262ba49d9b96d4b34885389571f | Python | zvxr/tickets | /app/cache.py | UTF-8 | 532 | 2.53125 | 3 | [
"MIT"
] | permissive | import redis
import app.config as config
# Process-wide singletons, created lazily by get_client().
_redis_client = None
_redis_client_pool = None
def get_client():
    """Return the process-wide Redis client, lazily creating the blocking
    connection pool and the client on first use.
    """
    global _redis_client, _redis_client_pool
    if _redis_client_pool is None:
        _redis_client_pool = redis.BlockingConnectionPool(**config.REDIS)
    if _redis_client is None:
        _redis_client = redis.Redis(connection_pool=_redis_client_pool)
    return _redis_client
def ping():
    """Verify connectivity by issuing a PING through the shared client."""
    client = get_client()
    client.ping()
| true |
a2b86baf3de9e3c55cb00b738805827fa04540ea | Python | Tomaspereyra/Gamificando-Algoritmos | /Procesos Orientados al Desarrollo del Software/Proceso de implementaciรณn/GDA/negocio/DocenteABM.py | UTF-8 | 1,222 | 3 | 3 | [] | no_license | from negocio.UsuarioABM import UsuarioABM
from datos.Docente import Docente
from dao.DocenteDao import DocenteDao
class DocenteABM:
    """CRUD ("ABM") operations for Docente (teacher) records, delegating
    persistence to DocenteDao and account handling to UsuarioABM.
    """

    def __init__(self):
        self.dao = DocenteDao()

    def traerDocente(self, username):
        """Return the Docente for `username`, or None when no such user exists."""
        usuario = UsuarioABM()
        docente = None
        if usuario.traerUsuario(username) is not None:
            # NOTE(review): traerUsuario is called twice; caching the first
            # result would avoid the duplicate lookup.
            docente = self.dao.traerDocente(usuario.traerUsuario(username).getId())
        return docente

    def registrarDocente(self, username, password, email, nombre, apellido, fechaNacimiento):
        """Create the user account and the Docente row.

        Returns the DAO's result, or 0 when the username is already taken.
        """
        agregado = 0
        if self.traerDocente(username) is None:
            usuario = UsuarioABM()
            usuario.registrarUsuario(username, password, email, nombre, apellido, fechaNacimiento)
            agregado = self.dao.agregarDocente(int(usuario.traerUsuario(username).getId()))
        else:
            print "Nombre de usuario en uso."
        return agregado

    def eliminarDocente(self, username):
        """Delete the Docente identified by `username`.

        NOTE(review): docente.getIdDocente() is called *before* the None
        check, so a missing teacher raises AttributeError instead of
        reaching the error message below.
        """
        docente = self.traerDocente(username)
        print docente.getIdDocente()
        if docente is not None:
            self.dao.eliminar(docente)
            print docente.getUsername()
        else:
            print "Error, el docente no existe"
| true |
ee26b19d995163c72f3c817352a48275245ae6cc | Python | aishwat/missionPeace | /graph/floydWarshall.py | UTF-8 | 838 | 3.5 | 4 | [] | no_license | INF = float('inf')
class Graph:
    """Dense directed graph for the Floyd-Warshall demo.

    `graph` holds a V x V adjacency matrix where float('inf') marks a
    missing edge; callers assign it after construction.
    """

    def __init__(self, vertices):
        self.V = vertices
        self.graph = []

    def floydWarshall(self):
        """Compute all-pairs shortest paths and print the distance matrix,
        one tab-separated row per vertex with a blank line after each row.
        """
        # Work on a row-wise copy so the original adjacency matrix survives.
        dist = [row[:] for row in self.graph]
        n = self.V
        for k in range(n):
            for i in range(n):
                for j in range(n):
                    via_left, via_right = dist[i][k], dist[k][j]
                    if (via_left != float('inf') and via_right != float('inf')
                            and dist[i][j] > via_left + via_right):
                        dist[i][j] = via_left + via_right
        for i in range(n):
            for j in range(n):
                print(dist[i][j], end="\t")
            print("\n")
g = Graph(4)
# 4x4 adjacency matrix; INF marks the absence of a direct edge.
g.graph = [
    [0, 5, INF, 10],
    [INF, 0, 3, INF],
    [INF, INF, 0, 1],
    [INF, INF, INF, 0],
]
# Run all-pairs shortest paths and print the resulting distance matrix.
g.floydWarshall()
| true |
0d3ddb60cfca95c0edc5039a7333dc0ca753943e | Python | Mashakal/Minerva | /Minerva/luis/LuisInterpreter.py | UTF-8 | 7,672 | 2.953125 | 3 | [] | no_license | import InfoManager
import collections
# For development purposes only:
from Essentials import enter_and_exit_labels, print_smart
# Constants
# These may be better off in the Bot module.
# Affirmative / negative reply lexicons.
# NOTE(review): not referenced by the classes below; presumably consumed by
# the bot's yes/no handling elsewhere -- confirm before removing.
_YES_WORDS = ['yes', 'yeah', 'okay', 'ok', 'k', 'y', 'ya', 'right', 'correct', "that's right", 'sure', 'for sure']
_NO_WORDS = ['no', 'n', 'nah', 'nope', 'negative']
class BaseLuisInterpreter(object):
    """A base class for all interpreters."""

    def __init__(self, **kwargs):
        return super().__init__(**kwargs)

    def analyze(self, json):
        """Analyzes the json returned from a call to the base LuisClient class's method, query_raw."""
        raise NotImplementedError("Function analyze has not yet been customized.")

    @classmethod
    def _get_top_scoring_intent(cls, json):
        """Name of the highest-ranked intent, or 'undefined' when absent."""
        try:
            return json['intents'][0]['intent']
        except LookupError:
            return 'undefined'

    @classmethod
    def _get_literals(cls, json):
        """All entity literal strings, in response order."""
        return [entity['entity'] for entity in json['entities']]

    @classmethod
    def _get_types(cls, json):
        """All entity type names, in response order."""
        return [entity['type'] for entity in json['entities']]

    @classmethod
    def _get_all_literals_of_type(cls, t, json):
        """Literals of only those entities whose type equals `t`."""
        return [entity['entity'] for entity in json['entities'] if entity['type'] == t]
class ProjectSystemLuisInterpreter(BaseLuisInterpreter):
    """Interprets questions for language specific project systems of Visual Studio
    as a part of a help bot.
    """

    def __init__(self, bot, project_system):
        # _info is the main point of access for anything specific to a project (e.g. urls, triggers, keywords).
        self._info = InfoManager.ProjectSystemInfoManager(project_system)
        # Use _bot to interact with the user (e.g. ask a question, clarify between options, acknowledge keywords).
        self._bot = bot
        # Maps an intent to a function.
        self._STRATAGIES = {
            'Get Help': self._get_help,
            'undefined': self._undefined
        }

    def analyze(self, json):
        """Analyzes the json returned from a call to LuisClient's method, query_raw."""
        intent = self._get_top_scoring_intent(json)
        try:
            rv = self._STRATAGIES[intent](json)
        # NOTE(review): two issues here -- (1) when the intent IS
        # 'undefined', _undefined is called WITH `json`, yet it takes no
        # arguments (TypeError); (2) this except also swallows KeyErrors
        # raised *inside* the strategy handler itself.
        except KeyError:
            rv = self._STRATAGIES['undefined']()
        return rv

    def _format_data(self, json):
        """Formats the raw json into a more easily managable dictionary."""
        o = {
            'keywords': self._get_all_literals_of_type('Keyword', json),
            'intent': self._get_top_scoring_intent(json)
        }
        # Add to the set any entities that you have urls for in the info.links dict.
        # Just make sure to call _get_all_literals_of_type(entity, json) as above.
        o['paths'] = self.__get_paths(set(o['keywords']))
        return o

    def __get_paths(self, word_set):
        # I die a little inside everytime I look at this function.
        """Get's paths to all words in the set, if a path for it exists.
        Filters the paths found such that only the deepest path will be
        returned, which is helpful when a Luis picks up a trigger to a key
        and also a trigger to a more specialized version of that key in the
        same query.
        """
        def get_paths(word_set):
            """A helper function for __get_paths. Returns an unfiltered list
            of all the paths pointed to by words in the word set.
            """
            paths = []
            for word in word_set:
                path = self._info.find_path_to_trigger_key(word)
                if path:
                    paths.append(path)
            return paths

        def remove_duplicates(paths, key):
            """Remove all but the longest path from paths."""
            list_max = None # The list with the longest length.
            # Get the paths that contain key.
            with_key = [path for path in paths if key in path]
            # Find the longest one.
            for path in with_key:
                if not list_max or len(path) > len(list_max):
                    list_max = path
            # Remove all lists of paths that are not the one with the longest length.
            # (List comprehension used purely for its side effect here.)
            [paths.remove(p) for p in with_key if p is not list_max]
            return paths

        paths = get_paths(word_set)
        # Flatten to count how often each key occurs across all paths.
        flattened_paths = [p for path in paths for p in path]
        # NOTE(review): `counts` is assigned but never used.
        counts = {}
        counter = collections.Counter()
        for key in flattened_paths:
            counter[key] += 1
        for key, count in counter.most_common(): # Get ALL elements in counter.
            if count > 1:
                paths = remove_duplicates(paths, key)
        # TODO: Log how many paths were returned, and which ones.
        return paths

    def _get_trigger_paths(self):
        """Returns a mapping of a trigger to a set of keys that will lead to the value
        for the key that this trigger is mapped to.

        NOTE(review): incomplete -- the function computes `triggers` but
        has no return statement, so it always returns None.
        """
        # Get all triggers as a set. This function will use
        triggers = self._info.set_from_key_values(k_to_collect='Triggers')

    def _get_help(self, json):
        """Called from function 'analyze' when the intent of a LUIS query is determined
        to be 'Get Help'.
        """
        def clarify_paths(paths):
            """Determine which topic is most pertinent to the user
            when more than one unique path is found given the
            user's query.
            """
            # One path is good, as long Luis picked up the right keywords.
            if 1 == len(paths):
                return paths
            elif 1 < len(paths):
                ending_keys = [p[len(p) - 1] for p in paths]
                ans = self._bot.clarify(ending_keys)
                return [p for p in paths for a in ans if p[len(p) - 1] == a]
            else:
                # No paths found.
                return False

        def get_ending_url(path):
            """Given a single path, get to an url.
            """
            u = self._info.traverse_keys(path)
            while not isinstance(u, str): # Path might not lead to url yet.
                # If our path doesn't point to a key with its own url,
                # ask the user where to go from here.
                keys = list(u.keys())
                # We only need to ask when there is more than one potential key.
                if 1 < len(keys):
                    self._bot.acknowledge(path[len(path) - 1])
                    next = self._bot.give_options([k for k in u.keys()])
                    path.append(next)
                else:
                    path.append(keys[0])
                u = self._info.traverse_keys(path)
            return u

        data = self._format_data(json)
        # Check if the user triggered any links to the wiki page.
        paths = clarify_paths(data['paths'])
        if paths:
            urls = [get_ending_url(path) for path in paths]
            topics = [self._info.get_url_description(u) for u in urls]
            self._bot.acknowledge(topics)
            self._bot.suggest_multiple_urls(urls, topics)
        else:
            # Try StackExchange
            self._bot.say("Hmmm, I'm not sure the wiki can help.\nLet me see what I can find through stackoverflow.\n\n")
            #raise NotImplementedError("Querying stackoverflow is not yet implemented.")

    def _undefined(self):
        self._bot.say("I'm sorry, I don't know what you're asking.")
        # "Help me understand what I can do for you?"
        # Give options on different intents, call that function with the original query or a new one, whichever makes more sense.
| true |
d374b7611ee1e8b1ad198fa0396653e774c73b87 | Python | soelves/portfolio | /IN1000/IN1000/Uke 5/temperatur.py | UTF-8 | 343 | 3.578125 | 4 | [] | no_license | def snitt(liste):
Sum = 0
for verdi in liste:
Sum += verdi
gjennomsnitt = Sum/len(liste)
print("Gjennomsnitt:", gjennomsnitt)
def hovedprogram():
temp = open("temperatur.txt", "r")
mnd=[]
for linje in temp:
mnd.append(float(linje))
print(mnd)
snitt(mnd)
temp.close()
hovedprogram()
| true |
44f82b8873f51ebdd53ea9e221b9778e703faca3 | Python | MarcelTkacik/genderStereotypes | /pลฏv_child_books.py | UTF-8 | 850 | 3.125 | 3 | [] | no_license | import nltk
## when installing nltk, you will also need to install "wordnet" using nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
file = open("/Users/tessacharlesworth/Desktop/Embeddings/Raw Data/Child_Books/child_books.txt", 'r')
outfile = open("/Users/tessacharlesworth/Desktop/Embeddings/Clean Data/Child_Books/lemmatized_child_books.txt", 'w+')
words = file.read().split()
for word in words:
if ((word.find('_') != -1) or (word.find('.') != -1) or (word.find(':') != -1) or (word.find(';') != -1) or
(word.find('?') != -1) or (word.find('`') != -1) or (word.find('!') != -1) or (word.find(',') != -1) or
(word.find('-') != -1) or word == 'CHAPTER' or not word.isalnum()):
pass
else:
outfile.write(lemmatizer.lemmatize(word).lower() + " ")
print(len(words))
outfile.close()
file.close() | true |
300ae8eb219e0a1423626e9f270ce25a9753f1e7 | Python | NickHMC/LANL_2019_Clinic | /Pipeline/__init__.py | UTF-8 | 2,827 | 2.703125 | 3 | [] | no_license |
from importlib import __import__ as imp
import inspect
import os
import re
import sys
from pnspipe import PNSPipe
# This dictionary is used to register all available pipeline functions
_pipe_functions = {
x: [] for x in "preprocess;onsegment;postsegment;postprocess".split(';')}
def register_pipe_function(f):
"""
Call register_pipe_function on a function that is designed to accept
a PNSPipe object and key word arguments. Some examples are present
in this file, but the register_pipe_function allows functions in
other files to register their processing functions so that they can
be included in the list of available such functions supplied by
the help commanmd and so that they can be called from the main loop.
The functions defined in the present file are included automatically
and need not be registered manually.
"""
if not inspect.isfunction(f):
return
params = inspect.signature(f).parameters
keys = list(params.keys())
annotes = [x.annotation for x in params.values()]
try:
if len(keys) == 2:
# the first argument should be a PNSPipe;
# the second should be named kwargs
if annotes[0] == PNSPipe and keys[1] == 'kwargs':
_pipe_functions['onsegment'].append(f)
if annotes[0] == list and keys[1] == 'kwargs':
_pipe_functions['postsegment'].append(f)
if len(keys) == 3:
if annotes[0] == re.Pattern \
and annotes[1] == re.Pattern \
and keys[2] == 'kwargs':
_pipe_functions['preprocess'].append(f)
if len(keys) == 4:
if annotes[0] == str \
and annotes[1] == dict \
and annotes[2] == dict:
_pipe_functions['postprocess'].append(f)
except Exception as eeps:
print(eeps)
def describe_pipe_functions(key: str):
"""
Prepare a long text string describing all the registered
pipe functions satisying the given key. If key is "",
describe them all.
"""
fstring = "{0}: {1}"
if key == "":
lines = []
for key, vals in _pipe_functions.items():
lines.append(f"{key.upper()} ------------------\n")
lines.extend([fstring.format(x.__name__, x.__doc__)
for x in vals])
lines.append("\n")
else:
lines = [fstring.format(x.__name__, x.__doc__) for x in
_pipe_functions.vals()]
return "\n".join(lines)
folder = os.path.split(__file__)[0]
for file in os.listdir(folder):
path = f"Pipeline.{os.path.splitext(file)[0]}"
try:
imp(path)
for name, obj in inspect.getmembers(sys.modules[path]):
register_pipe_function(obj)
except:
pass
| true |
527eed9b3d79784d12e216f0a17240072168d91a | Python | hitechparadigm/Programming-for-Everybody | /conditional.py | UTF-8 | 139 | 3.734375 | 4 | [] | no_license | i = input('Enter Number')
if int(i) < 2:
print('Ok ',i)
if int(i) > 2:
print('Bigger than 2')
print('Done with i',i)
print('All Done')
| true |
4d57f3b17386847dc711e1e9610eec46c95a07fd | Python | pacomunuera/Concentrated_Solar_Power | /src/plot_results.py | UTF-8 | 7,671 | 3.078125 | 3 | [] | no_license | import os
import pickle
import matplotlib.pyplot as plt
from eda import *
from modeling_base import *
def get_pickle_files(dirpath):
'''
Reads all pickle files in dirpath in as dictionaries and returns a
list of dictionaries.
Parameters:
----------
dirpath : (str)
The absolute or relative path to the directory in which the files
are stored.
Returns:
----------
dicts : (list)
List of dictionaries
'''
dicts = []
for pickle_file in os.listdir(dirpath):
filepath = dirpath + pickle_file
with open(filepath, 'rb') as f:
current = pickle.load(f)
dicts.append(current)
return dicts
def format_dict_for_plot(lst, key_identifiers):
'''
Creates a dictionary from a list of dictionaries in order to be passed
to a plotting function. (Used to deconstruct list of dictionaries output
from get_pickle_files())
Parameters:
----------
lst : (list)
List of dictionaries
key_identifiers : (list)
List of strings (len(2)). Each element should be part of the key for all
the values that you would like in the final dictionary
Returns:
----------
out : (dict)
Dictionary with keys equal to those keys within lst that have
key_identifier in their key, and values equal to the values of
those keys
'''
total = {}
for dictionary in results:
for k, v in dictionary.items():
for lower_dict_key, lower_dict_value in dictionary[k].items():
if key_identifiers[0] in lower_dict_key or key_identifiers[1] in lower_dict_key:
out_key = k + " " + lower_dict_key
total[out_key] = lower_dict_value
return total
def results_error_plot(error_dict, model_colors, base_colors, title, xlab, ylab, savefig=False):
'''
Plots the error arrays of two models against each other with the x-axis
ticks being months of the year.
Parameters:
----------
error_dict : (dict)
A dictionary where the keys are the names of the error arrays
(i.e 'Linear Regression Error') and the values are an array_like
(array/list) sequence of errors
model_colors : (list)
List of strings with length equal to number of keys in error_dict
divided by 2
base_colors : (list)
List of strings with length equal to the number of keys in error_dict
divided by 2
title : (str)
The title for the plot
xlab : (str)
Label for x-axis
ylab : (str)
Label for y-axis
savefig : (bool/str)
If False default, image will be displayed and not saved. If the
user would like the image saved, pass the filepath as string to
which the image should be saved.
Returns:
----------
None
'''
fig, ax = plt.subplots(figsize=(12,8))
model_counter = 0
base_counter =0
for name, array in error_dict.items():
broken_name = name.split()
if 'Persistence' in broken_name:
ax.plot(array, c=base_colors[base_counter])
base_counter += 1
else:
ax.plot(array, c=model_colors[model_counter])
model_counter += 1
plt.xticks(range(0,12), ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sept','Oct','Nov','Dec'])
plt.xlabel(xlab, fontweight='bold', fontsize=19)
plt.ylabel(ylab, fontweight='bold', rotation=0, fontsize=19)
ax.tick_params(axis='both', labelcolor='black', labelsize=15.0)
ax.yaxis.set_label_coords(-0.105,0.5)
plt.suptitle(title, fontweight='bold', fontsize=21)
if savefig:
plt.savefig(savefig)
plt.show()
def separate_dict(units, parent_dict):
'''
Used to pull associated keys out of a dictionary. Keys in parent_dict
must be separated by underscores. All keys that have the str specified
by units will be returned with their associated values in the output
dictionary
Parameters:
----------
units : (str)
An identifier that is in the keys of every key you would like
separated out from parent_dict
parent_dict : (dict)
The parent dictionary with multiple keys
Returns:
----------
out : (dict)
Dictionary of key value pairs that have units in the key name
'''
out = {}
for k, v in parent_dict.items():
words = k.split("_")
if units in words:
out[k] = v
return out
def dict_plot(dict, model_color_map, base_model_color_map, title, xlab, ylab, savefig=False):
'''
Creates colormaps for each list of values in dict and passes through to
results_error_plot() (helper function)
Parameters:
----------
error_dict : (dict)
A dictionary where the keys are the names of the error arrays
(i.e 'Linear Regression Error') and the values are an array_like
(array/list) sequence of errors
model_colors_map : (str)
Valid string specifying a seaborn color pallete
base_model_color_map : (str)
Valid string specifying a seaborn color pallete
title : (str)
The title for the plot
xlab : (str)
Label for x-axis
ylab : (str)
Label for y-axis
savefig : (bool/str)
If False default, image will be displayed and not saved. If the
user would like the image saved, pass the filepath as string to
which the image should be saved.
Returns:
----------
None
'''
model_color_map = sns.color_palette(model_color_map,len(dict.keys())//2)
base_model_color_map = sns.color_palette(base_model_color_map,len(dict.keys())//2)
results_error_plot(dict, model_color_map, base_model_color_map, title, xlab, ylab, savefig)
def create_mean_pairs(dictionary):
'''
Takes as input a dictionary whose values are lists of even length (i.e. 24)
and returns a dictionary with the same keys. The values of the output
dictionary will be the mean of every pair of numbers in the input dictionary
value lists.
Example:
[1]: input_dict = {'key1': [10, 20, 30, 40, 50, 60],
'key2': [70, 80, 90, 100, 110, 120]}
[2]: output_dict = create_mean_pairs(input_dict)
[3]: output_dict = {'key1': [15.0, 35.0, 55.0],
'key2': [75.0, 95.0, 115.0]}
Parameters:
----------
dictionary : (dict)
Dictionary with array like values of even length
Returns:
----------
out : (dict)
Dictionary with pair-wise means of input dictionary values
'''
out = {}
for k in dictionary.keys():
out[k] = []
cache = []
for v in dictionary[k]:
if len(cache) == 1:
v = np.mean((cache[0], v))
out[k].append(v)
cache = []
elif len(cache) == 0:
cache.append(v)
return out
if __name__ == "__main__":
results = get_pickle_files("../pickle_results/")
total = format_dict_for_plot(results, ['RMSE', 'MAE'])
units = ['month','week','day','hour']
for unit in units:
time_dict = separate_dict(unit, total)
plot_dict = create_mean_pairs(time_dict)
title = unit.capitalize() + "s"
dict_plot(plot_dict, 'Greens', "Reds", f"MAE & RMSE Over Multiple {title}", "Month", r"$\frac{Watts}{Meter^2}$")
years = separate_dict('year', total)
new_years = create_mean_pairs(years)
dict_plot(new_years, 'Greens', "Reds", "MAE & RMSE Over Multiple Years", "Month", r"$\frac{Watts}{Meter^2}$", "../images/boostrapped_nn_errors.png")
| true |
65cf1315505416074a5e00ce74bc25ed4379476f | Python | IronLanguages/ironpython2 | /Src/StdLib/Lib/test/test_import_magic.py | UTF-8 | 2,267 | 2.640625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | import imp
import sys
import unittest
# Note:
# In Python 3.x, this test case is in Lib/test/test_importlib/test_util.py
class MagicNumberTests(unittest.TestCase):
    """
    Test release compatibility issues relating to precompiled bytecode
    """
    @unittest.skipUnless(
        sys.version_info.releaselevel in ('candidate', 'final'),
        'only applies to candidate or final python release levels'
    )
    def test_magic_number(self):
        """
        Each python minor release should generally have a MAGIC_NUMBER
        that does not change once the release reaches candidate status.
        Once a release reaches candidate status, the value of the constant
        EXPECTED_MAGIC_NUMBER in this test should be changed.
        This test will then check that the actual MAGIC_NUMBER matches
        the expected value for the release.
        In exceptional cases, it may be required to change the MAGIC_NUMBER
        for a maintenance release. In this case the change should be
        discussed in python-dev. If a change is required, community
        stakeholders such as OS package maintainers must be notified
        in advance. Such exceptional releases will then require an
        adjustment to this test case.
        """
        EXPECTED_MAGIC_NUMBER = 62211
        raw_magic = imp.get_magic()
        # Python 2: indexing bytes yields 1-char strings, hence ord();
        # reassemble the little-endian 16-bit magic number.
        actual = (ord(raw_magic[1]) << 8) + ord(raw_magic[0])
        msg = (
            "To avoid breaking backwards compatibility with cached bytecode "
            "files that can't be automatically regenerated by the current "
            "user, candidate and final releases require the current "
            "importlib.util.MAGIC_NUMBER to match the expected "
            "magic number in this test. Set the expected "
            "magic number in this test to the current MAGIC_NUMBER to "
            "continue with the release.\n\n"
            "Changing the MAGIC_NUMBER for a maintenance release "
            "requires discussion in python-dev and notification of "
            "community stakeholders."
        )
        # msg deliberately left out of the assertion (see trailing comment).
        self.assertEqual(EXPECTED_MAGIC_NUMBER, actual)#, msg)
def test_main():
    """Run this module's test case through CPython's test-support runner."""
    from test.support import run_unittest
    run_unittest(MagicNumberTests)
if __name__ == '__main__':
    test_main()
| true |
4f410c062f8280f6243a1b7b7ce3fdb5c6006b0a | Python | bhatnitish1998/aps-2020 | /fenwick_tree_sum.py | UTF-8 | 492 | 3.53125 | 4 | [] | no_license | # Fenwick tree to calculate the sum in range queries
# Input : Update(index,value) array is 1 indexed
# qlr(left index, right index both inclusive) Note it is 1-indexed
# Output: qlr=The sum of elements in the range
#n= len(arr)
fenwik=[0]*(n+1)
def update(i,value):
global fenwik
global n
while(i<=n):
fenwik[i]+=value
i+=i&-i
def query(x):
global fenwik
global n
qsum=0
while(x>=1):
qsum+=fenwik[x]
x-=x&-x
return qsum
def qlr(l,r):
return(query(r)-query(l-1)) | true |
7ba27718bef68aa07504735a996e6eaae94269d9 | Python | sympy/sympy | /sympy/ntheory/tests/test_primetest.py | UTF-8 | 8,937 | 2.59375 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | from sympy.ntheory.generate import Sieve, sieve
from sympy.ntheory.primetest import (mr, _lucas_sequence, _lucas_selfridge_params, _lucas_extrastrong_params,
is_lucas_prp, is_square,
is_strong_lucas_prp, is_extra_strong_lucas_prp, isprime, is_euler_pseudoprime,
is_gaussian_prime)
from sympy.testing.pytest import slow
from sympy.core.numbers import I
def test_euler_pseudoprimes():
    """Spot-check is_euler_pseudoprime against known (non-)pseudoprime pairs."""
    assert is_euler_pseudoprime(9, 1) == True
    assert is_euler_pseudoprime(341, 2) == False
    assert is_euler_pseudoprime(121, 3) == True
    assert is_euler_pseudoprime(341, 4) == True
    assert is_euler_pseudoprime(217, 5) == False
    assert is_euler_pseudoprime(185, 6) == False
    assert is_euler_pseudoprime(55, 111) == True
    assert is_euler_pseudoprime(115, 114) == True
    assert is_euler_pseudoprime(49, 117) == True
    assert is_euler_pseudoprime(85, 84) == True
    assert is_euler_pseudoprime(87, 88) == True
    assert is_euler_pseudoprime(49, 128) == True
    assert is_euler_pseudoprime(39, 77) == True
    assert is_euler_pseudoprime(9881, 30) == True
    assert is_euler_pseudoprime(8841, 29) == False
    assert is_euler_pseudoprime(8421, 29) == False
    assert is_euler_pseudoprime(9997, 19) == True
def test_lucas_sequence():
    """Check _lucas_sequence against naive U/V recurrences over many (P, Q, n, k)."""
    def lucas_u(P, Q, length):
        # Reference Lucas U-sequence: U0=0, U1=1, Uk = P*U(k-1) - Q*U(k-2).
        array = [0] * length
        array[1] = 1
        for k in range(2, length):
            array[k] = P * array[k - 1] - Q * array[k - 2]
        return array
    def lucas_v(P, Q, length):
        # Reference Lucas V-sequence: V0=2, V1=P, Vk = P*V(k-1) - Q*V(k-2).
        array = [0] * length
        array[0] = 2
        array[1] = P
        for k in range(2, length):
            array[k] = P * array[k - 1] - Q * array[k - 2]
        return array
    length = 20
    for P in range(-10, 10):
        for Q in range(-10, 10):
            D = P**2 - 4*Q
            if D == 0:
                # Degenerate discriminant — _lucas_sequence is undefined here.
                continue
            us = lucas_u(P, Q, length)
            vs = lucas_v(P, Q, length)
            for n in range(3, 100, 2):
                for k in range(length):
                    U, V, Qk = _lucas_sequence(n, P, Q, k)
                    assert U == us[k] % n
                    assert V == vs[k] % n
                    assert pow(Q, k, n) == Qk
def test_lucas_selfridge_params():
    """Spot-check Selfridge (D, P, Q) parameter selection; (0,0,0) flags a square."""
    assert _lucas_selfridge_params(3) == (5, 1, -1)
    assert _lucas_selfridge_params(5) == (-7, 1, 2)
    assert _lucas_selfridge_params(7) == (5, 1, -1)
    assert _lucas_selfridge_params(9) == (0, 0, 0)
    assert _lucas_selfridge_params(11) == (13, 1, -3)
    assert _lucas_selfridge_params(19) == (-7, 1, 2)
    assert _lucas_selfridge_params(29) == (-11, 1, 3)
def test_lucas_extrastrong_params():
    """Spot-check extra-strong Lucas (D, P, Q) parameters; Q is always 1."""
    assert _lucas_extrastrong_params(3) == (5, 3, 1)
    assert _lucas_extrastrong_params(5) == (12, 4, 1)
    assert _lucas_extrastrong_params(7) == (5, 3, 1)
    assert _lucas_extrastrong_params(9) == (0, 0, 0)
    assert _lucas_extrastrong_params(11) == (21, 5, 1)
    assert _lucas_extrastrong_params(59) == (32, 6, 1)
    assert _lucas_extrastrong_params(479) == (117, 11, 1)
def test_is_extra_strong_lucas_prp():
    """Known extra-strong Lucas pseudoprimes vs. composites that are not."""
    assert is_extra_strong_lucas_prp(4) == False
    assert is_extra_strong_lucas_prp(989) == True
    assert is_extra_strong_lucas_prp(10877) == True
    assert is_extra_strong_lucas_prp(9) == False
    assert is_extra_strong_lucas_prp(16) == False
    assert is_extra_strong_lucas_prp(169) == False
@slow
def test_prps():
    """Exhaustively compare probable-prime tests against true odd composites < 10**5."""
    oddcomposites = [n for n in range(1, 10**5) if
                     n % 2 and not isprime(n)]
    # A checksum would be better.
    assert sum(oddcomposites) == 2045603465
    assert [n for n in oddcomposites if mr(n, [2])] == [
        2047, 3277, 4033, 4681, 8321, 15841, 29341, 42799, 49141,
        52633, 65281, 74665, 80581, 85489, 88357, 90751]
    assert [n for n in oddcomposites if mr(n, [3])] == [
        121, 703, 1891, 3281, 8401, 8911, 10585, 12403, 16531,
        18721, 19345, 23521, 31621, 44287, 47197, 55969, 63139,
        74593, 79003, 82513, 87913, 88573, 97567]
    assert [n for n in oddcomposites if mr(n, [325])] == [
        9, 25, 27, 49, 65, 81, 325, 341, 343, 697, 1141, 2059,
        2149, 3097, 3537, 4033, 4681, 4941, 5833, 6517, 7987, 8911,
        12403, 12913, 15043, 16021, 20017, 22261, 23221, 24649,
        24929, 31841, 35371, 38503, 43213, 44173, 47197, 50041,
        55909, 56033, 58969, 59089, 61337, 65441, 68823, 72641,
        76793, 78409, 85879]
    assert not any(mr(n, [9345883071009581737]) for n in oddcomposites)
    assert [n for n in oddcomposites if is_lucas_prp(n)] == [
        323, 377, 1159, 1829, 3827, 5459, 5777, 9071, 9179, 10877,
        11419, 11663, 13919, 14839, 16109, 16211, 18407, 18971,
        19043, 22499, 23407, 24569, 25199, 25877, 26069, 27323,
        32759, 34943, 35207, 39059, 39203, 39689, 40309, 44099,
        46979, 47879, 50183, 51983, 53663, 56279, 58519, 60377,
        63881, 69509, 72389, 73919, 75077, 77219, 79547, 79799,
        82983, 84419, 86063, 90287, 94667, 97019, 97439]
    assert [n for n in oddcomposites if is_strong_lucas_prp(n)] == [
        5459, 5777, 10877, 16109, 18971, 22499, 24569, 25199, 40309,
        58519, 75077, 97439]
    assert [n for n in oddcomposites if is_extra_strong_lucas_prp(n)
            ] == [
        989, 3239, 5777, 10877, 27971, 29681, 30739, 31631, 39059,
        72389, 73919, 75077]
def test_isprime():
    """isprime vs. a sieve below 10**5, large primes, and famous pseudoprime traps."""
    s = Sieve()
    s.extend(100000)
    ps = set(s.primerange(2, 100001))
    for n in range(100001):
        # if (n in ps) != isprime(n): print n
        assert (n in ps) == isprime(n)
    assert isprime(179424673)
    assert isprime(20678048681)
    assert isprime(1968188556461)
    assert isprime(2614941710599)
    assert isprime(65635624165761929287)
    assert isprime(1162566711635022452267983)
    assert isprime(77123077103005189615466924501)
    assert isprime(3991617775553178702574451996736229)
    assert isprime(273952953553395851092382714516720001799)
    assert isprime(int('''
531137992816767098689588206552468627329593117727031923199444138200403\
559860852242739162502265229285668889329486246501015346579337652707239\
409519978766587351943831270835393219031728127'''))
    # Some Mersenne primes
    assert isprime(2**61 - 1)
    assert isprime(2**89 - 1)
    assert isprime(2**607 - 1)
    # (but not all Mersenne's are primes
    assert not isprime(2**601 - 1)
    # pseudoprimes
    #-------------
    # to some small bases
    assert not isprime(2152302898747)
    assert not isprime(3474749660383)
    assert not isprime(341550071728321)
    assert not isprime(3825123056546413051)
    # passes the base set [2, 3, 7, 61, 24251]
    assert not isprime(9188353522314541)
    # large examples
    assert not isprime(877777777777777777777777)
    # conjectured psi_12 given at http://mathworld.wolfram.com/StrongPseudoprime.html
    assert not isprime(318665857834031151167461)
    # conjectured psi_17 given at http://mathworld.wolfram.com/StrongPseudoprime.html
    assert not isprime(564132928021909221014087501701)
    # Arnault's 1993 number; a factor of it is
    # 400958216639499605418306452084546853005188166041132508774506\
    # 204738003217070119624271622319159721973358216316508535816696\
    # 9145233813917169287527980445796800452592031836601
    assert not isprime(int('''
803837457453639491257079614341942108138837688287558145837488917522297\
427376533365218650233616396004545791504202360320876656996676098728404\
396540823292873879185086916685732826776177102938969773947016708230428\
687109997439976544144845341155872450633409279022275296229414984230688\
1685404326457534018329786111298960644845216191652872597534901'''))
    # Arnault's 1995 number; can be factored as
    # p1*(313*(p1 - 1) + 1)*(353*(p1 - 1) + 1) where p1 is
    # 296744956686855105501541746429053327307719917998530433509950\
    # 755312768387531717701995942385964281211880336647542183455624\
    # 93168782883
    assert not isprime(int('''
288714823805077121267142959713039399197760945927972270092651602419743\
230379915273311632898314463922594197780311092934965557841894944174093\
380561511397999942154241693397290542371100275104208013496673175515285\
922696291677532547504444585610194940420003990443211677661994962953925\
045269871932907037356403227370127845389912612030924484149472897688540\
6024976768122077071687938121709811322297802059565867'''))
    sieve.extend(3000)
    assert isprime(2819)
    assert not isprime(2931)
    # Non-integers are never prime.
    assert not isprime(2.0)
def test_is_square():
    """is_square on small integers plus the issue #17044 regression cases."""
    assert [i for i in range(25) if is_square(i)] == [0, 1, 4, 9, 16]
    # issue #17044
    assert not is_square(60 ** 3)
    assert not is_square(60 ** 5)
    assert not is_square(84 ** 7)
    assert not is_square(105 ** 9)
    assert not is_square(120 ** 3)
def test_is_gaussianprime():
    """Spot-check Gaussian-prime classification for a few Gaussian integers."""
    assert is_gaussian_prime(7*I)
    assert is_gaussian_prime(7)
    assert is_gaussian_prime(2 + 3*I)
    assert not is_gaussian_prime(2 + 2*I)
| true |
8352b60419772c1c0b83a0e68eb4d730b0213b01 | Python | hellJane/Python_DataAnalysis | /Optimization/curve_fit.py | UTF-8 | 1,395 | 3.234375 | 3 | [] | no_license | import numpy as np
import scipy.linalg as sl
import scipy.optimize as so
import sklearn.linear_model as slm
'''
Verify scipy.optimize.curve_fit(): compared against sklearn's LinearRegression
and the closed-form least-squares formula, all three should agree in theory.
'''
def randRange(vmin, vmax, size=20):
    """Return `size` uniform samples drawn from [vmin, vmax)."""
    # BUG FIX: the original hard-coded np.random.rand(20) and ignored `size`.
    return vmin + (vmax - vmin) * np.random.rand(size)
def func(x, w0, w1):
    '''
    Linear model used by curve_fit: f(x) = w0 + w1 * x.

    curve_fit requires this exact signature: the first parameter is the
    independent variable (xdata); each remaining parameter is a scalar to
    be estimated, under the assumption ydata = f(xdata, *params) + eps.
    '''
    return w1 * x + w0
# generate data
w_true = [1, 1] # ็ๅฎ็ๆ้ๅ้
x = randRange(-4, 4, 20)
y = w_true[0] + w_true[1] * x + 0.8 * np.random.randn(20) # N(0, 0.8**2)็ๅชๅฃฐ
popt, pcov = so.curve_fit(func, x, y) # pcov ๆฏ ไธไธชๅๆนๅทฎ็ฉ้ต๏ผๆๆถไธๆๅฐๅบๆฏๅนฒๅ็
print('w by scipy.optimize.curve_fit(): ', popt)
reg = slm.LinearRegression()
reg.fit(x.reshape(-1, 1), y)
print('w by sklean.linear_model.LinearRegression:', [ reg.intercept_, reg.coef_]) # ็ณปๆฐๅๆช่ทๆฏๅๅผ็
# ็จๅ
ฌๅผ่ฎก็ฎ
dm = np.c_[np.ones(len(x)).reshape(-1, 1), x.reshape(-1, 1)] # design matrix
y_vector = y.reshape(-1, 1)
w_MLE = np.dot(sl.inv(np.dot(dm.T, dm)), np.dot(dm.T, y_vector))
print('w calculated by equation: ', w_MLE.ravel())
| true |
233485fb977254c86bece1e2b3c19c1aefe23203 | Python | minkpang/Damwha | /sub1/์ ์คํ/analyze.py | UTF-8 | 3,629 | 3.3125 | 3 | [] | no_license | from parse import load_dataframes
import pandas as pd
import shutil
def sort_stores_by_score(dataframes, n=20, min_reviews=30):
    """
    Req. 1-2-1: compute each store's mean review score and return the top `n`
    stores sorted by score in descending order.
    Req. 1-2-2: stores with fewer than `min_reviews` reviews are excluded.
    """
    # Join store metadata with its reviews (stores.id == reviews.store).
    stores_reviews = pd.merge(
        dataframes["stores"], dataframes["reviews"], left_on="id", right_on="store"
    )
    # Per-store review count, broadcast onto every row as a "counts" column.
    stores_reviews["counts"] = stores_reviews.groupby(["store", "store_name"])["score"].transform('count')
    scores_group = stores_reviews.groupby(["store", "store_name"])
    # Mean of the numeric columns (including "score") per store.
    scores = scores_group.mean()
    # Drop stores with too few reviews.
    scores = scores[scores['counts'] >= min_reviews]
    # Highest average score first.
    score_sorted = scores.sort_values(by="score", ascending=False)
    return score_sorted.head(n=n).reset_index()
def get_most_reviewed_stores(dataframes, n=20):
    """
    Req. 1-2-3: return the `n` stores with the most reviews, most-reviewed first.
    """
    merged = pd.merge(
        dataframes["stores"], dataframes["reviews"], left_on="id", right_on="store"
    )
    # count() per (store, store_name) tallies reviews; "score" acts as the count.
    review_counts = merged.groupby(["store", "store_name"]).count()
    ranked = review_counts.sort_values(by=["score"], ascending=False)
    return ranked.head(n=n).reset_index()
def get_most_active_users(dataframes, n=20):
    """
    Req. 1-2-4: return the top `n` users ranked by number of reviews written.
    """
    stores_reviews = pd.merge(
        dataframes["stores"], dataframes["reviews"], left_on="id", right_on="store"
    )
    scores_group = stores_reviews.groupby(["user"])
    # count() tallies non-null cells per column; "score" serves as the review count.
    top_reviewer = scores_group.count()
    reviewer_sorted = top_reviewer.sort_values(by=["score"], ascending=False)
    return reviewer_sorted.head(n=n).reset_index()
def main():
    """Print the top stores by score, by review count, and the top reviewers."""
    data = load_dataframes()
    term_w = shutil.get_terminal_size()[0] - 1
    separater = "-" * term_w
    stores_most_scored = sort_stores_by_score(data)
    print("[์ต๊ณ  ํ์  ์์์ ]")
    print(f"{separater}\n")
    for i, store in stores_most_scored.iterrows():
        print(
            "{rank}์: {store}({score}์ )".format(
                rank=i + 1, store=store.store_name, score=store.score
            )
        )
    print(f"\n{separater}\n\n")
    # BUG FIX: this section lists the most-REVIEWED stores, but the original
    # called get_most_active_users() (per-user counts), so it printed counts
    # of user rows instead of store names.
    stores_most_reviewed = get_most_reviewed_stores(data)
    print("[์ต๋ค ๋ฆฌ๋ทฐ ์์์ ]")
    print(f"{separater}\n")
    for i, store in stores_most_reviewed.iterrows():
        print(
            "{rank}์: {store}({score}์ )".format(
                rank=i + 1, store=store.store_name, score=store.score
            )
        )
    print(f"\n{separater}\n\n")
    stores_most_reviewer = get_most_active_users(data)
    print("[์ต๋ค ๋ฆฌ๋ทฐ ์์ฑ์]")
    print(f"{separater}\n")
    # BUG FIX: the original iterated stores_most_reviewed here, leaving
    # stores_most_reviewer unused.
    for i, store in stores_most_reviewer.iterrows():
        print(
            "{rank}์: {user}({score}๊ฐ)".format(
                rank=i + 1, user=store.user, score=store.score
            )
        )
    print(f"\n{separater}\n\n")
main()
| true |
625c9b9fe150937e19ddcaa04a37e64515469604 | Python | zzzdeb/dotfiles | /scripts/tools/pdfsep.py | UTF-8 | 1,899 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
import copy
import sys
import math
import PyPDF2
def split_pages(src, dst):
    """Split every page of the PDF `src` into two half-pages and write `dst`.

    Each page is duplicated and the two copies get complementary media boxes
    (left/right halves for landscape pages, top/bottom for portrait), with
    fixed margins trimmed off. The halves are interleaved so reading order
    is preserved.
    """
    # Margin offsets in points — presumably l/u/r/d = left/up/right/down; TODO confirm.
    m = {'l':10, 'u':10, 'r':20, 'd':15}
    src_f = open(src, 'r+b')
    dst_f = open(dst, 'w+b')
    # NOTE(review): `input` shadows the builtin of the same name.
    input = PyPDF2.PdfFileReader(src_f)
    output = PyPDF2.PdfFileWriter()
    current = 1
    for i in range(input.getNumPages()):
        p = input.getPage(i)
        # Shallow-copy the page and give the copy its own media box so the two
        # halves can be cropped independently.
        q = copy.copy(p)
        q.mediaBox = copy.copy(p.mediaBox)
        x1, x2 = p.mediaBox.lowerLeft
        x3, x4 = p.mediaBox.upperRight
        #print('{} {} {} {}'.format(x1, x2, x3, x4))
        x1, x2 = math.floor(x1), math.floor(x2)
        x3, x4 = math.floor(x3), math.floor(x4)
        # Midpoints of the page, used as the split line.
        x5, x6 = math.floor(x3/2), math.floor(x4/2)
        if x3 > x4:
            # horizontal
            p.mediaBox.upperRight = (x5-m['d'], x4-m['r'])
            p.mediaBox.lowerLeft = (x1+m['u'], x2+m['l'])
            q.mediaBox.upperRight = (x3-m['d'], x4-m['u'])
            q.mediaBox.lowerLeft = (x5+m['u'], x2+m['l'])
        else:
            # vertical
            p.mediaBox.upperRight = (x3-m['d'], x4-m['r'])
            p.mediaBox.lowerLeft = (x1+m['u'], x6+m['l'])
            #rint('p {} {}'.format(p.mediaBox.upperRight,
            #                      p.mediaBox.lowerLeft))
            q.mediaBox.upperRight = (x3-m['d'], x6-m['r'])
            q.mediaBox.lowerLeft = (x1+m['u'], x2+m['l'])
            #rint('q {} {}'.format(q.mediaBox.upperRight, q.mediaBox.lowerLeft))
        # Alternate the insertion order of the halves on every other page.
        if current:
            output.insertPage(p, index=i)
            output.insertPage(q, index=i+1)
        else:
            output.insertPage(q, index=i)
            output.insertPage(p, index=i+1)
        current += 1
        current %= 2
    output.write(dst_f)
    # output1.write(dst_f)
    src_f.close()
    dst_f.close()
# CLI entry point: pdfsep.py <input.pdf> <output.pdf>
input_file = sys.argv[1]
output_file = sys.argv[2]
split_pages(input_file, output_file)
| true |
350fd9ea0312ae5d361c5c7f6522379d1bf602e0 | Python | raspibo/Thermo | /var/www/cgi-bin/mhl.py | UTF-8 | 2,636 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
## My HTML Library
#
""" ATTENZIONE: Non tutte le funzioni sono state testate/usate
alcune neanche fatte
Mon 23 Feb 2015 17:28:26 CET - Qualcosa e` stato fatto
"""
"""
Aggiornamenti: Sat 19 Mar 2016 08:31:19 AM CET
"""
## Blocchi per la costruzione della pagina web
# Html page
def MyHtml():
    """CGI Content-Type header that must precede any HTML output."""
    return "Content-type: text/html\n\n"
def MyHtmlHead():
    """Opening page boilerplate: <html>, <head> metadata and <body> start."""
    return ("""
<html>
<head>
<title>My HTML Library</title>
<meta name="GENERATOR" content="Midnight Commander (mcedit)">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="Keywords" content="mydynamicpage">
<meta name="Author" content="Davide">
</head>
<body>
""")
# The page-specific HTML goes here, between head and bottom
def MyHtmlBottom():
    """Closing page boilerplate: </body></html>."""
    return ("""
</body>
</html>
""")
# End of the page-building blocks
## Forms  # not all of these have been implemented/tested
def MyActionForm(Action,Post):
    """Open a <form> that submits to `Action` using HTTP method `Post`."""
    return("<form action=\""+Action+"\" method=\""+Post+"\">")
def MyTextForm(Name,Value,Size,Required,Readonly):
    """Single-line text <input>; `Required`/`Readonly` are raw attribute strings (may be "")."""
    return("<input type=\"text\" name=\""+Name+"\" value=\""+Value+"\" size=\""+Size+"\" "+Required+" "+Readonly+">")
def MyMailForm(Name,Value,Size,Required,Readonly):
    """Email <input>, same attribute conventions as MyTextForm."""
    return("<input type=\"email\" name=\""+Name+"\" value=\""+Value+"\" size=\""+Size+"\" "+Required+" "+Readonly+">")
def MyTextAreaForm(Name,Value,Cols,Rows,Required,Readonly):
    """Multi-line <textarea> with initial text `Value`.

    BUG FIX: <textarea> has no "value" attribute, and the original never
    closed the tag — the initial text must go between <textarea> and
    </textarea> for browsers to render it.
    """
    return("<textarea name=\""+Name+"\" cols=\""+Cols+"\" rows=\""+Rows+"\" "+Required+" "+Readonly+">"+Value+"</textarea>")
def MyNumberForm(Name,Value,Size,Maxlenght,Min,Max,Step,Required,Readonly):
    """Numeric <input> with range/step constraints; all arguments are strings."""
    # NOTE(review): "maxlenght" is misspelled (should be maxlength), so
    # browsers will ignore that attribute.
    return("<input type=\"number\" name=\""+Name+"\" value=\""+Value+"\" size=\""+Size+"\" maxlenght=\""+Maxlenght+"\" min=\""+Min+"\" max=\""+Max+"\" step=\""+Step+"\" "+Required+" "+Readonly+">")
def MyCheckboxForm(Name,Value,Checked=""):
    """Checkbox <input>; pass Checked="checked" to pre-tick it."""
    return("<input type=\"checkbox\" name=\""+Name+"\" value=\""+Value+"\" "+Checked+">")
def MyRadioButton(Name,Value,Checked):
    """Radio <input>; pass Checked="checked" to pre-select it."""
    return("<input type=\"radio\" name=\""+Name+"\" value=\""+Value+"\" "+Checked+">")
def MyDropDown(Name,Values,SelectedValue): # SelectedValue must be one of Values
    """Build a <select> element; the option equal to SelectedValue is pre-selected."""
    options = []
    for value in Values:
        marker = "selected" if value == SelectedValue else ""
        options.append("<option value=\""+value+"\" "+marker+">"+value+"</option>")
    return "<select name=\""+Name+"\">" + "".join(options) + "</select>"
return(Select)
def MyPasswordForm(Type,Name,Required):
    """Typed <input> without a value attribute (intended for passwords); `Type` is the input type."""
    return("<input type=\""+Type+"\" name=\""+Name+"\" "+Required+">")
def MyButtonForm(Type,Value):
    """Button-style <input> (e.g. type="submit"/"reset") labelled `Value`."""
    return("<input type=\""+Type+"\" value=\""+Value+"\">")
def MyEndForm():
    """Close the form opened by MyActionForm."""
    return("</form>")
| true |
6ca405462af7a356397c9ef911dde900fbe21705 | Python | lozdan/oj | /MOG/28.py | UTF-8 | 401 | 3.109375 | 3 | [] | no_license | # author: Daniel Lozano
# source: MatcomOnlineGrader (MOG) ( http://matcomgrader.com )
# problem name: Snail
# problem url: http://matcomgrader.com/problem/28/snail/
# date: 6/2/2017
def number_of_days(A, B, V):
    """Days for the snail to climb out: ceil((V - B) / (A - B)), integer-only."""
    quotient, remainder = divmod(V - B, A - B)
    # Any leftover distance costs one extra day.
    return quotient + 1 if remainder else quotient
# Reads three space-separated integers from one line — presumably daily climb
# A, nightly slide B, and well depth V (see problem statement) — TODO confirm.
arr = [int(i) for i in input().split()]
print(number_of_days(arr[0], arr[1], arr[2]))
57ed04dd0df5800c724911850d80261e5fd1398d | Python | szymonsadowski3/Discord-Evaluate-Message-Positivity-Bot | /bot_extended.py | UTF-8 | 4,331 | 2.78125 | 3 | [] | no_license | import discord
from discord.ext import commands
import random
from textblob import TextBlob
import os
# Bot identity and command prefix for discord.ext.commands.
description = 'Bot'
bot_prefix = '?'
bot = commands.Bot(description=description, command_prefix=bot_prefix)
# Lines collected at runtime via !tts_add, replayed by !tts_rand.
tts_lines = []
class Navy(object):
    """Cycles endlessly through the lines of the local file navy.txt."""

    def read_lines(self, fname):
        # Load every line (newline characters preserved) from the file.
        with open(fname, 'r', encoding='utf8') as handle:
            return handle.readlines()

    def __init__(self):
        self.navy = self.read_lines('navy.txt')
        self.iter_navy = iter(self.navy)

    def get_next(self):
        """Return the next line, wrapping back to the first at end of file."""
        try:
            return next(self.iter_navy)
        except StopIteration:
            # Exhausted: restart from the beginning.
            self.iter_navy = iter(self.navy)
            return next(self.iter_navy)
def list_files(mypath):
    """Return the names of the files directly inside `mypath` (non-recursive)."""
    # os.walk yields (dirpath, dirnames, filenames) per directory; only the
    # first (top-level) entry is wanted. A missing path yields nothing.
    for _dirpath, _dirnames, filenames in os.walk(mypath):
        return list(filenames)
    return []
# Images served by !nice_person and the cycling navy.txt reader for !next_navy.
IMGS_PATH = './img/'
imgs = list_files(IMGS_PATH)
navy = Navy()
def evaluate_msg(msg):
    """Return a human-readable sentiment polarity line for `msg` via TextBlob."""
    polarity = TextBlob(msg).sentiment.polarity
    return 'Message Positivity [SCALE -1 TO 1]: ' + str(polarity)
def get_everything_after_first_space(s):
    """Return everything after the first space in `s` (IndexError if none)."""
    parts = s.split(' ', 1)
    return parts[1]
@bot.event
async def on_ready():
    # Startup banner: log the bot identity and discord.py version once connected.
    print('Logged in')
    print('Name : %s' % bot.user.name)
    print('ID : %s' % bot.user.id)
    print(discord.__version__)
@bot.command()
async def roll(dice : str):
    """Rolls a dice in NdN format."""
    try:
        # "3d6" -> 3 rolls of a 6-sided die.
        rolls, limit = map(int, dice.split('d'))
    except Exception:
        await bot.say('Format has to be in NdN!')
        return
    result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
    await bot.say(result)
@bot.event
async def on_message(message):
    """Dispatch the supported '!' chat commands for each incoming message."""
    # we do not want the bot to reply to itself
    if message.author == bot.user:
        return
    # !eval <message_id>: rate the sentiment of an earlier message.
    if message.content.startswith('!eval'):
        arg = message.content.split()[1]
        msg = await bot.get_message(message.channel, int(arg))
        polarity = evaluate_msg(msg.content)
        # await bot.send_message(message.channel, str(message.channel))
        await bot.send_message(message.channel, polarity)
    # !roll NdN: dice roll, capped at 200 rolls.
    if message.content.startswith('!roll'):
        arg = message.content.split()[1]
        try:
            rolls, limit = map(int, arg.split('d'))
            if rolls>=200:
                rolls=200
        except Exception:
            await bot.send_message(message.channel, 'Format has to be in NdN!')
            return
        result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
        await bot.send_message(message.channel, result)
    # !read <text>: speak the text via Discord text-to-speech.
    if message.content.startswith('!read'):
        arg = get_everything_after_first_space(message.content)
        await bot.send_message(message.channel, arg, tts=True)
    # !tts_add <text> / !tts_rand: collect lines and replay a random one.
    if message.content.startswith('!tts_add'):
        arg = get_everything_after_first_space(message.content)
        tts_lines.append(arg)
        await bot.send_message(message.channel, 'tts_add successful')
    if message.content.startswith('!tts_rand'):
        await bot.send_message(message.channel, random.choice(tts_lines), tts=True)
    # !next_navy: read the next line of navy.txt (cycles forever).
    if message.content.startswith('!next_navy'):
        await bot.send_message(message.channel, navy.get_next(), tts=True)
    # !nice_person: post a random image from IMGS_PATH.
    if message.content.startswith('!nice_person'):
        with open(IMGS_PATH + random.choice(imgs), 'rb') as f:
            await bot.send_file(message.channel, f)
    # !guess: 5-second number-guessing mini-game.
    if message.content.startswith('!guess'):
        await bot.send_message(message.channel, 'Guess a number between 1 to 10')
        def guess_check(m):
            return m.content.isdigit()
        guess = await bot.wait_for_message(timeout=5.0, author=message.author, check=guess_check)
        answer = random.randint(1, 10)
        if guess is None:
            fmt = 'Sorry, you took too long. It was {}.'
            await bot.send_message(message.channel, fmt.format(answer))
            return
        if int(guess.content) == answer:
            await bot.send_message(message.channel, 'You are right!')
        else:
            await bot.send_message(message.channel, 'Sorry. It is actually {}.'.format(answer))
bot.run('Mjk3ODMxMzM1MjExMzAyOTEy.C8GixA.mC1sCrokOgiq-ell-N8r45pg6Bg') | true |
6effbac6a61df63927f0140debf4fcedb5470575 | Python | Official12345/year2_ALDS | /Week1/1.1.py | UTF-8 | 440 | 3.625 | 4 | [] | no_license | def mymax(a):
assert len(a) > 0
b = a[0]
for i in a:
assert type(i) == int or type(i) == float
if i > b:
b = i
return(b)
# Ad-hoc smoke tests: each case prints the result or an error label.
# NOTE(review): bare except swallows every failure, including typos — consider
# catching AssertionError specifically.
try:
    mylist = [1, 2, 12, 3, 5, 7]
    print(mymax(mylist))
except:
    print("error test 1")
try:
    # Empty list must trip the len() assertion.
    mylist2 = []
    print(mymax(mylist2))
except:
    print("error test 2")
try:
    # Non-numeric elements must trip the type assertion.
    mylist3 = ['a', 6, "3"]
    print(mymax(mylist3))
except:
    print("error test 3")
e50431fe5e9fdce50b74d365d4ad3a1056885848 | Python | missvicki/rnn | /gen_gbu.py | UTF-8 | 718 | 3.484375 | 3 | [] | no_license | """Generate sentiment analysis data."""
import numpy as np
VOCAB = ["good", "bad", "uh"]
VOCAB2 = ["one", "two", "three", "four", "five"]
def gen_gbu(nobs=1000):
"""Generate good/bad/uh data."""
data = []
sentiments = []
for _ in range(nobs):
num_positions = np.random.randint(5, 20)
words = np.random.choice(
VOCAB,
num_positions,
p=np.random.dirichlet([0.2, 0.2, 0.2])
)
sentiments.append(np.sum(words == "good") - np.sum(words == "bad"))
data.append(list(words))
return data, sentiments
def main():
    """Test GBU data."""
    # Generate a small sample and print the sequences and their sentiments.
    X, Y = gen_gbu(10)
    print(X)
    print(Y)
if __name__ == "__main__":
    main()
| true |
9f0de95f4d2af12c186c7ad19c0b3e2f91f14d62 | Python | codud0954/megait_python_20201116 | /13_module/quiz01/quiz01.py | UTF-8 | 324 | 3.4375 | 3 | [] | no_license | import random
lotto = list()
while (len(lotto) < 6):  # keep drawing until six numbers are collected
    randNum = random.randrange(1, 46) # 1 ~ 45
    # On a duplicate draw, skip the append and re-test the loop condition.
    if randNum in lotto:
        continue
    lotto.append(randNum)
print(lotto)
| true |
fc23ed0c09c7628f091730ca74b7b253fb7c5e2f | Python | Dviejopomata/tfg | /celery/test.py | UTF-8 | 401 | 2.734375 | 3 | [] | no_license | from tasks import add
from celery import group
# Launch 100 add tasks one at a time and block on each result.
for i in range(1, 100):
    result=add.apply_async((i, i), )
    print(i, result.get())
# Launch 4 tasks inside a group, routed to the high-priority queue.
numbers = [(2, 2), (4, 4), (8, 8), (16, 16)]
res = group(add.s(i, j) for i, j in numbers).apply_async(queue='priority.high', serializer="json")
# Here we collect the results of the grouped tasks.
print(res.get())
4895f6b9c346c4e9ef5c7de62a44119591de94e9 | Python | ciceropzr/DesafiosPython | /km.py | UTF-8 | 314 | 3.390625 | 3 | [] | no_license | '''
O valor de entrada contรฉm dois valores, em valor inteiro X, representatdo a ditรขncia percorrida (em Km),
e um valor real Y representando o tatal de combustivel gasto, com um digito apรณs a casa decimal.
'''
X = int(input())
Y = int(input())
consumoMedio = X / Y
print(str('%.3f' % consumoMedio) + ' Km/l') | true |
d462a5dafd84cacb2a92a9362a12859e0fdcdc1f | Python | aleksey-masl/hello_world | /even_numbers.py | UTF-8 | 72 | 2.828125 | 3 | [] | no_license | print(list(range(1, 1000, 2)))
for i in range(1, 1000, 2):
print(i) | true |