blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bb3025309cc413d00e6738855d47d2fda5ad3985 | Python | sawich/havok-reflection | /havok_classes/hkaSkeletonPartition.py | UTF-8 | 720 | 2.609375 | 3 | [
"MIT"
] | permissive | import struct
class hkaSkeletonPartition(object):
    """Havok skeleton partition record deserialized from a big-endian binary stream."""

    name: str
    startBoneIndex: int
    numBones: int

    def __init__(self, infile):
        """Read one partition from *infile* (binary file-like object).

        Fixes vs. the original generated code: ``struct.unpack('>s',
        infile.read(0))`` always raised struct.error ('s' requires 1 byte,
        0 supplied), and the 1-tuples returned by ``unpack`` were stored
        instead of the unpacked values.
        """
        # TYPE_STRINGPTR:TYPE_VOID -- serialized as a pointer, no inline bytes,
        # so the in-stream payload is empty.  TODO confirm against the format spec.
        self.name = struct.unpack('>0s', infile.read(0))[0].decode('ascii')
        # TYPE_INT16:TYPE_VOID
        self.startBoneIndex = struct.unpack('>h', infile.read(2))[0]
        # TYPE_INT16:TYPE_VOID
        self.numBones = struct.unpack('>h', infile.read(2))[0]

    def __repr__(self):
        return "<{class_name} name=\"{name}\", startBoneIndex={startBoneIndex}, numBones={numBones}>".format(**{
            "class_name": self.__class__.__name__,
            "name": self.name,
            "startBoneIndex": self.startBoneIndex,
            "numBones": self.numBones,
        })
| true |
f10a2a71955a2bd004bbc0dae18754640cf3399a | Python | KotaCanchela/PythonCrashCourse | /9 Classes/restaurant.py | UTF-8 | 1,228 | 4.75 | 5 | [] | no_license | # Make a class called Restaurant.
# The __init__() method for Restaurant should store two attributes: a restaurant_name and a cuisine_type.
# Make a method called describe_restaurant() that prints these two pieces of information,
# and a method called open_restaurant() that prints a message indicating that the restaurant is open.
# Make an instance called restaurant from your class. Print the two attributes individually, and then call both methods.
class Restaurant:
    """Information is stored about a restaurant."""

    def __init__(self, restaurant_name, cuisine_type):
        """Store the restaurant's name and its cuisine type."""
        self.restaurant_name = restaurant_name
        self.cuisine_type = cuisine_type

    def describe_restaurant(self):
        """Print the two stored attributes, title-cased."""
        print(f"The restaurant's name is {self.restaurant_name.title()}")
        print(f"The type of cuisine at the restaurant is: {self.cuisine_type.title()}")

    def open_restaurant(self):
        """Print a message indicating that the restaurant is open."""
        print(f"The restaurant, {self.restaurant_name.title()} is now open.")
# Demo: build one Restaurant instance and exercise both of its methods.
restaurant = Restaurant("mirazur", "french gourmet")
print(
f"I am going to {restaurant.restaurant_name.title()} to eat some {restaurant.cuisine_type.title()}"
)
print("")
# Describe the restaurant, then announce that it is open.
restaurant.describe_restaurant()
print("")
restaurant.open_restaurant()
| true |
8983bb953c7970c80bd40d93c82b31eecf71b22d | Python | WillemJan/Misc | /harvest.py | UTF-8 | 2,000 | 2.625 | 3 | [] | no_license | #!/usr/bin/python3
import os, subprocess, codecs, hashlib
from urllib import request, error
from urllib.parse import quote_plus
class Harvest():
    """Shell-quotes a collection of file paths and tallies files vs. non-files."""

    # Shared across instances (class attributes, as in the original).
    files = {}
    dir_count = 0
    file_count = 0

    def __init__(self, files=()):
        """Classify each path in *files* (an iterable of path strings).

        NOTE(review): the original iterated ``files.read()`` over an
        undefined global ``files`` and always raised NameError.  The
        iterable is now an explicit parameter (default empty) so
        construction is well-defined; confirm the intended data source.
        """
        for file in files:
            # Wrap the path in whichever quote character it does not contain,
            # so it survives interpolation into the shell command below.
            if (file.find('"') > -1):
                file = "'" + file + "'"
                self.files[file] = "ok"
            elif (file.find("'") > -1):
                file = '"' + file + '"'
                self.files[file] = "ok"
            else:
                file = "'" + file + "'"
                self.files[file] = "ok"
            if (file.split('/')[-1].find('.') > -1):
                # Basename contains a dot: assume a regular file.
                self.file_count += 1
            else:
                # No extension: ask file(1) what it is.
                # NOTE(review): building a shell string from a path is
                # injection-prone even with the quoting above; prefer
                # subprocess.run([...], shell=False).
                p1 = subprocess.Popen("/usr/bin/file " + file, shell=True, stdout=subprocess.PIPE)
                file_info = (str(p1.communicate()[0].strip()))
                if not ((file_info.rsplit(':')[-1][1:-1]) == "directory"):
                    print(file, file_info)
                # Original behavior kept: counted whether or not it is a directory.
                self.dir_count += 1
if __name__ == "__main__":
    # Smoke-test entry point: build a Harvest and report the tallies.
    harvest = Harvest()
    print(harvest.dir_count)
    print(harvest.file_count)
# i+=1
# for key in harvest.known_file_extentions.keys():
# if item[1:-1].endswith(key):
# p1=subprocess. Popen("/usr/bin/file " +item ,shell=True, stdout=subprocess.PIPE)
# curr_file=str(p1.communicate()[0].strip())
# for ftype in harvest.ftype.keys():
# if ( curr_file.lower().find(ftype) > -1 ):
# print (item, harvest.ftype[ftype](item, curr_file))
# j+=1
# break
# else:
# if (item.split('/')[-1].find('.') < 0):
# j+=1
# break
# else:
# print(item)
# if i>10:
# print(i,j)
## break
# for artist in sorted(harvest.artist.keys()):
# print(artist)
# print(harvest.get_wiki_reference(str(artist)))
| true |
77c88ffa6a1ff37a89114ae43b0307bea7d6bae5 | Python | alfonsoamt/Process_Egineering_Simulator | /units.py | UTF-8 | 1,196 | 3.3125 | 3 | [] | no_license | # Here the file contains the class or functions necessary to changue units in variables
import pandas as pd
class UnitConverter:
    """Converts pressure, temperature and molar-volume values between units.

    Pressure and molar-volume factors are multiplicative and loaded from CSV
    tables under ./units; temperature conversions are affine, so they are
    stored as lambdas indexed by (source unit row, target unit column).
    Values are passed around as 2-lists: [magnitude, unit_string].
    """

    def __init__(self):
        self.Pu = pd.read_csv('./units/Pressure.csv', index_col=0)
        # Rows: source unit; columns: target unit.
        self.Tu = pd.DataFrame({"K": [lambda T: T, lambda T: T + 273.15, lambda T: (T + 459.67) * 5 / 9, lambda T: T * 5 / 9],
                                "°C": [lambda T: T - 273.15, lambda T: T, lambda T: (T - 32) * 5 / 9, lambda T: (T - 491.67) * 5 / 9],
                                "°F": [lambda T: (T * 9 / 5) - 459.67, lambda T: (T * 9 / 5) + 32, lambda T: T, lambda T: T - 459.67],
                                "R": [lambda T: T * 9 / 5, lambda T: (T + 273.15) * 9 / 5, lambda T: T + 459.67, lambda T: T]},
                               index=["K", "°C", "°F", "R"])
        self.Vmu = pd.read_csv('./units/MolarVolume.csv', index_col=0)

    def Pressure_Converter(self, P, Punits):
        """Convert P=[value, unit] to *Punits*; returns [value, unit]."""
        factor = self.Pu.at[P[1], Punits]
        return [P[0] * factor, Punits]

    def Temperature_Converter(self, T, Tunits):
        """Convert T=[value, unit] to *Tunits*, rounded to 2 decimals."""
        return [round(self.Tu.at[T[1], Tunits](T[0]), 2), Tunits]

    def MolarVolume_Converter(self, Vm, Vmunits):
        """Convert Vm=[value, unit] to *Vmunits*; returns [value, unit]."""
        factor = self.Vmu.at[Vm[1], Vmunits]
        return [Vm[0] * factor, Vmunits]
2afa58c01e1070ad2cf74a22f2410f80d766068d | Python | firekind/pytorch-dl-minimal | /{{ cookiecutter.dir_name }}/{{ cookiecutter.project_name }}/utils/__init__.py | UTF-8 | 3,405 | 2.9375 | 3 | [] | no_license | # pylint: disable=protected-access
import os
import glob
from typing import List, Optional
import torch
class CheckpointManager:
    """Saves torch checkpoints into a directory, keeping at most ``retain``
    files per module, and restores the most recent one.

    NOTE(review): assumes each checkpointed module carries a ``.name``
    attribute set by the project (not a stock torch.nn.Module attribute).
    """

    def __init__(self, path: str, retain: int) -> None:
        """
        Constructor.

        Args:
            path (str): The path to the checkpoint directory.
            retain (int): Maximum number of checkpoint files kept per module.

        Returns:
            None
        """
        self.path = path
        self.retain = retain

    def make_checkpoint(self, module: torch.nn.Module, optimizer: torch.optim.Optimizer,
                        epoch: int, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None) -> None:
        """
        Makes a checkpoint and saves the necessary details.

        Args:
            module (torch.nn.Module): The module to be checkpointed.
            optimizer (torch.optim.Optimizer): The optimizer.
            epoch (int): The current epoch.
            scheduler (torch.optim.lr_scheduler._LRScheduler): The scheduler.

        Returns:
            None
        """
        files = self.get_files_in_dir(module.name)
        if len(files) >= self.retain:
            # Files are newest-first; delete the oldest so that after saving
            # one more we hold exactly ``retain`` files.
            for f in files[self.retain - 1:]:
                os.remove(f)  # remove file

        checkpoint_name = module.name + "_epoch_" + str(epoch) + ".pth.tar"
        save_dict = {
            'state_dict': module.state_dict(),
            'epoch': epoch,
            'optimizer': optimizer.state_dict()
        }
        if scheduler is not None:
            save_dict["scheduler"] = scheduler.state_dict()
        torch.save(save_dict, self.path + "/" + checkpoint_name)

    def restore(self, module: torch.nn.Module, optimizer: torch.optim.Optimizer,
                scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None) -> int:
        """
        Restores the module from the latest checkpoint.

        Args:
            module (torch.nn.Module): The module to restore.
            optimizer (torch.optim.Optimizer): The optimizer.
            scheduler (torch.optim.lr_scheduler._LRScheduler): The scheduler.

        Returns:
            int: The epoch to resume from.
        """
        files = self.get_files_in_dir(module.name)
        if not files:
            # NOTE(review): returns False although annotated -> int; callers
            # treating the result as an epoch see a falsy value.  Kept for
            # backward compatibility.
            return False
        # Files are sorted newest-first, so index 0 is the latest checkpoint.
        checkpoint_file = files[0]
        # loading checkpoint data
        data = torch.load(checkpoint_file)
        # loading data to objects
        module.load_state_dict(data['state_dict'])
        optimizer.load_state_dict(data['optimizer'])
        if scheduler is not None:
            scheduler.load_state_dict(data['scheduler'])
        return data['epoch']

    def get_files_in_dir(self, module_name: str) -> List[str]:
        """
        Gets the files of the module in the checkpoint directory and sorts
        them according to date modified, in descending order (newest first).

        Args:
            module_name: The name of the module whose files are required.

        Returns:
            List[str]: The list of file paths.
        """
        # getting contents of checkpoint dir
        files = glob.glob(self.path + "/" + module_name + "*")
        # storing only the files
        files = [f for f in files if os.path.isfile(f)]
        # sorting the files according to modification time
        files.sort(key=os.path.getmtime, reverse=True)
        return files
| true |
c8f061d0a557f3b03147282a76e80e117ed6acaf | Python | aconsilvio/SwarmAlgorithms | /Ants/ants.py | UTF-8 | 1,885 | 3.328125 | 3 | [] | no_license | from models import *
import random
Graph = [[0, 1, 0, 0, 1], [1, 0, 1, 1, 0], [0, 1, 0, 0, 1], [0, 1, 0, 0, 1], [1, 0, 1, 1, 0]]
def initialize_nodes(value_list):
    """Wrap each value in *value_list* in a Node carrying its index."""
    return [Node(val, i) for i, val in enumerate(value_list)]
def initialize_ants(start_node, num_ants):
    """Create *num_ants* Ants, all starting at *start_node*."""
    return [Ant(start_node) for _ in range(num_ants)]
def move_step(ant, nodes):
    """Advance *ant* one step: forward exploration, or backtracking home."""
    if not ant.is_forward:
        move_backward(ant, nodes)
    else:
        move_forward(ant, nodes)
def move_backward(ant, nodes):
    """Retrace one edge toward home, reinforcing pheromone on that edge."""
    from_node = ant.current_path.pop()
    to_node = ant.current_path[-1]
    # Pheromone weights live in the module-level Graph adjacency matrix.
    Graph[from_node.index][to_node.index] += 1
def move_forward(ant, nodes):
    """Move *ant* to a random neighbour, weighted by edge pheromone count."""
    edge_list = []
    current_node = ant.current_path[-1]
    edges = Graph[current_node.index]
    for i, weight in enumerate(edges):
        if weight:
            # Repeat each neighbour index `weight` times so random.choice
            # picks proportionally to pheromone strength.
            edge_list += [i] * weight
    new_position_index = random.choice(edge_list)
    new_position = nodes[new_position_index]
    ant.current_path.append(new_position)
def generate_random_ants(home_node):
    """Spawn a random number (1-10) of ants at *home_node*."""
    num_ants = random.randint(1, 10)
    return initialize_ants(home_node, num_ants)
def check_food(ant):
    """If the ant's current node holds food, switch it to homeward mode."""
    node_value = ant.current_path[-1].value
    if node_value:
        ant.is_forward = False
        # Fixed: original used a Python 2 print statement.
        print("got food")
def ant_algorithm(value_list, num_cycles):
    """Run the ant simulation for *num_cycles* steps over *value_list* nodes.

    Fixes vs. original: Python 2 print statements converted to calls, and
    the ant list is iterated over a copy because ants are removed mid-loop
    (mutating a list while iterating it skips elements).
    """
    nodes = initialize_nodes(value_list)
    home_node_position = random.randint(0, len(value_list) - 1)
    home_node = nodes[home_node_position]
    print("home_node", home_node.index)
    ants = generate_random_ants(home_node)
    for i in range(num_cycles):
        for ant in list(ants):
            move_step(ant, nodes)
            if len(ant.current_path) == 1:
                # Ant returned home: retire it.
                ants.remove(ant)
            else:
                check_food(ant)
        new_ants = generate_random_ants(home_node)
        ants += new_ants
    print(Graph)
if __name__ == '__main__':
    # Small demo: two food nodes (value 1) among five, five cycles.
    value_list = [0, 0, 1, 0, 1]
    num_cycles = 5
    print(Graph)
    ant_algorithm(value_list, num_cycles)
| true |
101e3401c45cd255f1861583dbfa9df361aec8cc | Python | shawsa/complex_fractals | /newton_fractal.py | UTF-8 | 4,119 | 3.375 | 3 | [] | no_license | from numpy import sin,cos, log, exp, sqrt
def f(x):
    """Target function whose roots the fractal iteration searches for."""
    # return x**4 - 20  (earlier variant kept for reference)
    return x**8 - x**3 + 2 + sqrt(x)
def df(x):
    """Analytic derivative of f, used by Newton's method."""
    # return 4 * x**3  (derivative of the earlier variant)
    return 8 * x**7 - 3 * x**2 - .5 / sqrt(x)
def newtons(z, f, df, max_n=100, epsilon=1e-10):
    """Newton's method from start point *z*.

    Iterates z <- z - f(z)/df(z) until two consecutive iterates differ by
    less than *epsilon* or *max_n* iterations have run.

    Returns:
        (n, root): iteration index at convergence and the approximate root.
    """
    z0 = z
    z1 = z0 - f(z0) / df(z0)
    for n in range(max_n):
        if abs(z0 - z1) < epsilon:
            return (n, z0)
        # z1 trails one step behind z0 so the loop can test |z0 - z1|.
        z0, z1 = z0 - f(z0) / df(z0), z0
    return (n, z0)
def secant(z, delta, f, max_n=100, epsilon=1e-10):
    """Secant method from start points *z* and *z + delta* (no derivative needed).

    Returns:
        (n, root): iteration index at the stopping test and the approximate root.
    """
    z0 = z
    z1 = z0 + delta
    f0, f1 = f(z0), f(z1)
    for n in range(max_n):
        if abs(z0 - z1) < epsilon:
            return (n, z0)
        # Secant update through the points (z0, f0) and (z1, f1).
        z0, z1 = z0 - (z1 - z0) / (f1 - f0) * f0, z0
        f0, f1 = f(z0), f0
    return (n, z1)
from PIL import Image
import sys
from math import pi
#**********************************************************
#My variables
aspect_ratio = 0.5625
res_quality = .25
x_center = 0
y_center = 0
x_window_width = 3
y_window_width = x_window_width * aspect_ratio
#referenced variables
x_min = x_center - x_window_width / 2
x_max = x_center + x_window_width / 2
# NOTE: y_min > y_max is deliberate -- image rows grow downward, so the
# vertical axis is flipped when pixels are mapped to complex coordinates.
y_min = y_center + y_window_width / 2
y_max = y_center - y_window_width / 2
x_pixels = int(1920*res_quality)
y_pixels = int(1080*res_quality) #int(x_pixels * (y_max - y_min)/(x_max - x_min))
converge_cutoff = 2**1
iteration_count = 50
iteration_color_start = 0
#colors = [(10,10,10),(255,0,0),(255,200,200)] #reds
colors = [(10,0,0), (255,0,0), (255,255,0), (0,255,0), (0,255,255), (0,0, 255), (255,0,255), (255,200,255)] #rainbow
mandelbrot_color = (0,0,0) #black
def domain(z):
    """Map applied to each sample point before iteration (identity by default)."""
    return z  # alternatives tried previously: 1/(z-1), 1/z + -3/4  # .2542585
#**********************************************************
# this is the range that the colors need to fill
iter_range = iteration_count - iteration_color_start
# this is the number of colors per gradient
gradient_span = iter_range // (len(colors) - 1)

# The mapping from iteration count to a color.
color_map = []
for gradient in range(0, len(colors) - 1):
    for gradient_unit in range(0, gradient_span):
        # each component is a weighted average of the two bracketing colors.
        r = (colors[gradient][0] * (gradient_span - gradient_unit) + colors[gradient + 1][0] * gradient_unit) // gradient_span
        g = (colors[gradient][1] * (gradient_span - gradient_unit) + colors[gradient + 1][1] * gradient_unit) // gradient_span
        b = (colors[gradient][2] * (gradient_span - gradient_unit) + colors[gradient + 1][2] * gradient_unit) // gradient_span
        color_map.append((r, g, b))
color_map.append(colors[-1])  # Last would be first in next gradient iteration, so it must be added now.
color_map.append(mandelbrot_color)  # These are determined to be in the set.
# due to rounding there may be fewer colors than iterations. The simplest
# solution is to redefine the iteration count.
iteration_count = len(color_map) - 1
def x_to_re(x):
    """Map a pixel column index to the real coordinate of the window."""
    global x_pixels, x_min, x_max
    return x_min + x * (x_max - x_min) / (x_pixels - 1)
def y_to_im(y):
    """Map a pixel row index to the imaginary coordinate of the window."""
    global y_pixels, y_min, y_max
    return y_min + y * (y_max - y_min) / (y_pixels - 1)
def converge(z1):
    """Return the iteration count for sample point *z1*.

    Runs the secant root search on f starting near domain(z1) and returns
    how many iterations it took (capped at iteration_count); that count
    indexes into color_map.
    """
    try:
        z = domain(z1)
    except ZeroDivisionError:
        # Some domain() transforms (e.g. 1/z) divide by zero at the origin;
        # treat such points as converging instantly.  (Original used a bare
        # except; narrowed to match the message printed.)
        print('zero division at ' + str(z1))
        return 0
    return secant(z, z + .5 / x_pixels, f, iteration_count)[0]
    # return newtons(z, f, df, iteration_count)[0]
    # (Unreachable leftover Mandelbrot loop from the original removed.)
# Render: evaluate converge() for every pixel and color it from color_map.
img = Image.new('RGB', (x_pixels, y_pixels))  # new black image
pixels = img.load()  # create the pixel map

print('\nCalculating...')
for x in range(img.size[0]):
    # Progress indicator, overwritten in place with carriage return.
    print("{:.0f}".format(100 * x // x_pixels) + '%', end='\r')
    for y in range(img.size[1]):  # for every pixel:
        z = complex(x_to_re(x), y_to_im(y))
        n = converge(z)
        pixels[x, y] = color_map[n]  # set the colour accordingly
print('100%')
img.show()

# Optionally save: file name from argv, else prompt the user.
if len(sys.argv) > 1:
    file = sys.argv[1]
else:
    file = input('To save the image, enter a file name. Otherwise leave blank and press enter.\n')
if file != '':
    img.save('pics//' + file + '.jpg', 'JPEG')
    print('Saved as ' + 'pics//' + file + '.jpg\n')
else:
    print('Did not save the output.\n')
| true |
2ee4d63f7234ccac2dd65e971885e545597b097d | Python | Ayoob7/google-research | /correlation_clustering/utils.py | UTF-8 | 3,954 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for the fair correlation clustering algorithm.
"""
import collections
import math
import numpy as np
import sklearn.metrics
def BooleanVectorsFromGraph(graph):
    """Create a boolean encoding for the nodes in the graph.

    Starting from the graph, it creates a set of boolean vectors where u,v,
    has an entry 1 for each positive edge (0 for negative edge). Selfloops
    are assumed positive.

    Args:
      graph: graph in nx.Graph format.

    Returns:
      the nxn boolean matrix with the encoding.
    """
    n = graph.number_of_nodes()
    # Identity start: every node is positively connected to itself.
    vectors = np.identity(n)
    for u, v, d in graph.edges(data=True):
        if d['weight'] > 0:
            vectors[u][v] = 1
            vectors[v][u] = 1
    return vectors
def PairwiseFairletCosts(graph):
    """Create a matrix with the fairlet cost.

    Args:
      graph: graph in nx.Graph format.

    Returns:
      the nxn matrix with the fairlet cost for each pair of nodes.
    """
    # Node ids must be exactly 0..n-1 for the direct matrix indexing below.
    assert max(list(graph.nodes())) == graph.number_of_nodes() - 1
    assert min(list(graph.nodes())) == 0
    bool_vectors = BooleanVectorsFromGraph(graph)
    distance_matrix = sklearn.metrics.pairwise_distances(
        bool_vectors, metric='l1')
    # This counts twice the negative edge inside each u,v fairlet, so we deduct
    # one for each such pair.
    for u, v, d in graph.edges(data=True):
        if d['weight'] < 0:
            distance_matrix[u][v] -= 1
            distance_matrix[v][u] -= 1
    return distance_matrix
def ClusterIdMap(solution):
    """Create a map from node to cluster id.

    Args:
      solution: list of clusters.

    Returns:
      the map from node id to cluster id.
    """
    clust_assignment = {}
    for i, clust in enumerate(solution):
        for elem in clust:
            clust_assignment[elem] = i
    return clust_assignment
def FractionalColorImbalance(graph, solution, alpha):
    """Evaluates the color imbalance of solution.

    Computes the fraction of nodes that are above the threshold for color
    representation.

    Args:
      graph: in nx.Graph format.
      solution: list of clusters.
      alpha: representation constraint.

    Returns:
      the fraction of nodes that are above the threshold for color.
    """
    total_violation = 0
    nodes = 0
    for cluster in solution:
        # Count how many nodes of each color the cluster contains.
        color_count = collections.defaultdict(int)
        for elem in cluster:
            color_count[graph.nodes[elem]['color']] += 1
        for count in color_count.values():
            # Nodes beyond floor(alpha * |cluster|) of one color are violations.
            imbalance = max(0, count - math.floor(float(len(cluster)) * alpha))
            total_violation += imbalance
        nodes += len(cluster)
    return 1.0 * total_violation / nodes
def CorrelationClusteringError(graph, solution):
    """Evaluates the correlation clustering error of solution.

    Computes the fraction of edges that are misclassified by the algorithm.

    Args:
      graph: in nx.Graph format.
      solution: list of clusters.

    Returns:
      the fraction of edges that are incorrectly classified.
    """
    clust_assignment = ClusterIdMap(solution)
    errors = 0
    corrects = 0
    for u, v, d in graph.edges(data=True):
        # Misclassified: positive edge across clusters, or negative edge
        # within a cluster.  Zero-weight edges are counted in neither bucket.
        if (d['weight'] > 0 and clust_assignment[u] != clust_assignment[v]) or \
           (d['weight'] < 0 and clust_assignment[u] == clust_assignment[v]):
            errors += 1
        elif (d['weight'] > 0 and clust_assignment[u] == clust_assignment[v]) or \
             (d['weight'] < 0 and clust_assignment[u] != clust_assignment[v]):
            corrects += 1
    return float(errors) / (errors + corrects)
| true |
de11dba247d803e854f29d21a1e2e7bdba4a8aae | Python | the-only-ahmed/TaskMaster | /colors.py | UTF-8 | 1,007 | 2.75 | 3 | [] | no_license | import logger
class Scolors():
    """ANSI terminal escape codes, looked up by color name via getColor()."""

    @classmethod
    def getColor(cls, s):
        """Map a color name string to its ANSI escape code.

        An Scolors value passed in is returned unchanged.  Unknown names
        print a warning, log it, and fall back to ENDC.

        Fixes vs. original: the error branch used a Python 2 print
        statement (a syntax error under Python 3); the long if/elif chain
        is replaced by a name->code mapping.
        """
        if type(s) is Scolors:
            return s
        mapping = {
            "pink": cls.PINK,
            "blue": cls.BLUE,
            "cyan": cls.CYAN,
            "green": cls.GREEN,
            "purple": cls.PURPLE,
            "red": cls.RED,
            "yellow": cls.YELLOW,
            "magenta": cls.MAGENTA,
            "grey": cls.GREY,
            "bold": cls.BOLD,
            "underline": cls.UNDERLINE,
            "endc": cls.ENDC,
        }
        if s in mapping:
            return mapping[s]
        print(Scolors.RED + "Wrong Color" + Scolors.ENDC)
        logger.log("wrong color")
        return Scolors.ENDC

    PINK = '\033[95m'
    BLUE = '\033[94m'
    CYAN = '\033[36m'
    GREEN = '\033[92m'
    PURPLE = '\033[35m'
    RED = '\033[91m'
    YELLOW = '\033[93m'
    MAGENTA = '\033[95m'
    GREY = '\033[90m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    ENDC = '\033[0m'
| true |
6303f4b37b1831b8a19a85320e999278f0214a0b | Python | data-intellisense/machine-learning-mini-projects | /plotly-dash-multiDownloads/multiDownloads2.py | UTF-8 | 4,852 | 2.609375 | 3 | [] | no_license | #%% see requirements.txt for library versions
import base64
import io
import os
import re
from datetime import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Input, Output, State
from dash_extensions import Download
from dash_extensions.snippets import send_data_frame
# Dash application object; `server` is exposed for WSGI deployment (gunicorn etc.).
app = dash.Dash(
    __name__,
    suppress_callback_exceptions=False,
    external_stylesheets=["https://codepen.io/chriddyp/pen/bWLwgP.css"],
)
server = app.server
def parseContents(contents, filename):
    """Decode an uploaded data-URI into a pandas DataFrame.

    *contents* is a "<content_type>,<base64 payload>" string from dcc.Upload;
    *filename* decides the parser (CSV vs. Excel).  Returns a DataFrame, or
    an error message string on failure.

    Fix vs. original: a filename matching neither "csv" nor "xls" fell
    through and crashed with UnboundLocalError on `return df`; it now
    returns the error string instead.
    """
    _, content_string = contents.split(",")  # discard the content_type prefix
    decoded = base64.b64decode(content_string)
    try:
        if "csv" in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode("utf-8")))
        elif "xls" in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded), engine="openpyxl")
        else:
            return "There was an error processing this file."
    except Exception as e:
        print(e)
        return "There was an error processing this file."
    return df
#%% main app layout
app.layout = html.Div(
    [
        html.H3(
            # Fixed user-facing typo: "Mutliple" -> "Multiple".
            children="Multiple Download Example",
            style={
                "text-align": "center",
                "font-family": "Arial Black",
                "color": "#ee283c",
            },
        ),
        html.Hr(),
        # upload
        html.Div(
            className="row",
            children=[
                dcc.Upload(
                    id="upload-techSheets",
                    className="three columns",
                    children=html.Div(
                        [
                            "Upload ",
                            html.B(
                                "Multiple TechSheet",
                                style={
                                    "text-align": "center",
                                    "font-size": 14,
                                    "font-family": "Arial Black",
                                    "color": "#ee283c",
                                },
                            ),
                            " Data",
                        ],
                        style={
                            "text-transform": "uppercase",
                            "font-size": 14,
                        },
                    ),
                    style={
                        "width": "300px",
                        "height": "38px",
                        "lineHeight": "38px",
                        "borderWidth": "1px",
                        "borderStyle": "dashed",
                        "borderRadius": "5px",
                        "textAlign": "center",
                        "margin": "10px",
                        "display": "inline-block",
                        "verticalAlign": "center",
                    },
                    multiple=True,
                ),
                html.Button(
                    id="confirm-download",
                    children="download all",
                    n_clicks=None,
                    style={
                        "margin": "10px",
                        "width": "300px",
                        "text-align": "center",
                        "font-size": 14,
                        "font-family": "Arial Black",
                    },
                ),
            ],
        ),
        # upload and download up to 5 files, you may set the upper limit to any number
        # you want, ideally more than what you need. It should not affect the performance
        # significantly
        html.Div([Download(id=f"download-df-{i}") for i in range(5)]),
    ],
    style={"display": "inline-block"},
)
@app.callback(
    [Output(f"download-df-{i}", "data") for i in range(5)],
    Input("confirm-download", "n_clicks"),
    [
        State("upload-techSheets", "filename"),
        State("upload-techSheets", "contents"),
    ],
)
def update_uploads(
    n_clicks,
    techSheetFilename,
    techSheetContents,
):
    """Parse each uploaded tech sheet and send each back as a CSV download.

    Always returns exactly 5 entries (one per Download component in the
    layout); unused slots are None.
    """
    # Guard clauses replace the original nested if/else; both "not clicked"
    # and "nothing uploaded" returned all-None.
    if n_clicks is None or techSheetFilename is None:
        return [None for _ in range(5)]
    list_of_df = [
        parseContents(c, n)
        for c, n in zip(techSheetContents, techSheetFilename)
    ]
    """
    Do something cool with df
    """
    outputDF = [
        send_data_frame(
            df.to_csv,
            filename=df_name,
        )
        for df, df_name in zip(list_of_df, techSheetFilename)
    ] + [None for _ in range(5 - len(techSheetFilename))]
    return outputDF
if __name__ == "__main__":
    # Local development server; production uses the `server` WSGI object.
    app.run_server()
| true |
d4006bc91ca396e5892411cde45c6e843d1feea2 | Python | KieranMcCool/MusicalEffectModelling-L4Project | /Experiments/NeuralNetwork/genrate.py | UTF-8 | 3,272 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
from sys import argv
from scipy.signal import chirp, sawtooth, square
from scipy.io import wavfile as wf
from random import randrange, uniform
low = 80
high = 1200
duration = 10
sampleRate = 44100
def sliceAudio(input, destination, index):
    """Mix *input* into *destination* in place, starting at *index*.

    Caller must ensure input fits into destination from index.
    Returns the (mutated) destination array.
    """
    destination[index:index + len(input)] += input
    return destination
def sinewave(f, duration=duration):
    """One pure sine tone at *f* Hz, *duration* seconds at the module sampleRate."""
    return (np.sin(2 * np.pi *
                   np.arange(sampleRate * duration)
                   * f / sampleRate))
def sweep(low=low, high=high, duration=duration):
    """Linear chirp from *low* to *high* Hz; useful for sweeping instrument ranges.

    Fix: np.linspace requires an integer sample count, but *duration* may be
    a float (see noteChanges), so the product is truncated with int().
    """
    space = np.linspace(0, 10, int(duration * sampleRate))
    return chirp(space, low, 10, high)
def sawtooth(f, duration=duration):
    """Periodic wave at *f* Hz; generates clicks at low f values.

    NOTE(review): despite the name this returns a *square* wave (scipy's
    square()), and it shadows scipy.signal.sawtooth imported at the top of
    the file -- confirm which waveform was intended.
    Fix: int() on the linspace sample count (duration may be a float).
    """
    space = np.linspace(0, 10, int(duration * sampleRate))
    return square(2 * np.pi * f * space)
def noteChanges():
    """Build one `duration`-second clip of randomly typed, placed and sized notes."""
    # Complexity (Number of notes):
    # 40% chance of 5 to 50, 40% chance of 1 to 10, 10% chance of 10 to 100,
    # 10% chance of 100 to 150
    complexity = randrange(0, 100)
    notes = (randrange(5, 50) if complexity < 40
             else randrange(1, 10) if complexity < 80
             else randrange(10, 100) if complexity < 90
             else randrange(100, 150))
    output = np.zeros(duration * sampleRate)
    for n in range(notes):
        # Note Duration:
        # 30% chance of 1 second, 30% chance of 1 to 3 seconds,
        # 20% chance of 0 to 1 second, 20% chance of 0 to 0.5 seconds
        noteDuration = randrange(0, 100)
        d = (1 if noteDuration < 30
             else randrange(1, 3) if noteDuration < 60
             else uniform(0, 1) if noteDuration < 80
             else uniform(0, 0.5))
        # Frequency Choices
        f1 = low
        f2 = high  # only required for sweep
        # Note Type:
        # 40% chance of sine, 40% chance of a sawtooth,
        # 20% chance of a chirp
        noteType = randrange(0, 100)
        note = (sinewave(f1, duration=d) if noteType < 40
                else sawtooth(f1, duration=d) if noteType < 80
                else sweep(low=f1, high=f2, duration=d))
        # Insert the note at a random offset within the output buffer.
        sliceAudio(note, output, randrange(0, len(output) - len(note)))
    return output
def output(filename, data):
    """Write *data* to *filename* as a WAV file at the module sample rate."""
    wf.write(filename, sampleRate, data)
def generate(files, sections):
    """Write *files* WAV files into dataset/, each made of *sections* random clips."""
    for f in range(files):
        sectionDuration = duration * sampleRate
        data = np.zeros(sectionDuration * sections)
        for s in range(sections):
            # Each section occupies one contiguous slice of the output buffer.
            startIndex = sectionDuration * s
            endIndex = startIndex + sectionDuration
            data[startIndex:endIndex] = noteChanges()
        output("dataset/%s.wav" % f, data)
def main():
    """Parse -n (file count) and -s (sections per file) from argv, then generate."""
    files = 1
    sections = 1
    try:
        if '-n' in argv:
            files = int(argv[argv.index('-n') + 1])
        if '-s' in argv:
            sections = int(argv[argv.index('-s') + 1])
    except (ValueError, IndexError):
        # Bad or missing argument value; original used a bare except.
        print("Usage: ./main.py [-n number] [-s number]\n",
              "\tIf no arguments are given, it generates one random wav file.")
    generate(files, sections)
if __name__ == '__main__':
    main()
| true |
2250938a263e52643604cba3f70f1e2796635598 | Python | RaulAstudillo06/BOPL | /bopl/aux_software/GPy/kern/src/se.py | UTF-8 | 7,736 | 2.640625 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .kern import Kern
import scipy
from ...core.parameterization import Param
from ...core.parameterization.parameterized import Parameterized
from paramz.caching import Cache_this
from paramz.transformations import Logexp
from .rbf import RBF
class SE(Kern):
    """Squared-exponential (RBF) kernel with optional ARD lengthscales.

    k(x, x') = variance * exp(-0.5 * r^2), with r the per-dimension scaled
    Euclidean distance between x and x'.
    """

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='se'):
        super(SE, self).__init__(input_dim, active_dims, name)
        self.ARD = ARD
        if not ARD:
            if lengthscale is None:
                lengthscale = np.ones(1)
            else:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == 1, "Only 1 lengthscale needed for non-ARD kernel"
        else:
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size in [1, input_dim], "Bad number of lengthscales"
                if lengthscale.size != input_dim:
                    # Broadcast a single lengthscale to every input dimension.
                    lengthscale = np.ones(input_dim) * lengthscale
            else:
                lengthscale = np.ones(self.input_dim)
        self.lengthscale = Param('lengthscale', lengthscale, Logexp())
        self.variance = Param('variance', variance, Logexp())
        assert self.variance.size == 1
        self.link_parameters(self.variance, self.lengthscale)

    def _to_dict(self):
        """Serialize the kernel hyperparameters to a plain dict."""
        input_dict = super(SE, self)._to_dict()
        input_dict["variance"] = self.variance.values.tolist()
        input_dict["lengthscale"] = self.lengthscale.values.tolist()
        input_dict["ARD"] = self.ARD
        return input_dict

    def K(self, X, X2=None):
        """
        Compute the kernel function.

        .. math::
            K_{ij} = k(X_i, X_j)

        :param X: the first set of inputs to the kernel
        :param X2: (optional) the second set of arguments to the kernel. If X2
                   is None, this is handled as X2 == X.
        """
        if X2 is None:
            # pdist returns the condensed form; expand it and fill the
            # diagonal (zero distance) with the variance directly.
            val = scipy.spatial.distance.squareform(self.variance * np.exp(-0.5 * self._scaled_squared_dist(X)), checks=False)
            np.fill_diagonal(val, self.variance)
        else:
            val = self.variance * np.exp(-0.5 * self._scaled_squared_dist(X, X2))
        return val

    def _unscaled_squared_dist(self, X, X2=None):
        """
        Compute the squared Euclidean distance between each row of X and X2,
        or between each pair of rows of X if X2 is None (condensed form).
        """
        if X2 is None:
            return scipy.spatial.distance.pdist(X, 'sqeuclidean')
        else:
            return scipy.spatial.distance.cdist(X, X2, 'sqeuclidean')

    # @Cache_this(limit=3, ignore_args=())
    def _scaled_squared_dist(self, X, X2=None):
        """
        Efficiently compute the scaled squared distance, r^2.

        ..math::
            r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2 / l_q^2 )

        Note that if there is only one lengthscale, l comes outside the sum:
        we compute the unscaled distance first (in a separate function, for
        caching) and divide by the squared lengthscale afterwards.
        """
        if self.ARD:
            if X2 is not None:
                X2 = X2 / self.lengthscale
            return self._unscaled_squared_dist(X / self.lengthscale, X2)
        else:
            return self._unscaled_squared_dist(X, X2) / (self.lengthscale ** 2)

    def _scaled_squared_norm(self, D):
        """Scaled squared norm over the last axis of a difference tensor D (n x m x d)."""
        if self.ARD:
            return np.sum(np.square(D / self.lengthscale), axis=2)
        else:
            return np.sum(np.square(D), axis=2) / (self.lengthscale ** 2)

    def Kdiag(self, X):
        """
        The diagonal of the kernel matrix K

        .. math::
            Kdiag_{i} = k(X_i, X_i)
        """
        ret = np.empty(X.shape[0])
        ret[:] = self.variance
        return ret

    def gradients_X2(self, dL_dK, X, X2=None):
        """
        Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X
        """
        if X2 is None:
            X2 = X
        tmp = X[:, None, :] - X2[None, :, :]
        if dL_dK is None:
            part1 = -self.variance * np.exp(-0.5 * self._scaled_squared_norm(tmp))
            part2 = tmp / (self.lengthscale ** 2)
            grad = part1[:, :, None] * part2
        else:
            part1 = -(dL_dK * self.variance) * np.exp(-0.5 * self._scaled_squared_norm(tmp))
            part2 = tmp / (self.lengthscale ** 2)
            grad = np.sum(part1[:, :, None] * part2, axis=1)
        # Fix: the original ended with leftover debug prints, one of which
        # called self.gradients_X2(dL_dK, X, X2) unconditionally -- infinite
        # recursion.  The prints have been removed.
        return grad

    def gradients_X(self, dL_dK, X, X2=None):
        """
        Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X
        """
        if X2 is None:
            X2 = X
        aux1 = X[:, None, :] - X2[None, :, :]
        aux2 = (-self.variance) / (self.lengthscale ** 2)
        if dL_dK is None:
            aux3 = np.exp((-0.5) * self._scaled_squared_norm(aux1))
            grad = (aux3[:, :, None] * aux1) * aux2
        else:
            aux3 = np.exp((-0.5) * self._scaled_squared_norm(aux1)) * dL_dK
            grad = np.sum(aux3[:, :, None] * aux1, axis=1) * aux2
        return grad

    def gradients_X_diag(self, dL_dKdiag, X):
        # Kdiag is constant in X, so its gradient with respect to X is zero.
        return np.zeros(X.shape)

    def parameters_changed(self):
        super(SE, self).parameters_changed()

    def update_gradients_diag(self, dL_dKdiag, X):
        """
        Given the derivative of the objective with respect to the diagonal of
        the covariance matrix, compute the derivative wrt the parameters of
        this kernel and store in the <parameter>.gradient field.

        See also update_gradients_full
        """
        self.variance.gradient = np.sum(dL_dKdiag)
        if not self.ARD:
            self.lengthscale.gradient = 0.
        else:
            self.lengthscale.gradient = np.zeros(self.input_dim)

    def update_gradients_full(self, dL_dK, X, X2=None):
        """Accumulate hyperparameter gradients into <parameter>.gradient, given dL_dK."""
        squared_dist = self._scaled_squared_dist(X, X2)
        exp_squared_dist = np.exp(-0.5 * squared_dist)
        if X2 is None:
            tmp = scipy.spatial.distance.squareform(exp_squared_dist, checks=False)
            np.fill_diagonal(tmp, 1.)
            self.variance.gradient = np.sum(tmp * dL_dK)
            if self.ARD:
                # NOTE(review): this branch indexes X2[None, :, :] while X2 is
                # None here and reads self.variance.gradient as a matrix -- it
                # cannot run as written; kept verbatim and flagged for the author.
                self.lengthscale.gradient = (self.variance * np.sum(self.variance.gradient[:, :, None] * np.square(X[:, None, :] - X2[None, :, :]), axis=1)) / (self.lengthscale ** 3)
            else:
                self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(scipy.spatial.distance.squareform(exp_squared_dist * squared_dist) * dL_dK)
        else:
            self.variance.gradient = np.sum(exp_squared_dist * dL_dK)
            if self.ARD:
                # NOTE(review): same suspect use of self.variance.gradient as a
                # matrix as in the X2-is-None branch above.
                self.lengthscale.gradient = (self.variance * np.sum(self.variance.gradient[:, :, None] * np.square(X[:, None, :] - X2[None, :, :]), axis=1)) / (self.lengthscale ** 3)
            else:
                self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum((exp_squared_dist * squared_dist) * dL_dK)
| true |
4102cfda64dbe77cadaa5c717dea4660d5811ed9 | Python | mikael-jorda/PandaApplications | /00-1dof_passivity/data_logging/plotter.py | UTF-8 | 1,307 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# read text files and plot them
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
# The log file to read is passed as the single command-line argument.
if len(sys.argv) < 2:
    print("Give the name of the file to read as an argument\n")
    exit()

# Load the record, skipping the header row; each column is one logged signal.
log = np.loadtxt(sys.argv[1], skiprows=1)

desired_force = log[1:, 0]
sensed_force = log[1:, 1]
Rc = log[1:, 2]
forward_PO = log[1:, 3]
backward_PO = log[1:, 4]
E_correction_forward = log[1:, 5]
E_correction_backward = log[1:, 6]
vc = log[1:, 7]
alpha = log[1:, 8]

# Plot the whole record; time stamps assume a 1 kHz sample rate.
window = range(len(Rc))
time = np.arange(len(Rc[window])) / 1000.0

plt.figure(1)
plt.plot(time, sensed_force[window], label='Fs')
plt.plot(time, desired_force[window], 'r--', label='Fd')
plt.title("desired vs sensed force")
plt.legend()

plt.figure(10)
plt.plot(time, vc)
plt.title("vc")

plt.figure(2)
plt.plot(time, Rc)
plt.title("Rc")

plt.figure(3)
plt.plot(time, backward_PO)
plt.title("backward PO")

plt.figure(4)
plt.plot(time, E_correction_backward)
plt.title("E correction backward")

plt.show()
| true |
d2f6a787eee600da54d7ae9838f2e931b2d5bda3 | Python | nanducode/Model-Parameter-Optimization | /graph_tracers.py | UTF-8 | 905 | 2.875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os import getcwd, listdir
# Shared plot style and a qualitative colour map for the highlighted series.
plt.style.use('seaborn-darkgrid')
palette = plt.get_cmap('Set1')

# Collect the first column of every matching run file, keyed by the kh prefix
# (the part of the file name before the first underscore).
tracers = {}
path = getcwd() + "/data"
for run in listdir(path):
    selected = run.startswith(("1000", "10000", "30000")) and run.endswith("_10.csv")
    if selected:
        prefix = run.split("_")[0]
        tracers[prefix] = pd.read_csv(path + "/" + run).iloc[:, 0]

# One stacked subplot per tracer: every series drawn faint grey, then the
# current one re-drawn on top in colour.
for num, (prefix, tracer) in enumerate(tracers.items(), start=1):
    plt.subplot(3, 1, num)
    for background in tracers.values():
        plt.plot(background, marker='', color='grey', linewidth=0.6, alpha=0.3)
    plt.plot(tracer, marker='', color=palette(num), linewidth=2.4, alpha=0.9)
plt.show()
| true |
427726f64b374070c47ee6d99d84bf2ee7587941 | Python | ironboy98/ironboy | /hb2.py | UTF-8 | 131 | 2.6875 | 3 | [] | no_license | hb,t=map(str,input().split())
# For every character pair taken from hb and t (read on the line above),
# print 'yes' when the two characters occur equally often in their strings.
for ch_a in hb:
    count_a = hb.count(ch_a)
    for ch_b in t:
        print('yes' if count_a == t.count(ch_b) else 'no')
| true |
9c67ed3e70a33ef6765a7312c4fa5f79746b50c9 | Python | AkshatRastogi-1nC0re/app | /model/main.py | UTF-8 | 2,209 | 2.609375 | 3 | [] | no_license | import cv2
# Load the cover image and stringify its pixel array to use as the payload.
im = cv2.imread("test_img.jpg")
# NOTE(review): str() of a large numpy array is abbreviated with '...',
# so the payload is lossy -- confirm this is intended.
x = str(im)
from selenium import webdriver
import win32com.client as comctl
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
import time
import clipboard
def encrypt(public_message, private_message):
    """Hide private_message inside public_message via the steganographr site.

    Drives a Chrome session: fills the 'public' and 'private' fields,
    submits the encode form, then copies the resulting text to the system
    clipboard. The function does not return the result; callers read it
    from the clipboard afterwards.
    """
    # NOTE(review): wsh is never used after creation -- confirm the COM
    # dispatch side effect is actually needed.
    wsh = comctl.Dispatch("WScript.Shell")
    # ChromeDriverManager().install()
    opts = webdriver.ChromeOptions()
    opts.headless = False
    driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=opts)
    driver.get('https://neatnik.net/steganographr/')
    public = driver.find_element_by_xpath('//*[@id="public"]')
    public.send_keys(public_message)
    private = driver.find_element_by_xpath('//*[@id="private"]')
    private.send_keys(private_message)
    time.sleep(1)
    # submit the encode form and wait for the result textarea to populate
    drive=driver.find_element_by_xpath("/html/body/main/form[1]/fieldset/p[4]/button")
    drive.click()
    time.sleep(2)
    elem = driver.find_element_by_xpath('/html/body/main/section/textarea')
    # NOTE(review): a literal "p" is typed into the result box before
    # select-all/copy, so the copied text includes that stray character.
    elem.send_keys("p")
    elem.send_keys(Keys.CONTROL, 'a') #highlight all in box
    elem.send_keys(Keys.CONTROL, 'c') #copy
    print("Text has been copied to clipboard")
    driver.close()
def decrypt(message):
    """Recover the hidden text from a steganographr-encoded message.

    Mirrors encrypt(): fills the 'encoded' field, submits the decode form,
    and copies the revealed text to the system clipboard.
    """
    # NOTE(review): wsh is never used after creation -- confirm the COM
    # dispatch side effect is actually needed.
    wsh = comctl.Dispatch("WScript.Shell")
    # ChromeDriverManager().install()
    opts = webdriver.ChromeOptions()
    opts.headless = False
    driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=opts)
    driver.get('https://neatnik.net/steganographr/')
    # despite the name, this element is the 'encoded' input of the decode form
    public = driver.find_element_by_xpath('//*[@id="encoded"]')
    public.send_keys(message)
    # private = driver.find_element_by_xpath('//*[@id="private"]')
    # private.send_keys(private_message)
    time.sleep(1)
    drive=driver.find_element_by_xpath("/html/body/main/form[2]/fieldset/p[3]/button")
    drive.click()
    time.sleep(2)
    elem = driver.find_element_by_xpath('/html/body/main/section/textarea')
    # NOTE(review): a literal "p" is typed before select-all/copy, so the
    # clipboard contents include that stray character.
    elem.send_keys("p")
    elem.send_keys(Keys.CONTROL, 'a') #highlight all in box
    elem.send_keys(Keys.CONTROL, 'c') #copy
    print("Text has been copied to clipboard")
    driver.close()
# Hide the stringified image inside the cover text "Hi", then read the
# encoded result back from the system clipboard and show it.
encrypt("Hi",x)
text = clipboard.paste()
print(text)
# decrypt(text)
| true |
917bc6041b15f93e55dab419d5a074fe71adeca2 | Python | jashidsany/Learning-Python | /Codecademy Lesson 6 Strings/LA6.10_Strings_And_Conditionals_2.py | UTF-8 | 1,196 | 4.96875 | 5 | [] | no_license | # We can iterate through a string using in
# Demo: the `in` operator performs a substring-membership test on strings.
# The syntax for in is: letter in word
# Here, letter in word is a boolean expression that is True if the string letter is in the string word. Here are some examples:
print("e" in "blueberry")
# => True
print("a" in "blueberry")
# => False
# In fact, this method is more powerful than the function you wrote in the last exercise because it not only works with letters, but with entire strings as well.
print("blue" in "blueberry")
# => True
print("blue" in "strawberry")
# => False
# This function checks if the little string is in the big string
def contains(big_string, little_string):
    """Return True when little_string occurs inside big_string."""
    # The `in` test already evaluates to a bool; no if/else needed.
    return little_string in big_string
# Demo calls: substring present vs absent.
print(contains('watermelon', 'melon'))
print(contains('watermelon', 'berry'))
def common_letters(string_one, string_two):
    """Return the distinct characters of string_one that also occur in
    string_two, ordered by first appearance in string_one."""
    # dict.fromkeys de-duplicates while preserving first-seen order
    unique_letters = dict.fromkeys(string_one)
    return [letter for letter in unique_letters if letter in string_two]
print(common_letters('manhattan', 'san francisco'))
| true |
7f2f6838b4cceb6b59a8cc4722c7e1e786392278 | Python | CDivyaPrabha/CP1404Practicals | /sample exercises/string_letters.py | UTF-8 | 265 | 4 | 4 | [] | no_license | import string
def count_letters(word):
    """Count the characters of `word` that are ASCII letters."""
    return sum(1 for ch in word if ch.lower() in string.ascii_lowercase)
# Read a line from the user and report how many ASCII letters it contains.
statement = input ("Enter a string: ")
alphabets = count_letters(statement)
print(alphabets)
| true |
42c8e5bffefa170db1d31556f017cfd21a370b8b | Python | Hello-JianPeiLi/PYTHON | /python-高级语法/回忆协程/斐波那契数列迭代器.py | UTF-8 | 1,409 | 3.96875 | 4 | [] | no_license | # -*- coding:utf-8 -*-
# Author: JianPei
# @Time : 2021/07/19 16:39
from collections.abc import Iterable
from collections.abc import Iterator
import time
class Fibs(object):
    """Iterator yielding the first `num` Fibonacci numbers (1, 1, 2, 3, ...).

    Every value produced is also recorded in self.fibs_list.
    """

    def __init__(self, num):
        self.a = 0
        self.b = 1
        self.fibs_list = list()  # history of every value yielded so far
        self.current_num = 1     # 1-based index of the next value to yield
        self.num = num           # total number of values to produce

    def __iter__(self):
        return self

    def __next__(self):
        if self.num >= self.current_num:
            self.a, self.b = self.b, self.a + self.b
            # list.append returns None, so binding its result (as the
            # original did with `ret = ...`) was pointless; just record it.
            self.fibs_list.append(self.a)
            self.current_num += 1
            return self.a
        raise StopIteration
class ClassIterable(object):
    """Single-pass iterator over the names added via add()."""

    def __init__(self):
        self.names = list()
        self.num = 0  # cursor: index of the next element to yield

    def add(self, name):
        self.names.append(name)

    def __iter__(self):
        return self

    def __next__(self):
        if self.num < len(self.names):
            current = self.names[self.num]
            self.num += 1
            return current
        raise StopIteration
# Demo: exercise both custom iterators.
fb = Fibs(10)
ci = ClassIterable()
ci.add("张三")
ci.add("李四")
ci.add("王五")
# for i in ci:
#     print(i)
for i in fb:
    print(i)
# Fibs defines __iter__/__next__, so it satisfies the Iterator ABC.
print(isinstance(fb, Iterator))
def fibs(num):
    """Return a list of the first `num` Fibonacci numbers, starting 0, 1.

    Fixes two defects of the original: a leftover debug print inside the
    loop, and the wrong result for num < 2 (it always returned [0, 1]).
    """
    if num <= 0:
        return []
    fibs_list = [0, 1][:num]
    for _ in range(num - 2):
        fibs_list.append(fibs_list[-2] + fibs_list[-1])
    return fibs_list
print(fibs(10))
| true |
2c4d8f04bf3ef545546b9b043fe0457bd99f4f4b | Python | MajidAliKhanQuaid/GoogleCodeJam19Qualifier | /You Can Go/sln.py | UTF-8 | 357 | 3.546875 | 4 | [] | no_license | N = int(input())
# For each of the N test cases: consume the count line, read the movement
# string, and print the complementary pattern with 'S' and 'E' swapped
# (our moves never collide with the opponent's).
for case_idx in range(N):
    count = input()   # length line; value unused
    movement = input()
    flipped = ['E' if move == 'S' else 'S' for move in movement]
    print('Case #{case}: {pattern}'.format(case=case_idx + 1, pattern=''.join(flipped)))
| true |
e0c2d888c13d56ad084931a80b86bda15cb30c38 | Python | chinskiy/symcrypt_lab | /lab1/main.py | UTF-8 | 3,257 | 3.234375 | 3 | [] | no_license | import time
import math
def readfile(name):
    """Return the full contents of the text file `name`.

    Uses a context manager so the handle is always closed (the original
    leaked the open file object).
    """
    with open(name) as source:
        return source.read()
def filtertext_without_spaces(text):
    """Lowercase `text` and keep only Cyrillic letters, folding 'ё' (1105)
    into 'е' (1077); everything else is dropped."""
    kept = []
    for symbol in text.lower():
        code = ord(symbol)
        if 1072 <= code <= 1105:
            kept.append(chr(1077) if code == 1105 else symbol)
    return ''.join(kept)
def filtertext_with_spaces(oltxt):
    """Lowercase `oltxt`, keep Cyrillic letters and single spaces.

    Newlines become spaces, runs of whitespace collapse to one space,
    'ъ' (1098) maps to 'ь' (1100) and 'ё' (1105) maps to 'е' (1077).
    Fixes the original's IndexError when the text starts with whitespace
    (it indexed newtxt[-1] on an empty string): leading whitespace is
    now simply dropped.
    """
    oltxt = oltxt.lower()
    newtxt = ''
    for ch in oltxt:
        code = ord(ch)
        if code == 32 or code == 10:
            # collapse whitespace runs; guard against an empty prefix
            if newtxt and newtxt[-1] != ' ':
                newtxt += ' '
        elif 1072 <= code <= 1105:
            if code == 1098:
                newtxt += chr(1100)
            elif code == 1105:
                newtxt += chr(1077)
            else:
                newtxt += ch
    return newtxt
def letter_frequency(text):
    """Return {character: relative frequency} over `text`.

    Returns an empty dict for empty input (the original divided by
    len(text) and raised ZeroDivisionError).
    """
    if not text:
        return {}
    counts = {}
    for symbol in text:
        counts[symbol] = counts.get(symbol, 0) + 1
    total = len(text)
    return {symbol: count / total for symbol, count in counts.items()}
def letter_appearance(text):
    """Return absolute character counts for `text`."""
    counts = {}
    for symbol in text:
        counts[symbol] = counts.get(symbol, 0) + 1
    return counts
def bigram_frequency(text):
    """Return relative frequencies of adjacent character pairs in `text`."""
    total = len(text) - 1
    counts = {}
    for start in range(total):
        pair = text[start:start + 2]
        counts[pair] = counts.get(pair, 0) + 1
    return {pair: n / total for pair, n in counts.items()}
def print_letter_frequency(let_freq):
    """Print each character and its frequency rounded to 4 places, in key order."""
    for symbol, share in sorted(let_freq.items()):
        print(symbol, ' ', round(share, 4))
def print_bigram_frequency(let_freq):
    """Print bigrams and rounded frequencies, most frequent first."""
    ranked = sorted(let_freq.items(), key=lambda item: item[1], reverse=True)
    for pair, share in ranked:
        print(pair, ' ', round(share, 4))
def find_entropy(let_freq, numb):
    """Print and return the entropy (bits) of the distribution, scaled by 1/numb.

    `numb` is the n-gram length (1 for letters, 2 for bigrams) so the
    result is entropy per symbol. The original computed the value but
    never returned it; returning it keeps callers backward-compatible
    while making the function testable.
    """
    entr = -sum(p * math.log(p, 2) for p in let_freq.values()) / numb
    print(entr)
    return entr
def encode_vigenere(text, passwrd):
    """Vigenere-shift Cyrillic `text` with key `passwrd` over a 32-letter
    alphabet starting at code point 1072 ('а')."""
    key_len = len(passwrd)
    shifted = []
    for idx, ch in enumerate(text):
        code = (((ord(ch) + ord(passwrd[idx % key_len])) % 1072) % 32) + 1072
        shifted.append(chr(code))
    return ''.join(shifted)
def index_of_matching(let_freq):
    """Print and return the index of coincidence of the letter counts.

    `let_freq` maps characters to absolute counts. The original computed
    the index but never returned it; returning it is backward-compatible
    and makes the function testable.
    """
    total = sum(let_freq.values())
    coincidences = sum(count * (count - 1) for count in let_freq.values())
    index = coincidences / (total * (total - 1))
    print(index)
    return index
if __name__ == '__main__':
    # part 1: frequency / entropy analysis (kept for reference)
    # text = readfile('bigtext.txt')
    # text = filtertext_without_spaces(text)
    # print(text)
    # let_freq = letter_frequency(text)
    # print_letter_frequency(let_freq)
    # find_entropy(let_freq, 1)
    # bigram_freq = bigram_frequency(text)
    # print_bigram_frequency(bigram_freq)
    # find_entropy(bigram_freq, 2)
    # part 2: Vigenere-encode the plaintext with the key and report the
    # index of coincidence of the resulting ciphertext.
    enctext = readfile('forencode.txt')
    enctext = filtertext_without_spaces(enctext)
    key = readfile('key.txt')
    key = filtertext_without_spaces(key)
    dectext = encode_vigenere(enctext, key)
    letters = letter_appearance(dectext)
    index_of_matching(letters)
#print(dectext) | true |
98d9c4783f9b656d98472f778204da8d75666e89 | Python | Ivanqza/Python | /Python Basics/pythonProject3/task 6.py | UTF-8 | 412 | 3.84375 | 4 | [] | no_license | from math import pi
# Read a shape name, then its dimensions, and print the area to 3 decimals.
# Note: each branch reads exactly as many extra input lines as it needs.
figure_type = input()
if figure_type == "square":
    edge = float(input())
    area = edge * edge
elif figure_type == "rectangle":
    width = float(input())
    height = float(input())
    area = width * height
elif figure_type == "circle":
    radius = float(input())
    area = pi * radius * radius
elif figure_type == "triangle":
    base = float(input())
    height = float(input())
    area = base * height / 2
print(f"{area:.3f}")
| true |
03d5eadfa064fd22c08c72da35d7836e0ce42395 | Python | Liu-Lingfei/hack-assembler | /SymbolTable.py | UTF-8 | 1,630 | 3.21875 | 3 | [] | no_license | class SymbolTable:
    def __init__(self, file_name):
        """Open the assembly source file and prepare an empty symbol table."""
        # NOTE(review): the file handle is never closed by this class;
        # callers must close self.file themselves.
        self.file = open(file_name, "r")
        self.name = self.file.name
        self.table = {}  # symbol -> instruction line number
        self.lines = 0   # count of real (A/C) instructions seen so far
def record(self, command):
first = command.find("(")
second = command.find(")")
if first == -1 or second == -1:
return False
symbol = command[(first + 1):second]
self.table[symbol] = self.lines
return True
def read_newline(self):
command = self.file.readline()
if command != '' and command.strip() == '':
return '...'
command = command.strip()
index = command.find("//")
if index == -1:
pass
elif index != 0:
command = command[:index]
command = command.strip()
return command
def commandType(self, command):
if command == '':
return False
elif command[0] == "@":
return "A_COMMAND"
elif command[0] == "(":
return "L_COMMAND"
elif command[0].isupper() or command[0] == '0' or command[0] == '1' or command[0] == '-1':
return "C_COMMAND"
else:
return "COMMENT"
    def process_line(self):
        """Consume one source line: record labels, count real instructions.

        Returns True while more lines may follow, False at end of file.
        """
        command = self.read_newline()
        com_type = self.commandType(command)
        if com_type == "L_COMMAND":
            # (LABEL) pseudo-commands map the label to the next ROM index
            self.record(command)
            return True
        elif com_type == "A_COMMAND" or com_type == "C_COMMAND":
            # real instructions advance the ROM line counter
            self.lines += 1
            return True
        elif com_type == "COMMENT":
            # comment-only (and blank '...') lines are skipped but not EOF
            return True
        else:
return False | true |
606bfed5c73f3daca9747286744e6cc056945a18 | Python | vyshaal/PythonProgramming | /Divisible_Sum_Pairs.py | UTF-8 | 207 | 2.859375 | 3 | [] | no_license | n,k = map(int,input().split())
# Read the n array elements, then count unordered index pairs (i, j), i < j,
# whose element sum is divisible by k (n and k were read on the line above).
a = [int(num) for num in input().split()]
pairs = 0
for i in range(n):
    for j in range(i+1,n):
        if (a[i] + a[j]) % k == 0:
            pairs += 1
print(str(pairs)) | true |
24be9ca7a93fdd41235807d5ba7592fb2cc69c2e | Python | zulfi127/Data-Structures-Algorithms | /BinarySearchTree.py | UTF-8 | 3,309 | 3.84375 | 4 | [] | no_license |
class BSTree: # Binary Search Tree
    """Thin wrapper holding the root BSNode of a binary search tree."""

    def __init__(self):
        self.root = None

    def depth(self):
        """Return the tree depth in nodes (0 for an empty tree)."""
        if self.root is None:
            return 0
        # BUG FIX: the node method is named calculate_depth; the original
        # called the non-existent self.root.depth().
        return self.root.calculate_depth()

    def __str__(self):
        if self.root is None:
            return "EmptyBSTree"
        return self.root.__str__()

    def insert(self, value):
        """Insert `value`, creating the root node when the tree is empty."""
        if self.root is None:
            self.root = BSNode(value)
        else:
            self.root.insert(value)

    def sort(self):
        """Return the stored values ascending, or 'EmptyBSTree' if empty."""
        if self.root is not None:
            # BUG FIX: call the node's sort_value() method; the original
            # returned the non-existent bound attribute self.root.sort.
            return self.root.sort_value()
        return "EmptyBSTree"
class BSNode: # Binary Search Tree Node
    """A node of a binary search tree; duplicate values are silently ignored."""

    def __init__(self, val):
        self.left = None
        self.right = None
        self.value = val

    def __str__(self):
        return self.strIndent('')

    def insert(self, newVal):
        """Insert `newVal` into the subtree rooted here (no-op for duplicates)."""
        if newVal < self.value:
            if self.left is None:
                self.left = BSNode(newVal)
            else:
                self.left.insert(newVal)
        elif newVal > self.value:
            if self.right is None:
                self.right = BSNode(newVal)
            else:
                self.right.insert(newVal)

    def calculate_depth(self):
        """Return the number of nodes on the longest root-to-leaf path."""
        left_depth = self.left.calculate_depth() if self.left is not None else 0
        right_depth = self.right.calculate_depth() if self.right is not None else 0
        return max(left_depth, right_depth) + 1

    def sort_value(self):
        """Return the subtree's values in ascending (in-order) order."""
        sorted_values = []
        if self.left:
            sorted_values.extend(self.left.sort_value())
        sorted_values.append(self.value)
        if self.right:
            sorted_values.extend(self.right.sort_value())
        return sorted_values

    def strIndent(self, ind):
        """Makes text-based print of tree"""
        outputString = ''
        if self.left is None and self.right is not None:
            outputString = outputString + ind + ' [none]\n'
        if self.left is not None:
            outputString = outputString + self.left.strIndent(ind + '  ')
        # BUG FIX: value is usually an int; concatenating it to a str raised
        # TypeError in the original -- convert explicitly.
        outputString = outputString + ind + str(self.value) + '\n'
        if self.right is None and self.left is not None:
            outputString = outputString + ind + ' [none]\n'
        if self.right is not None:
            outputString = outputString + self.right.strIndent(ind + '  ')
        return outputString
def treeSort(alist):
    """Return the values of `alist` sorted ascending via BST insertion.

    Duplicates are dropped (BSNode ignores equal values) and an empty
    input yields []. The original shadowed the function name with a local
    BSTree instance and called the non-existent BSTree.sort_value(), so it
    always raised; building on BSNode directly avoids both problems.
    """
    if not alist:
        return []
    root = BSNode(alist[0])
    for item in alist[1:]:
        root.insert(item)
    return root.sort_value()
# Demo: build a tree rooted at 27 and exercise the node API directly.
tree = BSTree()
tree.root= BSNode(27)
for i in [14,23,12,56,92]:
    tree.root.insert(i)
print(tree.root.sort_value())       # in-order traversal -> sorted values
print(tree.root.calculate_depth())  # longest root-to-leaf node count
| true |
8663746239838168bf6676d25fff8a023848eb18 | Python | borisnorm/codeChallenge | /practiceSet/g4g/graph/prims.py | UTF-8 | 574 | 3.328125 | 3 | [] | no_license | #find the mst, based on each vertex keep on finding the closest vertex not visited till done
from heapq import *
def prims(source, graph):
    """Sketch of Prim's MST: greedily grow the tree outward from `source`.

    NOTE(review): this is pseudo-code and will not run as written:
    - heappush(h, source) pushes a bare vertex, while later pushes are
      (distance, node) tuples, so the heap mixes incomparable entries;
    - solution.append(source, item, distance(source, item)) passes three
      arguments to list.append, which accepts exactly one;
    - `distance` and `getNeighbors` are never defined or imported.
    """
    h = []
    solution = []
    heappush(h, source)
    size = len(graph.keys())
    visited = set()
    source = source  # NOTE(review): self-assignment, has no effect
    while len(visited) < size:
        item = heappop(h)
        if item not in visited:
            solution.append(source, item, distance(source, item))
            visited.add(item)
            for nodes in getNeighbors(item):
                heappush(h, (distance(item, nodes), nodes)) #assumes edges are received
        source = item
    return solution
| true |
55e342211d33bec6bb89daaac6b902cb86a4f0fa | Python | Shokir-developer/python_projects_beginner | /binary_search/binary.py | UTF-8 | 425 | 4.21875 | 4 | [] | no_license | def binary_search(arr, low, high, x):
    # Search arr[low..high] (inclusive) for x; assumes arr sorted ascending.
    if high >= low:
        mid = (low+high)//2
        if arr[mid] == x:
            # found: report the index
            return mid
        elif arr[mid] > x:
            # target is smaller: recurse into the left half
            return binary_search(arr, low, mid-1, x)
        else:
            # target is larger: recurse into the right half
            return binary_search(arr, mid+1, high, x)
    else:
        # empty range: x is not present
        return -1
# Demo: search for 4 in a sorted sample array and report its index.
array = [1, 4, 6, 9, 10]
x = 4
result = binary_search(array, 0, len(array)-1, x)
if result != -1:
    print("Index of element is ", str(result))
else:
print("Not found!") | true |
a21a616f6fdf648797f29874d5df00ec2835b80c | Python | mistermao/Apitest-Framework | /UItest-Automation_Framework/testsuites/test_ehr_birthday.py | UTF-8 | 1,918 | 2.703125 | 3 | [] | no_license | # coding = utf-8
import time
import unittest
from pageobjects.ehr_birthday import BirthdayPage
from pageobjects.ehr_birthday import BirthdaySql
from framework.browser_engine import BrowserEngine
class EhrBirthday(unittest.TestCase):
    """UI tests for submitting a birthday (solar vs lunar calendar) in EHR."""
    @ classmethod
    def setUpClass(cls):
        """
        Test fixture set-up: open the shared browser once before any test runs.
        :return:
        """
        browse = BrowserEngine(cls)
        cls.driver = browse.open_browser(cls)
    @ classmethod
    def tearDownClass(cls):
        """
        Post-test clean-up: quit the shared browser session.
        :return:
        """
        cls.driver.quit()
    def test_01_solar_calendar_submit(self):
        """
        Submit a solar-calendar (Gregorian) birthday and verify it in the DB.
        :return:
        """
        birthdaypage = BirthdayPage(self.driver)
        birthdaysql = BirthdaySql()
        birthdaypage.type_emp_code("GS0415")
        birthdaypage.click_user_birthday()
        birthdaypage.click_date()
        birthdaypage.click_solar_calendar_submit()
        # NOTE(review): the try/except swallows assertion failures and only
        # prints 'Test Fail.', so unittest always reports this test green.
        try:
            assert birthdaysql.get_birthday_sql()[0] == '阳历'
            print('Test Pass.')
        except Exception as e:
            print('Test Fail.', format(e))
    def test_lunar_calendar_submit(self):
        """
        Submit a lunar-calendar birthday and verify it in the DB.
        :return:
        """
        birthdaypage = BirthdayPage(self.driver)
        birthdaysql = BirthdaySql()
        birthdaypage.type_emp_code("GS0415")
        birthdaypage.click_user_birthday()
        birthdaypage.click_date()
        birthdaypage.click_lunar_calendar()
        birthdaypage.click_lunar_calendar_submit()
        # NOTE(review): same swallowed-assertion pattern as above.
        try:
            assert birthdaysql.get_birthday_sql()[0] == '农历'
            print('Test Pass.')
        except Exception as e:
            print('Test Fail.', format(e))
if __name__ == '__main__':
    # Run the test case through unittest's command-line entry point.
    unittest.main()
| true |
fecc4a47f997b39100c0c33edaef5f776b17b194 | Python | aphex36/SteamSentiment | /baseline_sentiment_detector.py | UTF-8 | 3,003 | 2.96875 | 3 | [] | no_license | import json
import nltk
import csv
import math
import sys
import re
import numpy as np
import collections
import random
import os
#nltk.download('averaged_perceptron_tagger')
# Sentiment lexicons, loaded from text files at module import time.
posWords = set()
negWords = set()
# Per-word hit tallies accumulated while classifying reviews.
posDictCount = dict()
negDictCount = dict()
# Confusion-matrix counters over the whole corpus.
truePositives = 0
trueNegatives = 0
falsePositives = 0
falseNegatives = 0
def determineSentiment(words):
    """Classify a token list as 'positive' or 'negative' via lexicon hits.

    Side effect: increments the module-level posDictCount / negDictCount
    tallies for every matched word. Ties (score 0) count as positive.
    """
    score = 0
    for raw_word in words:
        token = re.sub(r'[^\w\s]', '', raw_word.lower())
        token = str(token)
        if token in posWords:
            score += 1
            posDictCount[token] = posDictCount.get(token, 0) + 1
        elif token in negWords:
            score -= 1
            negDictCount[token] = negDictCount.get(token, 0) + 1
    return 'positive' if score >= 0 else 'negative'
def printOutMaxOccuringWords(indicator):
    """Print lexicon words with their hit counts, most frequent first.

    `indicator` selects the tally: 'Positive' -> posDictCount, anything
    else -> negDictCount. BUG FIX: the original iterated with xrange,
    which is Python-2-only and raises NameError under Python 3 even
    though the rest of the file uses Python-3-style print calls.
    """
    refDict = posDictCount if indicator == 'Positive' else negDictCount
    ranked = sorted(refDict.items(), key=lambda kv: kv[1], reverse=True)
    print(indicator)
    for word, count in ranked:
        print(word + ": " + str(count))
# Load the positive/negative word lexicons, one word per line.
with open('positive_words.txt') as p:
    for x in p.readlines():
        posWords.add(x.strip('\n'))
with open('negative_words.txt') as n:
    for x in n.readlines():
        negWords.add(x.strip('\n'))
# NOTE(review): the with-blocks already closed these handles; the explicit
# close() calls below are redundant (harmless double close).
p.close()
n.close()
# Gather every review file from both class directories and reset totals.
allFiles = os.listdir("./pos/")
allFiles.extend(os.listdir("./neg/"))
totalScore = 0
totalRevs = 0
fileNo = 0
# Classify every review in every file and accumulate the confusion matrix.
for fileName in allFiles:
    actualDir = ""
    isPos = True
    # NOTE(review): class membership is inferred from the file *name*
    # suffix (chars [-7:-4] == "neg"), not from the directory it came
    # from -- confirm the naming convention holds for all files.
    if fileName[-7:-4] == "neg":
        actualDir = "./neg/" + fileName
        isPos = False
    else:
        actualDir = "./pos/" + fileName
    fileNo += 1
    with open(actualDir) as f:
        content = [x.strip('\n') for x in f.readlines()]
        # NOTE(review): range(len(content)-1) skips the last line --
        # presumably to drop a trailing blank; verify no review is lost.
        for i in range(len(content)-1):
            if content[i] == '':
                continue
            #print(content[i])
            # each non-empty line is a JSON object with a 'review' field
            tempJson = json.loads(content[i])
            individualWords = tempJson['review'].split()
            # NOTE(review): newStr is built but never used, and
            # .encode('ascii', 'ignore') returns bytes under Python 3,
            # which would make re.sub raise -- this loop looks like
            # leftover Python-2 code.
            newStr = ""
            for wordGiven in individualWords:
                newStr += re.sub(r'[^\w\s]','',wordGiven.encode('ascii', 'ignore')) + " "
            totalRevs += 1
            # update accuracy score and confusion-matrix counters
            if determineSentiment(individualWords) == 'negative' and not isPos:
                totalScore += 1
                trueNegatives += 1
            elif determineSentiment(individualWords) == 'positive' and isPos:
                totalScore += 1
                truePositives += 1
            elif determineSentiment(individualWords) == 'positive' and not isPos:
                falsePositives += 1
            else:
                falseNegatives += 1
    # NOTE(review): redundant -- the with-block already closed f.
    f.close()
# Report corpus-level metrics (1.0* keeps the divisions float under py2).
print("\n")
print("Test Accuracy: " + str((1.0*totalScore)/totalRevs))
print("Precision: " + str((1.0*truePositives)/(truePositives+falsePositives)))
print("Recall: " + str((1.0*truePositives)/(truePositives+falseNegatives)))
| true |
1208945f8bd487c59a334e6396d5554ab5a3768a | Python | koenvo/dimarray | /dimarray/geo/geoarray.py | UTF-8 | 4,148 | 3.046875 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-other-permissive"
] | permissive | """ Subclass of DimArray specialized for geo-applications
==> dimensions always remain sorted: ('items','time','lat','lon','height','sample')
==> subset of methods with specific plotting behaviour: e.g. TimeSeries, Map
"""
import numpy as np
from dimarray import DimArray, Axes, Axis
def is_longitude(nm):
    """True when `nm` names a longitude axis (case-insensitive)."""
    return nm.lower() in {"lon", "long", "longitude", "lons", "longitudes"}
def is_latitude(nm):
    """True when `nm` names a latitude axis (case-insensitive)."""
    return nm.lower() in {"lat", "latitude", "lats", "latitudes"}
class GeoArray(DimArray):
    """ array for geophysical application

    recognize longitude / latitude:
    - automatically assign a weight to lon varying with cos of lat, so that mean()
    returns a good approximation to a calculation in spherical coordinates.
    - lon recognized as 360-modulo-axis: indexing -180 is the same as +180
    - can input time, lat, lon as keyword arguments in addition to the standard way
    """
    ###- time, lat, lon order always maintained
    #_order = ('time','lat','lon')
    def __init__(self, values=None, axes=None, time=None, lat=None, lon=None, **kwargs):
        """Build a GeoArray from values plus either `axes` or the
        time/lat/lon keyword shortcuts (mutually exclusive).

        Extra **kwargs are forwarded to DimArray.__init__.
        """
        keyword = (time is not None or lat is not None or lon is not None)
        # NOTE(review): assert is stripped under `python -O`; consider
        # raising ValueError for this user-input validation instead.
        assert not (axes is not None and keyword), "can't input both `axes=` and keyword arguments!"
        # construct the axes from the keyword shortcuts, in time/lat/lon order
        if keyword:
            axes = Axes()
            if time is not None: axes.append(Axis(time, 'time'))
            if lat is not None: axes.append(Axis(lat, 'lat'))
            if lon is not None: axes.append(Axis(lon, 'lon'))
        super(GeoArray, self).__init__(values, axes, **kwargs) # order dimensions
        # add weight on latitude so mean() approximates spherical averaging,
        # and make longitude wrap modulo 360 degrees
        for ax in self.axes:
            if is_latitude(ax.name):
                ax.weights = lambda x: np.cos(np.radians(x))
            if is_longitude(ax.name):
                ax.modulo = 360
    def __repr__(self): return super(GeoArray, self).__repr__().replace("dimarray","geoarray")
    def __print__(self): return super(GeoArray, self).__print__().replace("dimarray","geoarray")
    def __str__(self): return super(GeoArray, self).__str__().replace("dimarray","geoarray")
#def _get_geoarray_cls(dims, globs=None):
# """ look whether a particular pre-defined array matches the dimensions
# """
# if globs is None: globs = globals()
# cls = None
# for obj in globs.keys():
# if isinstance(obj, globals()['GeoArray']):
# if tuple(dims) == cls._dimensions:
# cls = obj
#
# return cls
#class CommonGeoArray(GeoArray):
# #pass
# _order = ('items','time','lat','lon','height','sample')
# def __init__(self, values=None, *axes, **kwargs):
# """
# """
# assert (len(axes) == 0 or len(kwargs) ==0), "cant provide axes both as kwargs and list"
# assert self._dims is None or (len(axes) == self._dims or len(kwargs) == len(self._dims)), "dimension mismatch"
# if len(kwargs) > 0:
# for k in kwargs:
# if k not in self._order:
# raise ValueError("unknown dimension, please provide as axes")
# if self._dims is not None:
# axes = [k,kwargs[k] for k in self._dims if k in kwargs]
# else:
# axes = [k,kwargs[k] for k in self._order if k in kwargs]
#
# else:
# if self._dims is not None:
# assert tuple(ax.name for ax in axes) == self._dims, "dimension mismtach"
#
# super(CommonGeoArray, self).__init__(values, axes)
# for k in kwargs: self.setncattr(k, kwargs[k])
#
# @classmethod
# def _constructor(cls, values, axes, **kwargs):
# dims = tuple(ax.name for ax in axes)
# class_ = _get_geoarray_cls(dims)
# if class_ is not None:
# obj = class_(values, *axes)
# else:
# obj = cls(values, *axes)
# for k in kwargs: obj.setncattr(k, kwargs[k])
# return obj
#
#class TimeSeries(GeoArray):
# _dims = ('time',)
#
#class Map(GeoArray):
# _dims = ('lat','lon')
#
#class TimeMap(GeoArray):
# _dims = ('time','lat','lon')
#
#class Sample(GeoArray):
# _dims = ('sample',)
| true |
d890e61dfddf9ff2288adf91d213a6bf4360387e | Python | TravelSir/leetcode_solutions | /201-300/263. Ugly Number.py | UTF-8 | 702 | 3.953125 | 4 | [] | no_license | """
解题思路:
超简单的因式分解,
只需要将num先除以2除到不能整除,然后再除以3,以此类推,直到整除结果为1或除数超过了5,最后判断整除结果是否为1即可
"""
class Solution:
def isUgly(self, num):
factor = [2, 3, 5]
i = 0
while num != 1:
if i >= len(factor):
break
if num < factor[i]:
break
if num % factor[i] == 0:
num /= factor[i]
else:
i += 1
if num == 1:
return True
else:
return False
if __name__ == '__main__':
sl = Solution()
print(sl.isUgly(8))
| true |
4a496d8ce733039976b7c6368cbd71f7e36f59c9 | Python | clarissebaes/Python-Programs | /Academic/CS1100 Lab/Lab 5/check3.py | UTF-8 | 2,264 | 3.5625 | 4 | [] | no_license | import webbrowser
import lab05_util
restaurants = lab05_util.read_yelp('yelp.txt')
def output_score(avg_score,num_review):
if avg_score > 0 and avg_score <= 2:
return('This restaurant is rated bad, based on {:} reviews.'.format(num_review))
elif avg_score > 2 and avg_score <= 3:
return('This restaurant is rated average, based on {:} reviews.'.format(num_review))
elif avg_score > 3 and avg_score <= 4:
return('This restaurant is rated above average, based on {:} reviews.'.format(num_review))
elif avg_score > 4 and avg_score <= 5:
return('This restaurant is rated very good, based on {:} reviews.'.format(num_review))
def average_score(indx):
if len(restaurants[indx][6]) > 3:
no_max_min = (sum(restaurants[indx][6]) - max(restaurants[indx][6]) - min(restaurants[indx][6]))
num_review = int((len(restaurants[indx][6]))) -2
avg_score = no_max_min/num_review
final_avg_score = output_score(avg_score,num_review)
return final_avg_score
else:
avg_score = sum(restaurants[indx][6])/len(restaurants[indx][6])
num_review = len(restaurants[indx][6])
final_avg_score = output_score(avg_score,num_review)
return final_avg_score
def print_info(indx):
num = int(indx)
address = restaurants[indx][3].split("+")
avg_score = average_score(indx)
print(restaurants[indx][0], '({:})'.format(restaurants[indx][5]))
print('\t',address[0],sep = '')
print('\t',address[1],sep = '')
print('\t',avg_score,sep = '')
print()
res_num = int(input('Enter a Restaurant ID ==> '))
indx = res_num-1
if indx>= 0 and indx<155:
print_info(indx)
else:
print('No Restaurant has that ID')
choice = int(input('''What would you like to do next?
1.Visit the homepage
2.Show on Google Maps
3.Show directions to this restaurant
Your choice (1-3)? ==>'''))
if choice == 1:
webbrowser.open(restaurants[indx][4])
elif choice == 2:
webbrowser.open('http://www.google.com/maps/place/{:}'.format(restaurants[indx][3]))
elif choice ==3:
webbrowser.open('http://www.google.com/maps/dir/110 8th Street Troy NY/{:}'.format(restaurants[indx][3]))
| true |
d1f53e830f26ec922ab4b7ca642e7781def7d089 | Python | ravitejachebrolu/MachineLearning | /Classification/1D2-class.py | UTF-8 | 2,997 | 3.1875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 08 11:10:50 2016
@author: raviteja
"""
import numpy as np
import csv
from math import log
from sklearn.metrics import confusion_matrix
def main():
#print("Classification based on one feature")
#loading the data
X = []
Y = []
f=open("iris-dataset.csv")
for row in csv.reader(f):
X.append(row[3])
Y.append(row[4])
X = [float(i) for i in X]
X = X[:100]
Y=Y[:100]
mean_cl1,mean_cl2 = mean(X)
var_cl1,var_cl2 = variance(X)
X_test = X[21:50]+X[51:80]
# print mean_cl1,mean_cl2,var_cl1,var_cl2
# print X_test
Y_test = Y[21:50]+Y[51:80]
Pred_class = membership_func(X_test,mean_cl1,mean_cl2,var_cl1,var_cl2)
cfm= confusion_matrix(Y_test,Pred_class, labels=["Iris-setosa", "Iris-versicolor"])
print "confusion matrix:"
print cfm
accuracy_func(cfm)
Fmeasure_func(precision_func(cfm),recall_func(cfm))
def precision_func(cfm):
precision = cfm[0][0]/(cfm[0][0]+cfm[0][1])
print "precision: %s" %precision
return precision
def recall_func(cfm):
recall = cfm[0][0]/(cfm[0][0]+cfm[1][0])
print "recall: %s" %recall
return recall
def Fmeasure_func(precision,recall):
Fmeasure = 2*precision*recall/(precision+recall)
print "F-measure: %s" %Fmeasure
def accuracy_func(cfm):
accuracy = (cfm[0][0]+cfm[1][1])/(cfm[0][0]+cfm[1][0]+cfm[0][1]+cfm[1][1])
print "accuracy: %s" %accuracy
#discirmination function , a condition for memebership function
def discrimination_func(g_cl1,g_cl2):
Pred_class =[]
for i in g_cl1:
for j in g_cl2:
if i>j:
Pred_class.append( "Iris-setosa")
break
else:
Pred_class.append("Iris-versicolor")
break
return Pred_class
#membership_func to say which test data belongs to which class
def membership_func(X_test,mean_cl1,mean_cl2,var_cl1,var_cl2):
g_cl1 = -log(var_cl1)-((0.5)*((X_test-mean_cl1)**2/var_cl1**2))+log(0.5)
g_cl2 = -log(var_cl2)-((0.5)*((X_test-mean_cl2)**2/var_cl2**2))+log(0.5)
#print g_cl1,g_cl2
#print Pred_class
return discrimination_func(g_cl1,g_cl2)
def variance(X):
var_cl1 = np.asanyarray(X[0:20])
var_cl2 = np.asanyarray(X[81:100])
print "variance"
print np.std(var_cl1),np.std(var_cl2)
return np.std(var_cl1),np.std(var_cl2)
def mean(X):
mean_cl1 = np.asanyarray(X[0:20])
mean_cl2= np.asanyarray(X[81:100])
print "mean"
print np.mean(mean_cl1),np.mean(mean_cl2)
return np.mean(mean_cl1),np.mean(mean_cl2)
if __name__ == "__main__":
main()
| true |
bbfd0058f3d4109041616d86278a34adef41bc55 | Python | ferdirn/hello-python | /sorted_dict_by_value_reverse.py | UTF-8 | 175 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env python
# regular unsorted dictionary
d = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
print d
print sorted(d.items(), key=lambda t: t[1], reverse=True)
| true |
098d5d9a322e388f408ddfef7b8407cee07f2e31 | Python | Auwk/python | /计石器.py | UTF-8 | 673 | 2.71875 | 3 | [] | no_license | import datetime
def file_hdl(a,b,name='石头数量.txt'):
with open(name,'a',encoding='utf-8') as f :
a=int(a)
b=int(b)
c=str(datetime.datetime.now())
f.write('\n')
f.writelines('%s,仓库石头数为:%d,总石头数为:%d,记录点为:\'%s\',还差%d个石头'%(c,a,b,d,168*3-b))
f.write('\n')
def du(name='石头数量.txt'):
with open(name,encoding='utf-8') as w :
for i in w:
print(i,end='')
'''a=f.read()
print(a)'''
a=input('输入邮件石头数:')
b=input('输入总石头数:')
d=input('输入时间点:')
file_hdl(a,b)
du()
input()
| true |
1b04983ee6bf6acb9347a6194cb9def82f796138 | Python | achrafbenyounes/web-scraping-with-python | /scraping.py | UTF-8 | 3,354 | 3.28125 | 3 | [] | no_license | from time import sleep
from time import time
from random import randint
from IPython.display import clear_output
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
# ---------------------------------------------------------------------------
# Scrape IMDB's "most voted" search results (years 2000-2017, 4 pages per
# year) and collect name / year / rating / metascore / votes for every movie
# that has a metascore.  Parsing now happens INSIDE the loop: the original
# parsed only the very last response after both loops had finished.
# ---------------------------------------------------------------------------
from warnings import warn  # used by the status-code check below (was missing)

# Use pagination to navigate (pages 1 to 4 of each year's results)
pages = [str(i) for i in range(1, 5)]
# Search the movies released between 2000 and 2017
years_url = [str(i) for i in range(2000, 2018)]

# Timestamp of the first request, used to report the request rate
start_time = time()
# Number of requests issued so far.  The original initialised `requests = 0`
# but incremented `request`, which raised NameError on the first iteration.
request = 0

# Result columns, filled while scraping
names = []
years = []
imdb_ratings = []
meta_scores = []
votes = []

for year_url in years_url:
    for page in pages:
        # HTTP GET on the IMDB search page for this year/page
        response = get('https://www.imdb.com/search/title/?release_date='+year_url + '&sort=num_votes,desc&page='+ page +'')
        # Pause the loop between 8 and 15 seconds (politeness / rate limiting)
        sleep(randint(8, 15))
        request += 1
        sleep(randint(1, 3))
        elapsed_time = time() - start_time  # elapsed time since the first request
        print("Requests{}; Frequency{} requests/s".format(request, request/elapsed_time))
        clear_output(wait = True)
        # If the status code is different from 200, there is a problem: warn about it
        if response.status_code != 200:
            warn('Request:{}; status_code:{}'.format(request, response.status_code))
        # Parse the html document of THIS response
        html_soup = BeautifulSoup(response.text, 'html.parser')
        movies_container = html_soup.find_all('div', class_="lister-item mode-advanced")
        # Extract the data of every movie container that has a metascore
        for container in movies_container:
            if container.find('div', class_="ratings-metascore") is not None:
                names.append(container.h3.a.text)
                years.append(container.h3.find('span', class_='lister-item-year text-muted unbold').get_text())
                imdb_ratings.append(float(container.strong.text))
                meta_scores.append(int(container.find('div', class_='inline-block ratings-metascore').span.get_text()))
                votes.append(int(container.find('span', attrs={"name":"nv"})['data-value']))

data_movies = pd.DataFrame ({
    "movie": names,
    "year": years,
    "imdb_ratings": imdb_ratings,
    "meta_scores": meta_scores,
    "votes": votes
})
print("Data visualisation :" )
print(data_movies)
| true |
f6d33fa1ac75f49fc5e16cf38cd19255eefe87ee | Python | Wolven531/python_utils | /time_gen.py | UTF-8 | 1,627 | 3.734375 | 4 | [] | no_license | import random
def conv_min_to_time(tot_mins):
    """Convert minutes-since-midnight into a 12-hour clock string.

    Fixes two boundary bugs in the old version: the AM cutoff was
    (11*60)+59, which mislabeled 11:59 as PM, and midnight hours were
    rendered as "0:xx AM" instead of "12:xx AM".
    """
    tot_mins = int(tot_mins)
    is_am = tot_mins < 12 * 60  # AM is strictly before noon
    hr = tot_mins // 60
    mins = tot_mins - hr * 60
    display_hr = hr % 12
    if display_hr == 0:
        display_hr = 12  # a 12-hour clock shows 12 (not 0) at midnight and noon
    return '{}:{:02d} {}'.format(display_hr, mins, 'AM' if is_am else 'PM')
def main():
    """Interactively generate a plausible work-day timesheet and print it."""
    early_arrival = input('Early start?\n') == 'y'
    overtime = int(input('Stay late? (0,1,2,...)\n')) * 15
    # earliest arrival is 8:25 on an early day, 9:00 otherwise,
    # plus up to 25 minutes of random jitter
    earliest = (8 * 60) + 25 if early_arrival else (9 * 60)
    clock_in = random.randint(earliest, earliest + 25)
    # lunch starts 2.75-4.16 hours after arrival and lasts 30-34 minutes
    lunch_begin = clock_in + random.randint(int(2.75 * 60), int(4.16 * 60))
    lunch_finish = lunch_begin + random.randint(30, 34)
    lunch_minutes = lunch_finish - lunch_begin
    # at least 8 working hours (lunch excluded), plus any overtime
    clock_out = clock_in + (8 * 60) + lunch_minutes + overtime
    worked_minutes = float(clock_out - clock_in - lunch_minutes)
    worked_hours = worked_minutes / 60.0
    print('''
    Started work at {start_str}
    Left work at {end_str}
    Lunch was from {lunch_start_str} to {lunch_end_str} (duration: {lunch_dur} mins)
    Total day duration: {tot_dur} hrs ({tot_dur_min} mins)
    '''.format(start_str=conv_min_to_time(clock_in),
               end_str=conv_min_to_time(clock_out),
               lunch_start_str=conv_min_to_time(lunch_begin),
               lunch_end_str=conv_min_to_time(lunch_finish),
               lunch_dur=lunch_minutes,
               tot_dur=worked_hours,
               tot_dur_min=worked_minutes))
if __name__ == '__main__':
    main()  # guarded so importing this module no longer runs the interactive flow
| true |
fbbfa2b599fa363b6daa819536e4918178d1f0c0 | Python | xkuang/django-estimators | /estimators/tests/test_estimators.py | UTF-8 | 3,377 | 2.75 | 3 | [
"MIT"
] | permissive |
import pytest
from django.core.exceptions import ValidationError
from estimators.models.estimators import Estimator
from estimators.tests.factories import EstimatorFactory
@pytest.mark.django_db
class TestEstimator():
    """Integration tests for the Estimator model and its file persistence.

    Comparisons against True/False were replaced with plain truthiness
    asserts (pycodestyle E712); assertions are otherwise unchanged.
    """
    def test_estimator_persistance_without_factory(self):
        m = Estimator(estimator='new string', description='another object')
        assert not m.object_file.storage.exists(m.object_file.path)
        assert not m.is_file_persisted
        m.save()
        # saving must write the backing object file to storage
        assert m.object_file.storage.exists(m.object_file.path)
        assert m.is_file_persisted

    def test_object_hash_with_factory(self):
        m = EstimatorFactory(estimator=object)
        assert m.estimator == object
        del m
        n = Estimator.objects.filter(estimator=object).first()
        # sklearn hash of a object = 'd9c9f286391652b89978a6961b52b674'
        assert n.object_hash == 'd9c9f286391652b89978a6961b52b674'
        # assert loaded after calling n.estimator
        assert n.estimator == object
        assert Estimator._compute_hash(
            object) == 'd9c9f286391652b89978a6961b52b674'

    def test_get_or_create(self):
        m, created = Estimator.objects.get_or_create(estimator='new_string_as_object')
        m.save()
        assert m.estimator == 'new_string_as_object'
        assert created
        # a second call must return the same row without creating a new one
        n, created = Estimator.objects.get_or_create(estimator='new_string_as_object')
        assert m == n
        assert not created

    def test_update_or_create(self):
        e = 'estimator_obj'
        m, created = Estimator.objects.update_or_create(estimator=e)
        m.save()
        assert m.estimator == e
        assert created
        n, created = Estimator.objects.update_or_create(estimator=e)
        assert m == n
        assert not created

    def test_create_from_file_with_factory(self):
        obj = "{'key': 'value'}"
        m = EstimatorFactory(estimator=obj)
        object_hash = m.object_hash
        file_path = m.object_file.name
        del m
        m = Estimator.create_from_file(file_path)
        assert m.estimator == obj
        assert m.object_hash == object_hash
        assert m.is_file_persisted

    def test_update_estimator_fail(self):
        m = Estimator(estimator='uneditable_object')
        m.estimator = 'object_edited_before_persistance'
        m.save()
        m.estimator = 'object_edited_after_persistance'
        with pytest.raises(ValidationError):
            m.save()

    def test_hashing_func(self):
        object_hash = Estimator._compute_hash('abcd')
        assert object_hash == '3062a9e3345c129799bd2c1603c2e966'

    def test_hash_without_estimator_fail(self):
        m = Estimator()
        m.object_hash = 'randomly set hash'
        with pytest.raises(ValidationError):
            m.save()

    def test_wrong_hash_fail(self):
        m = Estimator(estimator='unique_object')
        m.object_hash = 'randomly set hash'
        with pytest.raises(ValidationError):
            m.save()

    def test_prevent_double_file_save(self):
        EstimatorFactory(estimator='yes')
        hash_of_yes = 'b635788f4b614e8469b470b8e9f68174'
        e = Estimator.objects.get(object_hash=hash_of_yes)
        assert e.is_file_persisted
        e.save()
        # a second save must reuse the already-persisted file
        assert e.object_file.name.endswith(hash_of_yes)
| true |
aa2e56cf55911ad9138f4b3a6a25eafbf726cf0d | Python | manji369/Geeks4GeeksTop80InterviewQs | /Python/q42longestIncreasingSubsequence.py | UTF-8 | 552 | 4.09375 | 4 | [] | no_license | '''
Q42. The Longest Increasing Subsequence (LIS) problem is to find the length of
the longest subsequence of a given sequence such that all elements of the
subsequence are sorted in increasing order. For example, the length of LIS for
{10, 22, 9, 33, 21, 50, 41, 60, 80} is 6 and LIS is {10, 22, 33, 50, 60, 80}.
'''
def lis(arr):
    """Length of the Longest (strictly) Increasing Subsequence of arr.

    Classic O(n^2) DP where L[i] is the LIS length ending at arr[i].
    Returns 0 for an empty sequence (the old code crashed on max([])).
    """
    if not arr:
        return 0
    n = len(arr)
    L = [1] * n
    for i in range(1, n):
        for j in range(i):
            # strict comparison: equal elements must not extend the run
            # (the old `>=` computed the longest NON-DECREASING subsequence)
            if arr[i] > arr[j] and L[j] + 1 > L[i]:
                L[i] = L[j] + 1
    return max(L)
if __name__ == "__main__":
    # quick smoke test; guarded so importing this module stays silent
    print(lis([3, 2, 6, 4, 5, 1]))
| true |
f2a8bed7d037d73bdd1db380f007a0315d895613 | Python | GoldF15h/LeetCode | /892. Surface Area of 3D Shapes.py | UTF-8 | 1,035 | 2.921875 | 3 | [] | no_license | def sol (grid) :
ans = 0
# top
for curRow in range(len(grid)) :
for curCol in range(len(grid[curRow])) :
if grid[curRow][curCol] > 0 :
ans += 2
# rowSum
# print('ROW RUN')
for curRow in range(len(grid)) :
cur = 0
add = 0
for curCol in range(len(grid[0])) :
# print('add',abs(cur-grid[curRow][curCol]))
add += abs(cur-grid[curRow][curCol])
cur = grid[curRow][curCol]
# print('add',cur)
ans += (add+cur)
# colSum
# print('COL RUN')
for curCol in range(len(grid[0])) :
cur = 0
add = 0
for curRow in range(len(grid)) :
# print('add',abs(cur-grid[curRow][curCol]))
add += abs(cur-grid[curRow][curCol])
cur = grid[curRow][curCol]
# print('add',cur)
ans += (add+cur)
return ans
if __name__ == "__main__" :
    # Parse one input line like "[[1,2],[3,4]]" into a list of int lists
    # via plain string splitting (assumes well-formed, space-free input --
    # TODO confirm the expected format).
    l = list( list(map(int,x.split(','))) for x in input().strip('[]').split('],['))
print(sol(l)) | true |
abbaa616b0165d3528244d100b6a512cde587dfc | Python | CesMak/gym_schafkopf | /01_Tutorials/06_MCTS_Node/mcts/mct.py | UTF-8 | 7,919 | 2.75 | 3 | [
"BSD-3-Clause"
] | permissive | import random
import gym
import gym_schafkopf
from mcts.node import Node
from copy import deepcopy
opts_RL = {"names": ["Max", "Lea", "Jo", "Tim"], "type": ["RL", "RL", "RL", "RL"], "nu_cards": 8, "active_player": 3, "seed": None, "colors": ['E', 'G', 'H', 'S'], "value_conversion": {1: "7", 2: "8", 3: "9", 4: "U", 5: "O", 6: "K", 7: "X", 8: "A"}}
opts_RAND = {"names": ["Max", "Lea", "Jo", "Tim"], "type": ["RANDOM", "RANDOM", "RANDOM", "RANDOM"], "nu_cards": 8, "active_player": 3, "seed": None, "colors": ['E', 'G', 'H', 'S'], "value_conversion": {1: "7", 2: "8", 3: "9", 4: "U", 5: "O", 6: "K", 7: "X", 8: "A"}}
class MonteCarloTree:
'''
Inspired by https://github.com/Taschee/schafkopf/blob/master/schafkopf/players/uct_player.py
'''
    def __init__(self, game_state, player_hands, allowed_actions, ucb_const=1):
        """Create a search tree rooted at the given game state.

        ucb_const is the exploration constant used by UCB1 child selection.
        """
        self.root = Node(None, None, game_state, player_hands, allowed_actions)
        self.ucb_const = ucb_const
        self.rewards = []          # rewards of the most recent expansion step
        self.gameOver = False      # set by expand() when the env reports a terminal state
        self.treeList = []         # [depth, child index, action, parent action] rows (printing only)
        self.treeFilled = False    # True once treeList reflects the whole tree
    def uct_search(self, num_playouts, print_=False):
        """Run `num_playouts` select/simulate/backup iterations and return
        {action: visit count} for the root's children.  Visit counts are
        used as the result ranking (more robust than raw values here)."""
        for i in range(num_playouts):
            selected_node = self.selection()
            rewards = self.simulation(selected_node)
            self.backup_rewards(leaf_node=selected_node, rewards=rewards)
        if print_:
            print(self.getTree(node=self.root), "\n-->Depth: ", self.getMaxDepth(), " Elements: ", len(self.treeList))
            self.printTree()
        results = {}
        self.root.best_child(ucb_const=self.ucb_const)
        for child in self.root.children:
            results[child.previous_action] = child.visits # child.visits is better than child.value!!!
        return results
    def selection(self):
        """Descend from the root along UCB-best children until a node with
        untried actions is found, expanding (and returning) that node."""
        current_node = self.root
        while not self.gameOver or current_node.is_terminal():
            if not current_node.fully_expanded():
                return self.expand(current_node)
            else:
                current_node = current_node.best_child(ucb_const=self.ucb_const)
        return current_node
    def expand(self, node):
        """Pick one untried action of `node`, replay it in a fresh test env,
        and attach the resulting state as a new child node."""
        not_visited_actions = deepcopy(node.allowed_actions)
        for child in node.children:
            not_visited_actions.remove(child.previous_action)
        #TODO: check if this should be random or chosen by player policy
        chosen_action = random.choice(tuple(not_visited_actions))
        schafkopf_env = gym.make("Schafkopf-v1", options_test=opts_RL)
        schafkopf_env.setGameState(deepcopy(node.game_state), deepcopy(node.player_hands))
        _, self.rewards, self.gameOver, _ = schafkopf_env.stepTest(chosen_action) # state, rewards, terminal, info
        new_node = Node(parent=node, game_state=deepcopy(schafkopf_env.getGameState()), previous_action=chosen_action, player_hands=deepcopy(schafkopf_env.getCards()), allowed_actions=schafkopf_env.test_game.getOptionsList())
        node.add_child(child_node=new_node)
        return new_node
    def simulation(self, selected_node):
        """Random playout: play the game out from `selected_node`'s state
        and return the final reward vector for all players."""
        if self.gameOver: # special case if is already game over do not expand anymore / create new node!
            return self.rewards["final_rewards"]
        schafkopf_env = gym.make("Schafkopf-v1", options_test=opts_RL)
        gameOver= deepcopy(selected_node.game_state)["gameOver"]
        schafkopf_env.setGameState(deepcopy(selected_node.game_state), deepcopy(selected_node.player_hands))
        while not gameOver:
            rewards, round_finished, gameOver = schafkopf_env.test_game.playUntilEnd()
        return rewards["final_rewards"]
    def backup_rewards(self, leaf_node, rewards):
        """Propagate the playout rewards from the leaf up to (and
        including the visit count of) the root."""
        current_node = leaf_node
        while current_node != self.root:
            current_node.update_rewards(rewards)
            current_node.update_visits()
            current_node = current_node.parent
        self.root.update_visits()
    def get_action_count_rewards(self):
        """Return {action: (visits, cumulative_rewards)} for the root's
        children; list-valued actions become tuples so they are hashable."""
        result = {}
        for child in self.root.children:
            if isinstance(child.previous_action, list):
                result[tuple(child.previous_action)] = (child.visits, child.cumulative_rewards)
            else:
                result[child.previous_action] = (child.visits, child.cumulative_rewards)
        return result
## below only printing Tree functions:
def getSimpleDepth(self, node, d=0):
'''get simple depth at first children always'''
if len(node.children)>0:
return self.getDepth(node.children[0], d=d+1)
else:
return d
    def getMaxChildren(self):
        '''Maximum number of children any recorded node has (1-based).

        Relies on treeList being filled by getTree(); the child index is
        the second element of each treeList row.
        '''
        if not self.treeFilled:
            self.getTree(self.root)
        # NOTE(review): `max` shadows the builtin of the same name here
        max = 0
        for i in self.treeList:
            [_, c, _, _] = i
            if c>max: max=c
        return max+1 # +1 because child indices start counting from 0
    def getMaxDepth(self):
        '''Depth of the deepest recorded node (1-based).

        treeList is sorted by depth, so the last entry holds the maximum.
        '''
        if not self.treeFilled:
            self.getTree(self.root)
        return self.treeList[len(self.treeList)-1][0]+1 # +1 because depths start counting from 0
    def subfinder(self, mylist, pattern):
        # Return the elements of mylist that also occur in pattern
        # (used as a membership test while building treeList).
        return list(filter(lambda x: x in pattern, mylist))
    def getTree(self, node, d=0):
        '''Walk the tree and fill self.treeList with one row per node:
        [depth, child index, action, parent action] where a parent action
        of -1 marks a child of the root.  The walk recurses down into the
        first not-yet-recorded child and backtracks through parents; the
        final list is sorted by depth and returned.
        '''
        self.treeFilled = False
        if len(node.children)>0:
            for i,child in enumerate(node.children):
                if child.parent.previous_action is None:
                    p = -1
                else:
                    p = child.parent.previous_action
                a = [d, i, child.previous_action, p ]
                if len(self.subfinder(self.treeList, [a])) == 0:
                    self.treeList.append(a)
                    return self.getTree(child, d=d+1)
            if d>0:
                return self.getTree(node.parent, d=d-1)
            else:
                self.treeList.sort()
                self.treeFilled = True
                return self.treeList
        else:
            return self.getTree(node.parent, d=d-1)
    def printTree(self):
        '''Best-effort ASCII rendering of treeList, one text row per depth.

        Each action is placed under (an occurrence of) its parent action in
        the row above.  KNOWN LIMITATION (kept from the original): when the
        same action value appears more than once in a depth level, the
        str.index-based placement finds only the first occurrence, so the
        drawing is reliable only for depth levels 0 and 1.
        '''
        res = []
        md = self.getMaxDepth() -1
        mc = self.getMaxChildren() -1
        depth = 0
        for i in range(md+1):
            # one template row per depth: "<depth>--....----<depth>\n"
            one_line = list("---"+len(self.treeList)*4*".."+"---")
            one_line[0]=str(i)
            one_line[len(one_line)-1]=str(i)
            one_line.append("\n")
            res.append(''.join(one_line))
        for i in self.treeList:
            [d, c, a, p]=i
            if d>depth:
                depth +=1
            ol = list(res[d])
            if d>0:
                # place this action relative to its parent's position above
                e = "".join(res[d-1]).index(str(p))+c*3-8
                ol[e:e+2]=str(a).zfill(2)
            else:
                # root children: spread evenly across the row
                aa=int(len(one_line)/(mc+2))
                e = 3+aa+aa*c
                ol[e:e+2]=str(a).zfill(2)
            res[d]=''.join(ol)
for line in res: print(line) | true |
41165be624ae8efe5369a99897d331e488d3722d | Python | python-advance/t2-generators-NikitaPO | /fib.py | UTF-8 | 1,142 | 4.5 | 4 | [] | no_license | # Программирование (Python)
# 6 семестр, тема 2
# Лабораторная работа 6
"""
Реализовать функцию, возвращающую список чисел ряда Фибоначчи.
"""
def get_fib_nums_lst(n):
    """Return the first *n* Fibonacci numbers as a list, starting at 0.

    n -- how many numbers to produce (translated from the Russian
         original: "количество чисел в списке").
    Returns None for a non-integer or non-positive n, matching the
    contract relied on by the self-tests below.  The old guard only
    checked `type(n) == str`, so other bad types slipped through.
    """
    if not isinstance(n, int) or n <= 0:
        return None
    fib_list = [0]
    first, middle = 0, 1
    for _ in range(n - 1):
        fib_list.append(middle)
        first, middle = middle, first + middle
    return fib_list
print(get_fib_nums_lst(10))
# Inline self-tests; the Russian messages read "wrong argument n",
# "series starts at 0", and "series of length N".
assert get_fib_nums_lst('0') is None, "неверный аргумент n"
assert get_fib_nums_lst(0) is None, "неверный аргумент n"
assert get_fib_nums_lst(1) == [0], "ряд начинается с 0"
assert get_fib_nums_lst(2) == [0, 1], "ряд длины 2"
assert get_fib_nums_lst(3) == [0, 1, 1], "ряд длины 3"
assert get_fib_nums_lst(5) == [0, 1, 1, 2, 3], "ряд длины 5"
assert get_fib_nums_lst(10) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34], "ряд длины 10" | true |
ae589cb1d6e38950dc564fc337832cf551d69e7f | Python | FakeEmperor/data-hack | /dno/argsparser.py | UTF-8 | 1,867 | 3.03125 | 3 | [] | no_license | import argparse
class ArgsParser:
    """Builds and wraps the command-line interface of the drop solver."""

    def __init__(self):
        # Description text is kept byte-for-byte identical to the original.
        parser = argparse.ArgumentParser(
            description="This solver can solve numerically, using differential equations,"
                        "a high altitude precision drop. "
                        "It takes into account mass, different winds, "
                        "initial object's velocity and aerodynamic forces to provide accurate results."
                        "Results are validated using high precision simulation.")
        # Required positional target coordinates.
        for axis in ("X", "Y", "Z"):
            parser.add_argument(axis, type=float, help="{} destination coordinate".format(axis))
        # Optional flags: (flag, dest, type, help text, default).
        for flag, dest, kind, text, default in (
                ("-H", "H0", float, "Drop altitude, m", 1000),
                ("-V", "V0", float, "Initial velocity in XZ space, m/s", 250),
                ("-F", "F_path", str, "Path to a CSV file data with F info", "data\\F.csv"),
                ("-W", "W_path", str, "path to a CSV file with winds info", "data\\Wind.csv"),
                ("-a", "alpha", float, "Velocity angle, grads", 0),
                ("-m", "m", float, "Object's mass, kg", 100),
                ("-v", "v", int, "Verbosity level for model simulations", 50)):
            parser.add_argument(flag, dest=dest, type=kind, help=text, default=default)
        self.parser = parser

    def getParser(self):
        """Parse sys.argv and return the resulting argparse.Namespace."""
        return self.parser.parse_args()
| true |
c1e11a35ecba6fa4f7b3019d5242a09bb6b890ce | Python | FRReinert/accapi | /account_api/common/tests/test_database.py | UTF-8 | 2,155 | 2.625 | 3 | [
"MIT"
] | permissive | from account_api.common.database import ModelManager
from account_api.models.user import User
import unittest
class TestDatabase(unittest.TestCase):
    """CRUD tests for ModelManager, run against the test-mode backend."""

    @staticmethod
    def _manager():
        # Every test case talks to the same 'users' collection in test mode.
        return ModelManager('users', test_mode=True)

    def test_manager_create(self):
        '''Test creating a document'''
        created = self._manager().create(User(full_name="Nome Teste", email="teste.20@domain.com", phone="554733820000", birth_day="01/01/1990", document="47506433001"))
        assert created.id
        assert created.full_name == "Nome Teste"
        assert created.email == "teste.20@domain.com"
        assert created.phone == "554733820000"
        assert created.birth_day == "01/01/1990"
        assert created.document == "47506433001"

    def test_manager_get(self):
        '''Test getting a document'''
        manager = self._manager()
        created = manager.create(User(full_name="Nome Teste", email="teste.21@domain.com", phone="554733820000", birth_day="01/01/1990", document="40320277062"))
        fetched = manager.get(created.id)
        assert created.id == fetched['id']

    def test_manager_filter(self):
        '''Test filtering documents'''
        manager = self._manager()
        manager.create(User(full_name="Nome Teste", email="teste.22@domain.com", phone="554733820000", birth_day="01/01/1990", document="58484958078"))
        matches = manager.filter('email', '==', 'teste.22@domain.com')
        assert len(matches) > 0

    def test_manager_update(self):
        '''Test updating a document'''
        manager = self._manager()
        original = manager.create(User(full_name="Nome Teste", email="teste.23@domain.com", phone="554733820000", birth_day="01/01/1990", document="70610161083"))
        replacement = User.from_dict(**{'fullName': "Nome Teste 2", 'email': "teste.24@domain.com", 'phone': "554733820022", 'birthDay': "01/01/1991", 'document': "27662641082"})
        updated = manager.update(original.id, replacement)
        assert updated.full_name == 'Nome Teste 2'
        assert updated.email == 'teste.24@domain.com'
        assert updated.phone == '554733820022'
        assert updated.birth_day == '01/01/1991'
        assert updated.document == '27662641082'
| true |
6fd4ac1fcff8f2ebbda32cd1d32d117d2d109094 | Python | shineusn/mylife | /japan_eqs/stressdrops/compute_phi.py | UTF-8 | 1,319 | 2.671875 | 3 | [] | no_license | from numpy import genfromtxt,ones,argmin,where,zeros
# Computes the phi (mean-square contingency) coefficient between aftershock
# fault type (normal vs thrust) and the sign of the coseismic stress change
# at each aftershock's epicentre.  Python 2 script with machine-local paths.
afters_thrust='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/analysis/thrust_afters_select.txt'
afters_normal='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/analysis/normal_afters_select.txt'
stress_file='/Users/dmelgar/code/GMT/tohoku/stress.xyz'
normal=genfromtxt(afters_normal)
thrust=genfromtxt(afters_thrust)
stress=genfromtxt(stress_file)
#Assign x variable (fault type): -1 = normal events, +1 = thrust events
x=ones(len(normal)+len(thrust))
x[0:len(normal)]=-1
#Now find if the stress drop is positive or negative at the location of the aftershock
y=zeros(len(normal)+len(thrust))
for k in range(len(normal)):
    lon=normal[k,0]
    lat=normal[k,1]
    # nearest stress-grid node to this epicentre (flat lon/lat distance)
    d=((stress[:,0]-lon)**2+(stress[:,1]-lat)**2)**0.5
    imin=argmin(d)
    if stress[imin,2]>0:
        y[k]=-1.
    else:
        y[k]=1.
i=0
for k in range(len(normal),len(normal)+len(thrust)):
    lon=thrust[i,0]
    lat=thrust[i,1]
    d=((stress[:,0]-lon)**2+(stress[:,1]-lat)**2)**0.5
    imin=argmin(d)
    # sign convention is reversed for thrust events
    if stress[imin,2]<0:
        y[k]=1.
    else:
        y[k]=-1.
    i+=1
#Compute phi from the 2x2 contingency table counts and its marginals
n11=len(where((x==1) & (y==1))[0])
n10=len(where((x==1) & (y==-1))[0])
n01=len(where((x==-1) & (y==1))[0])
n00=len(where((x==-1) & (y==-1))[0])
Nx1=n11+n01
Nx0=n10+n00
N1y=n11+n10
N0y=n01+n00
phi=(n11*n00-n10*n01)/((Nx1*Nx0*N1y*N0y)**0.5)
print phi | true |
46c864738a17502658653449f3d5db48e558f378 | Python | tony-ng/image_classifier | /model_functions.py | UTF-8 | 8,189 | 2.828125 | 3 | [] | no_license | import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import models
from workspace_utils import active_session
vgg_input_size = 25088
densenet_input_size = 2208
def load_model(arch):
    """Fetch a pre-trained torchvision backbone.

    Parameters:
        arch -- architecture name, either "vgg16" or "densenet161"
    Returns:
        the pre-trained model, or None (after printing a message) for any
        other architecture name
    """
    factories = {"vgg16": models.vgg16, "densenet161": models.densenet161}
    if arch not in factories:
        print("Sorry! The model is not supported")
        return None
    return factories[arch](pretrained=True)
class Detect_Flower_Network(nn.Module):
    """Feed-forward classifier head: a stack of Linear+ReLU+Dropout layers
    followed by a log-softmax output layer."""

    def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):
        """
        input_size -- width of the input layer
        output_size -- number of output classes
        hidden_layers -- list with the width of each hidden layer
        drop_p -- dropout probability applied after every hidden layer
        """
        super().__init__()
        # chain the layer widths: input -> hidden[0] -> ... -> hidden[-1]
        widths = [input_size] + list(hidden_layers)
        self.hidden_layers = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(widths[:-1], widths[1:]))
        self.output = nn.Linear(hidden_layers[-1], output_size)
        self.dropout = nn.Dropout(p=drop_p)

    def forward(self, x):
        """Run the stack; returns log-probabilities over the classes."""
        for layer in self.hidden_layers:
            x = self.dropout(F.relu(layer(x)))
        return F.log_softmax(self.output(x), dim=1)
def create_classifier(arch, hidden_units):
    """
    Create a new classifier head for detecting flowers.
    Parameters:
        arch - the model architecture, either "vgg16" or "densenet161"
        hidden_units - the number of units on the hidden layer
    Returns:
        classifier - the network newly created, or None when the
        architecture is not supported.  (The old code fell through with
        input_size=None and crashed inside nn.Linear; this now mirrors
        load_model's unsupported-architecture handling.)
    """
    if arch == "vgg16":
        input_size = vgg_input_size
    elif arch == "densenet161":
        input_size = densenet_input_size
    else:
        print("Sorry! The model is not supported")
        return None
    return Detect_Flower_Network(input_size, 102, [hidden_units], drop_p=0.5)
def train_model(model, train_dataloader, valid_dataloader, learning_rate, epochs, gpu):
    """
    Train the model's classifier head with Adam + NLLLoss, printing the
    running training loss plus validation loss/accuracy every 20 batches.
    Only model.classifier's parameters are optimized (backbone is frozen
    by omission from the optimizer).
    Parameters:
        model - the model to be trained (must expose .classifier)
        train_dataloader - the dataloader for the training dataset
        valid_dataloader - the dataloader for the validation dataset
        learning_rate - the learning rate of the training
        epochs - the number of passes over the training data
        gpu - use gpu if True; otherwise use cpu
    Returns:
        model - the trained model
    """
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    print_every = 20
    steps = 0
    with active_session():
        if gpu:
            model.to('cuda')
        print("Start training")
        for e in range(epochs):
            model.train()
            running_loss = 0
            for inputs, labels in iter(train_dataloader):
                steps += 1
                if gpu:
                    inputs, labels = inputs.to('cuda'), labels.to('cuda')
                optimizer.zero_grad()
                outputs = model.forward(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if steps % print_every == 0:
                    # periodic validation pass; switch to eval mode so
                    # dropout is disabled, then back to train mode
                    model.eval()
                    with torch.no_grad():
                        test_loss, accuracy = validate(model, criterion, valid_dataloader, gpu)
                    print("Epoch: {}/{}... ".format(e+1, epochs),
                          "Training Loss: {:.4f}.. ".format(running_loss/print_every),
                          "Validate Loss: {:.4f}.. ".format(test_loss/len(valid_dataloader)),
                          "Validate Accuracy: {:.4f}.. ".format(accuracy/len(valid_dataloader)))
                    running_loss = 0
                    model.train()
        print("Finish training")
    return model
def validate(model, criterion, valid_dataloader, gpu):
    """Accumulate loss and accuracy of `model` over a validation loader.

    Parameters:
        model -- network under evaluation (caller sets eval mode)
        criterion -- loss function applied to the model outputs
        valid_dataloader -- iterable of (images, labels) batches
        gpu -- move batches to CUDA when True
    Returns:
        (total_loss, total_accuracy) summed over all batches; divide by
        len(valid_dataloader) for per-batch averages.
    """
    total_loss = 0
    total_accuracy = 0
    for batch_images, batch_labels in valid_dataloader:
        if gpu:
            batch_images = batch_images.to('cuda')
            batch_labels = batch_labels.to('cuda')
        log_probs = model.forward(batch_images)
        total_loss += criterion(log_probs, batch_labels).item()
        # predicted class = argmax of the probabilities
        predictions = torch.exp(log_probs).max(dim=1)[1]
        hits = (batch_labels.data == predictions)
        total_accuracy += hits.type(torch.FloatTensor).mean()
    return total_loss, total_accuracy
def save_checkpoint(model, save_dir, arch, hidden_units, train_dataset):
    """Persist the classifier head plus the metadata needed to rebuild it.

    Parameters:
        model -- model whose `classifier` sub-module is saved
        save_dir -- target path passed to torch.save, e.g. 'checkpoints/ckpt.pth'
        arch -- backbone architecture name ("vgg16" or "densenet161")
        hidden_units -- hidden-layer width used when the head was built
        train_dataset -- dataset providing the class_to_idx mapping
    """
    # classifier input width matching the backbone (None for unknown arch,
    # exactly as the original if/elif chain produced)
    input_size = {"vgg16": vgg_input_size, "densenet161": densenet_input_size}.get(arch)
    checkpoint = {
        "arch": arch,
        "input_size": input_size,
        "hidden": [hidden_units],
        "output_size": 102,
        "state_dict": model.classifier.state_dict(),
        "class_to_idx": train_dataset.class_to_idx,
    }
    torch.save(checkpoint, save_dir)
    print("Finish saving checkpoint")
def load_checkpoint(checkpoint_path):
    """Rebuild a full model from a checkpoint created by save_checkpoint.

    Parameters:
        checkpoint_path -- path of the checkpoint file
    Returns:
        the pre-trained backbone with the restored classifier head and
        the class_to_idx mapping attached
    """
    checkpoint = torch.load(checkpoint_path)
    backbone = load_model(checkpoint['arch'])
    head = Detect_Flower_Network(checkpoint['input_size'],
                                 checkpoint['output_size'],
                                 checkpoint['hidden'])
    head.load_state_dict(checkpoint['state_dict'])
    backbone.classifier = head
    backbone.class_to_idx = checkpoint['class_to_idx']
    return backbone
def predict(np_img, model, topk, gpu):
    '''
    Predict the top-k classes of an image using a trained model.
    Parameters:
        np_img - the image as a numpy array (C, H, W expected by the
                 model after unsqueeze -- TODO confirm preprocessing)
        model - the trained model; must expose class_to_idx
        topk - the number of most likely classes returned
        gpu - use gpu if True; otherwise use cpu
    Returns:
        probs - numpy array of the top-k probabilities
        classes - list of the corresponding class labels
    '''
    img = torch.from_numpy(np_img)
    with active_session():
        model.eval()
        if gpu:
            model, img = model.to('cuda'), img.to('cuda')
        with torch.no_grad():
            # add a batch dimension and match the model's float dtype
            img.unsqueeze_(0)
            img = img.float()
            output = model.forward(img)
            ps = torch.exp(output)
            probs, index = ps.topk(topk)
            if gpu:
                probs = probs.cpu()
                index = index.cpu()
    classes = list()
    index_list = index.numpy()[0]
    for i in range(topk):
        idx = index_list[i]
        # reverse lookup of the class label for this index
        # NOTE(review): O(n) scan per top-k entry; an inverted dict would do
        for img_class, img_idx in model.class_to_idx.items():
            if idx == img_idx:
                classes.append(img_class)
                break
    return probs.numpy()[0], classes
ab3fb8ae6911aab5bcc80e8bb52dd41c16562928 | Python | tsunglinneil/FlaskDemo | /Hello_Flask.py | UTF-8 | 843 | 3.359375 | 3 | [] | no_license | from flask import Flask, render_template
import Get_Data
app = Flask(__name__) # create the Flask application object
# root route: simple liveness check
@app.route('/')
def test():
    return 'It works'
# static route returning a fixed string
@app.route('/ken')
def test2():
    return 'KEN……'
# dynamic route: <string:var> is captured from the URL and passed in
@app.route('/ken/<string:var>')
def test3(var):
    return 'KEN{}'.format(var)
@app.route('/hello')
def test4():
    # Notes on render_template (translated from the original Chinese):
    # render_template('<file name>') looks for the given file inside the
    # "templates" folder under the project root (in this example the
    # project root is the "Flask" folder), e.g. index.html here.
    # 1. Fetch the data and render it on the page.
    customer1 = Get_Data.get_data()
    return render_template('index.html', customer = customer1)
if __name__ == '__main__':
app.run(debug=True, port=8080) | true |
eabf1e7456746dc639478365ae02116e176bcb6f | Python | sdzx1985/Project | /Practice/RPA_Basic_Practice/1_Excel/11_Cell_Style.py | UTF-8 | 1,149 | 2.90625 | 3 | [] | no_license | from openpyxl.styles import Font, Border, Side, PatternFill, Alignment
from openpyxl import load_workbook
wb = load_workbook("sample.xlsx")  # workbook must exist next to this script
ws = wb.active
# header cells that receive individual font styling
a1 = ws["A1"]
b1 = ws["B1"]
c1 = ws["C1"]
# narrow column A, tall header row
ws.column_dimensions["A"].width = 5
ws.row_dimensions[1].height = 50
a1.font = Font(color="FF0000", italic=True, bold=True)
b1.font = Font(color="CC33FF", name="Arial", strike=True)
c1.font = Font(color="0000FF", size=20, underline="single")
# same thin box border around all three header cells
thin_border = Border(left=Side(style="thin"), right=Side(style="thin"), top=Side(style="thin"), bottom=Side(style="thin"))
a1.border = thin_border
b1.border = thin_border
c1.border = thin_border
# if there are over 90, color will be the green
for row in ws.rows:
    for cell in row:
        cell.alignment = Alignment(horizontal="center", vertical="center")
        # center, left, right, top, bottom
        if cell.column == 1: # except A column (assumes cell.column is numeric -- verify against the installed openpyxl version)
            continue
        if isinstance(cell.value, int) and cell.value > 90:
            cell.fill = PatternFill(fgColor="00FF00", fill_type="solid")
            cell.font = Font(color="FF0000")
ws.freeze_panes = "B2"
wb.save("sample_style.xlsx") | true |
4efeca7534d495b7a7dfe6ce330e618a5d1a73e0 | Python | knts0/atcoder | /AOJ/0168_Kannondou.py | UTF-8 | 347 | 2.96875 | 3 | [] | no_license | import sys
import math
# AOJ 0168: for each n, count the ways to climb n steps taking 1, 2 or 3
# steps at a time (tribonacci numbers), then report ceil(ways / 3650).
a = []
while True:
    n = int(input())
    if n == 0:
        break
    a.append(n)
for n in a:
    # dp[i] = number of ways to reach step i with moves of size 1-3
    dp = [0] * (n + 1)
    dp[0] = 1
    for i in range(n + 1):
        if i > 0: dp[i] += dp[i - 1]
        if i > 1: dp[i] += dp[i - 2]
        if i > 2: dp[i] += dp[i - 3]
print(int(math.ceil(dp[n] / (365 * 10)))) | true |
fa68762a94244296640aa2850b6538963585edbd | Python | pkalkunte18/Mood-Diary | /moodAnalysis.py | UTF-8 | 2,550 | 3.5625 | 4 | [] | no_license | import pandas as pd #for data table stuff
import matplotlib.pyplot as plt #to make plots
from nltk.corpus import stopwords #cleaning up our words
from nltk.tokenize import word_tokenize
import datetime
from datetime import datetime
#import text file
# ---- load the diary text file -----------------------------------------
# NOTE: path is machine-specific.  The handle is now closed deterministically
# via `with`; the original leaked it (close() was never called).
with open(r'C:\Users\saipr\Desktop\Coding\Python\Mood Diary\Sep to Oct 16.txt', 'r') as file:
    Lines = file.readlines()

Date = []
Mood = []
Level = []
Description = []
stops = set(stopwords.words('english'))  # stop words to drop from descriptions

# Entries come in groups of three lines: "date/mood", description, blank.
# Mood levels: stressed=0, sad=1, tired=2, ok=3, anything else=4.
_LEVELS = {"stressed": 0, "sad": 1, "tired": 2, "ok": 3}
count = -1
for l in Lines:
    count += 1
    if count % 3 == 0:
        print(l)
        line = l.strip().split("/")  # split "date / mood" on the slash
        Date.append(line[0])
        Mood.append(line[1])
        Level.append(_LEVELS.get(line[1].strip(), 4))
    elif count % 3 == 1:
        # description line: tokenize and drop stop words
        tokens = word_tokenize(l)
        filtered = [w for w in tokens if w not in stops]
        Description.append(' '.join(str(e) for e in filtered))
    else:
        continue

# merge the lists into one DataFrame named data
data = pd.DataFrame([Date, Mood, Level, Description]).transpose()
data.columns = ['Date', 'Mood', 'Level', 'Description']
# make sure the columns look like normal (head)
print(data.head())

# bar chart of mood frequencies, exported to moodBar.png
data['Mood'].value_counts().plot(kind = 'bar')
plt.title('Frequency of Moods')
plt.xlabel('Mood')
plt.xticks(rotation=70)
plt.ylabel('Frequency')
plt.savefig('moodBar.png')
plt.show()
plt.clf()

# line graph of mood level over time, exported to moodLine.png
plt.plot(data['Date'], data['Level'])
plt.title('Mood Over Time')
plt.xlabel('Date')
plt.xticks(rotation=70)
plt.ylabel('Mood Level')
plt.savefig('moodLine.png')
plt.show()
plt.clf()

# merge all descriptions into one text file (closed via `with`; the
# original also leaked this handle until interpreter exit)
allText = " ".join(d for d in data['Description'])
with open("moodText.txt", "w") as text_file:
    text_file.write(allText)
# to turn the text file into a word cloud, see:
#https://www.wordclouds.com/ | true |
4dd60ed744b9d09b53ec916a3dbb703bc1e2ceb6 | Python | henroth/favicon-finder | /favicon.py | UTF-8 | 3,028 | 3.015625 | 3 | [] | no_license | import database
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def compose_link(url, favicon):
    """Resolve a (possibly relative) favicon reference against the page URL."""
    resolved = urljoin(url, favicon)
    return resolved
def download_page(url, timeout=3.0):
    """GET `url` and return (final_url, status_code, body_text).

    `final_url` reflects any redirects that occurred; network errors and
    timeouts propagate as requests exceptions.
    """
    page = requests.get(url, timeout=timeout)
    return (page.url, page.status_code, page.text)
def find_favicon(page):
    """Return the href of the first `rel="shortcut icon"` tag in `page`.

    Returns None when no such tag exists (or the tag has no href).
    NOTE(review): pages declaring only rel="icon" are not matched -- confirm
    whether they should be.
    """
    bs = BeautifulSoup(page, 'html.parser')
    tag = bs.find(rel="shortcut icon")
    return tag.get('href') if tag else None
def compose_favicon(url, raw_favicon_link):
    """Make the favicon link absolute: pass absolute links through untouched,
    resolve relative ones against the page URL."""
    if raw_favicon_link.startswith('http'):
        return raw_favicon_link
    return urljoin(url, raw_favicon_link)
def get_favicon(url):
    """Download `url` and return its absolute favicon URL.

    Returns None when the page declares no favicon, or on any download /
    parsing error (errors are printed, not raised).
    """
    try:
        # TODO Will need to check status potentially if we want to
        # decide to retry them at a future point
        final_url, status, page = download_page(url)
        raw_favicon = find_favicon(page)
        # Some pages may not have a favicon
        if not raw_favicon:
            return None
        # If the page is redirected and the favicon is relative then it
        # will be relative to the final url, not the original
        favicon = compose_favicon(final_url, raw_favicon)
        return favicon
    except Exception as e:
        # TODO log the error better
        print("Failed getting %s: %s" % (url, e))
        return None
class FaviconService(object):
    """Favicon lookups backed by a cache database.

    `database` must provide find(url) and insert(url, favicon).
    """

    def __init__(self, database):
        self.database = database

    def get_favicon(self, url, fresh=False):
        """Return the favicon record for `url`.

        Serves from the database when a cached entry exists; `fresh=True`
        forces a re-fetch (and re-store).
        """
        if fresh:
            return self._find_and_store(url)
        cached = self.database.find(url)
        return cached if cached else self._find_and_store(url)

    def _find_and_store(self, url):
        # TODO if the original url is redirect we may want to store both the orig
        # and the redirect as having this favicon
        favicon = get_favicon(url)
        self.database.insert(url, favicon)
        return database.Favicon(url, favicon)
# These functions are mostly to facilitate the testing of parsing out the favicon link
# without needing to keep downloading the pages
def save_page(filename, page):
    """Write `page` (text) to `filename`, overwriting any existing file."""
    with open(filename, 'w') as f:
        f.write(page)
def load_page(filename):
    """Read and return the entire contents of `filename` as text."""
    with open(filename, 'r') as f:
        return f.read()
def download_and_save_page(url, filename):
    """Fetch `url` and store its HTML locally (for offline parsing tests)."""
    _, _, page = download_page(url)
    save_page(filename, page)
def display_favicon(url):
    """Print `url` and the favicon discovered for it (diagnostic helper)."""
    print('-----')
    print("url: %s" % url)
    print("fav: %s" % get_favicon(url))
if __name__ == "__main__":
    # Save a couple pages locally for testing
    #download_and_save_page("http://example.com", "example.html")
    #download_and_save_page("http://news.ycombinator.com", "hackernews.html")
    #download_and_save_page("http://yahoo.com", "yahoo.html")
    #download_and_save_page("http://www.google.com", "google.html")
    # Live smoke test: hits the network and prints each site's favicon.
    display_favicon("http://example.com")
    display_favicon("http://news.ycombinator.com")
    display_favicon("http://yahoo.com")
    display_favicon("http://www.google.com")
| true |
c66f377d398282f15def53ae0df946dac76c1774 | Python | 4LittleBlips/MIT-CS-6.0002-Problem-Sets | /graph-theory.py | UTF-8 | 4,931 | 4.0625 | 4 | [] | no_license |
class Node(object):
    """A named graph vertex."""

    def __init__(self, name):
        self.name = name

    def getName(self):
        """Return the vertex's label."""
        return self.name

    def __str__(self):
        return self.name
class Edge(object):
    """A one-way connection from a source vertex to a destination vertex."""

    def __init__(self, src, dest):
        self.src = src
        self.dest = dest

    def getSrc(self):
        return self.src

    def getDest(self):
        return self.dest

    def __str__(self):
        return "{0} -> {1}".format(self.src, self.dest)
class Digraph(object):
    """Directed graph stored as an adjacency dict.

    Keys of `self.edges` are node objects; values are lists of directly
    reachable nodes.
    """

    def __init__(self):
        self.edges = {}

    def addNode(self, node):
        """Register `node` with an empty adjacency list."""
        self.edges[node] = []

    def makeEdge(self, edge):
        """Add `edge` (any object exposing getSrc()/getDest()) to the graph.

        Invalid edges -- an unknown endpoint or a duplicate -- are silently
        ignored and the unchanged adjacency dict is returned, matching the
        original behaviour.  The original validated with `assert`, which is
        stripped under `python -O`; explicit checks are used instead.
        """
        src, dest = edge.getSrc(), edge.getDest()
        if (
            src not in self.edges        # source node was never added
            or dest not in self.edges    # destination node was never added
            or dest in self.edges[src]   # edge already present
        ):
            return self.edges
        self.edges[src].append(dest)
        return self.edges

    def getNeighbors(self, node):
        """Return the list of nodes reachable from `node` in one step."""
        return self.edges[node]

    def __str__(self):
        # One "src -> dest" line per edge, without a trailing newline.
        result = ''
        for src in self.edges:
            for dest in self.edges[src]:
                result += "{0} -> {1}".format(src, dest) + '\n'
        return result[:-1]
def printPath(path):
    """Print a path of Node objects in 'A->B->C' form (no trailing arrow)."""
    print('->'.join(node.getName() for node in path))
def getAncestry(node, parents):
    """Return the chain root..node by walking the `parents` map upward.

    `parents` maps each node to its parent; the root maps to None.
    """
    chain = [node]
    while parents[chain[-1]] is not None:
        chain.append(parents[chain[-1]])
    chain.reverse()
    return chain
class Graph(Digraph):
    """Undirected graph: every edge is mirrored in both directions."""
    def makeEdge(self, edge):
        # Insert the forward edge, then a reversed copy, so traversals can
        # move either way along the connection.
        Digraph.makeEdge(self, edge)
        reverse = Edge(edge.getDest(), edge.getSrc())
        Digraph.makeEdge(self, reverse)
        return self.edges
# Demo fixture: build the sample undirected graph used by the searches below.
#Nodes
s = Node('S')
a = Node('A')
z = Node('Z')
b = Node('B')
c = Node('C')
d = Node('D')
nodes = [s, z, a, b, c, d]
#Edges
e1 = Edge(s, a)
e2 = Edge(a, z)
e3 = Edge(s, b)
e4 = Edge(b, c)
e5 = Edge(b ,d)
e6 = Edge(c, d)
edges = [e1, e2, e3, e4, e5, e6]
#Graph (undirected: each edge is mirrored by Graph.makeEdge)
g = Graph()
for node in nodes:
    g.addNode(node)
for edge in edges:
    g.makeEdge(edge)
print(g)
def BFS_1(graph, start, end):
    """Breadth-first search from `start` to `end` using a parent map.

    Returns the path (root-first list of nodes) reconstructed via
    getAncestry(), or None (implicitly) when `end` is unreachable.
    NOTE(review): the target check runs only on children as they are
    expanded, so a query with start == end returns None -- confirm that is
    intended.
    """
    levels = {start: 0}    # node -> BFS depth at which it was discovered
    parents = {start: None}
    i = 1
    frontier = [start]
    while frontier != []:
        next_frontier = []
        for node in frontier:
            for child in graph.getNeighbors(node):
                if not child in levels:
                    print("Visiting {}".format(child))
                    levels[child] = i
                    parents[child] = node
                    next_frontier.append(child)
                else:
                    print("Already visited {}".format(child))
                # Compare by name: nodes are matched on their labels.
                if child.getName() == end.getName():
                    path = getAncestry(child, parents)
                    return path
        i += 1
        frontier = next_frontier
# Demo: shortest path S -> Z via the parent-map BFS variant.
print("Breadth-First Search S -> Z(Ancestry Implementation): \n")
printPath(BFS_1(g, s, z))
def BFS_2(graph, start, end):
    """Breadth-first search from `start` to `end` using a queue of paths.

    Returns the first (shortest) path found as a list of nodes, or None when
    `end` is unreachable.
    """
    pathQueue = [[start]]
    while pathQueue:
        currentPath = pathQueue.pop(0)   # oldest queued path (FIFO)
        currentNode = currentPath[-1]
        print(f"Visiting {currentNode}")
        if currentNode == end:
            return currentPath
        for neighbor in graph.getNeighbors(currentNode):
            if neighbor in currentPath:
                print(f"Already Visited {neighbor}")
            else:
                pathQueue.append(currentPath + [neighbor])
    return None
# Demo: shortest path S -> C via the path-queue BFS variant.
print("Breadth-First Search S -> C (Qeueue Implementation): \n")
printPath(BFS_2(g, s, c))
#Depth First Search
def DFS(graph, start, end, path=None, shortest=None):
    """Depth-first search for the shortest path from `start` to `end`.

    Args:
        graph: object exposing getNeighbors(node).
        start, end: endpoint nodes.
        path: nodes on the current branch (internal; leave as None).
        shortest: best complete path found so far (internal).

    Returns the shortest path as a list of nodes, or None if unreachable.

    Fixes vs. the original:
      * `path` defaulted to a mutable `[]` (shared-default anti-pattern);
        it now defaults to None and is created per call.
      * a failed child branch executed `shortest = shortest[:-1]`, chopping
        the last node off the best *complete* path found so far and thereby
        corrupting the result; a dead end now simply leaves `shortest`
        unchanged (as in the canonical algorithm).
    """
    if path is None:
        path = []
    path = path + [start]          # new list: callers' paths are never mutated
    if start == end:
        return path
    print("Current Path: ", end="")
    printPath(path)
    for child in graph.getNeighbors(start):
        if child not in path:      # avoid cycles
            # Only descend if this branch could still beat the best path.
            if shortest is None or len(path) < len(shortest):
                newPath = DFS(graph, child, end, path, shortest)
                if newPath is not None:
                    shortest = newPath
    return shortest
# Demo: DFS shortest-path queries on the sample graph built above.
print("Depth-First Search (S -> D): \n")
printPath(DFS(g, s, d))
print("Depth-First Search (Z -> B): \n")
printPath(DFS(g, z, b))
| true |
def merge(arr, start, middle, end):
    """Merge two adjacent sorted runs of `arr` in place and return `arr`.

    first run:  arr[start : middle + 1]
    second run: arr[middle + 1 : end + 1]
    """
    A = arr[start:middle + 1]
    B = arr[middle + 1:end + 1]
    i = j = 0
    for k in range(start, end + 1):
        if i <= middle - start and j <= end - middle - 1:
            # Both runs still have elements: take the smaller head (<= keeps
            # the sort stable).
            if A[i] <= B[j]:
                arr[k] = A[i]
                i = i + 1
            else:
                arr[k] = B[j]
                j = j + 1
        else:
            # Exactly one run is exhausted; copy the rest of the other.
            # Bug fix: the original tested `i > middle`, but `i` indexes the
            # *copy* A (valid range 0..middle-start), so any merge with
            # start > 0 could read past A's end or copy wrong elements.
            if i > middle - start:
                arr[k] = B[j]
                j = j + 1
            else:
                arr[k] = A[i]
                i = i + 1
    return arr


def mergesort(arr, start, end):
    """Sort arr[start..end] (inclusive) in place via merge sort; return arr."""
    if start >= end:
        # in this case the sublist has at most one element,
        # so it is already sorted -- return it without doing anything
        return arr
    middle = (start + end) // 2
    # recursively sort the two halves, then merge them
    arr = mergesort(arr, start, middle)
    arr = mergesort(arr, middle + 1, end)
    return merge(arr, start, middle, end)
if __name__ == '__main__':
    # Interactive demo: sort space-separated integers read from stdin.
    arr = list(map(int, input('Enter space seperated numbers').split()))
    arr = mergesort(arr, 0, len(arr) - 1)
    print('After sorting')
    print(*arr)
| true |
481f255e4615c9f72881bb6ae5a189d54720da2d | Python | rjimeno/PracticePython | /e22.py | UTF-8 | 260 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
# Count how many times each name appears in names.txt, then print the tally.
names = dict()
# `with` guarantees the handle is closed (the original leaked it), and
# iterating the file replaces the manual readline() loop.
with open('names.txt', 'r') as f:
    for line in f:
        name = line.strip()
        names[name] = names.get(name, 0) + 1
print(names)
| true |
# https://leetcode.com/explore/featured/card/google/61/trees-and-graphs/3071/
# (Bug fix: the URL above was a bare statement -- a syntax error.)


class Solution:
    def countNodes(self, root: "TreeNode") -> int:
        """Count the nodes of a *complete* binary tree in O(log^2 n).

        Annotations are quoted because TreeNode is supplied by the judge and
        is not imported here; unquoted they raised NameError at import time.
        """
        if not root:
            return 0
        left_depth = self.depth(root.left)
        right_depth = self.depth(root.right)
        if left_depth == right_depth:
            # Left subtree is perfect: it plus the root holds 2**left_depth
            # nodes; only the right subtree needs recursion.
            return pow(2, left_depth) + self.countNodes(root.right)
        else:
            # Right subtree is perfect one level shorter; recurse left.
            return pow(2, right_depth) + self.countNodes(root.left)

    def depth(self, root: "TreeNode") -> int:
        """Length of the leftmost path (== height for a complete tree)."""
        if not root:
            return 0
        return 1 + self.depth(root.left)
| true |
c71f428ac07286887572193377896fede901b077 | Python | universuen/NLP | /exp_3/utilities.py | UTF-8 | 3,498 | 2.859375 | 3 | [] | no_license | import torch
import json
def load_data(file_path):
    """Read a span-annotation JSON file and convert it to BIO-tagged pairs.

    The file holds a list of records with 'tokens' (list of strings) and
    'entities' (each with 'type', 'start', 'end'; `end` is exclusive).

    Returns:
        A list of [tokens, labels] pairs where labels uses the BIO scheme:
        'B-<type>' at each entity start, 'I-<type>' inside, 'O' elsewhere.
    """
    result = list()
    # `with` closes the handle; the original passed an open file to
    # json.load and never closed it.
    with open(file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    for i in data:
        temp_result = list()
        tokens = i['tokens']
        temp_result.append(tokens)
        temp_label = ['O'] * len(tokens)
        entities = i['entities']
        for e in entities:
            e_type = e['type']
            e_start = e['start']
            e_end = e['end']
            temp_label[e_start] = 'B-' + e_type
            temp_label[e_start + 1: e_end] = ['I-' + e_type] * (e_end - e_start - 1)
        temp_result.append(temp_label)
        result.append(temp_result)
    return result
def preprocess(text, to_ix):
    """Map each item of `text` through `to_ix` and wrap the indices in a
    1 x len(text) long tensor (a batch of one sequence)."""
    return torch.tensor([[to_ix[item] for item in text]], dtype=torch.long)
class Evaluator:
    """Macro-averaged accuracy / precision / recall / F1 for a sequence tagger.

    Runs `model` over `testing_data` once, accumulates a per-tag confusion
    count (tp/fp/tn/fn), then averages each metric over all tag classes.
    Results land in self.accuracy / .precision / .recall / .f1.
    """

    def __init__(self, model, testing_data, word_to_ix, tag_to_ix):
        self.model = model                  # callable: token tensor -> tag predictions
        self.testing_data = testing_data    # iterable of (sentence, labels) pairs
        self.word_to_ix = word_to_ix        # token -> index mapping
        self.tag_to_ix = tag_to_ix          # tag -> index mapping
        self.accuracy = 0
        self.precision = 0
        self.recall = 0
        self.f1 = 0

    def evaluate(self):
        """Score the model on the test set and populate the metric fields."""
        with torch.no_grad():
            # One confusion-count record per tag class, indexed by tag id.
            label_score = [
                {
                    'tp': 0,
                    'fp': 0,
                    'tn': 0,
                    'fn': 0,
                }
                for _ in range(len(self.tag_to_ix))
            ]
            for sentence, labels in self.testing_data:
                inputs = preprocess(sentence, self.word_to_ix)
                targets = torch.squeeze(preprocess(labels, self.tag_to_ix), 0)
                # NOTE(review): assumes the model emits one tag id per token,
                # comparable to `targets` elementwise -- confirm model output.
                prediction = torch.squeeze(self.model(inputs), 0)
                # calculate tp, fp, tn and fn in each kind of label
                for pred, tag in zip(prediction, targets):
                    if pred == tag:
                        # Correct: tp for the true class, tn everywhere else.
                        for i, _ in enumerate(label_score):
                            if i == tag:
                                label_score[i]['tp'] += 1
                            else:
                                label_score[i]['tn'] += 1
                    else:
                        # Wrong: fp for the predicted class, fn for the true
                        # class, tn for all remaining classes.
                        for i, _ in enumerate(label_score):
                            if i == pred:
                                label_score[i]['fp'] += 1
                            elif i == tag:
                                label_score[i]['fn'] += 1
                            else:
                                label_score[i]['tn'] += 1
            # sum all kinds of labels' accuracy, precision, recall, f1 as the final score
            total_a = 0
            total_p = 0
            total_r = 0
            total_f1 = 0
            cnt = 0
            for i, _ in enumerate(label_score):
                tp = label_score[i]['tp']
                fp = label_score[i]['fp']
                tn = label_score[i]['tn']
                fn = label_score[i]['fn']
                # Guard every ratio against a zero denominator (score 0).
                tmp_a = (tp + tn) / (tp + tn + fp + fn) if tp + tn + fp + fn else 0
                tmp_p = tp / (tp + fp) if tp + fp else 0
                tmp_r = tp / (tp + fn) if tp + fn else 0
                tmp_f1 = 2 * tmp_p * tmp_r / (tmp_p + tmp_r) if tmp_p + tmp_r else 0
                total_a += tmp_a
                total_p += tmp_p
                total_r += tmp_r
                total_f1 += tmp_f1
                cnt += 1
            # Macro average: unweighted mean over tag classes.
            self.accuracy = total_a / cnt
            self.precision = total_p / cnt
            self.recall = total_r / cnt
            self.f1 = total_f1 / cnt
| true |
bb17de822f5c95911d8351e1f9d3d0771104b9ba | Python | Deepak9292J/Selenium_Python_Advanced | /venv/Take_Screenshots/generic_method.py | UTF-8 | 876 | 2.671875 | 3 | [] | no_license | from selenium import webdriver
import time
class screenshot():
    """Open the Teachable login page in Chrome and capture a screenshot."""

    def base_method(self):
        """Drive the browser to the login page, then delegate the capture."""
        # Uses the module-level selenium/time imports; the original
        # re-imported both inside this method.
        driver = webdriver.Chrome()
        driver.implicitly_wait(10)
        driver.get("https://sso.teachable.com/secure/42299/users/sign_in?clean_login=true&reset_purchase_session=1")
        driver.find_element_by_xpath("//input[@value='Log In']")
        self.reusable_method(driver)

    def reusable_method(self, driver):
        """Save a PNG of `driver`'s current page, named by a ms timestamp."""
        import os
        filename = str(round(time.time() * 1000)) + ".png"
        directoryName = "C:\\Users\\c-deepak.jindal\\Capture"
        # Bug fix: plain string concatenation produced
        # "...Capture<timestamp>.png" in the *parent* directory; join with a
        # real path separator instead.
        DestinationFileName = os.path.join(directoryName, filename)
        try:
            driver.save_screenshot(DestinationFileName)
            print("Screenshot taken")
        except NotADirectoryError:
            # NOTE(review): save_screenshot typically reports failure via its
            # return value or WebDriverException -- confirm this except is
            # ever reachable.
            print("Directory not found")


if __name__ == "__main__":
    # Guarded so importing this module no longer launches a browser.
    ff = screenshot()
    ff.base_method()
5451e4256b092f92887713321bef01345e41f5e0 | Python | ysuurme/game_snakebattle | /snakebattle/snack.py | UTF-8 | 260 | 2.515625 | 3 | [] | no_license | from .config import COLORS, SNACK
class Snack:
    """An item placed on the game grid for the snake to eat."""
    def __init__(self, x, y, color=COLORS['WHITE']):
        self.x = x  # x position in the game 'grid'
        self.y = y  # y position in the game 'grid'
        self.color = color  # fill colour; defaults to white from the config palette
        self.image = SNACK  # sprite constant from config used when drawing
b8b80358af4f8ff76aabc2c1eeb9a125e535a9b1 | Python | 8ptk4/oopython_old | /kmom02/ovning1/app.py | UTF-8 | 545 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
"""
My first Flask app
"""
# Importera relevanta moduler
from flask import Flask, render_template, request
import handler
app = Flask(__name__)
@app.route("/")
def main():
    """Render the start page (index.html)."""
    return render_template("index.html")
@app.route("/company", methods=["POST", "GET"])
def company():
    """List employees; on POST, first add the submitted employee."""
    if request.method == "POST":
        handler.add_employee(request.form)
    return render_template("company.html", persons=handler.get_persons())
if __name__ == "__main__":
    # Run Flask's development server when started directly.
    app.run()
| true |
890adf548ad826c86069a8e9425ca0ac22f55d21 | Python | spaceqorgi/ite-428-python | /04-file/ExtraPractice.py | UTF-8 | 2,172 | 3.46875 | 3 | [] | no_license | import csv
# Interactively collect `n` products (name, price, stock) and write them to
# products.csv for the reporting code below.
n = int(input('Enter Number of New Product : '))
# Write to csv
print("Adding some products to .csv file.")
data = []
for i in range(n):
    print("Product number [{}]".format(i+1))
    print("====================================")
    name = input('Enter product name : ')
    price = input('Enter prodcut price : ')
    stock = input('Enter product stock : ')
    print('')
    d = [name, price, stock]
    data.append(d)
# NOTE(review): csv files should normally be opened with newline='' to avoid
# blank rows on Windows -- confirm before changing.
with open('products.csv', 'w') as f:
    fw = csv.writer(f,delimiter=",")
    fw.writerows(data)
# Read from csv
print("Reading from csv. Configuration using .ini file is supported.")
def read_file(fname, splitter):
    """Read `fname` and split every line on `splitter`.

    Returns a list of lists of strings, one inner list per line.
    """
    with open(fname, 'r') as handle:
        lines = handle.read().splitlines()
    return [line.split(splitter) for line in lines]
def create_config_dict(config):
    """Turn [[key, value], ...] rows into a {key: value} dict.

    Later duplicate keys win, matching plain dict assignment.
    """
    return {row[0]: row[1] for row in config}
def calculate_total(d):
    """Append price+stock to each [name, price, stock] row (in place), print
    each extended row, and return (rows, grand_total)."""
    grand_total = 0
    for row in d:
        row_total = int(row[1]) + int(row[2])
        row.append(row_total)
        grand_total += row_total
        print(row)
    return d, grand_total
def format_result(d, nt, config):
    """Render the totals table as aligned text, styled by `config`.

    `d` is the row list from calculate_total (name at [0], total at [3]);
    `nt` is the grand total.  `config` must supply 'comma', 'decimal_places',
    'line' and 'currency_unit'.  NOTE: mutates config['comma'] in place
    ('yes' -> ',', anything else -> ''), so a second call sees the rewritten
    value.
    """
    # convert the list of list to suitable format for writing
    # use configuration files to change output style
    if config['comma'] == 'yes':
        config['comma'] = ','
    else:
        config['comma'] = ''
    # Each row: left-aligned name, right-aligned total with the configured
    # thousands separator and decimal places; then a rule line and the total.
    return '\n'.join([('{:<20} {:>20' + config['comma']
                       + '.'
                       + config['decimal_places'] + 'f}')
                      .format(dp[0], dp[3]) for dp in d]) \
           + '\n{}'.format(config['line'] * 50) \
           + ('\n{:<20} {:>20' + config['comma']
              + '.' + config['decimal_places']
              + 'f} {}').format(
        'Total Value', nt,
        config['currency_unit'])
def read_csv(fname):
    """Parse `fname` as CSV and return all rows as lists of strings."""
    with open(fname, 'r') as handle:
        return [row for row in csv.reader(handle)]
# Main program: total the products written above and print the report styled
# by appConfig.ini.
result, net_total = calculate_total(read_csv('products.csv'))
config_file = create_config_dict(read_file('appConfig.ini', '='))
print(format_result(result, net_total, config_file))
| true |
ea3708a2efb7900198b66a3ade6401ee5c578f6d | Python | Jacks27/Coop-hse | /app/__init__.py | UTF-8 | 2,720 | 2.515625 | 3 | [] | no_license | "app/__init__.py"
import os
from flask import Flask, url_for, request
from app.v1.db_setup import SetUpDb
from instance.config import configs, Config
from flask_mail import Mail, Message
from app.v1 import my_v1
from app.v1.models.auth_login import UserLogin
from app.v1.models.auth_model import UsersModel
from itsdangerous import URLSafeTimedSerializer
from app.v1.views.auth import hash_password
from flask_uploads import UploadSet, configure_uploads, IMAGES
def create_app(config="development"):
    """Application factory: build and configure the Flask app.

    Creates the DB tables, seeds the default admin, registers the v1
    blueprint under /app/v1, and configures image uploads.
    """
    app=Flask(__name__)
    app.config.from_object(configs[config])
    db = SetUpDb(config)
    with app.app_context():
        db.create_tables()
        create_default_admin()
    app.register_blueprint(my_v1, url_prefix='/app/v1')
    app.secret_key= Config.SECRET_KEY
    app.config['UPLOADED_IMAGES_DEST']=os.path.join('./images')
    images = UploadSet('images', IMAGES)
    configure_uploads(app, images)
    return app
def send_email(email_dict):
    """Send a confirmation e-mail containing a signed, timed link.

    Arguments:
        email_dict: dict with keys 'email' (recipient address), 'msg'
            (subject suffix) and 'route' (my_v1 endpoint the link targets).
    """
    s = URLSafeTimedSerializer(Config.SECRET_KEY)
    # The token encodes the recipient address, salted for this purpose.
    token=s.dumps(email_dict['email'], salt='confirm_email')
    # NOTE(review): building a fresh app (and Mail) for every message is
    # expensive -- consider reusing the running app's mail extension.
    app = create_app()
    mail=Mail(app)
    msg=Message('Hey ,{}'.format(email_dict['msg']), sender= Config.MAIL_USERNAME, recipients=[email_dict['email']])
    link = url_for('my_v1.{}'.format(email_dict['route']), token=token, email= email_dict['email'], _external= True)
    msg.body = "Click this link {}, please ignore you did not request this service".format(link)
    mail.send(msg)
def create_default_admin():
    """Seed the app's default admin from ADMIN_* environment variables.

    No-op when a user with the configured admin e-mail already exists.
    """
    firstname = os.getenv('ADMIN_FIRST_NAME')
    lastname = os.getenv('ADMIN_LAST_NAME')
    othername = os.getenv('ADMIN_OTHERNAME')
    email = os.getenv('ADMIN_EMAIL')
    phonenumber = os.getenv('ADMIN_PHONENUMBER')
    psnumber = os.getenv('ADMIN_PSNUMBER')
    password = os.getenv('ADMIN_PASSWORD')
    lm=UserLogin()
    UM=UsersModel(firstname, lastname, othername,\
        email, phonenumber, psnumber, password)
    lm.where(dict(email=email))
    # Only insert when the lookup found nothing (no record, no id).
    if lm.get() is None and lm.id is None:
        hashedpass= hash_password(UM.password)
        # Final True flag presumably marks the account as admin -- confirm
        # against UsersModel.insert_data.
        UM.insert_data(UM.firstname, UM.lastname, UM.othername,\
        UM.email, UM.phonenumber,UM.psnumber , hashedpass, True)
def upload_image():
    """Save the uploaded 'image' file from the current request.

    Returns the stored image's public URL, or False when the request is not
    a POST carrying an image.  Must be called inside a request context.
    """
    # NOTE(review): a throwaway app is built here only to configure the
    # UploadSet; consider configuring uploads once on the real app instead.
    app = create_app()
    images = UploadSet('images', IMAGES)
    configure_uploads(app, images)
    if request.method=='POST' and "image" in request.files:
        filename=images.save(request.files['image'])
        url_path=images.url(filename)
        return url_path
    return False
03b2372f67dac64c3713cba03e75b91b5c82ecd8 | Python | cristian-mercadante/grouppy | /app/trip/forms.py | UTF-8 | 1,326 | 2.625 | 3 | [] | no_license | # -*- coding: utf8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, FloatField, DateField, SubmitField
from wtforms.validators import InputRequired, Length
def add_trip_form(friends, **kwargs):
    """Build an AddTripForm with per-friend checkboxes and return an instance.

    For every friend, two BooleanFields are attached to the class:
    'auto<id>' (friend drove) and 'pass<id>' (friend was a passenger), both
    labelled with the friend's full name.

    Arguments:
        friends: iterable of friend entities exposing .key.id(), .nome and
            .cognome (ndb-style -- confirm against the Friend model).
        **kwargs: forwarded to the WTForms constructor (e.g. obj=..., formdata=...).
    """
    class AddTripForm(FlaskForm):
        titolo = StringField('Titolo', validators=[
            InputRequired(), Length(max=50)])
        data = DateField('Data', validators=[InputRequired()])
        partenza = StringField('Partenza', validators=[
            InputRequired(), Length(max=50)])
        destinazione = StringField('Destinazione', validators=[
            InputRequired(), Length(max=50)])
        distanza = FloatField('Distanza (km)', validators=[InputRequired()])
        ritorno = BooleanField('Andata e ritorno?')
        pagato = BooleanField(u'L\'autista è stato pagato?')
        speciale = BooleanField(u'Era un\'occasione speciale?')
        submit = SubmitField('Conferma')
    # Dynamically attach one driver and one passenger checkbox per friend;
    # fields must be set on the *class* before instantiation for WTForms to
    # bind them.
    for f in friends:
        label_auto = 'auto' + str(f.key.id())
        label_pass = 'pass' + str(f.key.id())
        nomecogn = f.nome + ' ' + f.cognome
        field_auto = BooleanField(nomecogn)
        field_pass = BooleanField(nomecogn)
        setattr(AddTripForm, label_auto, field_auto)
        setattr(AddTripForm, label_pass, field_pass)
    return AddTripForm(**kwargs)
| true |
7df3d1835ee8b9f69045c6c5ddaed6fe5a8f8ff0 | Python | wfbradley/snpko | /check_input.py | UTF-8 | 5,995 | 2.6875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/usr/bin/env python
import numpy as np
import os
import pandas as pd
import utils_snpko as utils
import re
logger = utils.logger
def check_and_convert_input(args):
    '''
    Check that the input file exists and is well-formed; sanitize it and
    write the cleaned table to <working_dir>/cleaned_input.csv.  Raises an
    exception on any validation failure; returns None on success (the
    original docstring's claim of returning a DataFrame was wrong).

    NOTE: this module targets Python 2 (`xrange`, and `SNP_columns +
    data_columns` relies on dict.values() returning a list).
    '''
    if args.input_file is None:
        logger.info('Must specify "--input_file" argument.')
        raise Exception
    # Sanity-check the CLI options (note: `assert` is stripped under -O).
    assert not(args.data_prefix.startswith('rs'))
    assert not(args.data_prefix == 'r')
    assert (args.na_threshold) <= 1.0
    logger.info("####################################")
    logger.info('Validating original input file %s, sanitizing, and rewriting.' %
                args.input_file)
    sanitized_outfile = os.path.join(args.working_dir, 'cleaned_input.csv')
    if not os.path.exists(args.input_file):
        logger.error("Failure to find input file %s" % (args.input_file))
        raise OSError
    if args.skip_rows is not None:
        args.skip_rows = [int(x) for x in args.skip_rows.split(',')]
    # Excel vs CSV is decided purely by the file extension.
    if (args.input_file).lower().endswith('xlsx'):
        df = pd.read_excel(args.input_file, skiprows=args.skip_rows)
    else:
        df = pd.read_csv(args.input_file, skiprows=args.skip_rows)
    # SNPs are columns of the form "rs12345*" where the "*" can be any
    # other stuff (that doesn't begin with a digit, of course).
    #
    # In particular, it may be "rs12345_G", where "G" is the wild type; if
    # so, we record that information, then normalize the column name.
    #
    # If value looks like e.g. "GT", we normalize to "G|T" to be consistent
    # with ENSEMBL format.
    #
    # Input may have base "X" indicating "non-ancestral"; if so, we
    # pass that along.
    # Truncate column names if necessary to "rs12345".
    p = re.compile(r'^rs\d+')
    SNP_columns = {}
    for c in df.columns:
        x = p.match(c)
        if x is not None:
            SNP_columns[c] = c[:x.end()]
    # If "rs12345_G" format, then extract wild-type info.
    p2 = re.compile(r'^rs\d+_[ACGT]$')
    all_rs_fields_match = True
    for c in df.columns:
        if p.match(c):
            if not(p2.match(c)):
                all_rs_fields_match = False
                break
    if all_rs_fields_match:
        SNP_list_1 = []
        wild_type_list_1 = []
        logger.info('All SNP fields of form "rs12345_G"; extracting wild-type.')
        for c in df.columns:
            if p2.match(c):
                (SNP, wild_type) = c.split('_')
                SNP_list_1.append(SNP)
                wild_type_list_1.append(wild_type)
        # Persist the SNP -> wild-type mapping for downstream steps.
        df_wild = pd.DataFrame(
            {'SNP': SNP_list_1, 'wild_type': wild_type_list_1})
        df_wild.to_csv(os.path.join(args.working_dir,
                                    'wild_types.csv'), index=False)
    df.rename(columns=SNP_columns, inplace=True)
    SNP_columns = SNP_columns.values()
    # Convert "GT" to "G|T" (to be consistent with ENSEMBL formatting)
    p3 = re.compile(r'^[ACGTX]{2}$')
    p4 = re.compile(r'^[ACGTX]|[ACGTX]$')
    for SNP in SNP_columns:
        for i in xrange(len(df)):
            if pd.notnull(df[SNP].values[i]):
                geno = df[SNP].values[i].upper()
                if (not p3.match(geno)) and (not p4.match(geno)):
                    logger.info(
                        '[col=%s, row=%d] Expect genotype like "GT" or "G|T" but got %s' % (
                            SNP, i, geno))
                    raise Exception
                if len(geno) == 2:
                    geno = geno[0] + '|' + geno[1]
                df[SNP].values[i] = geno
    data_columns = [f for f in df.columns if f.startswith(args.data_prefix)]
    if len(SNP_columns) == 0:
        logger.error(
            'SNP columns must be of form "rs12345"; no such columns found.')
        raise Exception
    if len(data_columns) == 0:
        logger.error(
            'Some data columns must begin with "%s" (set with --data_prefix); '
            'none found.' % args.data_prefix)
        raise Exception
    relevant_columns = SNP_columns + data_columns
    df = df[relevant_columns]
    # Drop columns with too many N/As
    threshold = int((args.na_threshold) * len(df))
    drop_col = []
    for f in relevant_columns:
        if df[f].isnull().sum() > threshold:
            drop_col.append(f)
    df.drop(columns=drop_col, inplace=True)
    logger.info('Dropping %d columns because of N/As:' % (len(drop_col)))
    logger.info(drop_col)
    # Drop rows with too many N/As
    threshold = int((args.na_threshold) * len(relevant_columns))
    drop_row = []
    for i in xrange(len(df)):
        if df.iloc[i].isnull().sum() > threshold:
            drop_row.append(i)
    df.drop(labels=drop_row, inplace=True)
    logger.info('Dropping %d rows because of N/As:' % (len(drop_row)))
    logger.info(drop_row)
    # Confirm that data columns have binary (0/1) data
    for f in data_columns:
        if np.sum(df[f].values == 0) + np.sum(df[f].values == 1) != len(df):
            logger.error('Field %s has non-binary data!' % f)
            raise Exception
    if len(df.columns) == 0:
        logger.error('After dropping N/A columns, no SNPs are left!.')
        raise Exception
    if len(df) == 0:
        logger.error('After dropping N/A rows, no patients are left!.')
        raise Exception
    num_na = df.isnull().sum(axis=1).sum()
    logger.info("Detected %d N/A entries in input data" % num_na)
    if num_na > 0 and args.never_na:
        logger.error('Failing because of %d N/A values!' % num_na)
        raise Exception
    df.to_csv(sanitized_outfile, index=False)
    logger.info('Detected %d SNPs and %d patients.' %
                (len(df.columns), len(df)))
    logger.info('Sanitized data rewritten as %s' % sanitized_outfile)
    return
return
if __name__ == '__main__':
    # Stand-alone entry point: parse CLI args, prepare the workspace and
    # logging, then validate and sanitize the input file.
    args = utils.parse_arguments()
    utils.safe_mkdir(args.working_dir)
    utils.initialize_logger(args)
    check_and_convert_input(args)
| true |
f7507c64c030e72712e35d82be0131a573ce1f28 | Python | Bodhinaut/Gateway_Blend | /api_interaction/api_interaction_two_another_way.py | UTF-8 | 598 | 3.265625 | 3 | [] | no_license | # importing the requests library
import requests
# api-endpoint
URL = 'https://jsonplaceholder.typicode.com/todos'
# sending get request and saving the response as response object
r = requests.get(url = URL)
# extracting data in json format
data = r.json()
# Delete a TODO from the list of dictionaries
del data[0]
# Create and POST the old TODO's with one new TODO on the end,
r = requests.post(url = URL, data = data.append({'test' : 'test', 'userId': 201, 'id': 201, 'title': 'zen in every breath', 'completed': True}))
# print TODOs
for item in data:
print ("TODO: " + str(item) )
| true |
import sys

# Echo the raw argument vector for debugging.
cmds = sys.argv
print(cmds)
if len(cmds) <= 1:
    print("Wrong command syntax. Please see the help.")
else:
    # Bug fix: the original checked sys.argv[1] *outside* this else branch,
    # crashing with IndexError when no arguments were supplied.
    print(cmds)
    if cmds[1] in ("-h", "--help"):
        # Bug fix: .format() must be applied to the string itself; the
        # original called it on print()'s return value (None).
        print("Usage: {0} -abc -o1 arg1 -o2 arg2 -o3 -arg3 input".format(cmds[0]))
    elif len(cmds) < 5:
        print("There should be at least four arguments")
| true |
0ad6bf8b46af9dc9af1f9a8f7c9b86a4fc918afd | Python | synw/django-jobrunner | /jobrunner/scripts/testjob1.py | UTF-8 | 178 | 2.59375 | 3 | [
"MIT"
] | permissive | import time
from term.commands import rprint
def run():
    """Emit 70 numbered "Job 1" progress lines, pausing 0.6s between them."""
    for i in range(70):
        rprint(" Job 1: " + str(i))
        time.sleep(0.6)
8bb772cd9b2433954591cb9d50b03804202197e4 | Python | ritua2/gib | /slurm-test/json_parser.py | UTF-8 | 398 | 3.34375 | 3 | [] | no_license | #!/usr/bin/python3.6
"""
BASICS
Returns the value for a provided json key in a file
Args:
json_parser.py FILE.json KEY -> VALUE
"""
import json
import sys
# Exactly two arguments are required: the JSON file and the key to look up.
if len(sys.argv) != 3:
    print("Incorrect Number of Arguments")
    sys.exit(1)  # non-zero exit: the lookup was not performed

try:
    with open(sys.argv[1], 'r') as jfil:
        J = json.load(jfil)
    print(J[sys.argv[2]])
except (OSError, ValueError, KeyError):
    # OSError: unreadable file; ValueError: invalid JSON (JSONDecodeError is
    # a subclass); KeyError: missing key.  The original bare `except:` also
    # swallowed SystemExit and KeyboardInterrupt.
    print("File or key do not exist")
| true |
6562f776926e082c71885b2ce0a499d8840f2c65 | Python | luckyIITR/simple_backtest | /hourly_converter.py | UTF-8 | 915 | 3.015625 | 3 | [] | no_license | from datetime import time
# note data should be today only
import pandas as pd
def convert_data(df):
    """Resample one day of minute-level OHLCV bars into fixed hourly bars.

    Args:
        df: DataFrame with a DatetimeIndex (single trading day) and columns
            Open, High, Low, Close, Volume.

    Returns a DataFrame indexed by each bar's first timestamp ('Time') with
    aggregated OHLCV columns.  Processing stops at the first empty window
    (e.g. when the day's data ends early), as in the original.
    NOTE(review): the 13:15-14:15 hour is absent from `times` -- confirm
    that gap is intentional.
    """
    times = [(time(9, 15), time(10, 15)), (time(10, 15), time(11, 15)), (time(11, 15), time(12, 15)),
             (time(12, 15), time(13, 15)), (time(14, 15), time(15, 15))]
    data = pd.DataFrame()
    for tup in times:
        try:
            dff = df[df.index.time < tup[1]]
            dff = dff[dff.index.time >= tup[0]]
            # Bug fix: `dff['Open'][0]` / `dff['Close'][-1]` relied on the
            # deprecated positional fallback of label indexing on a
            # DatetimeIndex (removed in modern pandas); use .iloc instead.
            data = pd.concat([data, pd.DataFrame({
                'Time': [dff.index[0]],
                'Open': [dff['Open'].iloc[0]],
                'High': [dff['High'].max()],
                'Low': [dff['Low'].min()],
                'Close': [dff['Close'].iloc[-1]],
                'Volume': [dff['Volume'].sum()]
            })])
        except IndexError:
            # Empty window: dff.index[0] / .iloc[0] raise IndexError.
            break
    data.set_index('Time', inplace=True)
    return data
if __name__ == "__main__":
    # Smoke-test entry point; real callers import convert_data directly.
    print("Hello")
| true |
0299d8bfeffde89bd26972f745b4539d3e040c19 | Python | gimijuny/RPS-with-AI | /RPSQLearning/submitCard.py | UTF-8 | 1,190 | 2.6875 | 3 | [] | no_license | from RPSTrain import RPSEnvironment, X, W1, b1, input_layer, W2, b2, hidden_layer, W3, b3, output_layer, Y, cost, optimizer
import tensorflow as tf
import random
import sys
#------------------------------------------------------------
# Variable setup
#------------------------------------------------------------
RPS_PLAYER1 = 1
RPS_PLAYER2 = 2
rules = {0: 1, 1: 2, 2: 0}
# CLI contract: submitCard.py <playerId> <card> <score>
playerId = sys.argv[1]
card = int(sys.argv[2])
score = sys.argv[3]
# Create the environment instance
env = RPSEnvironment()
env.reset()
# TensorFlow smoke test
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
a = tf.constant(10)
b = tf.constant(32)
print(sess.run(a+b))
# TensorFlow initialization
sess = tf.Session()
sess.run(tf.global_variables_initializer())
currentPlayer = RPS_PLAYER1
if currentPlayer == RPS_PLAYER1:
    currentState = env.getState()
else:
    currentState = env.getStateInverse()
# NOTE(review): random.randint(0, 1) yields only 0 or 1, so `<= 0.4` means
# a 50% exploration rate, not 40% -- random.random() was probably intended.
if random.randint(0, 1) <= 0.4:
    action = env.getActionRandom()
else:
    action = env.getAction(sess, currentState)
env.act(currentPlayer, action)
env.act(RPS_PLAYER2, card)
print("playerId: ", playerId)
print("card: ", card)
print("score: ", score)
print("action: ", action)
6d4792dce147c0ca043986a3e6261488430f840a | Python | matthewwardrop/formulaic | /formulaic/parser/utils.py | UTF-8 | 8,698 | 3 | 3 | [
"MIT"
] | permissive | import re
from typing import Iterable, Optional, Sequence, Set, Tuple, Type, Union
from formulaic.errors import FormulaSyntaxError
from .types.ast_node import ASTNode
from .types.token import Token
# Exception handling
def exc_for_token(
    token: Union[Token, ASTNode],
    message: str,
    errcls: Type[Exception] = FormulaSyntaxError,
) -> Exception:
    """
    Build (but do not raise) an exception whose message is annotated with
    the source context of the offending token.

    Args:
        token: The `Token` or `ASTNode` instance about which an exception
            should be raised.
        message: The message to be included in the exception.
        errcls: The type of the exception to be returned.
    """
    token = __get_token_for_ast(token)
    context = token.get_source_context(colorize=True)
    if not context:
        return errcls(message)
    return errcls(f"{message}\n\n{context}")
def exc_for_missing_operator(
    lhs: Union[Token, ASTNode],
    rhs: Union[Token, ASTNode],
    errcls: Type[Exception] = FormulaSyntaxError,
) -> Exception:
    """
    Build an exception (ready to be raised) reporting a missing operator
    between the `lhs` and `rhs` tokens/ast-nodes.

    Args:
        lhs: The `Token` or `ASTNode` instance to the left of where an
            operator should be placed.
        rhs: The `Token` or `ASTNode` instance to the right of where an
            operator should be placed.
        errcls: The type of the exception to be returned.
    """
    lhs_token, rhs_token, error_token = __get_tokens_for_gap(lhs, rhs)
    message = (
        f"Missing operator between `{lhs_token.token}` and `{rhs_token.token}`."
    )
    return exc_for_token(error_token, message, errcls=errcls)
def __get_token_for_ast(ast: Union[Token, ASTNode]) -> Token:  # pragma: no cover
    """
    Ensure that incoming `ast` is a `Token`, or else generate one for debugging
    purposes (note that this token will not be a valid `Token` for use other
    than in reporting errors).
    """
    if isinstance(ast, Token):
        return ast
    # Walk to the leftmost and rightmost leaf tokens of the AST so the
    # synthesized token spans the node's full source extent.
    lhs_token = ast
    while isinstance(lhs_token, ASTNode):
        lhs_token = lhs_token.args[0]  # type: ignore
    rhs_token = ast
    while isinstance(rhs_token, ASTNode):
        rhs_token = rhs_token.args[-1]  # type: ignore
    return Token(
        token=lhs_token.source[lhs_token.source_start : rhs_token.source_end + 1]
        if lhs_token.source
        else "",
        source=lhs_token.source,
        source_start=lhs_token.source_start,
        source_end=rhs_token.source_end,
    )
def __get_tokens_for_gap(
    lhs: Union[Token, ASTNode], rhs: Union[Token, ASTNode]
) -> Tuple[Token, Token, Token]:
    """
    Ensure that incoming `lhs` and `rhs` objects are `Token`s, or else generate
    some for debugging purposes (note that these tokens will not be valid
    `Token`s for use other than in reporting errors). Three tokens will be
    returned: the left-hand side token, the right-hand-side token, and the
    "middle" token where a new operator/token should be inserted (may not
    be empty depending on context).
    """
    # The nearest leaf of `lhs` on the gap side is its *last* arg; of `rhs`
    # it is the *first* arg.
    lhs_token = lhs
    while isinstance(lhs_token, ASTNode):
        lhs_token = lhs_token.args[-1]  # type: ignore
    rhs_token = rhs or lhs
    while isinstance(rhs_token, ASTNode):
        rhs_token = rhs_token.args[0]  # type: ignore
    return (
        lhs_token,
        rhs_token,
        # Synthesized "middle" token covering the source span of the gap.
        Token(
            lhs_token.source[lhs_token.source_start : rhs_token.source_end + 1]
            if lhs_token.source
            and lhs_token.source_start is not None
            and rhs_token.source_end is not None
            else "",
            source=lhs_token.source,
            source_start=lhs_token.source_start,
            source_end=rhs_token.source_end,
        ),
    )
# Token sequence mutations
def replace_tokens(
    tokens: Iterable[Token],
    token_to_replace: str,
    replacement: Union[Token, Sequence[Token]],
    *,
    kind: Optional[Token.Kind] = None,
) -> Iterable[Token]:
    """
    Yield `tokens` with every matching token swapped for the replacement(s).

    Args:
        tokens: The sequence of tokens to scan.
        token_to_replace: The string representation of the token to replace.
        replacement: A single token, or a sequence of tokens, to emit in
            place of each matching token.
        kind: When provided, only tokens of this kind are eligible for
            replacement; otherwise every token whose string matches
            `token_to_replace` is replaced.
    """
    for token in tokens:
        kind_matches = not kind or token.kind is kind
        if not (kind_matches and token.token == token_to_replace):
            yield token
        elif isinstance(replacement, Token):
            yield replacement
        else:
            yield from replacement
def insert_tokens_after(
    tokens: Iterable[Token],
    pattern: Union[str, re.Pattern],
    tokens_to_add: Sequence[Token],
    *,
    kind: Optional[Token.Kind] = None,
    join_operator: Optional[str] = None,
) -> Iterable[Token]:
    """
    Insert additional tokens into a sequence of tokens after (within token)
    pattern matches.

    Note: this insertion can happen in the *middle* of existing tokens, which is
    especially useful when inserting tokens around multiple operators (which are
    often merged together into a single token). If you want to avoid this, make
    sure your regex `pattern` includes start and end matchers; e.g.
    `^<pattern>$`.

    Args:
        tokens: The sequence of tokens within which tokens should be replaced.
        pattern: A (potentially compiled) regex expression indicating where
            tokens should be inserted.
        tokens_to_add: A sequence of tokens to be inserted wherever `pattern`
            matches.
        kind: The type of tokens to be considered for insertion. If not
            specified, any matching token (part) will result in insertions.
        join_operator: If the insertion of tokens would result the joining of
            the added tokens with existing tokens, the value set here will be
            used to create a joining operator token. If not provided, not
            additional operators are added.
    """
    # Materialize so we can look ahead to `tokens[i + 1]` below.
    tokens = list(tokens)
    if not isinstance(pattern, re.Pattern):
        pattern = re.compile(pattern)
    for i, token in enumerate(tokens):
        # Skip tokens of the wrong kind, or with no pattern match at all.
        if (
            kind is not None
            and token.kind is not kind
            or not pattern.search(token.token)
        ):
            yield token
            continue
        # Split the token at match boundaries; `after=True` keeps the matched
        # text attached to the preceding fragment.
        split_tokens = list(token.split(pattern, after=True))
        for j, split_token in enumerate(split_tokens):
            yield split_token
            m = pattern.search(split_token.token)
            # Insert only when the fragment *ends* with a match.
            if m and m.span()[1] == len(split_token.token):
                yield from tokens_to_add
                if join_operator:
                    # Peek at the next fragment (or the next whole token) to
                    # decide whether a joining operator is needed.
                    next_token = None
                    if j < len(split_tokens) - 1:
                        next_token = split_tokens[j + 1]
                    elif i < len(tokens) - 1:
                        next_token = tokens[i + 1]
                    if (
                        next_token is not None
                        and next_token.kind is not Token.Kind.OPERATOR
                    ):
                        yield Token(join_operator, kind=Token.Kind.OPERATOR)
def merge_operator_tokens(
    tokens: Iterable[Token], symbols: Optional[Set[str]] = None
) -> Iterable[Token]:
    """
    Merge runs of adjacent operator tokens into single tokens.

    This is useful after operator tokens have been added post-tokenization,
    so that (e.g.) adjacent `+` and `-` operators can be resolved together.

    Args:
        tokens: The sequence of tokens to scan.
        symbols: When provided, only adjacent operator tokens whose symbols
            appear in this set are merged.
    """
    pending = None  # operator token accumulated so far, if any
    for token in tokens:
        is_operator = token.kind is Token.Kind.OPERATOR
        is_candidate = is_operator and (not symbols or token.token[0] in symbols)
        if not is_candidate:
            # Flush any accumulated operator before passing this token through.
            if pending:
                yield pending
                pending = None
            yield token
        elif pending:
            # Extend the accumulated operator with this token's text.
            pending = token.copy_with_attrs(token=pending.token + token.token)
            if symbols and pending.token[-1] not in symbols:
                # Merged token now ends outside the symbol set: flush it.
                yield pending
                pending = None
        else:
            pending = token
    if pending:
        yield pending
| true |
f2edc8d217a72c4c7227c06ef3ec20634c818657 | Python | xatshepsut/python-samples | /music-db/music_db_examples.py | UTF-8 | 2,961 | 2.625 | 3 | [] | no_license | import datetime
import sqlalchemy as sql
from sqlalchemy import Column, \
String, Integer, Date, \
ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
engine = sql.create_engine("sqlite:///music.db", echo=True)
Base = declarative_base()
class Artist(Base):
    """ORM model for the `artists` table; Album's backref exposes `.albums`."""
    __tablename__ = "artists"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    def __init__(self, name):
        self.name = name
class Album(Base):
    """ORM model for the `albums` table; many albums belong to one Artist."""
    __tablename__ = "albums"
    id = Column(Integer, primary_key=True)
    title = Column(String)
    genre = Column(String)
    release_date = Column(Date)
    tracks_number = Column(Integer)
    # Foreign key plus a relationship with an `albums` backref on Artist.
    artist_id = Column(Integer, ForeignKey("artists.id"))
    artist = relationship("Artist", backref=backref("albums", order_by=id))
    def __init__(self, title, genre, release_date, tracks_number):
        self.title = title
        self.genre = genre
        self.release_date = release_date
        self.tracks_number = tracks_number
# Emit CREATE TABLE statements for all declared models (no-op if they exist).
Base.metadata.create_all(engine)
# Session = sqlalchemy.orm.sessionmaker(bind=engine)
# session = Session()
#
# # Create an artist
# new_artist = Artist("Newsboys")
# new_artist.albums = [Album("Read All About It", "unknown", datetime.date(1988, 12, 01), 12)]
#
# more_albums = [Album("Hell Is for Wimps", "unknown", datetime.date(1990, 07, 31), 12),
# Album("Love Liberty Disco", "unknown", datetime.date(1999, 11, 16), 12),
# Album("Thrive", "unknown", datetime.date(2002, 03, 26), 12)]
# new_artist.albums.extend(more_albums)
#
# session.add(new_artist)
# session.commit()
#
# session.add_all([
# Artist("MXPX"),
# Artist("Kutless"),
# Artist("Thousand Foot Krutch")
# ])
# session.commit()
#
# bred = Artist("Bred")
# bred.albums.append(more_albums[1])
# session.commit()
#
# bred2 = Artist("Bred2")
# bred2.albums.append(more_albums[1])
# session.commit()
# Session = sqlalchemy.orm.sessionmaker(bind=engine)
# session = Session()
#
# artist = session.query(Artist).filter(Artist.name=="Bred2").first()
# if artist is not None:
# print artist.name
# artist.name = "Bob"
# session.commit()
#
#
# artist, album = session.query(Artist, Album).filter(Artist.id==Album.artist_id).filter(Album.title=="Love Liberty Disco").first()
# print artist.name
# print album.title
# Demo session: delete/rollback, listing, one() error handling, and a join.
# The bare `print x` statements below were converted to `print(x)` calls:
# the statement form is a SyntaxError under Python 3, while the single-arg
# call form behaves identically on both Python 2 and 3.
Session = sql.orm.sessionmaker(bind=engine)
session = Session()

# NOTE(review): first() returns None when no "Bob" row exists, which would
# make session.delete() raise — confirm the fixture data before running.
artist = session.query(Artist).filter(Artist.name=="Bob").first()
session.delete(artist)
session.rollback()
session.commit()

# List every artist alphabetically.
result = session.query(Artist).order_by(Artist.name).all()
for artist in result:
    print(artist.name)

# one() raises NoResultFound when the LIKE pattern matches nothing.
try:
    res = session.query(Artist).filter(Artist.name.like("B%n")).one()
    print(res.name)
except sql.orm.exc.NoResultFound:
    print("No results!!!")

# Join artists to their albums and show the first pairing.
query = session.query(Artist, Album).filter(Artist.id==Album.artist_id)
art, album = query.filter(Artist.name.like("%")).first()
print(art.name)
89e4f5693dc5f4447507827fa7b6f6c27fcb6346 | Python | MengSunS/daily-leetcode | /snap/84.py | UTF-8 | 414 | 2.734375 | 3 | [] | no_license | class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
stack, ans = [], 0
for i, h in enumerate(heights+[0]):
while stack and h <= heights[stack[-1]]:
height = heights[stack.pop()]
width = i if not stack else i - stack[-1] - 1
ans = max(ans, width * height)
stack.append(i)
return ans
| true |
bab84e0a78bfe04e306755784d4212408db50a82 | Python | hayeonk/leetcode | /sols/factor_combinations.py | UTF-8 | 528 | 2.765625 | 3 | [] | no_license | class Solution(object):
def getFactors(self, n):
ans = []
def makeCombination(picked, n):
if n == 1:
if picked:
ans.append(picked)
return
start = 2 if not picked else picked[-1]
end = n if picked else n - 1
for i in xrange(start, end + 1):
if n % i == 0:
makeCombination(picked + [i], n / i)
makeCombination([], n)
return ans | true |
0ed54e9ac7ae20f1187e884f6af25cad23b6e11e | Python | eamonyates/pp19_decode_a_web_page_two | /PP19_DecodeAWebPageTwo.py | UTF-8 | 602 | 3.21875 | 3 | [] | no_license | #Import requests and beautiful soup modules
import requests
from bs4 import BeautifulSoup
def decode():
    """Fetch the Vanity Fair article and print its 'content-section' text."""
    #Request the web page.
    r = requests.get('http://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture')
    #Parse the HTML into a soup tree.
    soup = BeautifulSoup(r.text, 'html.parser')
    #Print out the relevant text: every element with class 'content-section'.
    for p in soup(class_='content-section'):
        x = p.text + '\n'
        # NOTE(review): the latin-1 encode / utf-8 decode round-trip looks like
        # a workaround for mis-decoded response text; it raises if the text is
        # not valid UTF-8 when viewed as latin-1 bytes — confirm necessity.
        print (x.encode('latin-1').decode('utf-8'))
#Allow main program to be run from command line
if __name__ == '__main__':
    decode()
| true |
8fb89670b1638aa6690f35cdbe8b74d183d2c868 | Python | anisrfd/Python-Code-Test | /Bongo/Problem3.py | UTF-8 | 956 | 3.84375 | 4 | [] | no_license | class Node:
    def __init__(self, parent):
        # NOTE(review): despite its name, `parent` holds this node's *value*
        # (lca() compares it against the searched values); the attribute does
        # not reference a parent node.
        self.parent = parent
        self.left = None
        self.right = None
def lca(root, node1, node2):
    """Return the lowest common ancestor node of values node1/node2.

    `Node.parent` holds a node's value; returns None when neither value
    occurs in the subtree rooted at `root`.
    """
    if root is None:
        return None
    # A node whose value matches either target is its own ancestor.
    if root.parent in (node1, node2):
        return root
    found_left = lca(root.left, node1, node2)
    found_right = lca(root.right, node1, node2)
    # Targets located on both sides: this node is the split point.
    if found_left and found_right:
        return root
    return found_left or found_right
def binaryTree():
    """Build the fixed sample tree used by the demo below and return its root."""
    nodes = {value: Node(value) for value in range(1, 10)}
    nodes[1].left, nodes[1].right = nodes[2], nodes[3]
    nodes[2].left, nodes[2].right = nodes[4], nodes[5]
    nodes[3].left, nodes[3].right = nodes[6], nodes[7]
    nodes[4].left, nodes[4].right = nodes[8], nodes[9]
    return nodes[1]
if __name__ == '__main__':
    # Demo: find the lowest common ancestor of values 8 and 5 in the sample
    # tree (expected output: "LCA of 8 and 5 is 2").
    root = binaryTree()
    node1 = 8
    node2 = 5
    findLCA = lca(root, node1, node2)
    print("LCA of %d and %d is %d" % (node1, node2, findLCA.parent))
| true |
da17cb73f31c97cf5a62dd4c81e82eaed3b94845 | Python | INSAlgo/trainings-2019 | /MC_2019_10/5_Valid_Parenthesis.py | UTF-8 | 661 | 3.578125 | 4 | [
"MIT"
] | permissive | from collections import deque
class Solution:
    """Bracket-matching validator (LeetCode 20)."""

    def isValid(self, s: str) -> bool:
        """Return True when every bracket in `s` closes in the right order."""
        openers = "([{"
        closers = ")]}"
        unclosed = deque()  # most recently opened bracket sits at the left end
        for ch in s:
            if ch in openers:
                unclosed.appendleft(ch)
            else:
                # A closer with nothing open, or mismatched with the most
                # recent opener, makes the string invalid.
                if not unclosed:
                    return False
                if unclosed.popleft() != openers[closers.index(ch)]:
                    return False
        # Any bracket left open at the end also invalidates the string.
        return not unclosed
| true |
6fcf5f7b14f62ffe7d2ab126a7fd210f8663673f | Python | solomc1/python | /ics 32/ics 32 larc/ics 32 larc try error.py | UTF-8 | 1,547 | 3.6875 | 4 | [] | no_license | ##def print_strings(lst: [str]) -> list:
## result = []
## for element in lst:
## if type(element) == str:
## result.append(element)
## else:
## result.extend(return_strings(element))
## return result
##
##print(return_string(['a','b',['c',['d']],'e']))
##
##def divider() -> None:
## while True:
## try:
## n = int(n)
## print (n/5)
## except:
## print('Your "number" was invalid.')
##
##divider()
##
##
##
##
##
##def adder()-> None:
## while True:
## try:
## x = int(input("Please enter the number you would like to add by 2: a"))
## print (x+2)
## break
##
## except:
## print("error")
## return
##
##adder()
##
#os.path.exists(path: str)
# Create a program to print out all the files in a path specified by the user.
#Your program should not crash, given bad input.
#os.path.join(p1,p2) =
import os
def print_files(path: str) ->[str]:
    """Recursively collect the names of all files under `path`.

    Returns the bare file names (not full paths) from the whole subtree.
    Fixes two defects in the original: it tested `os.path.isfile(path)`
    (the directory itself, always False) instead of the entry's own path,
    and it extended the result with the *function object* instead of the
    recursive call's return value (a TypeError at runtime).
    """
    result = []
    for element in os.listdir(path):
        new_path = os.path.join(path, element)
        if os.path.isfile(new_path):
            result.append(element)
        else:
            result.extend(print_files(new_path))
    return result
#os.path.isfile(path: str)
def path_exists() -> None:
    """Prompt for a path and print every file found beneath it.

    The original referenced an undefined name `path` (a NameError) and never
    printed the collected names; this uses the entered path `n` and prints
    each file, matching the program's stated goal.
    """
    n = input("Enter a path: ")
    if os.path.exists(n):
        for filename in print_files(n):
            print(filename)
    else:
        print("error")
#os.path.exists
| true |
e15874d6898c675e4401d1601f3356db065da018 | Python | foongminwong/python-visualization-practice | /triangulation/sphere/plot_samples.py | UTF-8 | 1,237 | 2.78125 | 3 | [] | no_license | from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
import numpy as np
import bertini_real
#create a new figure
fig = plt.figure ()
q=1
#describe the position of the su plot
ax = fig.add_subplot (1,1,1, projection = '3d', aspect = 1)
ax.set_xlim(-q,q)
ax.set_ylim(-q,q)
ax.set_zlim(-q,q)
#read the most recent data?
data = bertini_real.data.ReadMostRecent();
#tuples - store surface sampler data
tuples = data.surface.surface_sampler_data
"""Extract points from vertices"""
def extractPoints(data):
    """Return the real (x, y, z) coordinates of every vertex in `data`.

    Each vertex's 'point' entries are complex; only the real parts are kept.
    """
    points = []
    for vertex in data.vertices:
        coords = [vertex['point'][axis].real for axis in range(3)]
        points.append(coords)
    return points
#points - store extracted points
points = extractPoints(data)
#create an empty array T: one [p1, p2, p3] coordinate triple per triangle
T = []
#T=[[points[f],points[s],points[t]]]
"""Questions"""
# len(tuples) - 'int' object is not iterable?
# how to get size of tuples and size of list in tuples?
# NOTE(review): only the first two sampler pieces are drawn (range(2) is
# hard-coded) — presumably this should iterate over all of `tuples`; confirm.
for i in range(2):
    for tri in tuples[i]:
        # Each `tri` holds the three vertex indices of one triangle.
        f = int(tri[0])
        s = int(tri[1])
        t = int(tri[2])
        #print(f,s,t)
        k = [points[f],points[s],points[t]]
        T.append(k)
ax.add_collection3d(Poly3DCollection(T))
plt.show()
| true |
def isBadVersion(n):
    """Test stub: version n is bad iff array[n - 1] is truthy.

    `array` is a module-level global assigned by the test harness below.
    """
    global array
    return array[n - 1]


class Solution(object):
    def firstBadVersion(self, n):
        """
        Binary search for the first bad version in 1..n.

        Assumes at least one bad version exists and badness is monotone.
        Modernized for Python 3: `//` keeps the midpoint an int (the old
        `/` produced a float, which cannot index `array`), and the final
        `print "ok"` statement became a print() call.

        :type n: int
        :rtype: int
        """
        lo, hi = 1, n
        while hi >= lo:
            mid = (hi - lo) // 2 + lo
            bad = isBadVersion(mid)
            prev_bad = isBadVersion(mid - 1) if mid > 1 else False
            if bad and not prev_bad:
                return mid
            if bad:
                hi = mid
            else:
                lo = mid + 1


if __name__ == "__main__":
    s = Solution()
    array = [0, 0, 0, 1, 1]
    assert s.firstBadVersion(5) == 4
    array = [0, 0, 0, 0, 1]
    assert s.firstBadVersion(5) == 5
    array = [1, 1, 1, 1, 1]
    assert s.firstBadVersion(5) == 1
    array = [1]
    assert s.firstBadVersion(1) == 1
    array = [0, 1]
    assert s.firstBadVersion(2) == 2
    print("ok")
| true |
88bd1fcefc2f1c9153813bf547b1f363629faf42 | Python | jareddrayton/Advent-of-Code | /advent_of_code_2019/day-12/day_12.py | UTF-8 | 2,442 | 3.40625 | 3 | [] | no_license | import itertools
# Read the example scan: one "<x=.., y=.., z=..>" line per moon.
with open("day-12-input-example.txt", 'r') as f:
    coords = f.readlines()

# Number of simulation steps to run.
steps = 12
moon_names = ["Io", "Europa", "Ganymede", "Callisto"]
# Global registry of Moon objects, keyed by name (filled by parse_input).
moons = {}
class Moon:
    """One moon in the AoC 2019 day 12 n-body simulation.

    apply_gravity reads every other moon's position through the module-level
    `moons` dict, so instances must be registered there before stepping.
    """

    def __init__(self, position, name):
        self.name = name
        self.position = position          # [x, y, z]
        self.velocity = [0, 0, 0]         # persists and accumulates across steps
        self.temp_velocity = None         # this step's gravity delta
        self.kinetic_energy = 0
        self.potential_energy = 0
        self.total_energy = 0

    def calculate_gravity(self, x_1, x_2):
        """Return the +1/0/-1 pull on axis value x_1 toward x_2."""
        if x_1 < x_2:
            return 1
        elif x_1 == x_2:
            return 0
        return -1

    def apply_gravity(self):
        """Compute this step's velocity delta from every other moon's position."""
        pulls = [
            list(map(self.calculate_gravity, self.position, moons[other].position))
            for other in moons
            if other != self.name
        ]
        # Sum the per-axis pulls from all other moons.
        self.temp_velocity = [sum(axis) for axis in zip(*pulls)]

    def apply_velocity(self):
        """Fold the gravity delta into the persistent velocity, then move.

        Bug fix: the puzzle requires velocity to accumulate across steps; the
        original reset it to [0, 0, 0] every step, so only the current step's
        gravity ever applied.
        """
        self.velocity = [v + dv for v, dv in zip(self.velocity, self.temp_velocity)]
        self.position = [p + v for p, v in zip(self.position, self.velocity)]

    def calculate_energy(self):
        """total = (sum |velocity|) * (sum |position|), per the puzzle."""
        self.kinetic_energy = sum(map(abs, self.velocity))
        self.potential_energy = sum(map(abs, self.position))
        self.total_energy = self.kinetic_energy * self.potential_energy
def parse_input(coords):
    # Extract the signed integers from each "<x=-1, y=0, z=2>" scan line and
    # register one Moon per name in the module-level `moons` dict.
    for coord, moon in zip(coords, moon_names):
        coord = coord.strip().split(',')
        moons[moon] = Moon([int("".join([b for b in a if b.isdigit() == True or b == "-"])) for a in coord], moon )
parse_input(coords)

for i in range(steps):
    print("Step {}".format(i))
    # Phase 1: gravity for *every* moon from the same position snapshot.
    # (The original interleaved gravity and movement per moon, letting
    # earlier moons move before later moons computed their pull.)
    for moon in moons.values():
        moon.apply_gravity()
    # Phase 2: move everyone and refresh the energy bookkeeping.
    for moon in moons.values():
        moon.apply_velocity()
        moon.calculate_energy()
        print(moon.position)
    print()  # was `sprint()` — a NameError typo

print(sum(moon.total_energy for moon in moons.values()))
| true |
b725ffeb95d7ab42ffddfb937db09aa6e364429c | Python | nakaken0629/atcoder | /beginners_selection/shift.py | UTF-8 | 220 | 2.984375 | 3 | [] | no_license | import sys
# AtCoder "Shift only": keep halving every number; the answer is how many
# full halving rounds complete before any value turns odd.
n = input()  # element count; consumed but not needed beyond reading the line
l = [int(s) for s in input().split()]
count = 0
while True:
    for i in range(len(l)):
        v = l[i]
        if v % 2 != 0:
            print(count)
            sys.exit()
        else:
            # Floor division keeps the values exact ints; the original `/`
            # produced floats, risking precision loss for large inputs.
            l[i] = v // 2
    count += 1
| true |
ce78d441b6021db010d302779d6226b7156c7793 | Python | thedavidharris/advent-of-code-2020 | /day16/16.py | UTF-8 | 2,191 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env pypy3
import itertools
import collections
from collections import defaultdict
with open("input.txt") as f:
    raw_text = f.read()  # renamed from `input`, which shadowed the builtin

sections = raw_text.split("\n\n")

# Parse "field: a-b or c-d" rules into {field: set of every valid value}.
rules_section = sections[0]
rules = {}
for rule in rules_section.splitlines():
    name, rule = rule.split(": ")
    subrules = rule.split(" or")
    x1, y1 = [int(x) for x in subrules[0].split("-")]
    x2, y2 = [int(x) for x in subrules[1].split("-")]
    # Both ranges are inclusive (a-b means a..b), so the stop is y+1.
    # The original used y2+2, wrongly admitting the out-of-range value y2+1.
    rules[name] = set(itertools.chain(range(x1, y1 + 1), range(x2, y2 + 1)))
# Nearby tickets start after the "nearby tickets:" header line.
ticket_section = sections[2].splitlines()[1:]
valid_tickets = []

# Part 1: sum every field value that satisfies no rule at all; tickets
# containing any such value are discarded for part 2.
invalid_values = []
total_range = set().union(*rules.values())
tickets = [[int(field) for field in line.split(",")] for line in ticket_section]
for ticket in tickets:
    bad_fields = [field for field in ticket if field not in total_range]
    invalid_values.extend(bad_fields)
    if not bad_fields:
        valid_tickets.append(ticket)

print("Part 1: " + str(sum(invalid_values)))
# Part 2: work out which ticket column holds each named field.
# (Removed the unused `num_files` local; `len(rules)` replaces
# `len(rules.keys())`; the generator variable no longer shadows the list.)
valid_mapping = collections.defaultdict(list)
for field in rules:
    for i in range(len(rules)):
        # Column i is a candidate for `field` iff every valid ticket's value
        # in that column satisfies the field's rule.
        if all(ticket[i] in rules[field] for ticket in valid_tickets):
            valid_mapping[field].append(i)

answer_set = {}
# Elimination: repeatedly lock in any field with exactly one remaining
# candidate column and strike that column from every other field's list.
while any(valid_mapping.values()):
    for field, possibility in valid_mapping.items():
        if len(possibility) == 1:
            correct_field = possibility[0]
            answer_set[field] = correct_field
            # Remove this column everywhere (including this field's own
            # list, which empties it and lets the while-loop terminate).
            for other in valid_mapping.values():
                if correct_field in other:
                    other.remove(correct_field)
            break

my_ticket = sections[1].splitlines()[1].split(",")
answer = 1
for field in answer_set:
    if "departure" in field:
        answer *= int(my_ticket[answer_set[field]])

print("Part 2: " + str(answer))
| true |
f911548ca45f445a83e0ee6ef9197c69917e99ef | Python | JamesRaynor67/poker | /getFiveCardRankListDf.py | UTF-8 | 1,973 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import itertools
import collections
import logging
import functools
import time
import patternCompareUtils as pcu
import pandas as pd
def generateAllDeckCards():
    """Return every 5-card hand as a list of card ids in descending order.

    itertools.combinations emits tuples in the order of its input, and the
    input range runs 51..0, so every emitted hand is already descending.
    """
    allHands = [list(hand) for hand in itertools.combinations(range(51, -1, -1), 5)]
    print(len(allHands))
    return allHands
def fiveCardsToID(fiveCard):
    """Encode five card ids as one base-52 integer (index 0 least significant)."""
    return sum(fiveCard[position] * 52 ** position for position in range(5))
def getFiveCardRankListDf():
    """Load the cached hand-rank table, or build it from scratch and cache it.

    The returned DataFrame is indexed by the base-52 hand id (fiveCardsToID)
    with a single `rankValue` column; hands that compare equal under
    pcu.compareTwoSuits share a rank value.
    """
    if os.path.exists('fiveCardRankList.csv'):
        print("Reading from fiveCardRankList.csv ...")
        df = pd.read_csv('fiveCardRankList.csv')
        df.set_index('id', inplace=True)
        return df
    else:
        print("fiveCardRankList.csv not found, generating new rank list...")
        deckCards = generateAllDeckCards()
        # Sort all hands by the project comparator before ranking them.
        deckCards.sort(key=functools.cmp_to_key(pcu.compareTwoSuits))
        # df = pd.DataFrame(deckCards, columns =['fiveCards_0','fiveCards_1','fiveCards_2','fiveCards_3','fiveCards_4']) #Not necessary if there is id
        idList = []
        for fiveCard in deckCards:
            idList.append(fiveCardsToID(fiveCard))
        df = pd.DataFrame(idList, columns=['id'])
        rankValue = 0
        rankValueList = [0]
        # Walk the sorted hands: each strict increase bumps the rank by one,
        # so equal-strength hands keep the same rank.
        for index in range(1, len(deckCards)):
            compareResult = pcu.compareTwoSuits(deckCards[index-1], deckCards[index])
            if compareResult < 0:
                rankValue += 1
            elif compareResult > 0:
                # Sorted order violated: the comparator must be inconsistent.
                # NOTE(review): logging.critical %-formats its extra args, but
                # this message has no placeholders, so these values won't be
                # rendered into the log line — confirm intent.
                logging.critical('Unexpected sequence!', index, deckCards[index-1], deckCards[index])
                exit()
            rankValueList.append(rankValue)
        df['rankValue'] = rankValueList
        df.set_index('id', inplace=True)
        df.to_csv('fiveCardRankList.csv')
        return df
| true |
1bdd9a6d5c4a14e4f2ca23dc13a54f2956976a04 | Python | rushabh2390/shoppingwebsites | /src/categories/views.py | UTF-8 | 1,632 | 2.59375 | 3 | [] | no_license | from django.shortcuts import render, redirect
from .forms import CategoryForm
from .models import Category
from products.models import Product
# Create your views here.
def add_category(request):
    """Handle the add-category form: reject duplicates, save new categories."""
    form = CategoryForm(request.POST or None)
    error_message = None
    success_message = None
    if form.is_valid():
        proposed_name = form["categoryName"].value()
        if Category.is_duplicate(proposed_name):
            error_message = "Category is already exists."
        else:
            form.save()
            success_message = "Category Added Sucessfully"
    context = {
        'form': form,
        'errors': error_message,
        "sucess": success_message,
    }
    return render(request, "addcategory.html", context)
def view_category(request):
    """Render the category list page, or an error when no categories exist."""
    context = {"categorylist": None, 'errors': None}
    categories = Category.get_category()
    if len(categories) > 0:
        context["categorylist"] = categories
    else:
        context['errors'] = "No category exist"
    return render(request, "category.html", context)
def catergory_list(request):
    """Render the products of the category named in the query string.

    Redirects home when no category is given or the category has no products.
    """
    category_name = request.GET.get("category")
    if not category_name:
        return redirect("/")
    category = Category.get_category_by_name(category_name)
    products = Product.get_product_by_category(category.categoryID)
    if not products:
        return redirect("/")
    return render(request, "category_list.html", {"products": products})
6bc98a2d8320084f0a7a55cb6885fd7e352ab1c8 | Python | joporci/Biopython-1 | /Exercises/tuples/q8.py | UTF-8 | 573 | 4.40625 | 4 | [] | no_license | # This program reads a name and an age for a person, until the name is blank. As each name age pair is entered, store names in a list, and ages in another. Print a list of tuples of paired names and ages.
# Read (name, age) pairs until a blank name is entered, collecting names and
# ages in two parallel lists (Python 2: raw_input / print statement).
name = raw_input('Enter name: ')
age = int(raw_input('Enter age: '))
names = []
ages = []
while name != '':
    names = names + [name]
    ages = ages + [age]
    name = raw_input('Enter name: ')
    # Only prompt for an age when another name was actually entered.
    if name != '':
        age = int(raw_input('Enter age: '))
# Pair the two lists element-wise into (name, age) tuples and print them.
name_age = []
for a in range(len(names)):
    comb = (names[a],) + (ages[a],)
    name_age = name_age + [comb]
print name_age
| true |
987307a74daa8131459928205482ee5310154337 | Python | zeronezer/pydemo | /ssh/client.py | UTF-8 | 350 | 2.640625 | 3 | [] | no_license | import socket
# Connect to the toy remote-command server on localhost:8000.
client=socket.socket()
client.connect(('localhost',8000))
while True:
    cmd=input('请输入cmd命令:')
    client.send(cmd.encode('utf-8'))
    # The server's first message announces the response length.
    data_len=client.recv(1024).decode('utf-8')
    print('len:',data_len)
    while True:
        data=client.recv(1024).decode('utf-8')
        print(data)
        # NOTE(review): treating a short read (<1024 bytes) as end-of-response
        # is unreliable — TCP may split a long reply into short chunks, and the
        # announced data_len above is never actually used; confirm protocol.
        if len(data.encode('utf-8'))<1024:
            break
client.close() | true |
942d2260c1ca4957380f15bfdb132bdba6202531 | Python | cgarrido2412/PythonPublic | /Challenges/Advent Of Code/adventofcode2022/day02.py | UTF-8 | 3,614 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python3
import os
if __name__ == "__main__":
#Read Puzzle Input. Split by line.
rock_paper_scissors = open(os.getenv('HOMEDRIVE')+os.getenv('HOMEPATH')+'\Desktop\Python\Coding Challenges\\adventofcode2022\day02_input.txt').read().split('\n')
#Map out strategy guide to choices
oppponent_dictionary = {'A': 'rock', 'B': 'paper', 'C': 'scissors'}
player_dictionary = {'X': 'rock', 'Y': 'paper', 'Z': 'scissors'}
#Map out scoring for the selected shape
shape_score = {'rock': 1, 'paper': 2, 'scissors': 3}
#Keeping score
total_score = 0
#Compare each match in the game
for game in range(len(rock_paper_scissors)):
#Convert Opponent Choice to rock, paper, or scissors
opponent_choice = rock_paper_scissors[game][0]
opponent_ouctome = oppponent_dictionary.get(opponent_choice)
#Convert Player Chocie to rock, paper, or scissors
player_choice = rock_paper_scissors[game][2]
player_ouctome = player_dictionary.get(player_choice)
#Start scoring, start with score for shape decision
decision_score = shape_score.get(player_ouctome)
total_score += decision_score
#For Draws
if opponent_ouctome == player_ouctome:
total_score += 3
#If the Opponent Wins, then if player wins
elif opponent_ouctome == 'rock':
if player_ouctome == 'scissors':
total_score += 0
elif player_ouctome == 'paper':
total_score += 6
elif opponent_ouctome == 'paper':
if player_ouctome == 'rock':
total_score += 0
elif player_ouctome == 'scissors':
total_score += 6
elif opponent_ouctome == 'scissors':
if player_ouctome == 'paper':
total_score += 0
elif player_ouctome == 'rock':
total_score += 6
print(total_score) #Part one correct and complete
'''
For Part 2, [X,Y,Z] determine the outcome of the game instead of representing the player choice
'''
#Define outcome dictionary
outcome_dictionary = {'X': 'lose', 'Y': 'draw', 'Z': 'win'}
#Create separate score for part 2
round_2_score = 0
#Compare each match in the game again
for game in range(len(rock_paper_scissors)):
#Convert Opponent Choice to rock, paper, or scissors
opponent_choice = rock_paper_scissors[game][0]
opponent_ouctome = oppponent_dictionary.get(opponent_choice)
#Convert Player outcome to win, lose, draw
player_tactics = rock_paper_scissors[game][2]
game_outcome = outcome_dictionary.get(player_tactics)
if game_outcome == 'draw':
if opponent_ouctome == 'rock':
round_2_score += 4
elif opponent_ouctome == 'paper':
round_2_score += 5
elif opponent_ouctome == 'scissors':
round_2_score += 6
elif game_outcome == 'lose':
if opponent_ouctome == 'rock':
round_2_score += 3
elif opponent_ouctome == 'paper':
round_2_score += 1
elif opponent_ouctome == 'scissors':
round_2_score += 2
elif game_outcome == 'win':
if opponent_ouctome == 'rock':
round_2_score += 8
elif opponent_ouctome == 'paper':
round_2_score += 9
elif opponent_ouctome == 'scissors':
round_2_score += 7
print(round_2_score) #Part 2 Correct and Complete
| true |
0d45ce656e6060f434baf6769c3c44e7e2963c5d | Python | rennat/pyprinttree | /pyprinttree/loaders.py | UTF-8 | 1,444 | 2.671875 | 3 | [] | no_license | from __future__ import absolute_import
from __future__ import unicode_literals
from . import structures
__all__ = ['load_from_pair_list', 'load_from_csv']
def load_from_pair_list(node_or_node_id_edge_pairs):
    """Build and return a Tree by adding one edge per (start, end) pair."""
    tree = structures.Tree()
    for start, end in node_or_node_id_edge_pairs:
        tree.add(start, end)
    return tree
def _load_from_csv_default_id_validator(value):
return bool(value) or isinstance(value, int)
def load_from_csv(file_like_object, header_row_count=0, id_column_index=0,
                  parent_column_index=None, child_column_index=None,
                  id_validator=_load_from_csv_default_id_validator,
                  **csv_reader_kwargs):
    """Build a Tree from CSV rows.

    Each row contributes its id column as a node; when parent/child column
    indexes are given and those values pass `id_validator`, matching edges
    are added as well. The first `header_row_count` rows are skipped, and
    extra keyword arguments are forwarded to csv.reader.
    """
    import csv

    tree = structures.Tree()
    for row_number, row in enumerate(csv.reader(file_like_object, **csv_reader_kwargs)):
        if row_number < header_row_count:
            continue  # header row
        node_id = row[id_column_index]
        if not id_validator(node_id):
            continue  # rows without a usable id are ignored entirely
        tree.add(node_id)
        if parent_column_index is not None:
            parent_id = row[parent_column_index]
            if id_validator(parent_id):
                tree.add(parent_id, node_id)
        if child_column_index is not None:
            child_id = row[child_column_index]
            if id_validator(child_id):
                tree.add(node_id, child_id)
    return tree
| true |
70706113d394cd1e247e0718a4b47dcccbfd528f | Python | Newester/MyCode | /Py/Python/RE.py | UTF-8 | 2,193 | 4 | 4 | [] | no_license | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
# Regular expressions -- string matching: a descriptive language defines a
# rule, and any string fitting the rule is considered valid.
# \d matches a digit; \w a letter or digit; . any character; * any number of
# characters; + at least one; ? zero or one; {n} exactly n; {n,m} between n
# and m; \s matches whitespace; [] gives a character range; | matches either
# side; ^ anchors the start of a line; $ anchors the end.
# The `re` module.  Prefer raw strings (r'...') so backslashes need no
# extra escaping.
import re

print(re.match(r'^\d{3}\-\d{3,8}$', '010-12345'))
re.match(r'^\d{3}\-\d{3,8}$','010 12345')
# match() returns a Match object on success, or None on failure.
test = '用户输入的字符串'
if re.match(r'正则表达式',test):
    print('OK')
else:
    print('Failed')
# Splitting strings the plain way ...
print('a b c'.split(' '))
# ... versus splitting with a regular expression.
print(re.split(r'\s+','a b c'))
# Grouping: parentheses () capture substrings.
m = re.match(r'^(\d{3})-(\d{3,8})$','010-12345')
print(m)
print(m.group(0))
print(m.group(1))
print(m.group(2))
# group(0) is the entire original matched string.
# Match any time of day.
t = '19:05:30'
m = re.match(r'^(0[0-9]|1[0-9]|2[0-3]|[0-9])\:(0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9]|[0-9])\:(0[0-9]|[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9]|[0-9])$',t)
print(m.groups())
# A regex alone cannot always fully validate input (e.g. dates); combine it
# with program logic when needed.
# Greedy matching -- regex matching is greedy by default; `?` makes it lazy.
print(re.match(r'^(\d+?)(0*)$','102300').groups())
# Compilation -- re compiles a pattern before matching; an invalid pattern
# fails at compile time.  Precompile patterns you intend to reuse.
re_telephone = re.compile(r'^(\d{3})-(\d{3,8})$')
print(re_telephone.match('010-12345').groups())
# practice
def name_of_email(addr):
    """Return the full address when `addr` is a well-formed email, else None.

    Fixes to the original pattern:
      * `[\\w|-|_]` included a literal '|' in local-part names -> `[\\w-]`,
      * the dot before the TLD was an unescaped any-char wildcard -> `\\.`,
      * `[A-za-z]` is a typo for `[A-Za-z]` (the A-z range also matches
        the punctuation characters between 'Z' and 'a').
    """
    res = re.match(r'^(\w[\w-]*)@(\w+)\.([A-Za-z]+)$', addr)
    if res:
        return res.group(0)
    else:
        return None

assert(name_of_email('hello@qq.com')) == 'hello@qq.com'
print('OK')
| true |
b6a4c7b91c8a6f8ee6b794f85504ae3b2b9929f7 | Python | meettaraviya/Mine-search-AI | /minesweeper.py | UTF-8 | 5,606 | 2.859375 | 3 | [] | no_license | from PIL import Image
import numpy as np
import scipy.signal as ss
class MineSweeper:
    """A minesweeper board: mine field, revealed mask, and neighbor counts."""
    # Sprite sheet of 16x16 cell icons, stacked vertically (see draw()).
    ICONS = np.array(Image.open('res/squares.bmp').convert('RGB'))
    def __init__(self, N: int, H: int, W: int, seed: int = None):
        """Create an H x W board with N mines placed using `seed`."""
        self.N, self.H, self.W = N, H, W
        if seed is None:
            seed = np.random.randint(2**32)
        np.random.seed(seed)
        self.seed = seed
        # Choose N distinct flat positions, then reshape into an H x W mask.
        locs = np.random.choice(H*W, N, False)
        self.field = np.zeros(H*W, dtype=bool)
        self.field[locs] = True
        self.field = self.field.reshape(H, W)
        self.revealed = np.zeros((H, W), dtype=bool)
        # 3x3 convolution counts each cell's neighboring mines (minus itself).
        self.counts = (ss.convolve2d(self.field, np.ones((3, 3)), mode='same') - self.field).astype(int)
        self.game_over = False
        self.score = 0
    @staticmethod
    def beginner(seed: int = None):
        """Standard beginner board: 10 mines on 9x9."""
        return MineSweeper(10, 9, 9, seed=seed)
    @staticmethod
    def intermediate(seed: int = None):
        """Standard intermediate board: 40 mines on 16x16."""
        return MineSweeper(40, 16, 16, seed=seed)
    @staticmethod
    def expert(seed: int = None):
        """Standard expert board: 99 mines on 16x30."""
        return MineSweeper(99, 16, 30, seed=seed)
    @staticmethod
    def from_file(infile: str, validate: bool = True):
        """Load a board from a '|'-delimited text field file (see __str__)."""
        with open(infile) as inp:
            field = []
            revealed = []
            given_counts = []
            for line in inp.readlines():
                # Cells sit between '|' separators; drop the edge fragments.
                syms = line.split('|')[1:-1]
                field.append([])
                revealed.append([])
                given_counts.append([])
                for sym in syms:
                    # '@' = clicked mine, digits/'.' = revealed safe cells,
                    # 'O' = unrevealed mine, anything else = unknown.
                    revealed[-1].append(sym in "@X012345678.")
                    if sym in "@O":
                        field[-1].append(True)
                    elif revealed[-1][-1]:
                        field[-1].append(False)
                    else:
                        field[-1].append(None)
                    if sym in "012345678":
                        given_counts[-1].append(int(sym))
                    else:
                        given_counts[-1].append(None)
        # Bypass __init__ (which would generate a random field).
        ms = MineSweeper.__new__(MineSweeper)
        ms.field = np.array(field).astype(bool)
        ms.N = ms.field.sum()
        ms.H, ms.W = ms.field.shape
        ms.revealed = np.array(revealed).astype(bool)
        calc_counts = (ss.convolve2d(ms.field, np.ones((3, 3)), mode='same') - ms.field).astype(int)
        # NOTE(review): the `validate` parameter is currently unused because
        # these consistency assertions are commented out — confirm intent.
        # if validate:
        #     assert ((calc_counts == given_counts) | np.equal(given_counts, None)).all(), "Invalid field"
        #     assert (ms.field & ms.revealed).sum() <= 1, "More than one mines clicked on"
        ms.counts = calc_counts
        return ms
    def __str__(self):
        """Render the board as '|'-delimited rows (the from_file format)."""
        out = ""
        for i in range(self.H):
            for j in range(self.W):
                out += f"|{self.get(i, j)}"
            out += "|\n"
        return out
    def neighbors(self, i:int, j:int):
        """Return the in-bounds (row, col) pairs adjacent to (i, j)."""
        nbrs = []
        for di in [-1, 0, 1]:
            for dj in [-1, 0, 1]:
                if 0 <= i + di < self.H and 0 <= j + dj < self.W and (di != 0 or dj != 0):
                    nbrs.append((i + di, j + dj))
        return nbrs
    def print(self, outfile: str = None):
        """Print the board, or write it to `outfile` when given."""
        if outfile is None:
            print(str(self))
        else:
            with open(outfile, 'w') as out:
                out.write(str(self))
    def get(self, y: int, x: int):
        """Return the display symbol at (y, x): '@', a count digit, or ' '."""
        if self.revealed[y, x]:
            if self.field[y, x]:
                return "@"
            else:
                return str(self.counts[y, x])
        else:
            return " "
    def draw(self, outfile: str = None):
        """Render the board with the ICONS sprite sheet; show or save it."""
        img = np.zeros((self.H*16, self.W*16, 3), dtype=np.uint8)
        for i in range(self.H):
            for j in range(self.W):
                # Sprite row: 0 = hidden, 4 = mine, 13 - c = count icon c.
                if not self.revealed[i][j]:
                    k = 0
                elif self.field[i][j]:
                    k = 4
                else:
                    k = 13 - self.counts[i][j]
                img[i*16:(i+1)*16, j*16:(j+1)*16, :] = MineSweeper.ICONS[k*16:(k+1)*16, :, :]
        # NOTE(review): debug print left in place — consider removing.
        print(img.shape, self.H, self.W)
        if outfile is None:
            Image.fromarray(img).show()
        else:
            Image.fromarray(img).save(outfile)
    def reveal(self, y: int, x: int):
        """Reveal (y, x); a mine ends the game, otherwise the score rises."""
        self.revealed[y, x] = True
        val = self.get(y, x)
        if val == "@":
            self.game_over = True
        if not self.game_over:
            self.score += 1
        return val
class WindowsMineSweeper(MineSweeper):
    """MineSweeper variant mimicking the Windows rules: the first click is
    never a mine, and revealing a zero-count cell flood-fills its neighbors.
    """
    def __init__(self, *args, **kwargs):
        super(WindowsMineSweeper, self).__init__(*args, **kwargs)
        # True until the first reveal(); used to relocate mines away from it.
        self.first_move = True

    def reveal(self, y: int, x: int):
        """Reveal (y, x) and return its glyph.

        On the first move the whole minefield is re-rolled until the clicked
        cell is safe.  Revealing a '0' cell recursively reveals its hidden
        neighbors; each successful reveal increments ``score``.
        """
        if self.first_move:
            # Re-generate the entire field until the clicked cell holds no mine.
            while self.field[y, x]:
                locs = np.random.choice(self.H*self.W, self.N, False)
                self.field = np.zeros(self.H*self.W, dtype=bool)
                self.field[locs] = True
                self.field = self.field.reshape(self.H, self.W)
                # Neighbor-mine counts: 3x3 box sum minus the cell itself.
                self.counts = (ss.convolve2d(self.field, np.ones((3, 3)), mode='same') - self.field).astype(int)
            self.first_move = False
        self.revealed[y, x] = True
        val = self.get(y, x)
        if val == "@":
            self.game_over = True
        elif val == "0":
            # Flood-fill: a zero-count cell auto-reveals all hidden neighbors.
            # NOTE(review): recursion depth can get large on big empty boards.
            for ny, nx in self.neighbors(y, x):
                if not self.revealed[ny, nx]:
                    self.reveal(ny, nx)
        if not self.game_over:
            self.score += 1
        return val
if __name__ == '__main__':
    # Load a saved board layout from disk and display it as an image.
    ms = MineSweeper.from_file('eg.field')
    ms.draw()
| true |
6e9e993811d1ee7f94e8384a4b7cb66130b7f7e2 | Python | rajgopav/Algorithms | /ArraysAndSorting/GameOfRotation.py | UTF-8 | 1,014 | 3.625 | 4 | [] | no_license | # Importing standard libraries
import sys
'''
Function that computes the pMean for the very first position
'''
def getPMean(a):
    """Return the position-weighted sum ``sum(a[i] * (i + 1))``.

    Bug fix: the old special case for ``len(a) == 1`` returned the list
    itself instead of the scalar ``a[0]``; the general formula covers a
    single element (and the empty list, which yields 0) correctly.
    """
    return sum(val * (pos + 1) for pos, val in enumerate(a))
'''
Main function of the program. Take care of the case when
the pMeanMax can be -ve
'''
if __name__ == "__main__":
    # Parse the input: element count n, then the n conveyor-belt values.
    # (Python 2 script -- note the print statement at the bottom.)
    n = int(sys.stdin.readline().rstrip())
    a = [int(x) for x in sys.stdin.readline().rstrip().split()]
    # pMean of the initial (unrotated) arrangement.
    pMean = getPMean(a)
    pMeanMax = pMean;  # must be seeded from a real rotation, since pMean can be negative
    # Sum of all the elements on the conveyor belt.
    pSum = sum(a)
    # Each rotation's pMean follows from the previous one in O(1):
    # new = old - pSum + n * (element moved to the last position).
    for i in a:
        curPMean = pMean - pSum + n*i
        pMean = curPMean
        if(curPMean > pMeanMax): pMeanMax = curPMean
    # Print the maximum pMean over all rotations.
    print pMeanMax
2ad485a4b6942b051aa53abdc84b5d9e3355395e | Python | mkukovacec/Egrader | /rulers/grammar/spellchecker.py | UTF-8 | 604 | 2.640625 | 3 | [
"MIT"
] | permissive | import numpy as np
import nltk
from nltk.corpus import wordnet
import resources as res
class Ruler(object):
title = None
text = None
def __init__(self, title, text):
self.title = title
self.text = text
def run(self):
counter = 0
valid = 0
for word in nltk.word_tokenize(self.text):
if not word[0].isalpha():
continue
counter+=1
if (wordnet.synsets(word.lower()) or word.lower() in res.__words__ or word.lower() in res.__names__):
valid+=1
return float(valid/counter)
| true |
043e9e535b01d48887229034e89fc39342957ff8 | Python | theojepsen/p4app | /examples/ring.p4app/main.py | UTF-8 | 1,779 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | from p4app import P4Mininet
from mininet.topo import Topo
import sys
# Ring size from the first CLI argument; default to 3 switches.
if len(sys.argv) > 1:
    N = int(sys.argv[1])
else:
    N = 3
print("Setting-up a %d-switch ring topology" % N)
class RingTopo(Topo):
    """A ring of n switches, each with one attached host on switch port 1.

    Port 2 of every switch links clockwise to port 3 of the next switch,
    closing the ring.
    """

    def __init__(self, n, **opts):
        Topo.__init__(self, **opts)
        switches = []
        for idx in range(1, n + 1):
            host = self.addHost('h%d' % idx,
                                ip = "10.0.0.%d" % idx,
                                mac = '00:00:00:00:00:%02x' % idx)
            sw = self.addSwitch('s%d' % idx)
            self.addLink(host, sw, port2=1)
            switches.append(sw)
        # Close the ring: each switch's port 2 to the next switch's port 3.
        for idx, sw in enumerate(switches):
            nxt = switches[(idx + 1) % n]
            self.addLink(sw, nxt, port1=2, port2=3)
# Build the ring topology and start the P4 network.
topo = RingTopo(N)
net = P4Mininet(program='basic.p4', topo=topo, enable_debugger=True)
net.start()

# Install forwarding rules on every switch.
for i in range(1, N+1):
    sw = net.get('s%d'% i)
    # Exact /32 match: forward to the host connected to this switch (port 1).
    sw.insertTableEntry(table_name='MyIngress.ipv4_lpm',
                        match_fields={'hdr.ipv4.dstAddr': ["10.0.0.%d" % i, 32]},
                        action_name='MyIngress.ipv4_forward',
                        action_params={'dstAddr': '00:00:00:00:00:%02x' % i,
                                       'port': 1})
    # Default: send the packet clockwise around the ring (port 2).
    sw.insertTableEntry(table_name='MyIngress.ipv4_lpm',
                        default_action=True,
                        action_name='MyIngress.ipv4_forward',
                        action_params={'dstAddr': '00:00:00:00:00:00', # the last hop will set this correctly
                                       'port': 2})
    sw.printTableEntries()

# Smoke test: every host should reach every other host around the ring.
net.pingAll()
print("OK")
| true |
132cedd09287edd48c648303199d6796529f1940 | Python | hakank/hakank | /cpmpy/set_covering2.py | UTF-8 | 1,142 | 3.296875 | 3 | [
"MIT"
] | permissive | """
Set covering in cpmpy.
Example 9.1-2, page 354ff, from
Taha 'Operations Research - An Introduction'
Minimize the number of security telephones in street
corners on a campus.
This cpmpy model was written by Hakan Kjellerstrand (hakank@gmail.com)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def set_covering2():
    """Set-covering model (Taha, example 9.1-2): place the fewest security
    telephones on campus street corners so that every connected street has
    a phone on at least one of its two end corners.
    """
    # Problem data.
    n = 8             # maximum number of corners
    num_streets = 11  # number of connected streets
    # The two corners of each street, 1-based (converted to 0-based below).
    corner = [[1, 2], [2, 3], [4, 5], [7, 8], [6, 7], [2, 6], [1, 6], [4, 7],
              [2, 4], [5, 8], [3, 5]]

    # Decision variables: x[j] == telephone placed at corner j;
    # z == total number of telephones, which is minimized.
    x = boolvar(shape=n, name="x")
    z = intvar(0, n, name="z")

    model = Model(minimize=z)
    model += [z == sum(x)]
    # Every street must be covered by at least one of its two corners.
    for ends in corner:
        model += [sum([x[j - 1] for j in ends]) >= 1]

    ss = CPM_ortools(model)
    if ss.solve():
        print("z:", z.value())
        print("x:", x.value())
set_covering2()
| true |
76885882dea1df4385dcaab9f4a28a0d81794f77 | Python | navnath-auti/College | /Sem4/Python'/numpy/numpyprac.py | UTF-8 | 623 | 3.609375 | 4 | [] | no_license | import numpy as np
# np.array(): build an ndarray from a Python list
nums = np.array([2, 3, 4, 5, 6])
print(type(nums))
# np.arange(): evenly spaced integers in [2, 10)
nums = np.arange(2, 10)
print(nums)
# np.zeros(): 5x4 array filled with zeros
nums = np.zeros((5, 4))
print(nums)
# np.ones(): 5x4 array filled with ones
nums = np.ones((5, 4))
print(nums)
# np.linspace(): 5 evenly spaced samples over the closed interval [1, 10]
nums = np.linspace(1, 10, 5)
print(nums)
# np.eye(): 4x4 identity matrix
nums = np.eye(4)
print(nums)
# np.random.rand(): 2x3 array of uniform random samples in [0, 1)
nums = np.random.rand(2, 3)
print(nums)
# reshape(): view the integers 1..16 as a 4x4 matrix
num = np.arange(1, 17)
num2 = num.reshape(4, 4)
print(num2)
# min()/max(): extremes of 5 random integers drawn from [1, 100)
random = np.random.randint(1, 100, 5)
print(random)
xmin = random.min()
print(xmin)
xmax = random.max()
print(xmax)
| true |
82599e123acdbb89647558d8339996af18a37bf6 | Python | cannonja/jc2 | /Rozell/Walt_code/mr/optimize/sweep.py | UTF-8 | 6,887 | 2.875 | 3 | [] | no_license |
from mr.figureMaker import FigureMaker
import collections
from job_stream.inline import Multiple, Work
import math
import os
import pandas
import sklearn
import sys
import traceback
class Sweep(object):
"""Provides facilities for testing a range of parameters uniformly.
"""
def __init__(self):
pass
def sweep(self, paramRanges, nSamples, scoreParams, output = None,
checkpointFile = None):
"""Generate sweeper. Runs a job_stream on the given paramRanges,
sampling scoreParams() nSamples times for each parameter set.
paramRanges - [ ('name', [ values, ... ]), ... ]
nSamples - Number of samples for each parameter set
scoreParams - Function to evaluate a parameter set. Takes parameter set
including 'id'. Returns a dict with keys as attributes, and
values as a single, floating point number.
Average and standard deviation are calculated.
output - Either None to output to stdout, a string to save to the given
file (default type csv), or a function that takes an array of
all of the row dicts that would go in a csv.
"""
# Generate sets of parameters to score
parmSets = []
nParms = len(paramRanges)
stack = [ 0 ] * nParms
carry = 0
while carry == 0:
parms = { 'id': len(parmSets) }
parmSets.append(parms)
for i, (name, vals) in enumerate(paramRanges):
parms[name] = vals[stack[i]]
# Increment and cascade
carry = 1
for i in range(nParms - 1, -1, -1):
if carry == 0:
break
stack[i] += carry
if stack[i] >= len(paramRanges[i][1]):
stack[i] = 0
carry = 1
else:
carry = 0
with Work(parmSets, checkpointFile = checkpointFile) as w:
@w.frame(emit = lambda store: store.result)
def gatherScores(store, first):
if not hasattr(store, 'init'):
store.init = True
store.id = first['id']
store.first = first
store.data = []
return Multiple([ first ] * nSamples)
# We're done! Calculate averages and such
avgs = collections.defaultdict(float)
devs = collections.defaultdict(float)
for d in store.data:
for k, v in d.iteritems():
avgs[k] += v
for k in avgs.keys():
avgs[k] /= len(store.data)
if len(store.data) > 1:
for d in store.data:
for k, v in d.iteritems():
devs[k] += (v - avgs[k]) ** 2
for k in devs.keys():
devs[k] = (devs[k] / (len(store.data) - 1)) ** 0.5
store.result = store.first
for k, v in avgs.iteritems():
store.result[k] = v
store.result[k + '_dev'] = devs[k]
sys.stderr.write("...Finished {}\n".format(store.id))
@w.job
def scoreSet(parms):
return scoreParams(parms)
@w.frameEnd
def aggScores(store, next):
store.data.append(next)
@w.finish
def saveResults(r):
resultColumns = [ 'id' ] + [ p[0] for p in paramRanges ]
for key in sorted(r[0].keys()):
if key not in resultColumns:
resultColumns.append(key)
df = pandas.DataFrame(r, columns = resultColumns)
df.set_index('id', inplace = True)
df.sort_index(inplace = True)
print(df.to_string())
if output is not None:
if isinstance(output, str):
df.to_csv(output)
else:
raise NotImplementedError(output)
def sweepFit(self, learner, paramRanges, trainSet, testSet,
maxIters = None, nSamples = 3, scoreModel = None, output = None,
visualParams = None, imageDestFolder = None, checkpointFile = None):
"""Special version of sweep() for the common use case. Also can
output visual information, as needed.
scoreModel - Either None to just return a dict of model.score(*testSet),
or a function that takes (model, testSet) and returns a dict
with parameters to track.
"""
if not isinstance(trainSet, tuple):
raise ValueError("trainSet must be a tuple: (inputs, expected), or "
"(inputs,) for unsuperved")
if not isinstance(testSet, tuple):
raise ValueError("testSet must be a tuple: (inputs, expected), or "
"(inputs,) for unsuperved")
# Clear out images in imageDestFolder
if visualParams is not None:
if imageDestFolder is None:
raise ValueError("If visualParams is set, imageDestFolder must "
"be set")
# Re-use our folder cleaning code
fm = FigureMaker([], imageDestFolder = imageDestFolder)
elif imageDestFolder is not None:
raise ValueError("imageDestFolder requires visualParams")
# The method to score a param set
def score(parms):
e = sklearn.clone(learner)
pp = dict(parms)
pp.pop('id')
try:
e.set_params(**pp)
e.fit(*trainSet, maxIters = maxIters)
if visualParams is not None:
imgPathBase = os.path.join(imageDestFolder,
"{}".format(store.params['id']))
# Write some images!
e.visualize(visualParams,
path = imgPathBase + ".png")
e.visualize(visualParams,
path = imgPathBase + "_example.png",
inputs = testSet[0][0])
if scoreModel is None:
return dict(score = e.score(*testSet))
else:
return scoreModel(e, testSet)
except:
sys.stderr.write("Error for {}:\n{}\n".format(
parms, traceback.format_exc()))
if e.UNSUPERVISED:
score = 1.0
else:
score = -1.0
e = None
return self.sweep(paramRanges, nSamples, score, output = output,
checkpointFile = checkpointFile)
| true |
5ed8495f80b3a4088a4c786eb423e6fd38f63af3 | Python | ekbdizzy/dvmn-online-library | /arguments_parser.py | UTF-8 | 1,232 | 2.515625 | 3 | [] | no_license | import argparse
from parse_services import get_last_page_for_category
import settings
def get_args():
    """Build and parse the command-line options for the book-download run."""
    parser = argparse.ArgumentParser()
    # Where downloaded files and the JSON index are written.
    parser.add_argument(
        "--dest_folder", dest='dest_folder_path', type=str, required=False,
        help="Destination folder of all downloads")
    parser.add_argument(
        "--json_path", dest='json_file_path', type=str, required=False,
        default="books_info.json",
        help="Destination folder of json_file with books info")
    # Flags to skip parts of the download.
    parser.add_argument(
        "--skip_imgs", dest='skip_imgs', action='store_true', required=False,
        default=False, help="Skip downloading images")
    parser.add_argument(
        "--skip_txt", dest='skip_txt', action='store_true', required=False,
        default=False, help="Skip downloading txt")
    # Page range to parse.
    parser.add_argument(
        "--start_page", type=int, dest='start_page', required=False,
        default=1, help="First page number to parse")
    # NOTE(review): this default fetches the category page eagerly on every
    # call, even when --end_page is supplied -- confirm that is acceptable.
    parser.add_argument(
        "--end_page", type=int, dest='end_page', required=False,
        default=get_last_page_for_category(settings.CATEGORY_URL),
        help="Last page number to parse")
    return parser.parse_args()
| true |
5db66cf075f2aa3534c56d375ccb5ae36b0d90b3 | Python | thavel/git-projects | /git_projects/command.py | UTF-8 | 2,154 | 2.546875 | 3 | [] | no_license | import re
from argparse import ArgumentParser
from subprocess import (Popen, PIPE)
from git_projects.shortcut import ShortcutHolder
PROJECT_PREFIX = '@'
SHORTCUT_PREFIX = '--'
class GitError(Exception):
    """Raised when an invoked git (sub)command exits with a non-zero status."""
    pass
def parse_args():
    """Build the argument parser (project list plus one boolean flag per
    registered shortcut) and parse argv, returning (known, unrecognized)."""
    parser = ArgumentParser(description='git-projects')
    parser.add_argument(PROJECT_PREFIX, nargs='+', help='project name')
    # One store_true flag per registered shortcut.
    for shortcut in ShortcutHolder.REGISTRY.values():
        parser.add_argument(SHORTCUT_PREFIX + shortcut.option,
                            action='store_true',
                            help=shortcut.description)
    return parser.parse_known_args()
def parse_command():
    """Split the parsed CLI arguments into project names, git arguments and
    shortcut flags.

    Returns:
        (projects, git_args, shortcut_args): project names with their '@'
        prefix stripped, the remaining git arguments (known positionals plus
        unrecognized options), and the dict of shortcut flag values.
    """
    args, ignored_args = parse_args()

    # Shortcut flags are everything except the positional project/git list.
    shortcut_args = args.__dict__.copy()
    del shortcut_args[PROJECT_PREFIX]

    # Partition positionals: '@name' selects a project, the rest is for git.
    positionals = args.__dict__.get(PROJECT_PREFIX)
    projects = [arg[len(PROJECT_PREFIX):] for arg in positionals
                if arg.startswith(PROJECT_PREFIX)]
    git_args = [arg for arg in positionals
                if not arg.startswith(PROJECT_PREFIX)]

    return projects, git_args + ignored_args, shortcut_args
def git(target, *args):
    """Run ``git *args`` in the *target* directory and return its stdout.

    Any argument containing a backtick section (`` `sub args` ``) is first
    resolved by running that sub-command (split on spaces) in *target* and
    splicing its output -- newlines stripped -- back into the argument.
    Raises GitError if any invoked command exits non-zero.

    NOTE(review): re.sub treats backslashes in the replacement as escapes,
    so sub-command output containing '\\' may be mangled -- confirm.
    """
    cmd = list()
    for arg in args:
        regex = re.compile(r'`(?P<subargs>.+)`')
        match = re.search(regex, arg)
        if not match:
            cmd.append(arg)
            continue
        # Run the backtick sub-command and substitute its output in place.
        subargs = match.group('subargs').split(' ')
        output = _exec(target, subargs)
        output = re.sub(regex, output, arg)
        output = output.replace('\n', '')
        cmd.append(output)
    cmd = ['git'] + list(cmd)
    return _exec(target, cmd)
def _exec(target, *args):
"""
Open a command as a subprocess.
"""
popen = Popen(*args, close_fds=True, cwd=target,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise GitError(err.decode('utf-8'))
return out.decode('utf-8')
| true |
6f0a312cd79bf7aa57b0793d076d029e2fc3aea7 | Python | tjdgus3160/algorithm | /BOJ/Gold5/적록색약.py | UTF-8 | 1,137 | 3.015625 | 3 | [] | no_license | # 10026번
from collections import deque
import sys
input=sys.stdin.readline
def bfs(x, y, c):
    """Flood-fill (BFS) the component of colour c containing (x, y),
    marking visited cells with c in the global tmp grid."""
    queue = deque([(x, y)])
    tmp[y][x] = c
    while queue:
        cx, cy = queue.popleft()
        for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if 0 <= nx < n and 0 <= ny < n and arr[ny][nx] == c and not tmp[ny][nx]:
                tmp[ny][nx] = c
                queue.append((nx, ny))
def bfs2(x, y, c):
    """Colour-blind flood-fill: like bfs(), but treats 'R' and 'G' as one
    colour while expanding, marking visited cells in the global tmp grid."""
    queue = deque([(x, y)])
    tmp[y][x] = c
    while queue:
        cx, cy = queue.popleft()
        for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if 0 <= nx < n and 0 <= ny < n and arr[ny][nx] in 'RG' and not tmp[ny][nx]:
                tmp[ny][nx] = c
                queue.append((nx, ny))
# BOJ 10026: count connected same-colour regions for normal vision and for
# red-green colour blindness.
n = int(input())
arr = [input().rstrip() for _ in range(n)]
res = [0, 0]  # [regions with normal vision, regions with R/G merged]

# Pass 1: normal vision -- R, G and B are three distinct colours.
tmp = [[0]*n for _ in range(n)]
for y in range(n):
    for x in range(n):
        if not tmp[y][x]:
            res[0] += 1
            bfs(x, y, arr[y][x])

# Pass 2: colour-blind vision -- R and G count as the same colour.
tmp = [[0]*n for _ in range(n)]
for y in range(n):
    for x in range(n):
        if not tmp[y][x]:
            res[1] += 1
            if arr[y][x] == 'B':
                bfs(x, y, arr[y][x])
            else:
                bfs2(x, y, arr[y][x])

print(*res)
f21bab6164c808e07d66d78a4974a8538d3cb3d2 | Python | sebhaub/advpt | /advpt-uebung1/py-matproduct/matproduct.py | UTF-8 | 3,379 | 3.75 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = [
'Jonas Gröger <jonas.groeger@gmail.com>',
'Sebastian Haubner <seb.haubner@gmail.com>']
# Python 2/3 compatible input, see http://stackoverflow.com/a/5868543/488265
# Python 2: Rename raw_input to input
# Python 3: This will do nothing.
try:
    input = raw_input  # Python 2: expose raw_input under the name input
except NameError:
    pass  # Python 3: raw_input does not exist; builtin input() already returns str
class Matrix(object):
    """A dense 2D matrix of numbers stored row-major in a flat list.

    Supports item access via ``m[row, col]``, matrix multiplication via
    ``*`` and pretty-printing via ``str()``.
    """

    def __init__(self, rows, cols):
        """Create a rows x cols matrix initialised with zeros."""
        self.rows = rows
        self.cols = cols
        self.cells = [0 for _ in range(self.rows * self.cols)]

    def _assert_is_valid_index(self, row, col):
        """Raise IndexError if (row, col) lies outside the matrix."""
        row_inside_matrix = 0 <= row < self.rows
        column_inside_matrix = 0 <= col < self.cols
        if not all([row_inside_matrix, column_inside_matrix]):
            # Bug fix: the placeholders were passed as extra args and never
            # substituted; format the message before raising.
            raise IndexError(
                'Index row={} column={} is outside of matrix.'.format(row, col))

    def _assert_is_multiplyable(self, other_matrix):
        """Raise ValueError unless self * other_matrix is dimensionally valid."""
        if self.cols != other_matrix.rows:
            raise ValueError('The column count of the first matrix must be equal to the row count of the second.')

    def __mul__(self, other_matrix):
        """Return the matrix product ``self * other_matrix``."""
        self._assert_is_multiplyable(other_matrix)
        result = Matrix(self.rows, other_matrix.cols)
        for row in range(self.rows):
            for col in range(other_matrix.cols):
                # Dot product of self's row with other_matrix's column.
                acc = result[row, col]
                for k in range(self.cols):
                    acc += self[row, k] * other_matrix[k, col]
                result[row, col] = acc
        return result

    def __str__(self):
        """Return the matrix as left-aligned, column-padded rows of text."""
        result = ''
        # Bug fix: pad to the widest cell's string form. The old
        # ``len(str(max(self.cells)))`` under-counted when a negative entry
        # was wider than the maximum, and crashed on an empty matrix.
        widths = [len(str(cell)) for cell in self.cells]
        max_width = (max(widths) if widths else 0) + 1
        for i in range(self.rows):
            for j in range(self.cols):
                result += ('{0: <' + str(max_width) + '}').format(self[i, j])
            result += '\n'
        return result

    # m[1, 2]
    def __getitem__(self, key):
        """Return the cell at key == (row, col); raises IndexError when out of range."""
        row = key[0]
        col = key[1]
        self._assert_is_valid_index(row, col)
        return self.cells[row * self.cols + col]

    # m[1, 2] = 5
    def __setitem__(self, key, value):
        """Set the cell at key == (row, col).  Note: bounds are not checked here."""
        row = key[0]
        col = key[1]
        self.cells[row * self.cols + col] = value
def input_int(prompt=''):
    """Prompt repeatedly until the user enters a valid integer, then return it."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            # Non-numeric entry: ask again.
            pass
if __name__ == '__main__':
    # Interactive driver: read dimensions, fill both matrices row by row,
    # then print their product.
    print(('=====================================\n'
           'Input dimensions of the two matrices.\n'
           '====================================='))
    # A is s1 x s2, B is s2 x s3 (shared inner dimension s2).
    # NOTE(review): the int() wrappers are redundant -- input_int() already
    # returns an int.
    s1 = int(input_int('s1: '))
    s2 = int(input_int('s2: '))
    s3 = int(input_int('s3: '))

    A = Matrix(s1, s2)
    B = Matrix(s2, s3)

    print(('=================================\n'
           'Values of matrix one ({} values).\n'
           '================================='
           ).format(s1 * s2))
    for i in range(s1):
        for j in range(s2):
            A[i, j] = input_int()

    print(('=================================\n'
           'Values of matrix two ({} values).\n'
           '================================='
           ).format(s2 * s3))
    for i in range(s2):
        for j in range(s3):
            B[i, j] = input_int()

    print(('=======================================\n'
           'Multiplication of matrices one and two.\n'
           '======================================='
           ))
    print(A * B)
| true |