blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e2658e24942980f0679f67c515ec7c6ce134aff3 | Python | syiswell/python-recommender-system | /main.py | UTF-8 | 2,678 | 2.984375 | 3 | [
"MIT"
] | permissive | import pandas as pd
from Lib.ExecEval import Timing
from Dataset import DatasetLoader
from Experiments import ExperimentsResult
from Algorithms.UserKNN import UserKNN
from Algorithms.ItemKNN import ItemKNN
from Algorithms.Averaging.GlobalAverage import GlobalAverage
from Algorithms.Averaging.UserItemAverage import UserItemAverage
from Algorithms.Averaging.UserAverage import UserAverage
from Algorithms.Averaging.ItemAverage import ItemAverage
def main():
    """Benchmark every recommender algorithm on the five MovieLens folds.

    For each algorithm the five folds are loaded, the model is trained and
    evaluated, per-fold MAE / prediction-coverage rows are collected, and
    per-algorithm averages are printed and tabulated at the end.
    """
    summary_rows = []
    per_fold_rows = []
    for algorithm in (GlobalAverage, UserAverage, ItemAverage, UserItemAverage, UserKNN, ItemKNN):
        fold_maes = []
        fold_coverages = []
        for fold_id in range(1, 6):
            # Time the whole load/train/evaluate cycle for this fold.
            time_evaluator = Timing()
            m_lens = DatasetLoader(fold_id, 'u', f'Movie Lens Fold {fold_id}')
            rs = algorithm(m_lens.train_ratings, fold_id=fold_id)
            rs.train()
            # KNN defaults: k is unbounded and the similarity threshold is 0;
            # set rs.k / rs.threshold here to experiment with other values.
            algorithm_name = rs.__class__.__name__
            print(f'\n> Running {algorithm_name} on Movie Lens, Fold: {fold_id}:\n')
            predictions = rs.evaluate(m_lens.test_ratings)
            experiment_result = ExperimentsResult(predictions)
            if hasattr(rs, 'threshold'):
                print(f'Threshold: {rs.threshold}, K: {rs.k}')
            print(f'MAE: {experiment_result.mae}')
            print(f'PCov: {experiment_result.prediction_coverage}')
            time_evaluator.end()
            time_evaluator.log()
            fold_maes.append(experiment_result.mae)
            fold_coverages.append(experiment_result.prediction_coverage)
            per_fold_rows.append([algorithm_name, fold_id,
                                  round(experiment_result.mae, 3),
                                  round(experiment_result.prediction_coverage, 3)])
        mae_avg = sum(fold_maes) / len(fold_maes)
        pc_avg = sum(fold_coverages) / len(fold_coverages)
        # Keeping more details
        summary_rows.append([algorithm_name, mae_avg, pc_avg])
        print('Average of this algorithm on all folds:')
        print(f'\nMAE Average: {mae_avg}')
        print(f'PCov Average: {pc_avg}')
        print('\n' + '=' * 60 + '\n')
    results_df = pd.DataFrame(summary_rows, columns=['Algorithm', 'MAE', 'Prediction Coverage'])
    fold_results_df = pd.DataFrame(per_fold_rows, columns=['Algorithm', 'fold_id', 'MAE', 'PC'])
    print('\nAverage of all algorithms:\n')
    print(results_df)
    print('\n', '-' * 60)
    print('\nAverage of all algorithms on different folds:\n')
    print(fold_results_df)


if __name__ == '__main__':
    main()
| true |
5c361a95706ced4266a1fc6c21c982d55d938850 | Python | apryor6/flaskerize | /flaskerize/utils_test.py | UTF-8 | 1,900 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | from os import path
import pytest
from flaskerize import utils
def test_split_file_factory():
    # Default ':' delimiter splits the module part from the factory name.
    result = utils.split_file_factory("wsgi:app")
    assert result == ("wsgi", "app")
def test_split_file_factory_with_other_delim():
    # A custom delimiter string is honoured.
    result = utils.split_file_factory("wsgi::app", delim="::")
    assert result == ("wsgi", "app")
def test_split_file_factory_with_path():
    # Directory components stay attached to the module part.
    result = utils.split_file_factory("my/path/wsgi:app")
    assert result == ("my/path/wsgi", "app")
def test_split_file_factory_with_py_file_existing(tmp_path):
    import os

    # When <name>.py exists on disk, the returned root is the full filename.
    target = os.path.join(tmp_path, "wsgi.py")
    with open(target, "w") as handle:
        handle.write("")
    module_part, func_part = utils.split_file_factory(f"{target[:-3]}:app")
    assert module_part == target
    assert func_part == "app"
def test_split_file_factory_with_a_default_path():
    # Without an explicit ':<name>', the supplied default function name is used.
    result = utils.split_file_factory("shake/and", default_func_name="bake")
    assert result == ("shake/and", "bake")
def test_split_file_factory_respects_explicity_path_over_a_default_path():
    # An explicit ':<name>' wins over default_func_name.
    result = utils.split_file_factory("shake/and:bake", default_func_name="take")
    assert result == ("shake/and", "bake")
def test_split_file_factory_handles_packages(tmp_path):
    import os

    # A directory containing __init__.py is accepted as a package target.
    pkg_dir = path.join(tmp_path, "my_app")
    os.makedirs(pkg_dir)
    with open(f"{pkg_dir}/__init__.py", "w") as handle:
        handle.write("")
    pkg_root, _ = utils.split_file_factory(pkg_dir)
    assert "my_app" in pkg_root
def test_split_file_factory_raises_on_invalid_packages(tmp_path):
    import os

    # A directory without __init__.py is not a package and must be rejected.
    pkg_dir = path.join(tmp_path, "my_app")
    os.makedirs(pkg_dir)
    with pytest.raises(SyntaxError):
        utils.split_file_factory(pkg_dir)
def test_a():
    # More than one delimiter in the spec is a syntax error.
    with pytest.raises(ValueError):
        utils.split_file_factory("oops:this:is:wrong:syntax!")
| true |
5318d677e8ff27a6c1dfeb6b6c877274abb65df1 | Python | aplassard/Compiler | /source/TypeChecker/TypeChecker.py | UTF-8 | 653 | 2.96875 | 3 | [] | no_license | from DeclerationAnalyzer import DeclerationAnalyzer
class TypeChecker(object):
    """Walks a parsed program AST and runs declaration analysis on it.

    Note: written for Python 2 (uses print statements).
    """

    def __init__(self, ast):
        # Root node of the abstract syntax tree produced by the parser.
        self.ast = ast

    def run(self):
        """Print a banner, then analyze every declaration and statement
        of the program body with a DeclerationAnalyzer."""
        print '------------------------------'
        print '---Starting Lexial Analysis---'
        print '------------------------------'
        print
        print 'Program Name: %s' % self.ast.program_header.identifier.identifier.token_content
        print
        print 'Program Declerations:'
        DA = DeclerationAnalyzer()
        # First pass: declarations; second pass: statements of the body.
        for dec in self.ast.program_body.declerations:
            DA(dec)
        for stat in self.ast.program_body.statements:
            DA.test_statement(stat)
| true |
94a1e9589c2ffccaa79867e1e39a11df9e4dc855 | Python | gab-guimaraes/python | /applicationsPy/python-POO/3-Spotify/Program.py | UTF-8 | 690 | 2.6875 | 3 | [] | no_license | import mysql.connector
from Artist import Artist
from Music import Music
# Build a small catalogue of artists and songs, print every song, then
# persist one artist row to the MySQL "spotify" database.
a = Artist("3 Doors Down", "EUA")
b = Artist("Blink182", "EUA")
c = Artist("HIM", "Finland")

m = Music("Here without u", a, 3.18)
m2 = Music("Kriptonite", a, 4.10)
m3 = Music("Always", b, 3.00)
m4 = Music("Wickd Game", c, 4.53)

listOfMusic = [m, m2, m3, m4]
for v in listOfMusic:
    v.imprimeTudo()
    print('\n')

mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="@blink182",
    database="spotify"
)

mycursor = mydb.cursor()
try:
    # Parameterized query: values are bound by the driver, never interpolated.
    sql = "INSERT INTO Artist (name, country) VALUES (%s, %s)"
    val = (a.nome, a.pais)
    mycursor.execute(sql, val)
    mydb.commit()
    print("record inserted")
finally:
    # Fix: the cursor and connection were previously leaked (never closed).
    mycursor.close()
    mydb.close()
| true |
f1c4c3b47b219e23234b61c9fa515a4d8e8a7f54 | Python | ne9een/Movie-Trailer-Website | /media.py | UTF-8 | 626 | 3.203125 | 3 | [
"Unlicense"
] | permissive | import webbrowser
class Movie():
    """One favourite movie shown on the trailer web page.

    Holds the display data for a single film; the poster image on the page
    links to the YouTube trailer.
    """

    def __init__(self, movie_title, movie_storyline,
                 poster_image, trailer_youtube):
        # Map constructor arguments onto the public attribute names.
        self.title, self.storyline = movie_title, movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Open this movie's YouTube trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
| true |
25b245469bd5e8bbab86ac345da960409c6316de | Python | xiongchenyan/cxPyLib | /IndriRelate/CtfLoader.py | UTF-8 | 2,241 | 2.8125 | 3 | [] | no_license | '''
Created on Dec 5, 2013
load ctf from a file
file is made by c++ calling IndriAPI in query enviroment
will load and keep in class, output as service
@author: cx
'''
import math
class TermCtfC(object):
    """Collection term frequency (ctf) table.

    Loads "term count" lines (the first line of the file holds the total
    term count) and answers ctf / probability / log-idf queries.
    Note: written for Python 2 (print statements, dict.items() list).
    """

    def __init__(self,InName = ""):
        self.Init()
        if "" != InName:
            self.Load(InName)

    def Init(self):
        # Total number of term occurrences in the collection.
        self.TotalCnt = 0
        # term -> collection frequency.
        self.hTermCtf = {}
        return True

    def Load(self,InName):
        '''
        first line is always total cnt
        '''
        cnt = 0
        for line in open(InName):
            vCol = line.strip().split()
            if cnt == 0:
                # First line: total collection count only.
                cnt += 1
#                 print vCol[0]
                self.TotalCnt = int(vCol[0])
                continue
            if len(vCol) < 2:
                # Malformed line: report and skip.
                print "ctf error line [%s]" %(line)
                continue
            term = vCol[0]
            value = float(vCol[1])
            self.hTermCtf[term] = value
        return True

    def dump(self,OutName):
        # Write the table back out, terms sorted by descending frequency.
        out = open(OutName,'w')
        print >>out, "%d" %(self.TotalCnt)
        l = self.hTermCtf.items()
        l.sort(key=lambda item:item[1], reverse=True)
        for key,value in l:
            print >>out, key + "\t%f" %(value)
        out.close()
        print "dump to [%s] finished" %(OutName)
        return True

    def Empty(self):
        # True when no term has been loaded or inserted yet.
        return self.hTermCtf == {}

    def insert(self,term,cnt=1):
        # Add cnt occurrences of term, updating the collection total too.
        self.TotalCnt += cnt
        if not term in self.hTermCtf:
            self.hTermCtf[term] = cnt
        else:
            self.hTermCtf[term] += cnt
        return True

    def GetCtf(self,term):
        # Collection frequency of term; 0 for unseen terms.
        if not term in self.hTermCtf:
            return 0
        return self.hTermCtf[term]

    def GetCtfProb(self,term):
        # Relative frequency; unseen terms get 0.5 (smoothing sentinel),
        # which also avoids a zero in GetLogIdf's logarithm.
        CTF = self.GetCtf(term)
        if 0 == CTF:
            return 0.5
        return CTF / float(self.TotalCnt)

    def GetLogIdf(self,term):
        # log(1 / p(term)); higher for rarer terms.
        return math.log(1.0/self.GetCtfProb(term))
def UnitTest(TermCtrIn,TestTermIn):
    # Load the ctf table from TermCtrIn and print the ctf of every term
    # listed (one per line) in TestTermIn. Python 2 print statements.
    TermCtfCenter = TermCtfC(TermCtrIn)
    for line in open(TestTermIn):
        term = line.strip()
        print term + "\t%f" %(TermCtfCenter.GetCtf(term))
    return True
4331bd7c4c1352c128b765802754ec4ba297cea6 | Python | MaterialsDiscovery/PyChemia | /pychemia/analysis/surface.py | UTF-8 | 14,660 | 2.671875 | 3 | [
"MIT"
] | permissive |
import numpy as np
import pychemia
import itertools
import scipy.spatial
from scipy.spatial import qhull
from pychemia.utils.periodic import covalent_radius
def ext_gcd(a, b):
    """Extended Euclidean algorithm.

    Return [x, y, d] such that a*x + b*y = d, where d = gcd(a, b).
    """
    v1 = [1, 0, a]
    v2 = [0, 1, b]
    if a > b:
        a, b = b, a
    while v1[2] > 0:
        # Fix: must be integer (floor) division. The original ``/`` was
        # written for Python 2; under Python 3 it produces floats, the loop
        # terminates after one step and the returned coefficients are wrong.
        q = v2[2] // v1[2]
        for i in range(0, len(v1)):
            v2[i] -= q * v1[i]
        v1, v2 = v2, v1
    return v2
def print_vector(v):
    """Print the first three components of *v* as ``v = <components>`` with no
    separators between them, followed by a blank line."""
    rendered = ''.join(str(component) for component in v[:3])
    print('v = ' + rendered + '\n')
def rotate_along_indices(structure, h, k, l, layers, tol=1.e-5):
    """Build a slab cell oriented along the (h, k, l) Miller plane.

    A new lattice basis is constructed whose first two vectors lie in the
    (h, k, l) plane; the atoms are wrapped into that cell, and the result is
    stacked ``layers`` times along the third axis.

    :param structure: PyChemia Structure (periodic crystal)
    :param h: Miller index
    :param k: Miller index
    :param l: Miller index
    :param layers: number of repetitions along the slab normal
    :param tol: tolerance used when deciding whether to reduce p, q
    :return: PyChemia Structure with the reoriented (and stacked) cell
    """
    cell = structure.cell
    a1 = np.array(cell[0])
    a2 = np.array(cell[1])
    a3 = np.array(cell[2])
    rcell = structure.lattice.reciprocal().cell
    # NOTE(review): the reciprocal-lattice vectors below are currently unused.
    b1 = np.array(rcell[0])
    b2 = np.array(rcell[1])
    b3 = np.array(rcell[2])

    # Solve equation pk + ql = 1 for p and q using extended_Euclidean algorithm
    v = ext_gcd(k, l)
    p = v[0]
    q = v[1]
    # print('p = ', p)
    # print('q = ', q)

    k1 = np.dot(p * (k * a1 - h * a2) + q * (l * a1 - h * a3), l * a2 - k * a3)
    k2 = np.dot(l * (k * a1 - h * a2) - k * (l * a1 - h * a3), l * a2 - k * a3)
    # print("\n\nk1 = ", k1)
    # print("k2 = ", k2)

    # Reduce (p, q) so v1 is as orthogonal as possible to v2.
    if abs(k2) > tol:
        c = -int(round(k1 / k2))
        # print("c = -int(round(k1/k2)) = ", c)
        p, q = p + c * l, q - c * k

    # Calculate lattice vectors {v1, v2, v3} defining basis of the new cell
    v1 = p * (k * a1 - h * a2) + q * (l * a1 - h * a3)
    v2 = l * a2 - k * a3
    n = p * k + q * l
    v = ext_gcd(n, h)
    a = v[0]
    b = v[1]
    v3 = b * a1 + a * p * a2 + a * q * a3

    newbasis = np.array([v1, v2, v3])
    # transformation = np.array([[p*k+q*l, -h*p, -h], [0, l, -k], [b, a*p, a*q]])
    # inv_transformation = np.linalg.inv(transformation)

    symbols = []
    # The np.tile term adds a zero translation (dot with the zero vector),
    # so positions are effectively unchanged here.
    positions = structure.positions + np.tile(np.dot(structure.cell.T, np.array([0, 0, 0])).reshape(1, 3),
                                              (structure.natom, 1))
    # Express positions in the new basis and wrap into [0, 1).
    reduced = np.linalg.solve(newbasis.T, positions.T).T
    for i in range(3):
        reduced[:, i] %= 1.0
    symbols += structure.symbols
    surf = pychemia.Structure(symbols=symbols, reduced=reduced, cell=newbasis)

    if layers > 1:
        new_surf = surf.supercell((1, 1, layers))
        cell = new_surf.cell
        # cell[2] = cell[2]+(layers+1)*cell[1]+(layers+1)*cell[0]
        surf = pychemia.Structure(symbols=new_surf.symbols, positions=new_surf.positions, cell=cell)

    # print '\n\n********* Lattice vectors of the original cell *********\n\n', cell
    # print '\n\n********* ATOMIC positions in the original cell **********\n', structure.positions
    # print '\nTotal number of atoms in cell = ', structure.natom

    # Now create the surface starting from the original structure
    # surf = structure.copy()
    # surf.set_cell(newbasis)
    # print '\n\n********* New basis of the surface cell *********\n\n', surf.cell
    # print '\n\n********* Atomic coordinates in the newbasis of surface cell *********\n\n', surf.positions
    # surf = surf.supercell((1, 1, layers))
    # a1, a2, a3 = surf.cell
    # surf.set_cell([a1, a2,
    #                np.cross(a1, a2) * np.dot(a3, np.cross(a1, a2)) /
    #                np.linalg.norm(np.cross(a1, a2)) ** 2])

    # Change unit cell to have the x-axis parallel with a surface vector
    # and z perpendicular to the surface:
    # a1, a2, a3 = surf.cell
    # surf.set_cell([(np.linalg.norm(a1), 0, 0),
    #                (np.dot(a1, a2) / np.linalg.norm(a1),
    #                 np.sqrt(np.linalg.norm(a2) ** 2 - (np.dot(a1, a2) / np.linalg.norm(a1)) ** 2), 0),
    #                (0, 0, np.linalg.norm(a3))])

    # Move atoms into the unit cell:
    # scaled = surf.reduced
    # scaled[:, :2] %= 1
    # surf.set_reduced(scaled)
    # surf.center(vacuum=vacuum, axis=2)
    return surf
def get_surface_atoms(structure):
    """Indices of the atoms on the surface of a finite (non-periodic) structure.

    An atom counts as a surface atom when its Voronoi region is open
    (unbounded), i.e. the region's vertex list contains the sentinel -1.

    :param structure: PyChemia Structure; must be non-periodic
    :return: (list) integer indices of the surface atoms
    """
    assert (not structure.is_periodic)
    tessellation = scipy.spatial.Voronoi(structure.positions)
    exterior = []
    for atom_idx in range(structure.natom):
        region = tessellation.regions[tessellation.point_region[atom_idx]]
        if -1 in region:
            exterior.append(atom_idx)
    return exterior
def get_surface_atoms_new(structure, use_covalent_radius=False):
    """Surface-atom indices obtained from the Delaunay triangulation.

    Each tetrahedron contributes four triangular faces; a face shared by two
    tetrahedra is interior, while a face that appears exactly once lies on
    the boundary. The vertices of the boundary faces are the surface atoms.

    :param structure: object exposing ``positions`` (and ``symbols`` when
        ``use_covalent_radius`` is True)
    :param use_covalent_radius: when True, discard tetrahedra whose longest
        edge exceeds 3x the covalent-radius sum of the atom pair
    :return: sorted numpy array of unique surface-atom indices
    """
    dln = scipy.spatial.Delaunay(structure.positions)
    if use_covalent_radius:
        simplices = []
        for j in dln.simplices:
            discard = False
            for ifacet in list(itertools.combinations(j, 3)):
                for ipair in itertools.combinations(ifacet, 2):
                    distance = np.linalg.norm(structure.positions[ipair[0]] - structure.positions[ipair[1]])
                    cov_distance = covalent_radius(structure.symbols[ipair[0]]) + covalent_radius(
                        structure.symbols[ipair[1]])
                    if distance > 3.0*cov_distance:
                        print('Distance: %f Cov-distance: %f' % (distance, cov_distance))
                        discard = True
                        break
            if not discard:
                print(j)
                simplices.append(j)
    else:
        simplices = dln.simplices

    # Count how many tetrahedra share each (canonically sorted) face.
    # The previous implementation found duplicates with an O(n^2) list scan;
    # a single counting pass gives the same boundary set.
    face_counts = {}
    for simplex in simplices:
        for face in itertools.combinations(simplex, 3):
            key = tuple(sorted(face))
            face_counts[key] = face_counts.get(key, 0) + 1
    boundary_faces = [face for face, count in face_counts.items() if count == 1]
    return np.unique(np.array(boundary_faces).flatten())
def get_onion_layers(structure):
    """
    Returns the different layers of a finite structure

    Surface atoms are peeled off repeatedly (Voronoi open-region test) until
    the remaining core is too small or degenerate; each peel becomes one
    layer, re-labelled back to indices of the original structure.

    :param structure: PyChemia Structure, must be non-periodic
    :return: list of layers, each a list/range of atom indices (outermost first)
    """
    assert (not structure.is_periodic)

    layers = []
    cur_st = structure.copy()
    morelayers = True
    while morelayers:
        pos = cur_st.positions
        # Too few atoms for a 3D Voronoi tessellation: all remaining atoms
        # form the final core layer.
        if len(pos) <= 4:
            core = range(cur_st.natom)
            layers.append(core)
            break
        # Dummy hydrogen structure used only to measure the current volume.
        st = pychemia.Structure(positions=pos, symbols=len(pos)*['H'], periodicity=False)
        st.canonical_form()
        # print('The current volume is %7.3f' % st.volume)
        # Nearly flat/degenerate remainder: treat it all as one core layer.
        if st.volume < 0.1:
            core = range(cur_st.natom)
            layers.append(core)
            break
        try:
            voro = scipy.spatial.Voronoi(pos)
            surface = [i for i in range(cur_st.natom) if -1 in voro.regions[voro.point_region[i]]]
        except qhull.QhullError:
            # Degenerate geometry: everything left counts as surface, stop after.
            surface = range(cur_st.natom)
            morelayers = False
        layers.append(surface)
        if not morelayers:
            break
        # Core = atoms not on the current surface; they form the next shell.
        core = [i for i in range(cur_st.natom) if i not in surface]
        if len(core) == 0:
            break
        symbols = list(np.array(cur_st.symbols)[np.array(core)])
        positions = cur_st.positions[np.array(core)]
        cur_st = pychemia.Structure(symbols=symbols, positions=positions, periodicity=False)
    # Re-label each layer's local indices back to the original structure's
    # numbering (layer i indexes into the atoms not yet included).
    new_layers = [layers[0]]
    included = list(layers[0])
    acumulator = 0
    for i in range(1, len(layers)):
        noincluded = [j for j in range(structure.natom) if j not in included]
        # print 'Layer: %3d  Atoms on Surface: %3d  Internal Atoms: %3d' % (i,
        #                                                                  len(included) - acumulator,
        #                                                                  len(noincluded))
        acumulator = len(included)
        relabel = [noincluded[j] for j in layers[i]]
        included += relabel
        new_layers.append(relabel)
    return new_layers
def get_facets(structure, surface, seed, distance_tolerance=2.0):
    """Triangular Delaunay facets of the surface that contain the seed atom.

    :param structure: PyChemia Structure
    :param surface: list of surface-atom indices (into structure)
    :param seed: atom index; must be present in surface
    :param distance_tolerance: maximum allowed ratio of a facet's longest
        edge to the covalent-radii sum of the atoms at its ends
    :return: (selected_facets, mintol + 0.1) where selected_facets are
        3-tuples of structure indices and the second value is slightly above
        the tightest edge/covalent ratio seen (useful to retry looser)
    :raises ValueError: if seed is not a surface atom
    """
    if seed not in surface:
        print('Error: seed not in surface')
        print('Seed: ', seed)
        print('Surface: ', surface)
        raise ValueError('Seed not in surface')
    idx_seed = surface.index(seed)
    pos = structure.positions[surface]
    dl = scipy.spatial.Delaunay(pos)
    # Delaunay tetrahedrals
    # The indices for each tetragon are relative to the surface
    # not the structure
    tetra = [x for x in dl.simplices if idx_seed in x]
    # Determining Facets
    facets = []
    for x in tetra:
        facets += [y for y in list(itertools.combinations(x, 3)) if idx_seed in y]
    # Indices of facets relative to surface not the structure
    facets = list(tuple([sorted(x) for x in facets]))
    selected_facets = []
    mintol = 1E5
    for x in facets:
        # Longest edge of this facet and the pair of atoms realizing it.
        dm = scipy.spatial.distance_matrix(pos[x], pos[x])
        maxdist = max(dm.flatten())
        pair = np.where(dm == maxdist)
        atom1 = x[pair[0][0]]
        atom2 = x[pair[1][0]]
        covrad1 = pychemia.utils.periodic.covalent_radius(structure.symbols[surface[atom1]])
        covrad2 = pychemia.utils.periodic.covalent_radius(structure.symbols[surface[atom2]])
        # Track the tightest ratio seen so callers can relax the tolerance.
        if maxdist / (covrad1 + covrad2) < mintol:
            mintol = maxdist / (covrad1 + covrad2)
        if maxdist < distance_tolerance * (covrad1 + covrad2):
            # Converting indices from surface to structure
            st_facet = tuple([surface[x[i]] for i in range(3)])
            selected_facets.append(st_facet)
    return selected_facets, mintol + 0.1
def get_center_vector(structure, facet):
    """Corner positions, centroid and outward unit normal of a triangular facet.

    The normal is flipped, if necessary, so it points away from the
    structure's center of mass.

    :param structure: PyChemia Structure
    :param facet: 3-tuple of atom indices
    :return: ((v0, v1, v2), facet_center, facet_vector)
    """
    corners = [np.array(structure.positions[idx]) for idx in facet]
    v0, v1, v2 = corners
    centroid = 1.0 / 3.0 * (v0 + v1 + v2)
    normal = pychemia.utils.mathematics.unit_vector(np.cross(v1 - v0, v2 - v0))
    # Orient the normal outward (away from the center of mass).
    if np.dot(centroid - structure.center_mass(), normal) < 0:
        normal *= -1
    return (v0, v1, v2), centroid, normal
def attach_to_facet(structure, facet):
    """Randomly pick an attachment site on a facet.

    With probability 0.4 the facet center is chosen; otherwise one of the
    three edge midpoints is picked (0.2 each).

    :return: (site position, outward unit normal, tuple of atom indices involved)
    """
    (v0, v1, v2), facet_center, facet_vector = get_center_vector(structure, facet)
    draw = np.random.random()
    if draw < 0.4:
        return facet_center, facet_vector, tuple(facet)
    if draw < 0.6:
        edge, ends = (facet[0], facet[1]), (v0, v1)
    elif draw < 0.8:
        edge, ends = (facet[1], facet[2]), (v1, v2)
    else:
        edge, ends = (facet[0], facet[2]), (v0, v2)
    return 0.5 * (ends[0] + ends[1]), facet_vector, edge
def random_attaching(structure, seed, target_species, natom_crystal, radius=1.8, basetol=4.0):
    """Attach one atom of each target species at a random surface facet.

    Repeatedly picks a facet around the seed atom (relaxing the distance
    tolerance if none is found) until the candidate position is acceptable
    for every species (no atom closer than the species' covalent radius).

    :param structure: PyChemia Structure
    :param seed: surface atom index used as attachment anchor
    :param target_species: iterable of chemical symbols to attach
    :param natom_crystal: number of crystal atoms (cluster atoms come after)
    :param radius: multiple of the covalent radius used as attachment distance
    :param basetol: initial facet distance tolerance
    :return: (dict species -> new Structure, chosen facet, site, unit normal),
        or (None, None, None, None) when no facet can be found
    """
    lys = pychemia.analysis.surface.get_onion_layers(structure)
    surface = lys[0]
    counter = 0
    while True:
        # Re-seed when the current seed fell off the surface or a previous
        # round exhausted its facets.
        if seed not in surface or counter > 0:
            print('Current Seed not in surface, searching a new seed')
            seed = find_new_seed(structure, surface, seed, natom_crystal)
        tol = basetol
        facets = []
        # Relax the tolerance until at least one facet appears, giving up
        # once it exceeds twice the base tolerance.
        while True:
            facets, mintol = get_facets(structure, surface, seed, distance_tolerance=tol)
            if len(facets) > 0:
                print('Possible Facets', facets)
                break
            elif mintol > 2 * basetol:
                return None, None, None, None
            else:
                tol = mintol
                print('No facets found, increasing tolerance to ', tol)
        counter = 0
        while True:
            counter += 1
            rnd = np.random.randint(len(facets))
            facet_chosen = facets[rnd]
            print('Seed: %3d  Number of facets: %3d  Facet chosen: %s' % (seed, len(facets), facet_chosen))
            # NOTE(review): atoms_facet is computed but never used afterwards.
            center, uvector, atoms_facet = attach_to_facet(structure, facet_chosen)

            new_sts = {}
            good_pos = 0
            for specie in target_species:
                cov_rad = pychemia.utils.periodic.covalent_radius(specie)
                # Candidate position: along the outward normal from the site.
                vec = center + radius * cov_rad * uvector
                new_symbols = list(structure.symbols) + [specie]
                new_positions = np.concatenate((structure.positions, [vec]))
                # Minimum interatomic distance, ignoring the diagonal.
                dist_matrix = scipy.spatial.distance_matrix(new_positions, new_positions)
                identity = np.eye(len(new_positions), len(new_positions))
                mindist = np.min((dist_matrix + 100 * identity).flatten())
                if mindist > cov_rad:
                    good_pos += 1
                    print('We have a minimal distance of', mindist)
                new_sts[specie] = pychemia.Structure(symbols=new_symbols, positions=new_positions, periodicity=False)
            if good_pos == len(target_species):
                print('Good position selected for all species')
                break
            else:
                print('No enough good positions: %d. One bad position, choosing a new facet' % good_pos)
            if counter > len(facets):
                break
        if good_pos == len(target_species):
            break
    return new_sts, facet_chosen, center, uvector
def find_new_seed(st, surface, seed, natom_crystal):
    """
    Find a new atom to serve as seed for deposition

    :param st: Structure
    :param surface: Indices of atoms on surface
    :param seed: Current seed
    :param natom_crystal: Number of atoms in crystal, cluster atoms are always at the end
    :return: index of the new seed (guaranteed to be a surface atom)
    """
    # Cluster atoms (appended after the crystal atoms) that lie on the surface.
    candidates = [x for x in surface if x > natom_crystal]
    if seed in surface:
        new_seed = seed
    else:
        # First Option: the surface atom closest to the old seed.
        dists = scipy.spatial.distance_matrix(st.positions[seed].reshape((1, 3)),
                                              st.positions[surface])
        new_seed = surface[dists[0].argsort()[0]]
        # Second option: prefer a cluster atom (ordered by distance to the
        # center of mass) that still exposes reachable facets.
        # Fix: a redundant, immediately-overwritten distance_matrix call that
        # preceded this branch has been removed (it was dead code).
        if len(candidates) > 0:
            dists = scipy.spatial.distance_matrix(st.center_mass().reshape((1, 3)), st.positions[candidates])
            npcandidates = np.array(candidates)[dists[0].argsort()]
            print('Candidates ordered by distance to CM', npcandidates)
            for i in npcandidates:
                facets, mintol = get_facets(st, surface, i, distance_tolerance=1.0)
                if mintol < 5.0:
                    new_seed = i
                    break
    print('Seed: %3d => %3d' % (seed, new_seed))
    assert (new_seed in surface)
    return int(new_seed)
| true |
6aed8030729478f2a1690d5c7941991300cca5fe | Python | ddoyen/premierlangage | /server/serverpl/qa/mixins.py | UTF-8 | 1,106 | 2.78125 | 3 | [] | no_license | # encoding: utf-8
import datetime
from django.utils import timezone
class DateMixin:
    """Adds human-friendly "time ago" rendering for a model's pub_date and
    update_date fields."""

    @staticmethod
    def verbose_date(date):
        """Render *date* relative to now: seconds/minutes/hours/days ago,
        or the absolute date (YYYY/MM/DD) once it is 30 or more days old."""
        now = timezone.now()
        delta = now - date
        seconds = delta.seconds
        minutes = seconds // 60
        hours = minutes // 60
        if delta.days == 0:
            # Less than one day old: report the largest non-zero unit.
            if hours > 0:
                return str(hours) + " hours ago"
            if minutes > 0:
                return str(minutes) + " minutes ago"
            if seconds >= 1:
                return str(seconds) + " seconds ago"
            return "just now"
        if delta.days < 30:
            return str(delta.days) + " days ago"
        return str(datetime.datetime.strftime(date, "%Y/%m/%d"))

    def pub_date_verbose(self):
        return DateMixin.verbose_date(self.pub_date)

    def update_date_verbose(self):
        return DateMixin.verbose_date(self.update_date)
| true |
b29bf4309bc4b0096a5ce53633d96e73697a5ee6 | Python | elsuavila/Python3 | /Ejercicios Elsy Avila/Ejer8.py | UTF-8 | 932 | 3.734375 | 4 | [] | no_license | # Escriba un algoritmo que da la cnatidad de monedas de 5-10-12,5-25-50 cent y
#1 Bolivar,diga la cntidad de dinero que se tiene en total
print("Bienvedido al Programa".center(50, "-"))

# One (prompt, value-in-bolivars) pair per supported coin denomination.
# This replaces six copy-pasted input/multiply blocks and a set of dead
# zero-initializations with a single data-driven loop; prompts, order and
# the final output are unchanged.
DENOMINACIONES = (
    ("Ingrese su moneda de 5:.", 0.05),
    ("Ingrese su moneda de 10:.", 0.10),
    ("Ingrese su moneda de 12,5:.", 0.125),
    ("Ingrese su moneda de 25:.", 0.25),
    ("Ingrese su moneda de 50:.", 0.50),
    ("Ingrese su moneda de 1 Bolivar:.", 1),
)

total = 0
for prompt, valor in DENOMINACIONES:
    cantidad = int(input(prompt))
    total += cantidad * valor

print("La cantidad total de dinero que se tiene es de:", total)
| true |
92ff74d84d205bc003e00079264e95f87f74441f | Python | TsunamiBlue/MDP-SSP-Reinforcement-Learning | /material/test_18.py | UTF-8 | 657 | 2.5625 | 3 | [] | no_license | import unittest
from example1 import example_1
from rtdp import RTDP
class Test(unittest.TestCase):

    def test(self):
        """RTDP on example 1 must converge to the known optimal policy."""
        dom = example_1()
        planner = RTDP(dom, 0.9)
        planner.run_n_simulations(50, 500)
        pol = planner.policy()
        expected = {
            "NoFork": "Pick1",
            "Fork1": "Pick2",
            "Fork2": "Pick1",
            "Fork12": "Eat",
        }
        for state_name, action_name in expected.items():
            self.assertEqual(pol[dom.state(state_name)], dom.action(action_name))
def main():
    # Delegate to unittest's command-line test runner.
    unittest.main()


if __name__ == "__main__":
    main()
| true |
0274f17f07102ef353c06349496f28fe00c243a8 | Python | ldarrick/Research-Scripts | /Microscopy/CellProfiler_track_cells.py | UTF-8 | 13,329 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
#
# Last modified: 8 June 2016
# Author: Dhananjay Bhaskar <dbhaskar92@gmail.com>
# Requires package: pip install sortedcontainers
#
import sys
import csv
import math
import collections
from scipy.misc import imread
from sortedcontainers import SortedSet
from matplotlib import collections as MC
import numpy as NP
import matplotlib.cm as cmx
import matplotlib.pyplot as PLT
import matplotlib.colors as colors
# Require exactly one argument: the CellProfiler per-object CSV export.
# Python 2 script (print statements).
if (len(sys.argv) != 2):
    print 'Usage: python CellProfiler_plot_statistics.py /path/to/AllMyExpt_MyCells.csv'
    sys.exit()

# map frames to image path
frame_path_map = {}
with open(sys.argv[1]) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        # Last row wins if a frame number repeats.
        frame_path_map[int(row['Metadata_FrameNumber'])] = row['Metadata_FileLocation']
# track all cells identified at the given time (frame)
def track_cells(time):
    """Track every cell identified at frame *time* and render the results.

    Reads the CellProfiler CSV (sys.argv[1]) three times: first to bin cells
    into five colors by horizontal position, then to collect per-cell area /
    centroid / center-of-mass tracks (extended into the future via
    get_cell_future). Produces Track_*.png (tracks over segmented images),
    Color_*.png (position-colored tracks), Velocity_*.png (speed-colored
    tracks with velocity arrows), a Velocity_FrameNNN.csv of first velocity
    vectors, and three histograms (distance / displacement / average speed).
    Python 2 script (mixed print statements and calls).
    """
    frames = SortedSet()                             # List of image frames
    c_area_map = collections.defaultdict(list)       # Cell Area
    c_aXY_map = collections.defaultdict(list)        # AreaShape_Center
    c_cmi_map = collections.defaultdict(list)        # Location_CenterMassIntensity
    c_cnt_map = collections.defaultdict(list)        # Location_Center

    # Read the whole CSV once; DictReader passes re-use the cached lines.
    fh = open(sys.argv[1], "rb", 10485760)
    lines = fh.readlines()
    linereader = csv.DictReader(lines)

    # Pass 1a: collect X centroids of all cells at this frame.
    centroid_X = []
    c_color_map = {}
    for row in linereader:
        if int(row['Metadata_FrameNumber']) == time:
            centroid_X.append(float(row['AreaShape_Center_X']))
    # Quintile boundaries (20th/40th/60th/80th percentiles) of X positions.
    arr = NP.asarray(centroid_X)
    bins = []
    for i in range(20, 100, 20):
        bins.append(NP.percentile(arr, i))

    # Pass 1b: assign each cell a color by which X quintile it falls in.
    linereader = csv.DictReader(lines)
    for row in linereader:
        if int(row['Metadata_FrameNumber']) == time:
            cent_X = float(row['AreaShape_Center_X'])
            c_num = int(row['ObjectNumber'])
            if cent_X < bins[0]:
                c_color_map[c_num] = 'lime'
            elif cent_X < bins[1]:
                c_color_map[c_num] = 'dodgerblue'
            elif cent_X < bins[2]:
                c_color_map[c_num] = 'gold'
            elif cent_X < bins[3]:
                c_color_map[c_num] = 'aqua'
            else:
                c_color_map[c_num] = 'magenta'

    # Pass 2: record each cell's measurements at this frame and extend its
    # track forward in time via get_cell_future.
    linereader = csv.DictReader(lines)
    for row in linereader:
        if int(row['Metadata_FrameNumber']) == time:
            frames.add(time)
            c_num = int(row['ObjectNumber'])
            c_area_map[c_num].append([int(row['AreaShape_Area'])])
            c_aXY_map[c_num].append([float(row['AreaShape_Center_X']), float(row['AreaShape_Center_Y'])])
            cmi_X = float(row['Location_CenterMassIntensity_X_Outlines'])
            cmi_Y = float(row['Location_CenterMassIntensity_Y_Outlines'])
            c_cmi_map[c_num].append([cmi_X, cmi_Y])
            c_cnt_map[c_num].append([float(row['Location_Center_X']), float(row['Location_Center_Y'])])
            # peer into the future
            max_frame = get_cell_future(c_num, time, c_area_map, c_aXY_map, c_cmi_map, c_cnt_map)
            if max_frame > time:
                frames.add(max_frame)
            print("DEBUG Area Centroid Track:")
            for item in c_aXY_map[c_num]:
                print " ".join(map(str, item[:]))
            print "\n"

    # Print debug information
    plotFrame = frames[len(frames)-1]
    imgFile = frame_path_map[plotFrame]
    print "DEBUG Largest track for time t = " + str(time) + " ends at frame: " + str(plotFrame) + " image file: " + imgFile + "\n"
    figcnt = 0

    # Tracks over segmented cell image
    cnt = 0
    lineclr = 'aqua'  # aqua for red channel, gold for green channel
    for i in range(time, plotFrame+1):
        img = "./OutlineCells/OutlineCells" + "{0:0>3}".format(i) + ".png"
        fig = PLT.figure(figcnt)
        axes = PLT.gca()
        axes.set_xlim([0,1600])
        axes.set_ylim([0,1200])
        axes.invert_yaxis()
        axes.xaxis.tick_top()
        axes.yaxis.tick_left()
        bg = imread(img)
        PLT.imshow(bg, zorder=0)
        for cid in c_aXY_map.keys():
            xdata = []
            ydata = []
            if cnt == 0:
                # First frame: just mark the starting position.
                [x, y] = c_aXY_map[cid][0]
                PLT.scatter(x, y, color=lineclr, s=4, zorder=1)
            else:
                for k in range(0, cnt+1):
                    try:
                        xdata.append(c_aXY_map[cid][k][0])
                        ydata.append(c_aXY_map[cid][k][1])
                    except IndexError:
                        break
                # Draw only tracks that survive up to the current frame.
                # NOTE: 'lines' here shadows the CSV lines list read above
                # (harmless: the CSV list is no longer used at this point).
                if len(xdata) == cnt+1:
                    lines = PLT.plot(xdata, ydata, zorder=1)
                    PLT.setp(lines, 'color', lineclr, 'linewidth', 1.0)
                    PLT.scatter(xdata[-1], ydata[-1], color=lineclr, s=4, zorder=2)
        PLT.savefig("Track_" + "{0:0>3}".format(i) + ".png", bbox_inches='tight', dpi=200)
        PLT.close(fig)
        figcnt = figcnt + 1
        cnt = cnt + 1

    # Tracks color coded by horizontal position
    cnt = 0
    for i in range(time, plotFrame+1):
        fig = PLT.figure(figcnt)
        axes = PLT.gca()
        axes.set_xlim([0,1600])
        axes.set_ylim([0,1200])
        axes.invert_yaxis()
        axes.xaxis.tick_top()
        axes.yaxis.tick_left()
        for cid in c_aXY_map.keys():
            xdata = []
            ydata = []
            if cnt == 0:
                [x, y] = c_aXY_map[cid][0]
                PLT.scatter(x, y, color=c_color_map[cid], s=4)
            else:
                for k in range(0, cnt+1):
                    try:
                        xdata.append(c_aXY_map[cid][k][0])
                        ydata.append(c_aXY_map[cid][k][1])
                    except IndexError:
                        break
                lines = PLT.plot(xdata, ydata)
                PLT.setp(lines, 'color', c_color_map[cid], 'linewidth', 1.0)
                PLT.scatter(xdata[-1], ydata[-1], color=c_color_map[cid], s=4)
        PLT.savefig("Color_" + "{0:0>3}".format(i) + ".png", bbox_inches='tight', dpi=200)
        PLT.close(fig)
        figcnt = figcnt + 1
        cnt = cnt + 1

    # Calculate displacement, distance, avg. and instantaneous velocity
    # NOTE(review): naming looks swapped relative to the usual convention:
    # distance_map stores the straight-line (net) distance from the first
    # point, while displacement_map accumulates the step-by-step path length.
    distance_map = {}
    displacement_map = {}
    avg_velocity_map = {}
    inst_velocity_map = collections.defaultdict(list)
    out_csv_file = "Velocity_Frame" + "{0:0>3}".format(time) + ".csv"
    time_conv_factor = 0.2  # 5 minutes time interval
    dist_conv_factor = 0.8  # 0.8 microns distance
    with open(out_csv_file, 'w') as csvfile:
        fieldnames = ['ObjectNumber', 'Velocity_X', 'Velocity_Y']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for cid in c_aXY_map.keys():
            # prev = previous point, pp = point before that (central difference).
            prev_x = None
            prev_y = None
            pp_x = None
            pp_y = None
            first_x = None
            first_y = None
            num_velocity_vecs = 0
            for [cur_x, cur_y] in c_aXY_map[cid]:
                if first_x is None and first_y is None:
                    first_x = cur_x
                    first_y = cur_y
                elif prev_x is None and prev_y is None:
                    distance_map[cid] = 0
                    displacement_map[cid] = 0
                    prev_x = cur_x
                    prev_y = cur_y
                elif pp_x is None and pp_y is None:
                    distance_map[cid] = (math.hypot(cur_x - first_x, cur_y - first_y))*dist_conv_factor
                    displacement_map[cid] = displacement_map[cid] + (math.hypot(cur_x - prev_x, cur_y - prev_y))*dist_conv_factor
                    avg_velocity_map[cid] = 0
                    pp_x = prev_x
                    pp_y = prev_y
                    prev_x = cur_x
                    prev_y = cur_y
                else:
                    distance_map[cid] = (math.hypot(cur_x - first_x, cur_y - first_y))*dist_conv_factor
                    displacement_map[cid] = displacement_map[cid] + (math.hypot(cur_x - prev_x, cur_y - prev_y))*dist_conv_factor
                    # Speed over two frames (central difference, hence *0.5).
                    new_velocity_vec = (math.hypot(cur_x - pp_x, cur_y - pp_y))*dist_conv_factor*time_conv_factor*0.5
                    inst_velocity_map[cid].append([cur_x - pp_x, cur_y - pp_y])
                    if num_velocity_vecs == 0:
                        # Only the first velocity vector per cell goes to CSV.
                        writer.writerow({'ObjectNumber': cid, 'Velocity_X': cur_x - pp_x, 'Velocity_Y': cur_y - pp_y})
                    # Running mean of the speed.
                    avg_velocity_map[cid] = ((avg_velocity_map[cid]*num_velocity_vecs) + new_velocity_vec)/(num_velocity_vecs + 1)
                    pp_x = prev_x
                    pp_y = prev_y
                    prev_x = cur_x
                    prev_y = cur_y
                    num_velocity_vecs = num_velocity_vecs + 1

    # Tracks color coded by speed
    cnt = 0
    drawscale = 0.1
    for i in range(time, plotFrame+1):
        fig = PLT.figure(figcnt)
        axes = PLT.gca()
        axes.set_xlim([0,1600])
        axes.set_ylim([0,1200])
        axes.invert_yaxis()
        axes.xaxis.tick_top()
        axes.yaxis.tick_left()
        # Normalize the colormap against the maximum observed speed.
        speed = []
        for cid in c_aXY_map.keys():
            for v_vec in inst_velocity_map[cid]:
                speed.append((math.hypot(v_vec[0], v_vec[1]))*dist_conv_factor*time_conv_factor*0.5)
        speed.sort()
        jet = cm = PLT.get_cmap('jet')
        cNorm = colors.Normalize(vmin=0, vmax=speed[-1])
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
        c_f = 0.2  # scaling factor for velocity vector
        for cid in c_aXY_map.keys():
            xd = []
            yd = []
            ud = []
            vd = []
            color_list = []
            if cnt == 0:
                try:
                    [x, y] = c_aXY_map[cid][0]
                    [u, v] = inst_velocity_map[cid][0]
                    PLT.quiver(x, y, u*c_f, v*c_f, color='limegreen', angles='xy', scale_units='xy', scale=drawscale)
                except IndexError:
                    continue
            else:
                for k in range(0, cnt+1):
                    try:
                        xd.append(c_aXY_map[cid][k][0])
                        yd.append(c_aXY_map[cid][k][1])
                        vx = inst_velocity_map[cid][k][0]
                        vy = inst_velocity_map[cid][k][1]
                        ud.append(vx)
                        vd.append(vy)
                        color_list.append(scalarMap.to_rgba((math.hypot(vx, vy))*dist_conv_factor*time_conv_factor*0.5))
                    except IndexError:
                        break
                try:
                    # Arrow at the latest position plus a speed-colored polyline.
                    PLT.quiver(xd[-1], yd[-1], ud[-1]*c_f, vd[-1]*c_f, color='limegreen', angles='xy', scale_units='xy', scale=drawscale)
                    points = NP.array([xd, yd]).T.reshape(-1, 1, 2)
                    segments = NP.concatenate([points[:-1], points[1:]], axis=1)
                    lc = MC.LineCollection(segments, colors=color_list, linewidths=1)
                    PLT.gca().add_collection(lc)
                except IndexError:
                    continue
        PLT.savefig("Velocity_" + "{0:0>3}".format(i) + ".png", bbox_inches='tight', dpi=200)
        PLT.close(fig)
        figcnt = figcnt + 1
        cnt = cnt + 1

    # Plot histograms
    # Distance histogram: one subplot per position-color group.
    fig = PLT.figure(figcnt)
    dist_hist_data = collections.defaultdict(list)
    max_dist = 0
    for cid in distance_map.keys():
        dist_hist_data[c_color_map[cid]].append(distance_map[cid])
        if distance_map[cid] > max_dist:
            max_dist = distance_map[cid]
    bins = NP.linspace(0, max_dist, num=math.ceil(math.sqrt(len(distance_map.keys()))))
    f, axarr = PLT.subplots(len(dist_hist_data.keys()), sharex=True)
    ind = 0
    for clr in dist_hist_data.keys():
        axarr[ind].hist(dist_hist_data[clr], bins, normed=False, cumulative=False, color=clr)
        axarr[0].set_title('Cell Distance Travelled Histogram')
        ind = ind + 1
    figcnt = figcnt + 1
    axarr[ind-1].set_xlabel(r'Distance ($\mu m$)')
    PLT.tight_layout()
    PLT.savefig('DistanceHist.png')

    # Displacement histogram: all color groups overlaid on one axes.
    fig = PLT.figure(figcnt)
    PLT.title('Cell Displacement Histogram')
    PLT.xlabel(r'Displacement ($\mu m$)')
    PLT.ylabel('Number of Cells')
    disp_hist_data = collections.defaultdict(list)
    max_disp = 0
    for cid in displacement_map.keys():
        disp_hist_data[c_color_map[cid]].append(displacement_map[cid])
        if displacement_map[cid] > max_disp:
            max_disp = displacement_map[cid]
    bins = NP.linspace(0, max_disp, num=math.ceil(math.sqrt(len(displacement_map.keys()))))
    for clr in disp_hist_data.keys():
        PLT.hist(disp_hist_data[clr], bins, normed=False, cumulative=False, color=clr)
    PLT.tight_layout()
    PLT.savefig('DisplacementHist.png')
    figcnt = figcnt + 1

    # Average-speed histogram, same overlay style.
    fig = PLT.figure(figcnt)
    PLT.title('Cell Avg. Speed Histogram')
    PLT.xlabel(r'Speed ($\mu m$ per min)')
    PLT.ylabel('Number of Cells')
    speed_hist_data = collections.defaultdict(list)
    max_speed = 0
    for cid in avg_velocity_map.keys():
        speed_hist_data[c_color_map[cid]].append(avg_velocity_map[cid])
        if avg_velocity_map[cid] > max_speed:
            max_speed = avg_velocity_map[cid]
    bins = NP.linspace(0, max_speed, num=math.ceil(math.sqrt(len(avg_velocity_map.keys()))))
    for clr in speed_hist_data.keys():
        PLT.hist(speed_hist_data[clr], bins, normed=False, cumulative=False, color=clr)
    PLT.tight_layout()
    PLT.savefig('SpeedHist.png')
    figcnt = figcnt + 1
# Helper function
def get_cell_future(obj_num, time, area_future, aXY_future, cmi_future, cnt_future):
    """Follow cell *obj_num* forward in time through the tracking CSV.

    Starting at frame *time*, walks rows of the CellProfiler export named on
    the command line (sys.argv[1]) and, for each frame in which the same
    tracked cell continues, appends its area, centroid, center-of-mass
    intensity and center coordinates to the supplied accumulator dicts
    (all keyed by the original *obj_num*). Division events are counted but
    daughter cells are not followed.

    Returns the last frame number in which the cell was found.
    """
    with open(sys.argv[1]) as csvh:
        hreader = csv.DictReader(csvh)
        curr_lifetime = -1
        curr_frame = -1
        curr_obj_num = -1
        num_daughters = 0
        # For testing: only look this many frames past *time*.
        cutoff = 500
        for data in hreader:
            if int(data['Metadata_FrameNumber']) < time:
                continue
            elif int(data['Metadata_FrameNumber']) > time + cutoff:
                break
            elif int(data['Metadata_FrameNumber']) == time and int(data['ObjectNumber']) == obj_num:
                # Anchor row: the cell we were asked to follow.
                curr_lifetime = int(data['TrackObjects_Lifetime_30'])
                curr_frame = time
                curr_obj_num = obj_num
            else:
                c_num = int(data['ObjectNumber'])
                if curr_lifetime == -1 or curr_frame == -1 or curr_obj_num == -1:
                    # Haven't seen the anchor row yet.
                    continue
                t = int(data['Metadata_FrameNumber'])
                parentid = int(data['TrackObjects_ParentObjectNumber_30'])
                lifetime = int(data['TrackObjects_Lifetime_30'])
                if t > curr_frame + 1:
                    # Track lost: no successor row in the very next frame.
                    break
                elif parentid == curr_obj_num and lifetime == curr_lifetime + 1 and t == curr_frame + 1:
                    # Same cell continuing into the next frame.
                    area_future[obj_num].append([int(data['AreaShape_Area'])])
                    aXY_future[obj_num].append([float(data['AreaShape_Center_X']), float(data['AreaShape_Center_Y'])])
                    cmi_X = float(data['Location_CenterMassIntensity_X_Outlines'])
                    cmi_Y = float(data['Location_CenterMassIntensity_Y_Outlines'])
                    cmi_future[obj_num].append([cmi_X, cmi_Y])
                    # BUG FIX: the original read from an undefined name `row`
                    # here (NameError at runtime); the loop variable is `data`.
                    cnt_future[obj_num].append([float(data['Location_Center_X']), float(data['Location_Center_Y'])])
                    curr_lifetime = lifetime
                    curr_frame = t
                    curr_obj_num = c_num
                elif parentid == curr_obj_num and lifetime == 1 and t == curr_frame + 1:
                    # A daughter cell born from a division event.
                    num_daughters = num_daughters + 1
                else:
                    continue
    # Parenthesized single-argument print works under both Python 2 and 3.
    print("DEBUG Cell id: " + str(obj_num) + " Frames found: " + str(curr_frame - time + 1) + " Num daughters: " + str(num_daughters))
    return curr_frame
# Main entry point (Python 2: bare `print` statement syntax).
print "RUNNING TESTS\n"
print "Tracking time t=10\n"
# track_cells() is defined earlier in this file.
track_cells(10)
| true |
ca6cf69755faf2367be5e11c57c54454351f6473 | Python | kenkainkane/imgpro2020 | /spatial_filtering/noiseRemoval.py | UTF-8 | 578 | 2.828125 | 3 | [] | no_license | import cv2
import numpy as np
# Load the two noisy test images (paths are relative to this script's folder).
img_sp = cv2.imread('../img/saltAndPepper.jpg')
img_gs = cv2.imread('../img/gaussian.jpg')
# smoothing image using average filter (7x7 box kernel)
avg_blur_sp = cv2.blur(img_sp, (7, 7))
avg_blur_gs = cv2.blur(img_gs, (7, 7))
# smoothing image using median filter (5x5 aperture)
med_blur_sp = cv2.medianBlur(img_sp, 5)
med_blur_gs = cv2.medianBlur(img_gs, 5)
# Show all four results side by side for visual comparison.
cv2.imshow('average salt&pepper', avg_blur_sp)
cv2.imshow('average gaussian', avg_blur_gs)
cv2.imshow('median salt&pepper', med_blur_sp)
cv2.imshow('median gaussian', med_blur_gs)
# Block until a key is pressed, then tear the windows down.
cv2.waitKey()
cv2.destroyAllWindows()
faf68ae9afcd3082aafd28b06ae8bf2d914fde3c | Python | newcanopies/facial-feature-tracking | /helper.py | UTF-8 | 1,010 | 2.96875 | 3 | [] | no_license | '''
File name: helper.py
Author:
Date created:
'''
'''
File clarification:
Include any helper function you want for this project such as the
video frame extraction, video generation, drawing bounding box and so on.
'''
import cv2
import numpy as np
from scipy import signal
def drawBox(img, bbox):
    """Return a copy of *img* with a green 2-px rectangle drawn per box.

    Each element of *bbox* is a sequence of four corner points in (row, col)
    order; corner 0 is used as top-left and corner 3 as bottom-right, with
    coordinates reversed into the (x, y) order cv2 expects.
    """
    canvas = img.copy()
    for corners in bbox:
        top_left = tuple(corners[0][::-1])
        bottom_right = tuple(corners[3][::-1])
        cv2.rectangle(canvas, top_left, bottom_right, (0, 255, 0), 2)
    return canvas
def gaussianPDF_1D(mu, sigma, length):
    """Return a 1-D Gaussian PDF sampled on a centered axis, as a row vector.

    The sample axis is built around zero with unit spacing; for even *length*
    it spans [-length/2, length/2), otherwise one extra stop is added.
    """
    half = length / 2
    stop = half if length % 2 == 0 else half + 1
    axis = np.arange(-half, stop, 1)
    axis = axis.reshape([1, -1])  # row vector, shape (1, n)
    # Gaussian density: exp(-(x-mu)^2 / (2 sigma^2)) / (sigma * sqrt(2 pi))
    scale = sigma * np.sqrt(2 * np.pi)
    kernel = np.exp(-np.square(axis - mu) / (2 * sigma * sigma))
    return kernel / scale
def gaussianPDF(mu, sigma, row, col):
    """Return a 2-D Gaussian kernel as the full 2-D convolution of two
    1-D profiles (a row vector of length *row* and a column vector of
    length *col*), both built by gaussianPDF_1D."""
    profile_row = gaussianPDF_1D(mu, sigma, row)
    profile_col = gaussianPDF_1D(mu, sigma, col).T
    return signal.convolve2d(profile_row, profile_col, 'full')
f38125d5e1e07958ad5126fe4ec150d761043692 | Python | kbrzust/GuessTheNumber | /GuessTheNumber.py | UTF-8 | 467 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
def guess(x):
    """Play guess-the-number on stdin for a secret drawn from [1, x]."""
    secret = random.randint(1, x)
    attempt = 0  # 0 can never equal the secret, so the loop always runs once
    while attempt != secret:
        attempt = int(input("Wprowadz liczbe: "))
        if attempt > secret:
            print("Sprobuj ponownie, liczba jest za wysoka. ")
        elif attempt < secret:
            print("Sprobuj ponownie, liczba jest za niska. ")
    print("Gratulacje odgadles liczbe! ")
# Start a game with the secret drawn from 1..9.
guess(9)
| true |
459ca95d196290fecd1b0d17f729b7fd42437b95 | Python | SebastianCalle/holbertonschool-higher_level_programming | /0x05-python-exceptions/6-raise_exception_msg.py | UTF-8 | 142 | 2.703125 | 3 | [] | no_license | #!/usr/bin/python3
# function that raises a name exception with a message
def raise_exception_msg(message=""):
    """Raise a NameError carrying *message* as its text."""
    raise NameError(message)
| true |
3b2c0ba3d9ca994e32450a1fd32711d6132b6e71 | Python | tmathai/spine | /common/layers/losses.py | UTF-8 | 3,616 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 17 18:40:39 2019
@author: Tejas
"""
import tensorflow as tf
EPS = 0.0000001
def dice(labels, prediction):
    """Mean Sørensen–Dice coefficient between label and prediction maps.

    Both tensors are laid out [batch, height, width, num_classes]; the
    spatial axes are reduced per (sample, class), then the per-class scores
    are averaged over the batch and over classes into a scalar.

    Cleanup: removed ~20 lines of commented-out alternative implementations
    and collapsed the redundant loss1/loss2 temporaries.
    """
    with tf.variable_scope('dice'):
        # Per-sample, per-class Dice: 2*sum(l*p) / (sum(l^2) + sum(p^2)).
        # EPS guards against 0/0 when both masks are empty.
        dc = 2.0 * \
            tf.reduce_sum(labels * prediction, axis=[1, 2]) / \
            (tf.reduce_sum(labels ** 2 + prediction ** 2, axis=[1, 2]) + EPS)
        # Collapse [batch, num_classes] -> scalar. Averaging over all
        # elements equals the original mean-over-batch-then-classes because
        # the tensor is rectangular.
        return tf.reduce_mean(dc)
def dice_loss(labels, prediction):
    """Dice loss = 1 - dice(labels, prediction); 0 for perfect overlap.

    Cleanup: removed the commented-out debug prints and the redundant
    intermediate variables.
    """
    with tf.variable_scope('dice_loss'):
        return 1.0 - dice(labels, prediction)
def binary_cross_entropy_2D(labels, logits, reweight=False):
    """Pixel-wise cross-entropy between flattened 2-D label and logit maps.

    Both tensors are reshaped to [batch, height*width] before the loss is
    computed, and the result is reduced to a scalar mean.

    When reweight=True, foreground pixels are up-weighted by the
    background/foreground pixel ratio (class-imbalance compensation).
    NOTE(review): the unweighted branch uses softmax (not sigmoid)
    cross-entropy despite the function's "binary" name — confirm this is
    intentional for the callers.
    """
    labels_shape = labels.get_shape().as_list()
    # Number of pixels per sample; requires static height/width.
    pixel_size = labels_shape[1] * labels_shape[2]
    logits = tf.reshape(logits, [-1, pixel_size])
    labels = tf.reshape(labels, [-1, pixel_size])
    number_foreground = tf.reduce_sum(labels)
    number_background = tf.reduce_sum(1.0 - labels)
    # EPS avoids division by zero when there is no foreground at all.
    weight_foreground = number_background / (number_foreground + EPS)
    if reweight:
        loss = \
            tf.nn.weighted_cross_entropy_with_logits(
                targets=tf.cast(labels, tf.float32),
                logits=logits,
                pos_weight=weight_foreground
            )
    else:
        loss = \
            tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.cast(labels, tf.float32),
                logits=logits,
            )
    loss = tf.reduce_mean(loss)
    return loss
def mse(labels, prediction):
    """Scalar mean squared error over [batch, height, width, num_classes] maps.

    FIX: the variable_scope was named 'dice' (copy-pasted from dice());
    renamed to 'mse' so op names in the graph reflect the actual loss.
    """
    with tf.variable_scope('mse'):
        sq_err = tf.square(labels - prediction)
        # Spatial mean per (sample, class), then mean over batch and classes;
        # for a rectangular tensor this equals the original chained means.
        return tf.reduce_mean(tf.reduce_mean(sq_err, axis=[1, 2]))
def mse_loss(labels, prediction):
    """MSE is already a loss (lower is better), so this is a thin alias.

    FIX: the variable_scope was named 'dice_loss' (copy-pasted); renamed.
    """
    with tf.variable_scope('mse_loss'):
        return mse(labels, prediction)
| true |
527814b97b3bad6a0445677d7a5b00c6d373c3f5 | Python | Wessrow/packetpusher | /main.py | UTF-8 | 976 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
"""
Code written by Gustav Larsson
Generating IP traffic with Scapy
"""
from logging_handler import logger
from scapy.all import *
def format_logs(level, message_type, message):
    """Wrap *message* in a dict tagged with *message_type* and log it at *level*."""
    payload = {"type": message_type, "message": message}
    logger.log(level, payload)
def ping(ip_address="8.8.8.8"):
    """Send ICMP echo requests to *ip_address* via scapy and log the result.

    Packets are sent with scapy's send(loop=1, inter=0.2, count=50).
    FIX: dropped the confusing, unused binding of the result to the name
    `exec`, and corrected the error-path log tag from the copy-pasted
    "test" to "ping".
    """
    packet = IP(dst=ip_address)/ICMP()
    try:
        send(packet, loop=1, inter=0.2, count=50)
        format_logs(10, "ping", f"sent ping to {ip_address}")
    except OSError as error:
        # Raised e.g. when run without raw-socket privileges.
        format_logs(40, "ping", error)
def send_multicast(ip_address="239.0.1.2"):
    """Send UDP datagrams to the multicast group *ip_address* via scapy.

    Packets are sent with scapy's send(loop=1, inter=0.2, count=50).
    FIX: log messages/tags were copy-pasted from ping() ("sent ping",
    type "ping", error type "test"); they now describe multicast, and the
    unused `exec` binding was removed.
    """
    packet = IP(dst=ip_address)/UDP()
    try:
        send(packet, loop=1, inter=0.2, count=50)
        format_logs(10, "multicast", f"sent multicast to {ip_address}")
    except OSError as error:
        format_logs(40, "multicast", error)
if __name__ == "__main__":
    # Toggle between the two traffic generators by (un)commenting.
    #ping()
    send_multicast()
| true |
2f5998d9c450b2be7b72f27c3ecb38401205fc34 | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/gigasecond/aea97a3ac80244c4b02a94a4fcd4a908.py | UTF-8 | 114 | 2.671875 | 3 | [] | no_license | from datetime import timedelta
# One gigasecond, expressed in seconds.
GIGA = 10 ** 9


def add_gigasecond(base):
    """Return the moment exactly one gigasecond (1e9 seconds) after *base*."""
    return base + timedelta(seconds=GIGA)
| true |
754609d0ab4d31384c578a88e2149eee26d1929f | Python | alanoudalbattah/Fyyur | /starter_code/models.py | UTF-8 | 3,320 | 2.703125 | 3 | [] | no_license | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy() # single shared instance, defined here (not in app.py) to avoid a circular import
# Venues and artists form a many-to-many relationship, linked by an intermediary table.
# "When using the relationship.backref parameter instead of relationship.back_populates,
# the backref will automatically use the same relationship.secondary argument for the reverse relationship."
#? Difference between association models and association tables:
# "The association object pattern is a variant on many-to-many: it's used when your association table contains
# additional columns beyond those which are foreign keys to the left and right tables. Instead of using the secondary
# argument, you map a new class directly to the association table." src: https://docs.sqlalchemy.org/en/14/orm/basic_relationships.html#many-to-many
class Venue(db.Model):
    """A performance venue; linked to artists many-to-many through Show."""
    __tablename__ = 'venue'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False, unique=True)
    # Location / contact details.
    city = db.Column(db.String(120), nullable=False)
    state = db.Column(db.String(120), nullable=False)
    address = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(120))
    genres = db.Column(db.ARRAY(db.String))
    facebook_link = db.Column(db.String(120))
    image_link = db.Column(db.String(500))
    website = db.Column(db.String(120))
    # Talent-seeking flag plus free-text pitch shown on the venue page.
    seeking_talent = db.Column(db.Boolean)
    seeking_description = db.Column(db.Text)
    artists = db.relationship("Artist", secondary="show", lazy="joined", cascade='all, delete')
    #? CASCADE ALL, DELETE to delete the children (Shows) automatically before deleting the parent
    # TODO: implement any missing fields, as a database migration using Flask-Migrate ✅
class Artist(db.Model):
    """A performing artist; linked to venues many-to-many through Show."""
    __tablename__ = 'artist'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True)
    # Location / contact details.
    city = db.Column(db.String(120), nullable=False)
    state = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(120))
    image_link = db.Column(db.String(500))
    genres = db.Column(db.ARRAY(db.String))
    facebook_link = db.Column(db.String(120))
    website = db.Column(db.String(120))
    # Venue-seeking flag plus free-text pitch shown on the artist page.
    seeking_venue = db.Column(db.Boolean)
    seeking_description = db.Column(db.Text)
    venue = db.relationship("Venue", secondary="show", lazy="joined", cascade='all, delete')
    # TODO: implement any missing fields, as a database migration using Flask-Migrate ✅
class Show(db.Model):
    """Association object linking one Artist to one Venue at a start time."""
    __tablename__ = 'show'
    id = db.Column(db.Integer, primary_key=True)
    venue_id = db.Column(db.Integer, db.ForeignKey('venue.id'), nullable=False)
    artist_id = db.Column(db.Integer, db.ForeignKey('artist.id'), nullable=False)
    start_time = db.Column(db.DateTime)
    # Backrefs expose `venue.shows` and `artist.shows` collections.
    venue = db.relationship(Venue, backref=db.backref("shows", lazy=True))
    artist = db.relationship(Artist, backref=db.backref("shows", lazy=True))
#src:
# https://michaelcho.me/article/many-to-many-relationships-in-sqlalchemy-models-flask/
# https://docs.sqlalchemy.org/en/14/orm/basic_relationships.html#many-to-many
# I used these links to help me model the many-to-many relationship using an association class instead of an association table.
# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.✅
77ee8bf96b9cd6f18166ebc09dd73f877abc21e3 | Python | srikanthpragada/PYTHON_12_JULY_2021 | /demo/oop/ex_demo.py | UTF-8 | 399 | 3.8125 | 4 | [] | no_license | prices = [100, 200, 300]
try:
    count = int(input("Enter a number :"))
    r = 10 // count
    print(prices[r])
# Most specific handlers first: bad literal for int() ...
except ValueError:
    print("Sorry! Invalid number. Please enter a valid number!")
# ... division by zero ...
except ZeroDivisionError:
    print("Sorry! Zero is not valid!")
# ... and a catch-all last (e.g. IndexError when r >= len(prices)).
except Exception as ex:
    print('Stopped program due to some error -> ', ex)
# finally runs regardless of which branch was taken.
finally:
    print("Finally block!")
print("The End")
| true |
3f0b27c99499256d3bf1b0afa7f2159f22ef108b | Python | Hourout/linora | /linora/data/_utils.py | UTF-8 | 1,941 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | import requests
from linora import gfile
from linora.utils._progbar import Progbar
__all__ = ['get_file']
def assert_dirs(root, root_dir=None, delete=True, make_root_dir=True):
    """Validate *root* and prepare the task directory, returning its path.

    With no *root_dir*, (re)creates *root* itself and returns it. With a
    *root_dir*, joins it under *root*; an existing task directory is wiped
    and recreated when *delete* is set, a missing one is created when
    *make_root_dir* is set.
    """
    if root is None:
        root = './'
    assert gfile.isdir(root), '{} should be directory.'.format(root)
    if root_dir is None:
        gfile.makedirs(root)
        return root
    assert isinstance(root_dir, str), '{} should be str.'.format(root_dir)
    task_path = gfile.path_join(root, root_dir)
    if not gfile.exists(task_path):
        if make_root_dir:
            gfile.makedirs(task_path)
    elif delete:
        gfile.remove(task_path)
        gfile.makedirs(task_path)
    return task_path
def get_file(url, root_file, verbose=1, retries=3, chunk_size=5120):
    """Request url and download to root_file.

    Args:
        url: str, request url.
        root_file: str, downloaded and saved file name.
        verbose: Verbosity mode, 0 (silent), 1 (verbose).
        retries: retry counts.
        chunk_size: the number of bytes to read into memory per chunk.
    Return:
        str, downloaded and saved file name.
    Raises:
        RuntimeError: when the download still fails after *retries* attempts.

    BUG FIXES vs. the original:
    - `raise 'download failed'` / `raise f'...'` raised *strings*, which is a
      TypeError in Python 3 (exceptions must derive from BaseException);
      real RuntimeErrors are raised instead.
    - the success path fell through to the unconditional raise, so every
      completed download was retried and ultimately "failed"; we now return
      as soon as the expected number of bytes has been written.
    """
    for attempt in range(retries):
        try:
            r = requests.get(url, stream=True)
            content_type = r.headers.get('Content-Length')
            total_size = None if content_type is None else int(content_type.strip())
            p = Progbar(total_size, verbose=verbose)
            down_size = 0
            with open(root_file, 'wb') as f:
                for chunk in r.iter_content(chunk_size):
                    p.add(chunk_size)
                    f.write(chunk)
                    down_size += len(chunk)
            # Without a Content-Length header we cannot verify completeness.
            if total_size is None or down_size == total_size:
                return root_file
            raise RuntimeError('download incomplete')
        except Exception:
            if attempt == retries - 1:
                raise RuntimeError('{} download failed'.format(url))
    return root_file
a613ece30131cb34c1b41ab7d77a1e60e8ced9f2 | Python | ITihiy/gb_algo_solutions | /lesson_01_slyusar_roman/01. task 1.py | UTF-8 | 877 | 4.40625 | 4 | [] | no_license | """
1. Найти сумму и произведение цифр трехзначного числа, которое вводит пользователь.
"""
three_digit_number = int(input("Введите трехзначное число: "))
# Digits could be extracted arithmetically (//, %), but string indexing is simpler.
srt_three_digit_number = str(three_digit_number)
# NOTE(review): a negative input like -12 also has len 3 and would be
# accepted here — confirm only positive input is expected.
if len(srt_three_digit_number) != 3:
    print("Error! This is not three-digit number.")
else:
    # Last, middle and first characters of the 3-digit string.
    units = int(srt_three_digit_number[-1])
    tens = int(srt_three_digit_number[-2])
    hundreds = int(srt_three_digit_number[-3])
    print(f"Сумма цифр трехзначного числа: {units+tens+hundreds}")
    print(f"Произведение цифр трехзначного числа: {units*tens*hundreds}")
| true |
b1e5420b2379fae1d60da24005875fbb6093767b | Python | eagle750/PySc | /youtube downloader/youtubedownloader.py | UTF-8 | 415 | 3.125 | 3 | [] | no_license | import pytube
# Ask for a YouTube URL and download the first available stream with pytube.
print("Enter the video link")
link = input()
yt = pytube.YouTube(link)
# NOTE(review): streams.first() picks whatever stream pytube lists first —
# confirm this is the desired quality for the installed pytube version.
stream = yt.streams.first()
#videos = yt.get_videos()
#s=1
#for v in videos:
#    print(str(s)+". "+str(v))
#    s += 1
#print("Enter the number of videos: ")
#n = int(input())
#vid = videos[n-1]
print("Enter the location:")
destination = input()
# Save the stream into the chosen directory.
stream.download(destination)
print("\nhas been successfully downloaded ")
| true |
bf93aa0e2694eea5870d0d4a007a790629726532 | Python | samir711/webdriverpythonappium | /PythonAppiumProject/PythonTraining/Day4/Assignment/Day4Assignment4Q3.py | UTF-8 | 529 | 3.28125 | 3 | [] | no_license | # Q3. Find out the pypi module available which can be used to perform the following activity -
#
# a. read and write excel file
# b. generate logs.
# Excel packages such as pandas, openpyxl, xlrd, xlutils and pyexcel.
# https://www.geeksforgeeks.org/reading-excel-file-using-python/
import xlrd # a. read and write excel file
# location of the file
# NOTE(review): prefer a raw string r".\..\..\data\data.xlsx" so backslash
# escapes can never corrupt the path.
loc = ".\..\..\data\data.xlsx"
# To open Workbook
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
# For row 0 and column 0
print(sheet.cell_value(0, 0))
| true |
48285dd8d9304573ac9bf75f8ee60f57c09ee60f | Python | woshiZS/Snake-Python-tutorial- | /Chapter8/admin.py | UTF-8 | 551 | 2.984375 | 3 | [] | no_license | from users import User
class Privilege:
    """A fixed set of administrator privileges."""

    def __init__(self):
        # Default abilities granted to every admin.
        self.privileges = ['can add post', 'can delete post', 'can ban user']

    def show_privileges(self):
        """Print each privilege on its own line."""
        print('\n'.join(self.privileges))
class Admin(User):
    """A User that additionally carries a Privilege set."""

    def __init__(self, first_name, last_name, **user_info):
        # Delegate the common profile fields to User, then attach privileges.
        super().__init__(first_name, last_name, **user_info)
        self.privileges = Privilege()
if __name__=="__main__":
    # Smoke test: build an admin and list its default privileges.
    adm_0=Admin('Jason','Heywood',age=18,hobby='play basketball')
    adm_0.privileges.show_privileges()
eba3b447858f0402fe4b04d81dec4d38e506d2ff | Python | yugalk14/Web-Scraper | /first_web_scrape.py | UTF-8 | 1,328 | 2.921875 | 3 | [] | no_license | from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import ssl
# WARNING(security): disables TLS certificate verification for every HTTPS
# request made by this process. Acceptable only for throwaway scraping.
ssl._create_default_https_context = ssl._create_unverified_context
def scraper(max_pages):
    """Scrape Monster.com software-engineer listings into Jobs.csv.

    Walks result pages 1 .. max_pages-1, printing each posting and appending
    one CSV row per posting.

    BUG FIX: the original reopened Jobs.csv with mode "w" on every page,
    truncating the file each time so only the last page's rows survived.
    The file is now opened once, before the page loop, via a context manager
    (which also guarantees it is closed).
    """
    with open("Jobs.csv", "w") as f:
        f.write("Jobs, Comapny, Location\n")
        for i in range(1, max_pages):
            my_url = 'https://www.monster.com/jobs/search/?q=Software-Engineer&where=USA&intcid=skr_navigation_nhpso_searchMain&jobid=0a71a749-66d0-49d5-9dd8-40599a4337b3&page=' + str(i)
            # Open the connection and grab the page content.
            uClient = uReq(my_url)
            page_html = uClient.read()
            uClient.close()
            # HTML parser
            page_soup = soup(page_html, "html.parser")
            # Grab each job post; the first card is a non-job header card.
            containers = page_soup.findAll("section", {"class": "card-content"})
            for container in containers[1:]:
                job = container.a.text.replace("\r\n", "")
                company = container.find("div", {"class": "company"}).span.text
                location = container.find("div", {"class": "location"}).span.text.replace("\r\n", "")
                print("Job: " + job)
                print("Comapany: " + company)
                print("Location: " + location)
                # NOTE(review): fields are not quoted, so a comma inside a
                # value breaks the CSV; consider the csv module.
                f.write(job + "," + company + "," + location + "\n")
# Interactive driver: ask how many pages to fetch, then scrape.
pages=input("How many page data you want? = ")
print("Scrapping the data from given website, Please wait")
scraper(int(pages))
print("\n\n\nScrapping is Done, Please check Output on Jobs.csv file\n\n")
| true |
3e7c6c2059f182019a0cb34b13f008a7988bcaef | Python | ym7979/dict | /dict_db.py | UTF-8 | 2,081 | 3.25 | 3 | [] | no_license | import hashlib # 加密
import pymysql
def change_passwd(passwd):
    """Return the hex MD5 digest of *passwd* (a str).

    NOTE(security): MD5 is unsuitable for password storage; it is kept only
    because the existing `user` table already holds MD5 digests. Use a real
    KDF (e.g. hashlib.scrypt) for any new schema.

    Cleanup: one-liner instead of the three-step object dance, and the
    local no longer shadows the builtin `hash`.
    """
    return hashlib.md5(passwd.encode()).hexdigest()
class Database:
    """Thin wrapper around the `dict` MySQL database (user accounts + words)."""

    def __init__(self):
        # Connect to the local MySQL server.
        self.db = pymysql.connect(host='localhost',
                                  port=3306,
                                  user='root',
                                  password='123456',
                                  database='dict',
                                  charset='utf8')
        # Cursor used to execute SQL statements and fetch results.
        self.cur = self.db.cursor()

    def close(self):
        """Release the cursor and the database connection."""
        self.cur.close()
        self.db.close()

    def register(self, name, passwd):
        """Create a user; return False if the name is taken or the insert fails.

        SECURITY FIX: the original interpolated *name* straight into the
        SELECT ("... name = '%s';" % name), allowing SQL injection. Both
        statements now use driver-side parameter binding.
        """
        sql = "select name from user where name = %s;"
        self.cur.execute(sql, [name])
        if self.cur.fetchone():
            return False
        sql = "insert into user (name,password) values(%s,%s);"
        passwd = change_passwd(passwd)  # store the digest, never the plaintext
        try:
            self.cur.execute(sql, [name, passwd])
            self.db.commit()
            return True
        except Exception:
            # Undo the partial insert; report failure to the caller.
            self.db.rollback()
            return False

    def login(self, name, passwd):
        """Return True when (name, hashed passwd) matches a stored user."""
        sql = "select name from user " \
              "where name=%s and password=%s;"
        passwd = change_passwd(passwd)
        self.cur.execute(sql, [name, passwd])
        if self.cur.fetchone():
            return True
        else:
            return False

    def query(self, word):
        """Return the meaning of *word*, or None when it is unknown."""
        sql = "select mean from words where word=%s;"
        self.cur.execute(sql, [word])
        r = self.cur.fetchone()
        # r is a 1-tuple like (meaning,) on a hit, None on a miss.
        if r:
            return r[0]
if __name__ == '__main__':
    # Smoke test: register and log in a user, then disconnect.
    # NOTE(review): the boolean results are discarded — print or assert them
    # if this is meant to verify anything.
    db = Database()
    db.register('Tom', '123')
    db.login('Tom', '123')
    db.close()
| true |
10fb3674d148f601f3661276891f273186193dda | Python | sspenst/synacor-challenge | /teleporter.py | UTF-8 | 1,270 | 3.53125 | 4 | [] | no_license | """
Runs an optimized version of the function at address 6027 to find
the input that will allow the teleporter to reach the second location.
"""
import sys
mod = 32768
def check_r7_val(r7):
    """
    Checks if a specific value of reg7 would result in a successful
    outcome for the function call at address 6027.

    On success, prints the winning r7 value and exits the process.
    """
    # Memoizes f6027 results keyed by (r0, r1); valid only for this fixed r7.
    cache = {}
    def f6027(r0, r1, r7):
        """
        Optimized implementation of the function found at address 6027.

        Ackermann-like recursion over 15-bit arithmetic (mod 32768), with
        closed forms substituted for the r0 == 2 and r0 == 3, r1 == 0 cases.
        """
        if (r0, r1) in cache:
            return cache[(r0, r1)]
        elif r0 == 2:
            ret = (2*r7 + r1*(r7+1) + 1) % mod
        elif r0 == 3 and r1 == 0:
            ret = (r7*(r7+3) + 1) % mod
        elif r1 == 0:
            ret = f6027(r0 - 1, r7, r7)
        else:
            t1 = f6027(r0, r1 - 1, r7)
            ret = f6027(r0 - 1, t1, r7)
        # update cache
        cache[(r0, r1)] = ret
        return ret
    # initialize the cache bottom-up (every r0 == 3 value) so the later
    # f6027(4, 1, r7) call never recurses deeply enough to overflow the stack
    for i in range(mod):
        f6027(3, i, r7)
    # if our return value is 6, then the teleporter works!
    if f6027(4, 1, r7) == 6:
        print('REG7 SHOULD BE SET TO ' + str(r7))
        sys.exit()
# search through all possible values of r7 (the full non-zero 15-bit range);
# check_r7_val() prints the answer and exits the process when it finds it
for r7 in range(1, mod):
    check_r7_val(r7)
9692bf180d2d945299e7a5a0b422964b754750d4 | Python | OhDakyeong/p2_201611084 | /w3Main_temperature.py | UTF-8 | 211 | 3.859375 | 4 | [] | no_license | temp=raw_input("user input temperature: ")
# Python 2 only: raw_input() and `print` statement syntax.
sel=raw_input("F or C: ")
temp=int(temp)
# The selector names the unit the user *entered*; convert to the other one.
if(sel=="F"):
    print ((temp-32)/1.8),"C"
elif(sel=="C"):
    print ((temp*1.8)+32),"F"
else:
    print "Input Error"
| true |
de9a346685829488f5ba04c7307b5c4b15a35040 | Python | xylong/python | /object/generator.py | UTF-8 | 634 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-09-11 13:31:27
# @Author : xyl (416319808@qq.com)
# @Link : https://github.com/xylong
# @Version : 1.1
class Libs(object):
    """Iterator yielding the Fibonacci numbers not exceeding *n*."""

    def __init__(self, n):
        # Inclusive upper bound for the values produced.
        self.n = n
        # Current pair of consecutive Fibonacci numbers.
        self.a = 0
        self.b = 1

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        value = self.b
        self.a, self.b = self.b, self.a + self.b
        if value > self.n:
            raise StopIteration
        return value
# Iterate the custom iterator: prints 1 1 2 3 5 8 13.
f = Libs(20)
for x in f:
    print(x)
def lib(n):
    """Generator yielding the Fibonacci numbers up to and including *n*."""
    prev, curr = 0, 1
    while True:
        prev, curr = curr, prev + curr
        if prev > n:
            return  # ends the generator (StopIteration)
        yield prev
# Same sequence via the generator function: prints 1 1 2 3 5 8 13.
res = lib(20)
for x in res:
    print(x)
| true |
adc7f444ae488b7e01df483c08780eae86b47241 | Python | Alexander-Nalbandyan/learning-python | /ch-01/for_tests.py | UTF-8 | 696 | 4.9375 | 5 | [] | no_license | # iterates over given list and on each iteration assigns next value from list to the i variable.
for i in [1, 2, 3, 6, 9, 10]:
    print(i)
# Iterates over the characters of the string, assigning the next character to i
# on each pass. This works because strings are sequences (ordered collections).
for i in "Hellow":
    print(i)
# range(start, end) generates the integers from start up to (but excluding) end;
# range(end) is shorthand for range(0, end).
# This loop runs 10 times because range(10) yields 0, 1, ..., 9 — a common way
# to repeat a body a fixed number of times.
for i in range(10):
    print("Hello ", i)
| true |
19ea7a395562c873e3b4b3dc49cbbad865d93b85 | Python | daniel-reich/ubiquitous-fiesta | /djJpmZPPBx3JaAqcK_10.py | UTF-8 | 293 | 3.109375 | 3 | [] | no_license |
def maya_number(n):
return [to_maya(k) for k in reversed(base20(n))]
def to_maya(n):
nlines, ndots = divmod(n,5)
return 'o'*ndots + '-'*nlines if n else '@'
def base20(n):
n20 = []
while n:
n,r = divmod(n,20)
n20.append(r)
return n20 or [0]
| true |
0d5083961207903c2e0c3fbd93ca25ff08982380 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_46/50.py | UTF-8 | 992 | 3 | 3 | [] | no_license | with open("A.in") as infile:
    with open("A.out",mode="wt") as outfile:
        cases = int(infile.readline())
        for ncase in range(cases):
            # Perform all necessary calculations for one test case.
            size = int(infile.readline())
            row = [0] * size
            for i in range(size):
                # row[i] = column index of the rightmost 1 in matrix row i
                # (0 when the row is all zeros).
                nums = [j*int(x) for j, x in enumerate(infile.readline().strip())]
                row[i] = max(nums)
            # Reorder: count adjacent row swaps needed so that row[i] <= i
            # for every i (presumably GCJ's lower-triangular matrix problem —
            # moving a row up j-i positions costs j-i swaps).
            num = 0
            for i in range(size):
                if row[i] <= i: continue
                for j in range(i+1,size):
                    if row[j] <= i:
                        #print("{n}: {j} => {i}".format(n=ncase,i=i,j=j))
                        num += j - i
                        # Move row j up to position i, shifting the rest down.
                        row[i:i] = [row[j]]
                        del row[j+1]
                        break
            outfile.write("Case #{nc}: {data}\n".format(nc=ncase+1,data=num))
print("Ready")
| true |
f292252aeb3c681dab1a4f1f193f6b4bc667ef04 | Python | DylanClarkOffical/CodeWars-Python | /Challenges-Beginner/multiplicationTable.py | UTF-8 | 140 | 3.46875 | 3 | [] | no_license | def multiplication_table(size):
    # Row x of the size x size table is [x*1, x*2, ..., x*size].
    return [[x * y for y in range(1, size + 1)] for x in range(1, size + 1)]
# Demo: prints [[1, 2, 3], [2, 4, 6], [3, 6, 9]].
print(multiplication_table(3))
892ad1fae4154f591a922fd0026f1de575a2215b | Python | noahlove/manim-intro | /shape_trace/shape_trace.py | UTF-8 | 637 | 2.84375 | 3 | [] | no_license | from manim import *
class PointWithTrace(Scene):
    """Manim scene: a dot that leaves a trail as it rotates and shifts."""
    def construct(self):
        # The trail is a path whose corners are extended every frame.
        path = VMobject()
        dot = Dot()
        # Seed the path with a degenerate segment at the dot's start position.
        path.set_points_as_corners([dot.get_center(), dot.get_center()])
        def update_path(path):
            # Append the dot's current position to a copy, then swap it in.
            previous_path = path.copy()
            previous_path.add_points_as_corners([dot.get_center()])
            path.become(previous_path)
        # The updater runs on every frame while animations play.
        path.add_updater(update_path)
        self.add(path, dot)
        # Half-turn around the point RIGHT, then two straight shifts.
        self.play(Rotating(dot, radians=PI, about_point=RIGHT, run_time=2))
        self.wait()
        self.play(dot.animate.shift(UP))
        self.play(dot.animate.shift(LEFT))
        self.wait()
d59326882f94c43dd3484a73e120b9fd3d7b92aa | Python | anderson89marques/Santos | /santos/example.py | UTF-8 | 1,003 | 2.84375 | 3 | [
"MIT"
] | permissive | __author__ = 'anderson'
# -*- coding: utf-8 -*-
from santos import ThreadSchedule
import time
def f(schedule, job_name):
    """Wait 10 s, then pause *job_name* on *schedule*."""
    time.sleep(10)
    schedule.pause_job(job_name)
    print("//a//")
def f1(schedule, job_name):
    """Wait 20 s, then resume *job_name* on *schedule*."""
    time.sleep(20)
    schedule.resume_job(job_name)
    print("//b//")
def f2(schedule, job_name):
    """Wait 25 s, then remove *job_name* and report the remaining job count."""
    time.sleep(25)
    schedule.remove_job(job_name)
    print("//c//")
    print("len: {}".format(len(schedule)))
def funcao(a):
    """Demo job body: just print its argument."""
    print(a)
def func(a):
    """Demo job body: just print its argument."""
    print(a)
# Build a scheduler with two interval jobs and one weekly time-of-day job,
# printing the job count after each registration.
schedule = ThreadSchedule()
schedule.add_job(funcao, seconds="4", id="func1", kwargs={"a": "A"})
print("len1: {}".format(len(schedule)))
schedule.add_job(funcao, seconds="3", id="func2", kwargs={"a": "B"})
print("len2: {}".format(len(schedule)))
schedule.add_job(func, day_of_the_week='Tu', time_of_the_day="02:16:50", id="func3", kwargs={"a": "Time_of"})
print("len3: {}".format(len(schedule)))
if __name__ == '__main__':
    # Exercise pause, resume and removal against the jobs registered above.
    f(schedule, "func1")
    f1(schedule, "func1")
    f2(schedule, "func2")
| true |
e6c312af118ac0392bcb18f302a31b9f16616c08 | Python | 1Moiz/Python-Basics | /lab2.py | UTF-8 | 4,329 | 3.953125 | 4 | [] | no_license | #Assignment # 2
#Chapter # 02 — variables and print()
#Q no 1
username = 'Moiz Ahmed'
print(username)
#Q no 2
message = 'Hello World'
print(message)
#Q no 3
age = '21'
certified = 'Certified in Python Language'
print(username + "his age is " + age + " "+ certified)
#Q no 4
print(" pizza \n pizz \n piz \n pi \n p")
#Q no 5
email = 'mmoizahmed8@gmail.com'
print("Hello My email Address is : " + email)
#Q no 6
book = 'A samrt way to learn Python'
print("I am trying to learn book " + book)
print("======================================================")
#Chapter # 03 — string concatenation vs. comma-separated print
#Q no 1
age = '21'
print("I am " + age + " Years old")
#Q no 2
visit = '45'
print("You have visited this site "+visit+ " times" )
#Q no 3
birthyear = 1997
print("My birth year is ",birthyear)
print("Data type of my declared variable is number")
#Q no 4
visitor_name = 'Moiz Ahmed'
product_title = 'T-Shirts'
quantity = '5'
print(visitor_name+" ordered " + quantity+" " + product_title + " On Lucky One Mall")
print("======================================================")
#Chapter # 05 — the underscore is valid in identifiers
_1 = '_'
print("A variable name only contain in python is : "+ _1+" Called Hyphen")
print("======================================================")
# Chapter # 04 — basic arithmetic on two user-supplied integers
_1st_no = int(input("Enter 1st Number : "))
_2nd_no = int(input("Enter 2nd Number : "))
print("Sum of 1st and 2nd Number is : ",_1st_no + _2nd_no)
print("Sub of 1st and 2nd Number is : ",_1st_no - _2nd_no)
print("Mul of 1st and 2nd Number is : ",_1st_no * _2nd_no)
# NOTE(review): / is true division, so this prints a float; raises
# ZeroDivisionError when the second number is 0.
print("Div of 1st and 2nd Number is : ",_1st_no / _2nd_no)
a = 7
print("Initial value is : " ,a)
# BUG FIX: the original line was `a =+1`, which re-assigns a to +1 instead of
# incrementing; `a += 1` yields the intended 8.
a += 1
print("Value after increment is : ",a)
# Ticket cost for 5 tickets at 600 PKR each.
ticket = 600
print("Total cost to buy 5 tickets to a movie is ",ticket*5," PKR" )
print("----------Print any table----------")
a = int(input("Enter number to print table : "))
# Multiplication table from a x 1 up to a x 10.
for i in range(1,11):
    print(a,'x',i,'=',a*i)
print("======================================================")
print("----------Temperature Converter--------")
# C -> F: F = C * 9/5 + 32; F -> C: C = (F - 32) * 5/9.
cel = float(input("Enter temperature in Celcius : "))
fah = (cel*9/5)+32
print("Celcius temperature in Fahrenheit",fah)
fah = int(input("Enter temperature in Fahrenheit : "))
cel = (fah-32)*5/9
print("Fahrenheit temperature in Celcius",cel)
print("----------Shopping Cart----------")
# Two line items (price x quantity) plus a flat Rs 100 shipping charge.
p1 = int(input("Enter Price of 1st item : "))
q1 = int(input("Enter Quantity of 1st item is : "))
p2 = int(input("Enter Price of 2nd item : "))
q2 = int(input("Enter Quantity of 2nd item is : "))
shipping_charges = 100
# NOTE(review): the literal 100 is used below instead of shipping_charges.
print("Total cost of your item with Shipping charges is Rs 100 : ",(p1*q1)+(p2*q2)+100)
print("======================================================")
print("----------Calculating Percentage----------")
total_marks = 800
obtained_marks = 700
# BUG FIX: percentage is obtained/total * 100; the original computed
# total/obtained * 100 and reported ~114.29% instead of 87.5%.
per = obtained_marks/total_marks*100
print("Total Marks : ",total_marks)
print("Marks Obtained : ",obtained_marks)
print("Percentage : ",per)
print("----------Currancy in PKR----------")
# Fixed conversion rates: 1 USD = 141.62 PKR, 1 SAR = 37.76 PKR.
us = int(input("Enter US dollar : "))
sad = int(input("Enter Saudi Riyal : "))
print("Your US dollar in PKR is : ",us*141.62)
print("Your Saudi Riyal in PKR is : ",sad*37.76)
print("======================================================")
print("---------Age Calculater----------")
cy = int(input("Enter Current year : "))
by = int(input("Enter Birth year : "))
print("Your age is : ",(cy-by))
print("======================================================")
print("----------The Geometrizer----------")
# Circle metrics with pi approximated as 3.14.
radius = int(input("Enter Radius of circle : "))
print("Radius of circle is : ",radius)
print("The circumference is : ",2*3.14*radius)
print("The Area is : ",3.14*radius*radius)
print("======================================================")
print("----------The Lifetime Supply Calculator----------")
# Daily snack consumption projected over the remaining years to age 65.
snake = 'Lays'
current_age = 15
maximum_age = 65
amount = 3
print("Favourite Snake : ",snake)
print("Current age : ",current_age)
print("Estimated age : ",maximum_age)
print("Amount of Snake per day : ",amount)
print("You will need ",(maximum_age-current_age)*amount ," Lays to last until the ripe old age of 65")
| true |
0a550375eb69aaf960cc7f73bdbf16779898d95f | Python | sparisi/tensorl | /common/plotting.py | UTF-8 | 1,580 | 3.046875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
plt.ion()
#matplotlib.use('TKagg')
import numpy as np
class RT3DPlot:
    '''
    Real-time 3-D plot: one figure with two linked views of the same grid —
    a surface subplot and a filled-contour subplot with a colorbar. update()
    redraws both with new z-values without creating a new figure.
    '''
    def __init__(self, xmin, xmax, ymin, ymax, title, n=10):
        # Figure with the surface (left) and contour (right) subplots.
        self.name = 'my3dplot'
        self.fig = plt.figure()
        self.fig.suptitle(title)
        # Grid resolution per axis; update() expects n*n z-values.
        self.n = n
        self.ax_surf = self.fig.add_subplot(121, projection='3d')
        self.ax_contour = self.fig.add_subplot(122)
        x = np.linspace(xmin, xmax, n)
        y = np.linspace(ymin, ymax, n)
        self.xx, self.yy = np.meshgrid(x, y)
        self.XY = np.vstack((self.xx.flatten(),self.yy.flatten())).T # shape is (n^2,2)
        # Initial draw with all-zero z so the artists and colorbar axes exist.
        self.surf = self.ax_surf.plot_surface(self.xx, self.yy, np.zeros((n,n)), cmap=cm.coolwarm)
        self.contour = self.ax_contour.contourf(self.xx, self.yy, np.zeros((n,n)), cmap=cm.coolwarm)
        # Keep the colorbar's axes so update() can redraw into it in place.
        self.ax_cbar = plt.colorbar(self.contour).ax
    def update(self, z):
        """Redraw both views with *z* (any array of n*n values)."""
        self.fig.canvas.flush_events()
        # Clear all three axes, then redraw into them.
        self.ax_surf.cla()
        self.ax_contour.cla()
        self.ax_cbar.cla()
        self.surf = self.ax_surf.plot_surface(self.xx, self.yy, z.reshape((self.n,self.n)), cmap=cm.coolwarm)
        self.contour = self.ax_contour.contourf(self.xx, self.yy, z.reshape((self.n,self.n)), cmap=cm.coolwarm)
        plt.colorbar(self.contour, cax=self.ax_cbar)
        plt.draw()
| true |
907096426d7b4f9f92a24317997b4be0cd35083c | Python | jorgediazjr/dials-dev20191018 | /base/lib/python2.7/site-packages/wx-3.0-gtk2/wx/lib/pdfviewer/__init__.py | UTF-8 | 2,690 | 2.59375 | 3 | [
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"BSD-3-Clause"
] | permissive | # Name: __init__.py
# Package: wx.lib.pdfviewer
#
# Purpose: A PDF file viewer
#
# Author: David Hughes dfh@forestfield.co.uk
# Copyright: Forestfield Software Ltd
# Licence: Same as wxPython host
# History: Created 17 Aug 2009
#
#----------------------------------------------------------------------------
"""
wx.lib.pdfviewer
The wx.lib.pdfviewer pdfViewer class is derived from wx.ScrolledWindow
and can display and print PDF files. The whole file can be scrolled from
end to end at whatever magnification (zoom-level) is specified.
The viewer uses pyPdf to parse the pdf file so it is a requirement that
this must be installed. The pyPdf home page is http://pybrary.net/pyPdf/
and the library can also be downloaded from http://pypi.python.org/pypi/pyPdf/1.12
There is an optional pdfButtonPanel class, derived from wx.lib.buttonpanel,
that can be placed, for example, at the top of the scrolled viewer window,
and which contains navigation and zoom controls. Alternatively you can drive
the viewer from controls in your own application.
Externally callable methods are: LoadFile, Save, Print, SetZoom, and GoPage
viewer.LoadFile(pathname)
Reads and displays the specified PDF file
viewer.Save()
Opens standard file dialog to specify save file name
viewer.Print()
Opens print dialog to choose printing options
viewer.SetZoom(zoomscale)
zoomscale: positive integer or floating zoom scale to render the file at
corresponding size where 1.0 is "actual" point size (1/72").
-1 fits page width and -2 fits page height into client area
Redisplays the current page(s) at the new size
viewer.GoPage(pagenumber)
Displays specified page
The viewer renders the pdf file content using Cairo if installed,
otherwise wx.GraphicsContext is used. Printing is achieved by writing
directly to a wx.PrintDC and using wx.Printer.
Please note that pdfviewer is a far from complete implementation of the pdf
specification and will probably fail to display any random file you supply.
However it does seem to be OK with the sort of files produced by ReportLab that
use Western languages. The biggest limitation is probably that it doesn't (yet?)
support embedded fonts and will substitute one of the standard fonts instead.
The icons used in pdfButtonPanel are Free Icons by Axialis Software: http://www.axialis.com
You can freely use them in any project or website, commercially or not.
TERMS OF USE:
You must keep the credits of the authors: "Axialis Team", even if you modify them.
See ./bitmaps/ReadMe.txt for further details
"""
from viewer import pdfViewer
from buttonpanel import pdfButtonPanel
| true |
59353c35e175fde37bbe9a190980d1e3e75ddaaf | Python | OlgaBrozhe/PythonProject | /check_db_connection.py | UTF-8 | 412 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | import pymysql.cursors
# DB API 2.0
connection = pymysql.connect(host="127.0.0.1", database="addressbook", user="root", password="")
try:
# Point to the data stored in the database
cursor = connection.cursor()
# Query the data from the DB and print row by row
cursor.execute("select * from address_in_groups")
for row in cursor.fetchall():
print(row)
finally:
connection.close() | true |
525a43560f05706f3401773fa7269aeca920f235 | Python | vinoddiwan/HackerRank-Solutions- | /2D Array - DS/hourglassSum.py | UTF-8 | 377 | 3.453125 | 3 | [] | no_license | def hourglassSum(arr):
    """
    Return the maximum "hourglass" sum in a 2-D grid (HackerRank "2D Array - DS").
    An hourglass is the 7-cell pattern anchored at (i, j):
        a b c
          d
        e f g
    NOTE(review): the column range reuses len(arr), so a square grid
    (at least 3x3) is assumed — true for HackerRank's fixed 6x6 input.
    """
    maxGlass = float('-inf') # running maximum; -inf handles all-negative grids
    for i in range(len(arr)-2): # top row of the hourglass needs two rows below it
        for j in range(len(arr)-2): # and two columns to its right
            currSum = arr[i][j] + arr[i][j+1] + arr[i][j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]
            maxGlass = max(currSum, maxGlass) # keep the best sum seen so far
    return maxGlass
| true |
fb81f924f2ce8422b4ebd3c8dbe3bf4e8dad84d5 | Python | modalsoul0226/LeetcodeRepo | /easy/Largest Subarray.py | UTF-8 | 1,610 | 3.296875 | 3 | [] | no_license | # O(n) solution
class Solution:
    def maxSubArray(self, nums):
        """
        Maximum contiguous-subarray sum via Kadane's algorithm: one O(n) pass
        keeping the best sum of a subarray ending at the current element.

        :type nums: List[int]
        :rtype: int (float('-inf') is returned for an empty input)
        """
        # float('-inf') is a safe identity for the running maximum.  The
        # original seeded it with -(2 ** 32), which returned the wrong answer
        # for arrays whose elements are all below that sentinel.
        res = float('-inf')
        temp = 0
        for num in nums:
            if temp < 0:
                # A negative running sum can never help a later subarray;
                # restart the window at the current element.
                temp = num
            else:
                temp += num
            if temp > res:
                res = temp
        return res
# class Solution:
# def maxSubArray(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# if not nums or len(nums) == 0: return None
# return self.findMax(nums, 0, len(nums) - 1)
# def findMax(self, nums, start, end):
# if start == end:
# return nums[start]
# mid = (start + end) / 2
# left_max = self.findMax(nums, start, mid)
# right_max = self.findMax(nums, mid + 1, end)
# temp_max = left_max if left_max > right_max else right_max
# max_sum = 0
# curr_sum = 0
# i = mid + 1
# while i <= end:
# curr_sum += nums[i]
# if curr_sum > max_sum: max_sum = curr_sum
# i += 1
# i = mid - 1
# curr_sum = max_sum
# while i >= start:
# curr_sum += nums[i]
# if curr_sum > max_sum: max_sum = curr_sum
# i -= 1
# max_sum += nums[mid]
# return max_sum if max_sum > temp_max else temp_max
if __name__ == '__main__':
    sol = Solution()
    # print(sol.maxSubArray([[8,-19,5,-4,20]]))
    # Smoke test: classic LeetCode 53 example, expected answer 6.
    print(sol.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))
b522d6675a8901bb50c944cb1302e9e868bc865a | Python | fdermer/mg2 | /recipe/management/commands/count_recipes_with_photos.py | UTF-8 | 452 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from recipe.models import Recipe
import os
# Static root that holds the recipe images on this machine.
# NOTE(review): hard-coded developer path — must be adapted per environment.
root_dir = "/Users/Fred/www/mg2/mg2/recipe/static/"
count = 0
# Count the recipes whose image file actually exists on disk and print a
# local dev-server URL for each one (Python 2 print-statement syntax).
for recipe in Recipe.objects.all():
    if recipe.image_slug and recipe.image_name:
        if os.path.exists(os.path.join(root_dir, recipe.get_image_url())):
            count +=1
            print "http://127.0.0.1:8000/static/" + recipe.get_image_url()
print "Nb de photos trouvées: ", count
ea3f709d4461cf29dbd93dd74d6ef947f97bc12d | Python | wolfela/SecondYear | /coolbeans/app/views/quiz.py | UTF-8 | 6,679 | 2.59375 | 3 | [
"BSD-2-Clause"
] | permissive | from django.views import View
from coolbeans.app.forms import QuizForm
from coolbeans.app.models.quiz import QuizModel
from coolbeans.app.models.question import MultipleChoiceModel, WordScrambleQuestionModel, WordMatchingModel, GapFillQuestionModel, CrosswordQuestionModel, BaseQuestionModel
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from django.http import HttpResponse
class QuizListView(View):
    """
    A view for listing all quizzes this user is allowed to edit.

    Placeholder — not implemented yet.
    """
    pass
class QuizCreateView(View):
    """
    A view for creating quizzes.
    """
    template_name = "app/quiz/Quiz-Create.html"
    def load(request):
        """
        Create a quiz from the submitted form data and redirect to its edit page.
        :return: redirect to quiz edit page

        NOTE(review): the form is saved without calling is_valid(); invalid
        POST data would raise here — confirm validation is handled upstream.
        NOTE(review): defined without `self` and invoked as a plain function,
        matching the pattern used across this module's views.
        """
        form = QuizForm(request.POST)
        quiz = form.save()
        string = '/quiz/edit/' + str(quiz.pk)
        return redirect(string)
class QuestionEditView(View):
    """
    A view for editing quizzes: dispatches save / add-question / cancel
    submissions and renders the quiz editing pages.
    """
    template_name = "app/quiz/Quiz-Create.html"
    # Maps the short question-type codes used in the URLs to display titles.
    QUESTION_TITLES = {
        'mc': "Multiple Choice",
        'ws': "Word Scramble",
        'wm': "Word Match",
        'cw': "Crossword",
        'gf': "Gapfill",
    }
    def submitQuiz(request, pk):
        """
        Method for handling different submit quiz types
        :param pk: primary key
        :return: redirect to save quiz, add question or cancel
        """
        if 'save_form' in request.POST:
            return QuestionEditView.saveQuiz(request, pk)
        elif 'add_question' in request.POST:
            return QuestionEditView.addQuestion(request, pk)
        elif 'cancel_form' in request.POST:
            return QuestionEditView.cancel(request, pk)
    def addQuestion(request, pk):
        """
        Method for adding questions to quizzes
        :param pk: primary key
        :return: http response redirect back to quiz edit page
        """
        quiz = get_object_or_404(QuizModel, pk=pk)
        quiz.title = request.POST.get('title')
        quiz.language = request.POST.get('language')
        quiz.author = request.POST.get('author')
        quiz.save()
        # Renamed from `type`, which shadowed the built-in.
        questiontype = request.POST.get('type')
        return HttpResponseRedirect('/' + questiontype + '/' + str(pk) + '/' + str(len(quiz.questions)))
    def editQuiz(request, pk, questiontype='', questionid=''):
        """
        Method for displaying edit quiz page with the updated question list
        :param pk: primary key
        :param questiontype: type of a question ('mc', 'ws', 'wm', 'cw' or 'gf')
        :param questionid: question id
        :return: updated view
        """
        quiz = get_object_or_404(QuizModel, pk=pk)
        # Compare with != rather than `is not`: identity checks against string
        # literals are implementation-dependent (CPython emits a SyntaxWarning).
        if questiontype != '' and questionid != '':
            questiontitle = QuestionEditView.QUESTION_TITLES[questiontype]
            quiz.questions.append(questiontype + '/question/' + str(questionid))
            quiz.questiontitles.append(questiontitle)
            quiz.save()
        return render(request, 'app/quiz/Quiz-Create.html', {'quiz': quiz})
    def saveQuiz(request, pk):
        """
        Method for saving the quiz
        :param pk: primary key
        :return: http response redirect to show id page

        NOTE(review): when the form is invalid (or the method is not POST)
        this view still returns None, exactly like the original — confirm the
        intended fallback before changing that.
        """
        instance = get_object_or_404(QuizModel, pk=pk)
        if request.method == 'POST':
            form = QuizForm(request.POST)
            if form.is_valid():
                instance.title = form.data['title']
                instance.language = form.data['language']
                instance.save()
                return HttpResponseRedirect('/quiz/' + pk + '/showid/')
        else:
            form = QuizForm()
    def showId(request, pk):
        """
        Method for displaying the show id page
        :param pk: primary key
        :return: return the quiz id page with form data
        """
        quiz = get_object_or_404(QuizModel, pk=pk)
        return render(request, 'app/quiz/Quiz-ID.html', {'quiz': quiz})
    def cancel(request, pk):
        """Abandon editing and return to the site root."""
        return HttpResponseRedirect('/')
class QuizAttemptView(View):
    """
    A view for attempting quizzes
    """
    def score(request, pk, score):
        """
        Method for displaying the results page
        :param pk: primary key
        :param score: final score
        :return: updated results view with the quiz and its question count
        """
        quiz = get_object_or_404(QuizModel, pk=pk)
        questioncount = len(quiz.questions)
        return render(request, 'app/quiz/Quiz-Result.html', {'score': score, 'quiz': quiz, 'questioncount': questioncount})
    def attemptQuiz(request, pk):
        """
        Method for starting the quiz attempt
        :param pk: primary key
        :return: redirect to the first question
        """
        instance = get_object_or_404(QuizModel, pk=pk)
        # Question entries are URL fragments like 'mc/question/3'; start at
        # position 0 with score 0.
        return HttpResponseRedirect('/' + instance.questions[0] + '/0')
    def nextQuestion(request, pk, i, score):
        """
        Method that returns the next question
        :param pk: primary key
        :param i: position in the quiz
        :param score: current score
        :return: http response redirect to the next question
        """
        instance = get_object_or_404(QuizModel, pk=pk)
        if (int(i)+1 < len(instance.questions)):
            # AJAX callers receive the next URL as plain text; regular
            # requests are redirected directly.
            if request.is_ajax():
                message = '/' + instance.questions[int(i)+1] + '/' + score
                return HttpResponse(message)
            else:
                return HttpResponseRedirect('/' + instance.questions[int(i)+1] + '/' + score)
        else:
            if request.is_ajax():
                # NOTE(review): this URL lacks the '/' separator and the quiz
                # pk that the non-AJAX branch below includes — confirm the
                # JavaScript client compensates for this.
                message = '/quiz/score' + score
                return HttpResponse(message)
            else:
                return HttpResponseRedirect('/quiz/score/' + pk + '/' + score)
def findQuizPage(request):
    """
    Main/Find quiz page display
    :param request: incoming HTTP request
    :return: find quiz template
    """
    return render(request, 'app/quiz/Quiz-Find.html')
def findQuiz(request):
    """
    Redirect to the quiz-attempt page for the requested quiz, or re-render
    the search page with an alert when the code is missing, malformed, or
    does not match any quiz.

    :param request: HTTP request whose POST data may contain a quiz code 'pk'
    :return: redirect to the attempt page, or the find-quiz page with an alert
    """
    pk = request.POST.get('pk')
    # Guard against a missing 'pk' field: QueryDict.get() returns None in
    # that case, and the original unconditional pk.isdigit() call raised
    # AttributeError instead of showing the alert.
    if pk is not None and pk.isdigit():
        quiz = QuizModel.objects.filter(pk=pk)
        if quiz:
            return HttpResponseRedirect('/quiz/attempt/' + pk + '/')
    return render(request, 'app/quiz/Quiz-Find.html', {'alert': "Wrong Quiz Code. Try again!"})
| true |
5f3c98e5f40ef4d56868e435322c196ab02889b2 | Python | szyymek/Python | /String_transformer.py | UTF-8 | 347 | 3.734375 | 4 | [] | no_license | def string_transformer(s):
s = s.split(" ")
s = s[::-1]
print(s)
s = " ".join(s)
print(s)
result = ""
for letter in s:
if letter.islower():
result += letter.upper()
elif letter.isupper():
result += letter.lower()
else:
result +=letter
return result
| true |
4bcfd700b7ff817ecc8b2ec935daac0499b0d69d | Python | PacktPublishing/Software-Architecture-with-Python | /Chapter08/eventlet_chat_server.py | UTF-8 | 1,626 | 2.921875 | 3 | [
"MIT"
] | permissive | # Code Listing #8
"""
Multiuser chat server using eventlet
"""
import eventlet
from eventlet.green import socket
participants = set()
def new_chat_channel(conn):
    """ New chat channel for a given connection.

    Receives raw "user:message" lines from `conn` and relays each one to
    every other participant until the peer disconnects (recv returns b'').
    """
    data = conn.recv(1024)
    user = ''
    while data:
        print("Chat:", data.strip())
        # Decode and parse the message once per receive.  The original code
        # decoded inside the participant loop, so with more than one other
        # participant the second iteration called .decode() on a str and
        # failed.
        text = data.decode('utf-8')
        user, msg = text.split(':')
        for p in participants:
            try:
                if p is not conn:
                    if msg != '<handshake>':
                        data_s = '\n#[' + user + ']>>> says ' + msg
                    else:
                        data_s = '(User %s connected)\n' % user
                    p.send(bytearray(data_s, 'utf-8'))
            except socket.error as e:
                # Ignore broken pipes (errno 32 == EPIPE): that participant
                # already closed its connection.  Exceptions are not
                # subscriptable in Python 3, so use .errno instead of the
                # Python 2 style e[0] indexing the original used (which
                # raised TypeError here).
                if e.errno != 32:
                    raise
        data = conn.recv(1024)
    participants.remove(conn)
    print("Participant %s left chat." % user)
if __name__ == "__main__":
    port = 3490
    try:
        print("ChatServer starting up on port", port)
        # Green (non-blocking) listening socket on all interfaces.
        server = eventlet.listen(('0.0.0.0', port))
        while True:
            # accept() cooperatively yields until a client connects.
            new_connection, address = server.accept()
            print("Participant joined chat.")
            participants.add(new_connection)
            # Handle each participant in its own green thread.
            print(eventlet.spawn(new_chat_channel,
                                 new_connection))
    except (KeyboardInterrupt, SystemExit):
        print("ChatServer exiting.")
| true |
b165e73cf199a927015b1759d3d502ca19719b28 | Python | chungyang/CS514 | /HW4/heavyHitter.py | UTF-8 | 2,999 | 3.59375 | 4 | [] | no_license | import numpy as np
import math
class hash_function:
    """Universal-style hash h(n) = (a*n + b) mod w with a, b drawn
    uniformly at random from [0, w)."""
    def __init__(self, w):
        self.w = w
        self.a, self.b = np.random.randint(0, w, 2)
    def hash(self, n):
        """
        :param n: value to hash
        :return: a hash_value in range(self.w)
        """
        return (self.a * n + self.b) % self.w
class count_min_sketch:
    """
    Count-min sketch.  The estimate a' it returns satisfies
    a' <= a + epislon * N with probability 1 - delta, where a is the true
    frequency and N the stream size.
    """
    def __init__(self, epislon, delta):
        self.epislon = epislon
        self.delta = delta
        # Number of hash functions == number of rows in the table.
        self.d = math.ceil(math.log(1 / self.delta))
        # Hash range h: {1..m} -> {1..w} == number of columns in the table.
        self.w = math.ceil(math.exp(1) / self.epislon)
        self.count_table = np.zeros((self.d, self.w))
        self.hash_functions = []
        self.__generateHashFuntions()
    def __generateHashFuntions(self):
        """Populate self.hash_functions with d independent hash functions."""
        for _ in range(self.d):
            self.hash_functions.append(hash_function(self.w))
    def listen(self, x):
        """
        Feed one stream item into the sketch: bump the counter selected by
        every row's hash function.
        :param x: Stream data
        """
        for row, fn in enumerate(self.hash_functions):
            self.count_table[row][fn.hash(x)] += 1
    def queryFrequency(self, x):
        """
        Estimate the frequency of stream item x: the minimum of its
        per-row counters (each is an over-estimate of the true count).
        :param x: stream data
        :return: estimated frequency of x
        """
        estimates = [
            self.count_table[row][fn.hash(x)]
            for row, fn in enumerate(self.hash_functions)
        ]
        return min(estimates)
import heapq
h = []    # min-heap of (estimated_count, value) candidate heavy hitters
# Minimum frequency to be considered as a heavy hitter
k = 30
c = count_min_sketch(epislon = 0.1, delta = 0.1)
total_count = 0
# Stream 10k random values through the sketch, tracking candidates whose
# estimated frequency exceeds total_count / k.
for i in range(10000):
    data = np.random.randint(0, 499)
    c.listen(data)
    total_count += 1
    f = c.queryFrequency(data)
    if f > total_count / k:
        # Delete previous occurrence of data so its count entry is refreshed.
        # NOTE(review): list.pop(j) on an arbitrary index breaks the heap
        # invariant; heapq.heapify(h) afterwards (or a dict keyed by value)
        # would keep h a valid heap — confirm before relying on heap order.
        for j in range(len(h)):
            if h[j][1] == data:
                h.pop(j)
                break
        heapq.heappush(h, (f, data))
# Delete elements that has count less than total_count / k
while h:
    # NOTE(review): h.pop(0) removes the first list element without
    # re-heapifying; heapq.heappop(h) is the correct way to take the minimum.
    element = h.pop(0)
    # Min element is bigger than total_count / k, stop deleting and put it back to the heap
    if element[0] >= total_count / k:
        heapq.heappush(h, element)
        break
print("list of heavy hitters")
for element in h:
    print("value: ", element[1], "count: ", element[0])
| true |
53368ebf7d59407040e16b5520fd7f48cb68c513 | Python | ursg/analysator | /pyCalculations/pitchangle.py | UTF-8 | 2,487 | 2.921875 | 3 | [] | no_license | import numpy as np
import pylab as pl
def pitch_angles( vlsvReader, cellid, cosine=True, plasmaframe=False ):
   ''' Calculates the pitch angle distribution for a given cell

       :param vlsvReader:        Some VlsvReader class with a file open
       :type vlsvReader:         :class:`vlsvfile.VlsvReader`
       :param cellid:            The cell id whose pitch angle the user wants NOTE: The cell id must have a velocity distribution!
       :param cosine:            True if returning the pitch angles as a cosine plot
       :param plasmaframe:       True if the user wants to get the pitch angle distribution in the plasma frame
       :returns: pitch angles and avgs [pitch_angles, avgs]

       .. code-block:: python

          # Example usage:
          vlsvReader = VlsvReader("fullf.0001.vlsv")
          result = pitch_angles( vlsvReader=vlsvReader, cellid=1924, cosine=True, plasmaframe=False )
          # Plot the data
          import pylab as pl
          pl.hist(result[0].data, weights=result[1].data, bins=100, log=False)
   '''
   # Read the velocity cells:
   velocity_cell_data = vlsvReader.read_velocity_cells(cellid)
   # Read bulk velocity (rho_v / rho); guard against empty cells where rho == 0:
   if vlsvReader.read_variable("rho", cellid) != 0.0:
      bulk_velocity = np.array(vlsvReader.read_variable("rho_v", cellid) / vlsvReader.read_variable("rho", cellid), copy=False)
   else:
      bulk_velocity = 0.0
   # Calculate the pitch angles for the data:
   B = vlsvReader.read_variable("B", cellid)
   B_unit = B / np.linalg.norm(B)
   # Get cells:
   vcellids = velocity_cell_data.keys()
   # Get avgs data:
   avgs = velocity_cell_data.values()
   # Get a list of velocity coordinates (shifted into the plasma frame if requested):
   if plasmaframe == True:
      v = vlsvReader.get_velocity_cell_coordinates(vcellids) - bulk_velocity
   else:
      v = vlsvReader.get_velocity_cell_coordinates(vcellids)
   # Get norms (per-row Euclidean norm of the velocity vectors):
   v_norms = np.sum(np.abs(v)**2,axis=-1)**(1./2)
   # Get the angles:
   # NOTE(review): when cosine=True the values are dimensionless cos(angle)
   # but the unit label says "radian" — confirm the intended label.
   if cosine == True:
      pitch_angles = v.dot(B_unit) / v_norms
      units = "radian"
   else:
      pitch_angles = np.arccos(v.dot(B_unit) / v_norms) / (2*np.pi) * 360
      units = "degree"
   # Return the pitch angles and avgs values:
   from output import output_1d
   # Empty cells (rho == 0) get a placeholder distribution instead.
   if vlsvReader.read_variable("rho", cellid) != 0.0:
      return output_1d([pitch_angles, avgs], ["Pitch_angle", "avgs"], [units, ""])
   else:
      return output_1d([[0], [1e-9]], ["Pitch_angle", "avgs"], [units, ""])
| true |
98699779792566ebfaece37e789e95b5a0e3456d | Python | aog11/python-training | /scripts/countdown.py | UTF-8 | 369 | 3.421875 | 3 | [] | no_license | # Chapter 15
# Simple Countdown Program Project
#! python3
# Importing the needed modules
import time, os, subprocess
timeLeft = 60
# Tick once per second until the minute is up.
# NOTE(review): end='' runs the numbers together ("605958..."); end=' ' (or a
# carriage-return rewrite) is probably what was intended — confirm.
while timeLeft > 0:
    print(timeLeft, end='')
    time.sleep(1)
    timeLeft-= 1
# Going to the location of alarm.wav
# NOTE(review): os.chdir('') raises FileNotFoundError — '' is a placeholder
# that must be filled in with the folder containing alarm.wav.
os.chdir('')
# At the end of the countdown, play a sound file
# (Windows-specific: `start` is a cmd.exe built-in, hence shell=True.)
subprocess.Popen(['start','alarm.wav'],shell=True)
eadce458aff15f2755ba7a955815c7f191343606 | Python | ThomasR75/python_work | /Euler51with strings.py | UTF-8 | 1,392 | 3.671875 | 4 | [] | no_license | #Euler51 Prime Digit replacements
# replace 2 digits in a number with same numbers and find smallest that is 8 number sequence prime
#create primes
from time import time
from collections import Counter
begin = time()
# Odd-only sieve of Eratosthenes up to primemax.
primemax = 1000000
marked = [0] * primemax    # marked[v] == 1 once v is known composite (or already taken as a prime)
primes = [2, ]
value = 3
while value < primemax:
    if marked[value] == 0:
        primes.append(value)
        # Mark every multiple of the new prime (starting at the prime itself).
        i = value
        while i < primemax:
            marked[i] = 1
            i += value
    value += 2
#reduce primes to primes with triplicates
# Keep only primes with at least three "surplus" repeated digit positions
# (len(digits) - len(distinct digits) >= 3) — the only candidates for a
# replacement family over 3+ identical digits.
primes = [x for x in primes if len(str(x)) - len(set(str(x))) >= 3]
print(len(primes))
checked =[]    # numbers already covered by an examined replacement family
#defination to swap duplicates and make list of all
def pdr(s):
    """
    Build the digit-replacement families of `s`: for every digit that occurs
    more than once, return the 10 ints obtained by substituting that digit
    (all of its occurrences at once) with each of 0-9.

    :param s: number (or digit string) to expand
    :return: list of 10-element lists, one per repeated digit
    """
    text = str(s)
    families = []
    # Counter(text) - Counter(set(text)) keeps exactly the digits whose
    # multiplicity is at least 2.
    for repeated in (Counter(text) - Counter(set(text))):
        family = [int(text.replace(repeated, digit)) for digit in "0123456789"]
        families.append(family)
    return families
#definition to check if prime numbers
def check(l):
    """
    Record every candidate in the global `checked` list and strip the
    non-primes out of `l` in place (the caller inspects both the return
    value and the mutated list).

    Fix: the original removed elements from `l` while iterating over it,
    which skips the element following each removal and can leave composites
    in the result; iterating over a snapshot of the list avoids that.
    """
    for i in list(l):
        checked.append(i)
        if i not in primes:
            l.remove(i)
    return l
# Walk the candidate primes in increasing order; the first 8-member
# replacement family found therefore stems from the smallest such prime.
flag = True
i = 0
while flag:
    if primes[i] not in checked:
        replacements = pdr(primes[i])
        for j in replacements:
            if len(check(j)) == 8:
                # j has been filtered down to its prime members; report its
                # smallest element as the answer.
                # NOTE(review): replacements with a leading zero shrink when
                # converted to int, so j can contain shorter numbers than the
                # family — Project Euler 51 normally excludes those; confirm.
                print(j[0])
                flag = False
                break
    i += 1
end = time()
print ("Time: ", end - begin)
d9fc4e8243e5cda6e58f006d8b8293f475d54ed9 | Python | pasbahar/python-practice | /Remaining_str.py | UTF-8 | 1,251 | 4.1875 | 4 | [] | no_license | '''Given a string without spaces, a character, and a count, the task is to print the string after the specified character has occurred count number of times.
Print “Empty string” incase of any unsatisfying conditions.
(Given character is not present, or present but less than given count, or given count completes on last index).
If given count is 0, then given character doesn’t matter, just print the whole string.
Input:
First line consists of T test cases. First line of every test case consists of String S.Second line of every test case consists of a character.Third line of every test case consists of an integer.
Output:
Single line output, print the remaining string or "Empty string".
Constraints:
1<=T<=200
1<=|String|<=10000
Example:
Input:
2
Thisisdemostring
i
3
geeksforgeeks
e
2
Output:
ng
ksforgeeks'''
# For each test case, print what remains of the string after the c-th
# occurrence of the given character, or "Empty string" when that cut is
# impossible (character absent, too few occurrences, or cut at the very end).
for _ in range(int(input())):
    s = input()
    ch = input()
    c = int(input())
    # Find the index just past the c-th occurrence of ch (-1 if never reached).
    count, ind = 0, -1
    for j, letter in enumerate(s):
        if letter == ch:
            count += 1
        if count == c:
            ind = j + 1
            break
    if c == 0:
        # A zero count means the whole string survives, whatever ch is.
        print(s)
    elif ind not in (-1, len(s)):
        print(s[ind:])
    else:
        print('Empty string')
df3ed79b74df3e598854915509f9a7173b5548e8 | Python | guzmananthony37/Homework-3 | /11.22.py | UTF-8 | 208 | 3.0625 | 3 | [] | no_license | #Anthony Guzman 11.22 CIS 2348 1503239
input_list = input()
# Renamed from `list`, which shadowed the built-in list type.
words = input_list.split()
# Print each word (in order, duplicates included) with its total frequency.
for word in words:
    frequency = words.count(word)
    print(word, frequency)
0bc7d4694eac5e14a508e09fb86fba895ed58594 | Python | Ceruleanacg/Crack-Interview | /LeetCode/Array and Strings/66. Plus One/solution.py | UTF-8 | 558 | 3.375 | 3 | [
"MIT"
] | permissive | class Solution:
def plusOne(self, digits: list):
"""
:type digits: List[int]
:rtype: List[int]
"""
carry = 0
digits[-1] += 1
for i in reversed(range(0, len(digits))):
num = digits[i]
num += carry
carry = 0
if num >= 10:
carry = 1
num %= 10
digits[i] = num
if carry == 1:
digits.insert(0, 1)
return digits
# Quick smoke checks: expect [1, 2, 4] and [1, 0, 0].
print(Solution().plusOne([1, 2, 3]))
print(Solution().plusOne([9, 9]))
| true |
2335e9c9b4bd5453e7ba6ed05eae3ee83d08e190 | Python | Code0N/PythonKPLab2 | /01.py | UTF-8 | 940 | 3.484375 | 3 | [] | no_license | from sys import argv
from os.path import exists
# Count how often each Latin/Cyrillic letter occurs in the file given on the
# command line and print the per-letter totals.
if len(argv) == 1:
    print('Укажите файл для обработки')
    exit()
if not exists(argv[1]):
    print('Файл не существует')
    exit()
try:
    file = open(argv[1], 'rt', 512, 'utf-8')
except OSError:
    # Could not open the file; report and stop instead of falling through
    # with `file` unbound (the original's bare except did exactly that).
    print('Эксепшн')
    exit()
# Fix: the original closed the file in a `finally:` block right after the
# `try`, which guaranteed an "I/O operation on closed file" error in the
# read loop below.  The file is now closed only after it has been read.
alltextfiltered = ''
for line in file:
    for i in line:
        if i.isalpha():
            alltextfiltered += i
file.close()
# Latin + Cyrillic alphabet, uppercased once so counting is case-insensitive.
letters = 'qwertyuiopasdfghjklzxcvbnmйцукенгшщзхъфывапролджэячсмитьбю'.upper()
alltextfiltered = alltextfiltered.upper()
for i in letters:
    numcount = alltextfiltered.count(i)
    if numcount != 0:
        print("Буква {} встречается в тексте {} раз".format(i, numcount))
835dccc91ea460cdcf9f0388f8678776aba5c428 | Python | WinstonChenn/trolly-sim | /src/sim_utils.py | UTF-8 | 8,856 | 2.84375 | 3 | [
"MIT"
] | permissive | """
Winston Chen
5/3/2021
Utilities for trolly problem simulation environment setup
"""
import random
from enum import Enum
import numpy as np
class LossType(Enum):
    # Ethical-loss flavours used by Simulator.get_top_bot_n_trolly_idx:
    # TELE counts all deaths (passengers + people on tracks); DEON counts
    # only passenger deaths.
    TELE = "teleology"
    DEON = "deontology"
class Simulator:
    """
    Trolly-problem simulation environment: n trollies run on a ring of n
    tracks (trolly i chooses between track i and track (i+1) % n).  When two
    trollies pick the same track, both crews of passengers die; people tied
    to a chosen track die as recorded below.  Kill/encounter statistics are
    accumulated across trials for per-trolly and aggregate loss metrics.
    """
    def __init__(self, n, full_info, seed, track_max=5, pass_max=5):
        """
        n: number of trollies in the simulation
        full_info: whether or not trollies have full information
                   (see definition in the project proposal)
        seed: random seed (currently unused — see the commented-out
              random.seed call below)
        track_max: maximum possible number of people tied on one track
        pass_max: maximum possible number of passengers on a trolly
        """
        self.n = n
        self.full_info = full_info
        self.seed = seed
        self.track_max = track_max
        self.pass_max = pass_max
        # random.seed(seed)
        # random number of people tied to each track
        self.track_nums = [random.randint(0, track_max) for i in range(n)]
        self.trolly_pass_nums = [random.randint(0, pass_max) for i in range(n)]
        # n trolly objects need to be manually set later with object calls
        self.trollies = [None for i in range(n)]
        # accumulators for each trials
        self.total_pass = 0 # total number of passengers
        self.total_track = 0 # total number of people on the track
        self.total_trials = 0 # total number of trials done so far
        # total passengers & track people killed by each trolly
        self.trolly_kill_dict = [{"pass": 0, "track": 0} for i in range(n)]
        self.total_pass_kill = 0 # total number of passengers killed
        self.total_track_kill = 0 # total number of people on the track killed
        # total passengers & track people encountered by each trolly
        # (include people from both tracks)
        self.trolly_tot_dict = [{"pass": 0, "track": 0} for i in range(n)]
    def clear_records(self):
        """Reset every accumulator (same fields as in __init__)."""
        # accumulators for each trials
        self.total_pass = 0 # total number of passengers
        self.total_track = 0 # total number of people on the track
        self.total_trials = 0 # total number of trials done so far
        # total passengers & track people killed by each trolly
        self.trolly_kill_dict = [{"pass": 0, "track": 0} for i in range(self.n)]
        self.total_pass_kill = 0 # total number of passengers killed
        self.total_track_kill = 0 # total number of people on the track killed
        # total passengers & track people encountered by each trolly
        # (include people from both tracks)
        self.trolly_tot_dict = [{"pass": 0, "track": 0} for i in range(self.n)]
    def trolly_track_lookup(self, trolly_idx):
        """
        helper function that returns the 2 track indices belong to the given
        indexed trolly: (default_track_idx, alternative_track_idx), wrapping
        around the ring for the last trolly
        """
        if trolly_idx < 0 or trolly_idx >= self.n:
            raise ValueError(f"trolly index out of bound, n={self.n}, trolly_idx={trolly_idx}")
        if trolly_idx == self.n-1:
            return (trolly_idx, 0)
        else:
            return (trolly_idx, trolly_idx+1)
    def trolly_neighbor_lookup(self, trolly_idx):
        """
        helper function that returns the 2 neighbor indices belong to the given
        indexed trolly
        (default_track_neighbor_idx, alternative_track_neighbor_idx)
        """
        if trolly_idx < 0 or trolly_idx >= self.n:
            raise ValueError(f"trolly index out of bound, n={self.n}, trolly_idx={trolly_idx}")
        if trolly_idx == 0:
            return (self.n-1, 1)
        elif trolly_idx == self.n-1:
            return (self.n-2, 0)
        else:
            return (trolly_idx-1, trolly_idx+1)
    def get_trolly_str_by_idx(self, idx):
        """Return str(trolly) for the trolly at position idx."""
        return str(self.trollies[idx])
    def get_trolly_str_arr(self):
        """Return str(trolly) for every trolly, in position order."""
        return [str(agent) for agent in self.trollies]
    def set_trolly_by_idx(self, idx, trolly_obj):
        """Install one trolly agent at position idx, seeding its passenger count."""
        trolly_obj.set_pass_num(self.trolly_pass_nums[idx])
        self.trollies[idx] = trolly_obj
    def batch_set_trollies(self, trolly_arr):
        """Install all n trolly agents at once (no None entries allowed)."""
        assert len(trolly_arr) == self.n
        assert None not in trolly_arr
        self.trollies = trolly_arr
    def shuffle_trolly_arr(self):
        """Randomly permute the trollies' positions in place."""
        random.shuffle(self.trollies)
    def refresh_track_nums(self):
        """update the number of people on all the tracks """
        self.track_nums = [random.randint(0, self.track_max) for i in range(self.n)]
    def refresh_pass_nums(self):
        """update the number of passengers on all the trollies """
        self.trolly_pass_nums = [random.randint(0, self.pass_max) for i in range(self.n)]
    def get_tot_tele_loss(self):
        """Teleology loss over all trials: fraction of everyone involved who died."""
        return (self.total_pass_kill + self.total_track_kill) / (self.total_pass + self.total_track)
    def get_tot_deon_loss(self):
        """Deontology loss over all trials: fraction of passengers who died."""
        return (self.total_pass_kill) / (self.total_pass)
    def get_tele_loss_by_idx(self, idx_arr):
        """Teleology loss restricted to the trollies at the given indices."""
        tot_kills = 0
        tot_ecounter = 0
        for i in idx_arr:
            tot_kills += self.trolly_kill_dict[i]['pass']
            tot_kills += self.trolly_kill_dict[i]['track']
            tot_ecounter += self.trolly_tot_dict[i]['pass']
            tot_ecounter += self.trolly_tot_dict[i]['track']
        return tot_kills/tot_ecounter
    def get_deon_loss_by_idx(self, idx_arr):
        """Deontology loss restricted to the trollies at the given indices."""
        tot_pass_kills = 0
        tot_pass_ecounter = 0
        for i in idx_arr:
            tot_pass_kills += self.trolly_kill_dict[i]['pass']
            tot_pass_ecounter += self.trolly_tot_dict[i]['pass']
        return tot_pass_kills/tot_pass_ecounter
    def get_top_bot_n_trolly_idx(self, n, loss_type):
        """
        n: number of returned indices per group
        loss_type: a LossType member (teleology or deontology loss)
        returns: (indices of the n lowest-loss trollies,
                  indices of the n highest-loss trollies)
        """
        if not isinstance(loss_type, LossType):
            raise TypeError('loss type must be an instance of LossType')
        assert n <= self.n
        loss_arr = []
        for i in range(len(self.trollies)):
            if loss_type == LossType.TELE:
                loss = self.get_tele_loss_by_idx([i])
            elif loss_type == LossType.DEON:
                loss = self.get_deon_loss_by_idx([i])
            loss_arr.append(loss)
        sort_idx = np.argsort(loss_arr)
        top_n = sort_idx[:n]
        bot_n = sort_idx[::-1][:n]
        return top_n, bot_n
    def run_trial(self):
        """Run one trial: every trolly picks a track, then the kill and
        encounter accumulators are updated.  Returns trolly_kill_dict."""
        track_chosen_arr = []
        self.total_trials += 1
        self.total_pass += sum(self.trolly_pass_nums)
        self.total_track += sum(self.track_nums)
        for idx, trolly in enumerate(self.trollies):
            # update passenger number
            trolly.set_pass_num(self.trolly_pass_nums[idx])
            (def_idx, alt_idx) = self.trolly_track_lookup(idx)
            trolly.set_track_nums(def_num=self.track_nums[def_idx], \
                                  alt_num=self.track_nums[alt_idx])
            # Under full information each trolly also sees its two
            # neighbors' passenger counts; otherwise it sees nothing.
            if self.full_info:
                (def_neigh_idx, alt_neigh_idx) = self.trolly_neighbor_lookup(idx)
                def_neigh_pass_num = self.trolly_pass_nums[def_neigh_idx]
                alt_neigh_pass_num = self.trolly_pass_nums[alt_neigh_idx]
            else:
                def_neigh_pass_num = None
                alt_neigh_pass_num = None
            self.trolly_tot_dict[idx]["pass"] += trolly.get_pass_num()
            self.trolly_tot_dict[idx]["track"] += sum(trolly.get_track_nums())
            # decision is 0 (default track) or 1 (alternative track).
            decision = trolly.make_decision(def_neigh_pass_num, alt_neigh_pass_num)
            track_chosen_arr.append(self.trolly_track_lookup(idx)[decision])
        # record kill stats for individual trolly
        for i, track_idx in enumerate(track_chosen_arr):
            # All trollies (at most two, by ring geometry) that chose this track.
            trolly_idx = [i for i, e in enumerate(track_chosen_arr) if e == track_idx]
            assert len(trolly_idx) == 1 or len(trolly_idx) == 2
            assert i == trolly_idx[0] or i == trolly_idx[1]
            if len(trolly_idx) == 2:
                # Two trollies on one track: this trolly's passengers and the
                # people tied to the shared track are attributed to it.
                self.trolly_kill_dict[i]["pass"] += self.trolly_pass_nums[i]
                self.trolly_kill_dict[i]['track'] += self.track_nums[track_idx]
        # record total kill stats
        for i, track_idx in enumerate(set(track_chosen_arr)):
            trolly_idx = [i for i, e in enumerate(track_chosen_arr) if e == track_idx]
            assert len(trolly_idx) == 1 or len(trolly_idx) == 2
            if len(trolly_idx) == 2:
                self.total_pass_kill += ((self.trolly_pass_nums[trolly_idx[0]] \
                                          + self.trolly_pass_nums[trolly_idx[1]]))
            self.total_track_kill += self.track_nums[track_idx]
        # print(track_chosen_arr)
        # print(self.total_pass_kill, self.total_track_kill)
        # print(self.total_trials, self.total_pass, self.total_track)
        # print(self.trolly_kill_dict)
        # print(self.trolly_tot_dict)
        return self.trolly_kill_dict
| true |
# Inheritance practice script: each labelled section below is an independent,
# self-contained example (classes are deliberately redefined per section).
class Parent():
    def first(self):
        print('first function')
class Child(Parent):
    def second(self):
        print('second function')
ob = Child()
ob.first()
ob.second()
# subclass example: Child overrides __init__ and view(), chaining up to
# Parent.__init__ explicitly so the inherited attributes are initialised
class Parent:
    def __init__(self , fname, fage):
        self.firstname = fname
        self.age = fage
    def view(self):
        print(self.firstname , self.age)
class Child(Parent):
    def __init__(self , fname , fage):
        Parent.__init__(self, fname, fage)
        self.lastname = "Xyz"
    def view(self):
        print("I am", self.firstname, self.lastname,".", "My age is ", self.age)
ob = Child("Abc" , '28')
ob.view()
# Single Inheritance: one parent, one child
class Parent:
    def func1(self):
        print("this is function one")
class Child(Parent):
    def func2(self):
        print(" this is function 2 ")
ob = Child()
ob.func1()
ob.func2()
# Multiple Inheritance: Child derives from two independent parents
class Parent:
    def func1(self):
        print("this is function 1")
class Parent2:
    def func2(self):
        print("this is function 2")
class Child(Parent , Parent2):
    def func3(self):
        print("this is function 3")
ob = Child()
ob.func1()
ob.func2()
ob.func3()
# Multilevel Inheritance: grandparent -> parent -> child chain
class Parent:
    def func1(self):
        print("this is function 1")
class Child(Parent):
    def func2(self):
        print("this is function 2")
class Child2(Child):
    def func3(self):
        print("this is function 3")
ob = Child2()
ob.func1()
ob.func2()
ob.func3()
# Hierarchical Inheritance: two children share one parent
class Parent:
    def func1(self):
        print("this is function 1")
class Child(Parent):
    def func2(self):
        print("this is function 2")
class Child2(Parent):
    def func3(self):
        print("this is function 3")
ob = Child()
ob1 = Child2()
ob1.func1()
ob1.func3()
# Hybrid Inheritance: a mix of hierarchical and multilevel inheritance
class Parent:
    def func1(self):
        print("this is function one")
class Child(Parent):
    def func2(self):
        print("this is function 2")
class Child1(Parent):
    def func3(self):
        print(" this is function 3")
class Child3(Child1):
    def func4(self):
        print(" this is function 4")
ob = Child3()
ob.func1()
# Python super() Function: call the parent implementation from the child
class Parent:
    def func1(self):
        print("this is function 1")
class Child(Parent):
    def func2(self):
        super().func1()
        print("this is function 2")
ob = Child()
ob.func2()
# Method Overriding: the child's func1 replaces the parent's
class Parent:
    def func1(self):
        print("this is parent function")
class Child(Parent):
    def func1(self):
        print("this is child function")
ob = Child()
ob.func1()
| true |
def scaled(r):
    """Map a string to a digit in 1..10.

    Each character contributes ord(lower(ch)) - 96 (so 'a' -> 1 ... 'z' -> 26);
    the total is reduced modulo 10, with 0 mapped to 10 (including the empty
    string). Fix: the original shadowed the builtin `sum` with a local name.
    """
    total = sum(ord(ch.lower()) - 96 for ch in r)
    # abs() matters for non-letter input, where per-character values go negative.
    digit = abs(total) % 10
    return digit if digit else 10
abe606867ecfe493e6e9a3fcf536c6f7450a91d6 | Python | netsus/Rosalind | /DAG.py | UTF-8 | 1,042 | 3.53125 | 4 | [] | no_license | # coding: utf-8
"""
난이도 : 6.5
문제 : 처음에 그래프 개수(k)가 주어지고,
한줄 띄고, 정점 개수(v)와 간선 개수(e)가 주어지고, 다음줄에 시작 정점과 도착 정점 (방향 그래프)이 주어진다. -> 이렇게 그래프 개수만큼 주어진다.
주어진 방향 그래프에 대해 사이클이 있으면(cyclic) -1, 사이클이 없으면(acylcic) 1을 출력.
알고리즘 :
"""
import networkx as nx
from IPython.core.display import Image
from networkx.drawing.nx_pydot import to_pydot
def make_graph(g, f, v, e):
    """Populate directed graph *g* with *e* edges read from file handle *f*.

    Each of the next *e* lines of *f* holds "source destination" as integers.
    *v* (the vertex count) is accepted for symmetry with the input format but
    is not needed here. Returns the same graph object *g*.
    """
    for _ in range(e):
        src, dst = (int(tok) for tok in f.readline().split())
        g.add_edge(src, dst)
    return g
with open('rosalind_dag.txt') as f:
    # First line of the dataset: how many graphs follow.
    k = int(f.readline())
    for i in range(k):
        f.readline()  # skip the blank separator line before each graph
        v,e = map(int,f.readline().split())
        g = nx.DiGraph()
        graph = make_graph(g,f,v,e)
        try:
            # find_cycle raises an exception when the graph is acyclic.
            nx.find_cycle(graph,orientation='original')
            print(-1,end=' ')
        except:
            # NOTE(review): bare except also hides unrelated errors; ideally
            # catch networkx's NetworkXNoCycle only.
            print(1,end=' ')
| true |
e5a5d4c50172452a9cd4315f12552a663febf9a8 | Python | d-sanchez/Game-Engine-Architecture | /as05/selectionMgr.py | UTF-8 | 1,380 | 2.734375 | 3 | [] | no_license | import ogre.io.OIS as OIS
class SelectionMgr:
    """Cycles entity selection: TAB selects the next entity, SHIFT-TAB adds
    the next entity to the current selection. Selected entities are shown
    with a bounding box."""

    def __init__(self, engine):
        self.engine = engine

    def init(self):
        """Grab the keyboard handle and arm the key-repeat debounce timer."""
        self.keyboard = self.engine.inputMgr.keyboard
        self.toggle = 0.1

    def tick(self, dt):
        """Per-frame update: count the debounce down and react to TAB."""
        if self.toggle >= 0:
            self.toggle -= dt
        # Read for debugging purposes; not otherwise used here.
        current_index = self.engine.entityMgr.selectedEntIndex
        if self.toggle >= 0 or not self.keyboard.isKeyDown(OIS.KC_TAB):
            return
        # Debounce: ignore TAB again for the next 0.4 seconds.
        self.toggle = 0.4
        if self.keyboard.isKeyDown(OIS.KC_LSHIFT):
            self.addNextEnt()
        else:
            self.selectNextEnt()

    def addNextEnt(self):
        """Append the next entity to the selection and highlight it."""
        nxt = self.engine.entityMgr.getNextEnt()
        nxt.node.showBoundingBox(True)
        self.engine.entityMgr.selectedEntities.append(nxt)

    def selectNextEnt(self):
        """Replace the whole selection with just the next entity."""
        mgr = self.engine.entityMgr
        for picked in mgr.selectedEntities:
            picked.node.showBoundingBox(False)
        mgr.selectedEntities = []
        self.addNextEnt()

    def stop(self):
        """Nothing to tear down."""
        pass
| true |
fefda8c7ede3336c44efee5612a7ee0cf75f1b00 | Python | ArmanHome24/ArmanRepo | /Udemy_Advance_Python/App-4-Photo-Searcher/main.py | UTF-8 | 1,095 | 2.859375 | 3 | [] | no_license | import requests
import wikipedia
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen, ScreenManager
Builder.load_file('frontend.kv')  # register the kv layout used by the widget classes below
class Wiki:
    """Thin wrapper around the `wikipedia` package."""

    def find_image_url(self, query):
        """Return the URL of the first image on the Wikipedia page for *query*."""
        return wikipedia.page(query).images[0]
class Download:
    """Fetches binary resources over HTTP."""

    def download_image(self, url) -> bytes:
        """Return the raw response body of *url* as bytes."""
        response = requests.get(url)
        return response.content
class FirstScreen(Screen):
    """Screen holding a text input (ids.txt) and an image widget (ids.img),
    both declared in frontend.kv."""

    # Where the downloaded picture is cached on disk.
    IMAGE_PATH = 'resources/image2.jpg'

    def get_image_url(self):
        """Look up the first Wikipedia image URL for the text typed in ids.txt."""
        query = self.manager.current_screen.ids.txt.text
        image_url = Wiki().find_image_url(query)
        print(image_url)
        return image_url

    def get_image(self):
        """Download the image, cache it at IMAGE_PATH, and return the raw bytes."""
        image = Download().download_image(self.get_image_url())
        print(image)
        with open(self.IMAGE_PATH, 'wb') as file:
            file.write(image)
        return image

    def set_image(self):
        """Refresh the on-screen image.

        Bug fix: kivy's Image.source expects a file path, but the original
        assigned the raw downloaded bytes returned by get_image().
        """
        self.get_image()
        self.manager.current_screen.ids.img.source = self.IMAGE_PATH
class RootWidget(ScreenManager):
    """Root screen manager; its screens are declared in frontend.kv."""
    pass
class MainApp(App):
    def build(self):
        """Return the root widget of the Kivy application."""
        return RootWidget()
# Start the app; blocks until the window is closed.
MainApp().run()
| true |
cc10ec942bcbeea28e82a5a11a0116faae77af8e | Python | AlJamilSuvo/LeetCode | /code23.py | UTF-8 | 1,644 | 3.4375 | 3 | [] | no_license | from queue import PriorityQueue
class ListNode(object):
    """Singly linked list node holding a value and a pointer to the next node."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __str__(self):
        """Render the chain from this node onward as 'v1->v2->...->'."""
        rendered = '%s->' % self.val
        if self.next is not None:
            rendered += str(self.next)
        return rendered
class Solution(object):
    """LeetCode 23: merge k sorted linked lists via divide and conquer."""

    def mergeKLists(self, lists):
        """Merge a list of sorted linked-list heads into one sorted list.

        Returns None for an empty input. Runs in O(N log k) overall, where N
        is the total node count and k = len(lists).
        """
        if len(lists) == 0:
            return None
        if len(lists) == 1:
            return lists[0]
        # Split, merge each half recursively, then merge the two results.
        # (// replaces the original int(len/2) float division.)
        mid = len(lists) // 2
        left = self.mergeKLists(lists[:mid])
        right = self.mergeKLists(lists[mid:])
        return self.mergeTwoLists(left, right)

    def mergeTwoLists(self, l1, l2):
        """Merge two sorted linked lists; the merge is stable (l1 wins ties)."""
        dummy = ListNode(-1000)  # sentinel; its value is never exposed
        tail = dummy
        while l1 is not None and l2 is not None:
            if l1.val <= l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # Attach whichever list still has nodes in O(1) instead of walking
        # the remainder node-by-node as the original did.
        tail.next = l1 if l1 is not None else l2
        return dummy.next
def makeList(nums):
    """Build a singly linked list from a non-empty sequence and return its head."""
    head = ListNode(nums[0])
    tail = head
    for value in nums[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return head
# Driver for the sample in the problem statement:
# [1->4->5, 1->3->4, 2->6] merges to 1->1->2->3->4->4->5->6.
lists=[]
lists.append(makeList([1,4,5]))
lists.append(makeList([1,3,4]))
lists.append(makeList([2,6]))
s=Solution()
print(s.mergeKLists(lists))
| true |
09872d7051d70d2b46824db62f0a53cce16e2b8e | Python | huangshunliang/DLCV2018spring | /final/relation_network/utils/utils.py | UTF-8 | 803 | 2.859375 | 3 | [] | no_license | from scipy.misc import imread, imsave
import os
def listdir(directory, key=None):
    """List *directory* sorted by *key*; by default entries are ordered by the
    integer value of the file name's stem (so "10.png" sorts after "2.png")."""
    sort_key = key if key else (lambda name: int(os.path.splitext(name)[0]))
    return sorted(os.listdir(directory), key=sort_key)
def mkdir(directory):
    """Create *directory* (including parents) if nothing exists at that path."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
# image process (read, save, plot)
def read_image(image_dir, type='png'):
    """Load every *type* image in *image_dir* (numeric-stem order) as arrays.

    NOTE(review): scipy.misc.imread was removed in SciPy 1.2; on modern SciPy
    this needs imageio.imread (or similar) - confirm the pinned version.
    """
    image_files = [os.path.join(image_dir, image) for image in listdir(image_dir) if image.endswith(type)]
    image_files = [imread(image) for image in image_files]
    return image_files
def save_image(image, filename):
    """Write array *image* to *filename*.

    NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 - confirm the
    pinned version.
    """
    imsave(filename, image)
if __name__ == '__main__':
    # Manual smoke test: ensure the output folder exists and load a sample set.
    mkdir('output')
    image = read_image('../datasets/test')
    #save_image(image, 'output/test.png')
| true |
c3f221259f8a43ff05da323a0e9616ad1e4901d3 | Python | mykespb/edu | /homeinf/closest.py | UTF-8 | 2,199 | 3.875 | 4 | [] | no_license | #!/usr/bin/env python
# Mikhail Kolodin
# 2022-04-22 2022-04-22 v.1.1
# ~ Given a list of random natural numbers,
# ~ find the pair of closest numbers.
# ~ (If there are several such pairs, show them all.)
from random import randint as ri
# parameters
MAX = 100 # largest natural number
LEN = 10 # length of the list
# build a random list (console output below is in Russian)
lon = [ri(1, MAX) for _ in range(LEN)]
print("исходный список:\n", lon)
# find the closest numbers
# sort the list
lon.sort()
print("упорядоченный список:\n", lon)
# build the list of neighbour distances as (gap, left, right) tuples
lod = [(lon[i] - lon[i-1], lon[i-1], lon[i]) for i in range(1, len(lon))]
print("список расстояний:\n", lod)
# sort the distance list so the smallest gap comes first
lod.sort()
print("упорядоченный список расстояний:\n", lod)
# take the minimum distance
mindist = lod[0][0]
print("\nрезультат:\nмин. расстояние равно", mindist)
# print every pair at the minimum distance (the list is sorted, so stop early)
for element in lod:
    if element[0] == mindist:
        print("пара: от", element[1], "до", element[2])
    else:
        break
# say goodbye
print("\nконец. спасибо всем...")
# ~ исходный список:
# ~ [11, 3, 80, 27, 48, 93, 12, 70, 69, 10]
# ~ упорядоченный список:
# ~ [3, 10, 11, 12, 27, 48, 69, 70, 80, 93]
# ~ список расстояний:
# ~ [(7, 3, 10), (1, 10, 11), (1, 11, 12), (15, 12, 27), (21, 27, 48), (21, 48, 69), (1, 69, 70), (10, 70, 80), (13, 80, 93)]
# ~ упорядоченный список расстояний:
# ~ [(1, 10, 11), (1, 11, 12), (1, 69, 70), (7, 3, 10), (10, 70, 80), (13, 80, 93), (15, 12, 27), (21, 27, 48), (21, 48, 69)]
# ~ результат:
# ~ мин. расстояние равно 1
# ~ пара: от 10 до 11
# ~ пара: от 11 до 12
# ~ пара: от 69 до 70
# ~ конец. спасибо всем...
| true |
"""
TwitchMIDI

Reads a Twitch channel's chat over IRC and plays a MIDI note for every chat
message. The following script requires you to have a Twitch account; the
channel you gather chat stream data from does not need to be joined.

v1.0 - 05/19/20
"""

server = 'irc.chat.twitch.tv'
port = 6667
# Fill these in before running. (Bug fix: the original left the assignments
# empty - `nickname = #comment` - which is a syntax error.)
nickname = ''  # your Twitch username
token = ''     # your Twitch authorization token "oauth:XXXXXX..."
channel = ''   # channel you wish to gather chat data from (always starts with #)

import socket
import logging
import time
import mido

# Connect and authenticate against Twitch's IRC chat interface.
sock = socket.socket()
sock.connect((server, port))
sock.send(f"PASS {token}\n".encode('utf-8'))
sock.send(f"NICK {nickname}\n".encode('utf-8'))
sock.send(f"JOIN {channel}\n".encode('utf-8'))

resp = sock.recv(2048).decode('utf-8')

# Use MIDO to determine your MIDI outputs and then add it to the argument.
# (Renamed from `port`, which shadowed the IRC port number above.)
midi_port = mido.open_output('')

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s — %(message)s',
                    datefmt='%Y-%m-%d_%H:%M:%S',
                    handlers=[logging.FileHandler('chat.log', encoding='utf-8')])
logging.info(resp)

from emoji import demojize

# This will continue to run until the program is stopped.
# For testing, consider making this a finite loop that will only repeat a set
# number of times.
while True:
    resp = sock.recv(2048).decode('utf-8')
    if resp.startswith('PING'):
        # Keep-alive required by the IRC server.
        sock.send("PONG\n".encode('utf-8'))
    elif len(resp) > 0:
        logging.info(demojize(resp))
        # Here is where the MIDI note will be output.
        msg = mido.Message('note_on', note=60, time=2)
        midi_port.send(msg)
| true |
de3d62c3827235fd0d793848f3a43978dd534279 | Python | wwlee94/idus-product-list-crawling | /crawling_Idus.py | UTF-8 | 3,460 | 2.71875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import requests
import bs4
from bs4 import BeautifulSoup
import io
import csv
import re
# Scrape product pages listed in idus_item_url.csv and write one CSV row per
# product to idus_item_list.csv.
# NOTE(review): looks like Python 2-style CSV handling (binary file mode plus
# manual .encode('utf-8') on every field) - confirm the target interpreter.
index = 0
wf = io.open('idus_item_list.csv', 'wb')
writer = csv.writer(wf)
writer.writerow([index, 'thumbnail_520', 'thumbnail_720', 'thumbnail_list_320', 'title', 'seller', 'cost', 'discount_cost', 'discount_rate', 'description'])
rf = io.open('idus_item_url.csv','rb')
reader = csv.reader(rf)
for URL_BASE in reader:
    req = requests.get(URL_BASE[0])
    html = req.text
    soup = BeautifulSoup(html, 'html.parser')
    # extracts the URL out of an inline CSS "url(...)" declaration
    p = re.compile('url\((.*)\)')
    # image list - 320px size
    product_image = soup.select(
        '#content > div.inner-w.layout-split > section.prd-imgs > div > fieldset > ul'
    )[0].children
    image_thumbnail_list_320 = []
    for child in product_image:
        if type(child) is not bs4.element.NavigableString:
            image_style = child
            url = p.findall(image_style.get('style'))[0]
            image_thumbnail_list_320.append(url.encode('utf-8'))
    # derive the larger sizes from the first 320px thumbnail's base name
    image_thumbnail_520 = image_thumbnail_list_320[0].split('_')[0] + '_520.jpg'
    image_thumbnail_720 = image_thumbnail_list_320[0].split('_')[0] + '_720.jpg'
    # image list joined into a single '#'-separated string
    image_list = '#'.join(image_thumbnail_list_320)
    # price (original price, discounted price, discount rate)
    # cost_size = 3 -> cross: original price, strong: discounted price, point: discount rate
    # cost_size = 1 -> strong: original price, the rest None
    product_cost = soup.select(
        '#content > div.inner-w.layout-split > section.ui-product-detail > div.prd-cost > span.txt-cross'
    )
    if product_cost:
        product_cost = product_cost[0].text.encode('utf-8')
    else: product_cost = None
    product_discount_cost = soup.select(
        '#content > div.inner-w.layout-split > section.ui-product-detail > div.prd-cost > span.txt-strong'
    )[0].text.encode('utf-8')
    product_discount_rate = soup.select(
        '#content > div.inner-w.layout-split > section.ui-product-detail > div.prd-cost > span.txt-point'
    )
    if product_discount_rate:
        product_discount_rate = product_discount_rate[0].text.encode('utf-8')
    else: product_discount_rate = None
    # no discount: the strong price is actually the original price
    if product_discount_rate == None and product_cost == None:
        product_cost = product_discount_cost
        product_discount_cost = None
    # product title
    product_title = soup.select(
        '#content > div.inner-w.layout-split > section.ui-product-detail > h1'
    )[0].text.encode('utf-8')
    # seller
    product_seller = soup.select(
        '#content > div.inner-w.layout-split > section.ui-product-detail > div.circ-card > a.circ-label.fl > span'
    )[0].text.encode('utf-8')
    # description
    product_description = soup.select(
        '#prd-info > p'
    )[0].text.encode('utf-8')
    index += 1
    writer.writerow([
        index,
        image_thumbnail_520,
        image_thumbnail_720,
        image_list,
        product_title,
        product_seller,
        product_cost,
        product_discount_cost,
        product_discount_rate,
        product_description
    ])
    print(str(product_title) + ' 완료 !!')
    # print(image_thumbnail_520)
    # print(image_thumbnail_720)
    # print(image_list)
    # print(product_title)
    # print(product_seller)
    # print(product_cost)
    # print(product_discount_cost)
    # print(product_discount_rate)
    # print(product_description)
rf.close()
wf.close()
| true |
bea59256f48993381788090d7ee7ef8969731dd6 | Python | JacProsser/college | /Assignment 1 - Procedural Programming/Python Challenges (1-30)/Challenge 8.py | UTF-8 | 1,101 | 4.09375 | 4 | [] | no_license | #importing packages
import colorama
from colorama import Fore
import os
# clear the console screen before the first prompt (Windows-only "cls")
os.system("cls")
def process():
    """Prompt for a name and print the ASCII value of each of its characters."""
    # clear the console screen (Windows-only "cls")
    os.system("cls")
    # banner: what the program is and who it is made by
    print("ASCII values in", Fore.YELLOW+"python.", Fore.RESET+"| Made by", Fore.CYAN+"Jac Prosser\n", Fore.RESET)
    # ask the user for their name
    name = input("What is your name? ")
    your_name = name
    # print the ASCII (ordinal) value of every character, as a list
    print("ASCII Values: ", [ord(x) for x in your_name])
# run once, then keep offering to restart until the user answers "n"
process()
while True:
    again = str(input(Fore.RESET+"\nWould you like to type a different name? ["+Fore.GREEN+"Y"+Fore.RESET+"/"+Fore.RED+"N"+ Fore.RESET+"]: ")).lower()
    if again == "y":
        print(Fore.RESET)
        process()
    elif again == "n":
        exit()
| true |
def main():
    """Place one bomb on an H x W grid so it destroys as many of the M targets
    as possible; a bomb destroys every target in its row and its column.
    Reads the input from stdin and prints the maximum count."""
    H, W, M = map(int, input().split())
    # H, W, M = (3 * 10 ** 5, 3 * 10 ** 5, 3 * 10 ** 5)
    # Per-row / per-column target counts, 1-based.
    row = [0] * (H + 1)
    row_set = [set() for _ in range(H+1)]
    column = [0] * (W + 1)
    column_set = [set() for _ in range(W+1)]
    # import random
    # NOTE(review): ms is never used, and row_set / column_set are filled
    # below but never read.
    ms = []
    positions = set()
    for m in range(M):
        h, w = map(int, input().split())
        positions.add((h, w))
        # h, w = (random.randrange(3*10**5), random.randrange(3*10**5))
        row[h] += 1
        row_set[h].add(w)
        column[w] += 1
        column_set[w].add(h)
    # All row indices that attain the maximum per-row count maxR.
    max_rows = set()
    maxR = -1
    for i, v in enumerate(row[1:]):
        if v > maxR:
            max_rows = set()
            max_rows.add(i+1)
            maxR = v
        elif v == maxR:
            max_rows.add(i+1)
    # All column indices that attain the maximum per-column count maxC.
    max_cols = set()
    maxC = -1
    for i, v in enumerate(column[1:]):
        if v > maxC:
            max_cols = set()
            max_cols.add(i+1)
            maxC = v
        elif v == maxC:
            max_cols.add(i + 1)
    # If some best-row/best-column cell is free of a target, a bomb there
    # destroys maxR + maxC targets.
    for y in max_rows:
        for x in max_cols:
            if not (y, x) in positions:
                print(maxR + maxC)
                exit()
    # Otherwise every candidate cell holds a target, which would be counted
    # twice, so the answer is one less.
    print(maxR + maxC - 1)
main()
| true |
d31903bdac4b1bdd1f8631f8edee12e488de899a | Python | movingpictures83/Statistics | /StatisticsPlugin.py | UTF-8 | 1,173 | 2.828125 | 3 | [
"MIT"
] | permissive | import sys
import numpy
#import PyPluMA
class StatisticsPlugin:
    """PluMA-style plugin: read a relative-abundance CSV and print per-OTU stats.

    Expected CSV layout: a header row whose first cell is '""' followed by one
    OTU name per column, then one row per sample whose first cell is the sample
    label followed by abundances in [0, 1].
    """

    def input(self, filename):
        """Remember the CSV path; parsing happens in run()."""
        self.myfile = filename

    def run(self):
        """Parse the CSV into self.bacteria (column names), self.ADJ (an
        m x n float matrix) and the dimensions self.n / self.m."""
        # `with` closes the handle even on parse errors (the original leaked it).
        with open(self.myfile, 'r') as filestuff:
            firstline = filestuff.readline()
            self.bacteria = firstline.split(',')
            if self.bacteria.count('""') != 0:
                # Drop the empty corner cell of the header row.
                self.bacteria.remove('""')
            self.n = len(self.bacteria)
            self.ADJ = []
            self.m = 0
            for line in filestuff:
                contents = line.split(',')
                # Column 0 is the sample label; the remaining n are abundances.
                self.ADJ.append([float(contents[j + 1]) for j in range(self.n)])
                self.m += 1

    def output(self, filename):
        """Print OTUs with mean abundance >= 0.5 %, sorted by mean descending.

        NOTE(review): `filename` is accepted but unused - results go to stdout,
        mirroring the original behaviour.
        """
        means = []
        for j in range(self.n):
            # Per-OTU abundances as percentages, rounded to two decimals.
            vec = [round(self.ADJ[i][j] * 100, 2) for i in range(self.m)]
            means.append((numpy.mean(vec), numpy.std(vec), self.bacteria[j], vec))
        # sort() followed by reverse() (rather than sort(reverse=True)) keeps
        # the original tie ordering.
        means.sort()
        means.reverse()
        print("OTU\tMean\tStd Dev\tAbundances")
        for element in means:
            if element[0] >= 0.5:
                print(element[2], "\t", round(element[0], 2), "%\t", round(element[1], 2), "%\t", element[3])
| true |
8ba00400bafcc24e2ec80b756981c83962d7b405 | Python | arata15/create_sql | /create_sql.py | UTF-8 | 1,960 | 3 | 3 | [] | no_license | import pandas as pd
import openpyxl as px
import re
import json
import requests as rq
# current sheet row, and the output file number
count = file_count = 1
# loop-termination flag
end_flg = False
# load the workbook from the Excel file
work_book = px.load_workbook("任意のディレクトリ/Excelファイル名")
# load the worksheet
sheet = work_book.active
sheet = work_book.get_sheet_by_name("Excelシート名")
# values used when building the SQL statements (row 2, columns D/E/F hold
# the table name, the column to update and the WHERE column)
text = file_name = ""
upd_tbl_nm = sheet.cell(row=2, column=4)
upd_column = sheet.cell(row=2, column=5)
where_column = sheet.cell(row=2, column=6)
# Write one generated SQL statement to a numbered text file.
def create_file(update_text, where_text, text, file_name, file_count, out_dir="任意のディレクトリ"):
    """Write "<update_text>\\nEND\\n<where_text minus its trailing comma>)" to
    <out_dir>/SQL<file_count>.txt.

    `text` and `file_name` are kept for backward compatibility with existing
    callers but are ignored (the original overwrote them immediately).
    `out_dir` generalizes the previously hard-coded output directory.
    """
    file_name = out_dir + "/SQL" + str(file_count) + ".txt"
    text = update_text + "\n" + "END" + "\n" + where_text[:-1] + ")"
    # `with` guarantees the handle is closed even if the write fails.
    with open(file_name, "w") as out:
        out.write(text)
# Build UPDATE ... CASE statements from columns A/B, chunked 1000 rows per file
try:
    while end_flg == False:
        count += 1
        columnA = sheet.cell(row=count, column=1)
        columnB = sheet.cell(row=count, column=2)
        if count % 1000 == 2 or count == 2:
            # start a new statement at the first data row of each 1000-row chunk
            update_text = "UPDATE " + str(upd_tbl_nm.value) + " SET " + str(upd_column.value) + " = CASE " + str(where_column.value)
            where_text = "WHERE " + str(where_column.value) + " IN("
        if str(columnA.value) == str(None):
            # a blank key cell marks the end of the data: flush and stop
            create_file(update_text,where_text,text,file_name,file_count)
            end_flg = True
        else:
            # append one WHEN 'key' THEN 'value' branch and its IN() entry
            update_text = update_text + "\nWHEN" + "'" + str(columnA.value) + "'" + "THEN" + "'" + str(columnB.value) + "'"
            where_text = where_text + "\n'" + str(columnA.value) + "',"
            if count % 1000 == 1:
                # chunk full: write it out and move to the next file number
                create_file(update_text,where_text,text,file_name,file_count)
                file_count += 1
except:
    # NOTE(review): bare except - any error just prints its traceback
    import traceback
    traceback.print_exc()
| true |
85cf526875c01371753f0e9f9e466bfe6011d362 | Python | huangliu0909/Pinyin-Chinese-character-conversion | /main_trie.py | UTF-8 | 749 | 2.640625 | 3 | [] | no_license | from PinyinDict import pinyinDict
from PinyinDict import is_Chinese
from learn import learn
from pypinyin import lazy_pinyin
from Ngram import get2grams
import numpy as np
# example pinyin string to be segmented
pinyinString = "maixiangchongmanxiwangdexinshiji"
filename = 'data_trie'
f = open(filename, 'r', encoding='UTF-8').read()
res = ""
str_pin = []
str_word = []
# keep only Chinese characters; collect each character and its pinyin
for i in f:
    if is_Chinese(i):
        res += i
        str_word += i
        str_pin += lazy_pinyin(i)
input_learn = []
for c in res:
    input_learn.append(c)
# 2-gram statistics between adjacent characters
ngram = get2grams(str_word)
# pinyin-to-character pairing, stored in a trie
pd, tree = pinyinDict(input_learn)
obs = tree.split_spell(pinyinString).split(" ")
print(pinyinString)
print(obs)
| true |
bb8b3c66ba72be1dbb3e06e4b6c812376fb4c953 | Python | PinkShnack/ETF_Portfolio_Manager | /portfolio/etf.py | UTF-8 | 5,570 | 3.265625 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import portfolio.io as port_io
import portfolio.setup_data as setup_data
class ETF:
    def __init__(self, ticker, country, dummy_data=False):
        '''
        ETF class allows users to interact with single ETFs, and can be
        imagined as a subset of the Portfolio class.
        Use the Portfolio class to view data from several ETFs.
        Parameters
        ----------
        ticker : str
            ETF ticker symbol, e.g. 'CSPX'.
        country : str
            Listing country code, e.g. "UK" or "GER"; stored upper-cased.
        dummy_data : bool, default False
            If True, load bundled dummy holdings instead of live data.
        Attributes
        ----------
        df : pandas.DataFrame or None
            Holdings table, populated by _load_tickers_init().
        etf_name : str
            Full fund name resolved from the ticker.
        Examples
        --------
        >>> from portfolio.etf import ETF
        >>> ticker = 'CSPX'
        >>> SP500 = ETF(ticker=ticker, country="UK")
        >>> SP500
        <ETF, CSPX, iShares Core S&P 500 UCITS ETF USD (Acc)>
        The df attribute is just a Pandas dataframe, allowing you to use Pandas
        for any data analysis you wish.
        >>> df = SP500.df
        Check out the first 3 lines in the Pandas dataframe
        >>> SP500.df.head(3)
        >>> ax = SP500.plot_summarised_etf(groupby="Sector")
        Look at the weighting of a single company
        >>> SP500.get_company_info(company_name="Apple")
        APPLE INC has a weighting of 6.4 % in this ETF.
        ('APPLE INC', 6.4)
        Example of dummy data
        >>> etf1 = ETF(ticker='EUNK', country="GER", dummy_data=True)
        >>> print(etf1.df.head())
        >>> ax = etf1.plot_summarised_etf(groupby="Sector")
        '''
        self.ticker = ticker
        self.country = country.upper()
        self.default_sort_values_by = "Weight (%)"
        self._etf_ticker_name_init()
        self.df = None  # will be replaced with df when loaded
        self.dummy_data = dummy_data
        self._load_tickers_init()

    def _load_tickers_init(self):
        """ Load the chosen ticker into self.df as a single DataFrame."""
        if not isinstance(self.ticker, str):
            raise ValueError("ticker must be a string")
        df_list = port_io.load_tickers(ticker_list=[self.ticker],
                                       country=self.country,
                                       dummy_data=self.dummy_data)
        # Exactly one DataFrame is expected for a single-ticker ETF.
        if len(df_list) == 1:
            df = df_list[0]
        else:
            raise ValueError("Problem with loading ETF, should be 1 ticker. "
                             f"Got {len(df_list)} instead.")
        self.df = df
        self._handle_other_languages()

    def _handle_other_languages(self):
        """Create English versions of each important column"""
        if self.country == "GER":
            self.df["Ticker"] = self.df["Emittententicker"]
            self.df["Weight (%)"] = self.df["Gewichtung (%)"]
            self.df["Sector"] = self.df["Sektor"]
            print(self.df.head())

    def __repr__(self):
        """E.g. <ETF, CSPX, iShares Core S&P 500 UCITS ETF USD (Acc)>."""
        return '<%s, %s, %s>' % (
            self.__class__.__name__,
            self.ticker,
            self.etf_name
        )

    def _etf_ticker_name_init(self):
        """Resolve and store the full fund name for self.ticker."""
        info = setup_data.get_info_using_ticker_list(
            [self.ticker], 'name', self.country)
        self.etf_name = info[0]

    def summarise(self, groupby, sort_values_by="auto"):
        """Group the holdings by *groupby* (e.g. "Sector"), sum each group and
        sort descending by *sort_values_by* ("auto" -> "Weight (%)")."""
        if sort_values_by == "auto":
            sort_values_by = self.default_sort_values_by
        df_grouped = self.df.groupby([groupby.capitalize()],
                                     as_index=False).sum().sort_values(
            by=sort_values_by, ascending=False, ignore_index=True)
        return df_grouped

    def plot_summarised_etf(
            self, groupby, sort_values_by="auto", kind="barh",
            legend=False, save=False, **kwargs):
        """Plot the summarised holdings and return the matplotlib axes.

        Extra keyword arguments are forwarded to DataFrame.plot. If *save* is
        True the figure is written to "<ticker>_<groupby>.png".
        """
        if sort_values_by == "auto":
            sort_values_by = self.default_sort_values_by
        df_grouped = self.summarise(groupby=groupby)
        ax = df_grouped.plot(x=groupby, y=sort_values_by,
                             kind=kind, legend=legend, **kwargs)
        plt.title(self.etf_name)
        plt.tight_layout()
        if save:
            plt.savefig(f"{self.ticker}_{groupby}.png")
        return ax

    def get_company_info(self, company_name, sort_values_by="auto"):
        """Print and return (full company name, weight) for *company_name*."""
        if sort_values_by == "auto":
            sort_values_by = self.default_sort_values_by
        full_name, company_weight = self._company_weighting(
            company_name, sort_values_by)
        print(f"{full_name} has a weighting of {company_weight} % "
              "in this ETF.")
        return(full_name, company_weight)

    def _company_weighting(self, company_name, sort_values_by="auto"):
        """Case-insensitive substring search for *company_name* in the "Name"
        column. Returns (full name, weight), or ('', None) if no match;
        raises ValueError if the name matches more than one holding."""
        if sort_values_by == "auto":
            sort_values_by = self.default_sort_values_by
        df_copy = self.df.copy()
        # Make the company name the index so we can filter by it
        # (the last row is dropped first; presumably a totals row - TODO confirm).
        df_copy.drop(df_copy.tail(1).index, inplace=True)
        df_copy.set_index("Name", inplace=True)
        # find the company name in the new name index
        full_name = []
        for index_name in df_copy.index:
            if company_name.upper() in index_name.upper():
                full_name.append(index_name)
        # check if there is only one matching name
        if len(full_name) == 1:
            full_name = full_name[0]
            company_weight = df_copy.at[full_name, sort_values_by]
            return(full_name, company_weight)
        elif len(full_name) > 1:
            raise ValueError("More than one company found, maybe the name you "
                             "gave was ambiguous.")
        else:
            return('', None)
| true |
44680cd5366e0c3c7ea31be050d4cf656d29a0ee | Python | duubyPlz/prOve_it | /.name/scrape.py | UTF-8 | 350 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
import urllib.request
import sys
if (len(sys.argv) != 2):
str = ''
for arg in sys.argv:
str += arg + ' '
raise ValueError('Usage: ./scrape.py <url>, current command: ' + str);
url = sys.argv[1]
# print(" > url: " + url + "\n\n\n\n")
page = urllib.request.urlopen(url)
herp = page.read()
print(herp) | true |
90f8489c3a060ce558ecc11f9c55890913ac1bf0 | Python | Wu-zpeng/PythonLearn | /day11/动态传参.py | UTF-8 | 136 | 2.859375 | 3 | [] | no_license | # def func(**kwargs):
# print(kwargs)
#
# func(a=1, b=2, c=3)
def fun(**kwargs):
print(kwargs)
dic = {'a':1,'b':2}
fun(**dic)
| true |
440a375a0729c705f0afb7dd17f2da7d802e6046 | Python | KeYunYun/studens_python | /基础编程/TCP服务器.py | UTF-8 | 563 | 2.765625 | 3 | [] | no_license | from socket import *
HOST=''
PROT=8000
BUFSIZE=1024
ADDR=(HOST,PROT)
tcpSocketSer=socket(AF_INET,SOCK_STREAM)
tcpSocketSer.bind(ADDR)
tcpSocketSer.listen(5)
while True:
clientSocket,clientInfo=tcpSocketSer.accept()
print('链接成功,客户端的ip为和端口为%s'%str(clientInfo))
while True:
recvDate=clientSocket.recv(1024)
if not recvDate:
break
print('接收到的数据为%s'%recvDate)
data=input('>>>>>')
clientSocket.send(data.encode())
clientSocket.close()
tcpSocketSer.close()
| true |
df5fac789d8944d5eb0ff923fa17a84df604162b | Python | josephduarte104/data-science-exercises | /py4e/assignment7_1.py | UTF-8 | 166 | 3.53125 | 4 | [] | no_license | # Use words.txt as the file name
fname = input("Enter file name: ")
fh = open(fname)
for line in fh:
fz = line.strip()
fy = fz.upper()
print(fy)
| true |
9c9d94b247364b83aed15e2bc863a5a1d64cdcef | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/kindergarten-garden/0b1a939ff6974472b62b59d972032092.py | UTF-8 | 1,039 | 3.296875 | 3 | [] | no_license | import re
class Garden:
Students = []
Line1 = ""
Line2 = ""
def __init__(self,GardenRaw, students = ''):
Garden = re.sub(r'\W+', '',GardenRaw)
self.Line1, self.Line2 = Garden[:len(Garden)/2], Garden[len(Garden)/2:]
if students != '':
for student in students:
self.Students.append(student.lower()[0])
self.Students.sort()
else:
self.Students = list(map(chr, range(ord('a'), ord('l')+1)))
def plants(self,name):
num = self.getstudentnum(name)
return self.getplantsonline(self.Line1, num) + self.getplantsonline(self.Line2, num)
def getstudentnum(self, name):
count = 1
for student in self.Students:
if student == name.lower()[0]:
return count
count += 1
def getplantsonline(self, line, num):
output = []
count = 1
for letter in line:
if count == (num*2) or count == (num*2-1):
output.append(self.plantname(letter))
count += 1
return output
def plantname(self,letter):
return {
'G' : 'Grass',
'C' : 'Clover',
'R' : 'Radishes',
'V' : 'Violets',
}[letter]
| true |
f34a9ca7eeed630ecfadbc575573153d0dc29e9a | Python | johnstsai/Codecademy_practice | /Python/count.py | UTF-8 | 807 | 4.96875 | 5 | [] | no_license | #count
#Great work so far. Let's finish up by practicing with a few functions that take lists as arguments.
#1.Define a function called count that has two arguments called sequence and item.
#Return the number of times the item occurs in the list.
#For example: count([1, 2, 1, 1], 1) should return 3 (because 1 appears 3 times in the list).
#There is a list method in Python that you can use for this, but you should do it the long way for practice.
#Your function should return an integer.
#The item you input may be an integer, string, float, or even another list!
#Be careful not to use list as a variable name in your code—it's a reserved word in Python!
def count(sequence, item):
count = 0
for i in sequence:
if i == item:
count += 1
return count
print count([1,3,2,4,5,1,1], 1) | true |
98611d62f43329744f097852a60e7e55347b1231 | Python | puneethprog9/Python | /multithreading.py | UTF-8 | 454 | 3.609375 | 4 | [] | no_license | #!/usr/bin/python
import time
import threading
def fn_sqrt(numbers):
for n in numbers:
time.sleep(0.2)
print("square",n*n)
def fn_cube(numbers):
for n in numbers:
time.sleep(0.2)
print("cube",n*n*n)
arr=[2,3,4,5]
t=time.time()
t1=threading.Thread(target=fn_sqrt,args=(arr,))
t2=threading.Thread(target=fn_cube,args=(arr,))
t1.start()
t2.start()
t1.join()
t2.join()
print("done in :",time.time()-t)
| true |
da879370f1cd0a1e8eb26b3500a6f1785947f78a | Python | jzhuo/blackjack-gambling-model | /blackjack.py | UTF-8 | 15,628 | 4.0625 | 4 | [] | no_license | from deck import Deck
class Blackjack():
"""
Represents the game and state of Blackjack for two players.
"""
def __init__(self):
self.deck = Deck(5)
self.playerHand = list() # player
self.dealerHand = list() # dealer
self.confidence = .5 # player hits if probability of bust is below confidence
self.playerWins = 0 # number of times the player wins
self.dealerWins = 0 # number of times the dealer wins
self._reset_game()
# resets the game for another run of simulation
def _reset_game(self):
self.deck.reset_deck()
self._reset_players()
# resets the hand of the players without
def _reset_players(self):
self.playerHand = list() # player
self.dealerHand = list() # dealer
self._initialize_player_1()
self._initialize_player_2()
# sets the confidence for the player in the model
def set_confidence(self, newConfidence):
self.confidence = newConfidence
# deals the first two cards to the player at the start of the game
def _initialize_player_1(self):
for card in range(2):
self._deal_player_1()
# deals the first two cards to the dealer at the start of the game
def _initialize_player_2(self):
for card in range(2):
self._deal_player_2()
# deals a card to the first player and adds the card to their hand
def _deal_player_1(self):
# retrieve the card to deal from deck
card = self.deck.deal_card()
# deals the card to the player hand
self.playerHand.append(card)
# deals a card to the second player and adds the card to their hand
def _deal_player_2(self):
# retrieve the card to deal from deck
card = self.deck.deal_card()
# deals the card to the player hand
self.dealerHand.append(card)
# declares the winner of the game at the end of simulation with pretty prints
def _declare_winner(self, winner):
if winner == "None":
announcementString = "The game ended in a tie. NO WINNER!"
else:
announcementString = "The winner of the game is: " + winner
# print for game visuals
print("\n******************************")
print(announcementString)
print("******************************\n")
# compares the hand of the two players and returns the winner
def _compare_hand_values(self, playerHand, dealerHand):
playerSum = self._calculate_hand_sum(playerHand)
dealerSum = self._calculate_hand_sum(dealerHand)
if playerSum > dealerSum:
return "Player"
if playerSum < dealerSum:
return "Dealer"
if playerSum == dealerSum:
return "None"
# checks to see if the player's hand is a Blackjack
def _check_blackjack(self, playerHand):
handValue = self._calculate_hand_sum(playerHand)
if handValue == 21:
return True
return False
# checks to see if the player's hand is a BUST
def _check_bust(self, playerHand):
handValue = self._calculate_hand_sum(playerHand)
if handValue > 21:
return True
return False
# sums the value of a player's hand
def _calculate_hand_sum(self, hand):
handValue = 0
aceCount = 0
# values of J, Q, and K is 10
for card in hand:
if card == 'A':
aceCount += 1
elif card == 'J' or card == 'Q' or card == 'K':
handValue += 10
else:
handValue += int(card)
# calculating the values for the aces
handValue += self._determine_ace_value(handValue, aceCount)
return handValue
# determines the value of the aces if there are more than 1 in the hand
def _determine_ace_value(self, handValue, aceCount):
noAceValue = 0
lowAceValue = 1
highAceValue = 11
if aceCount == 0:
return noAceValue
elif aceCount == 1:
if handValue + highAceValue > 21:
return lowAceValue
else:
return highAceValue
elif aceCount > 1:
lowValueCount = aceCount - 1
highValueCount = 1
if handValue + highAceValue + lowAceValue*lowValueCount > 21:
return highAceValue + lowAceValue*lowValueCount
else:
return lowAceValue * aceCount
return aceCount
# computes the probability that the next hit is a BUST
def _compute_bust_probability(self, playerHand):
handValue = self._calculate_hand_sum(playerHand)
# number of cards that would make the next hit a BUST
bustCount = 0
for card in self.deck.cards:
if card == "A":
cardValue = 1
elif card == "J" or card == "Q" or card == "K":
cardValue = 10
else:
cardValue = int(card)
if handValue + cardValue > 21:
bustCount += 1
# probability of bust is number of bust cards over total number of cards left
bustProbability = bustCount / len(self.deck.cards)
return bustProbability
# plays out the game by simulating both players with game visuals
def simulate_game_with_visuals(self):
dealerWin = "Dealer"
playerWin = "Player"
# print for game visuals
print("\n********** Begin simulation for one game of Blackjack! **********\n")
# first simulate player's decisions with the probabilistic model
self._simulate_player_1_with_visuals()
# print for game visuals
print("~~~~~")
# if player busts, then dealer wins automatically
if self._check_bust(self.playerHand):
# print for game visuals
print("\nPlayer is BUSTED!")
self._declare_winner(dealerWin)
return dealerWin
# if player gets a Blackjack, then player wins right away
if self._check_blackjack(self.playerHand):
# print for game visuals
print("\nPlayer has Blackjack!")
self._declare_winner(playerWin)
return playerWin
# second simulate the dealer's decisions if player isn't a bust
self._simulate_player_2_with_visuals()
# if dealer busts, then player wins automatically
if self._check_bust(self.dealerHand):
# print for game visuals
print("\nDealer is BUSTED!")
self._declare_winner(playerWin)
return playerWin
# if dealer gets a Blackjack, then dealer wins right away
if self._check_blackjack(self.playerHand):
# print for game visuals
print("\nDealer has Blackjack!")
self._declare_winner(dealerWin)
return dealerWin
# compare hands to declare winner
winningPlayer = self._compare_hand_values(self.playerHand, self.dealerHand)
self._declare_winner(winningPlayer)
return winningPlayer
# simulates the player and their actions with game visuals
def _simulate_player_1_with_visuals(self):
# the total value of player's cards
playerSum = self._calculate_hand_sum(self.playerHand)
# print for game visuals
print("Player Sum:", playerSum, "Player Hand:", self.playerHand)
# current bust probability
bustProb = self._compute_bust_probability(self.playerHand)
# continue to hit if bust proability is below 50%
while bustProb < self.confidence:
self._deal_player_1()
# update the new sum
playerSum = self._calculate_hand_sum(self.playerHand)
# print for game visuals
print("Player Sum:", playerSum, "Player Hand:", self.playerHand)
# update bust probability
bustProb = self._compute_bust_probability(self.playerHand)
# simulates the dealer and their actions with game visuals
def _simulate_player_2_with_visuals(self):
# the model assumes this player is the dealer, with actions to Hit until card value is >= 17
dealerSum = self._calculate_hand_sum(self.dealerHand)
# print for game visuals
print("Dealer Sum:", dealerSum, "Dealer Hand:", self.dealerHand)
while dealerSum < 17:
self._deal_player_2()
# update the new sum
dealerSum = self._calculate_hand_sum(self.dealerHand)
# print visuals to keep track of dealer hand
print("Dealer Sum:", dealerSum, "Dealer Hand:", self.dealerHand)
# plays out the game by simulating both players without game visuals
def simulate_game_no_visuals(self):
dealerWin = "Dealer"
playerWin = "Player"
# first simulate player's decisions with the Markov Chain model
self._simulate_player_1_no_visuals()
# if player busts, then dealer wins automatically
if self._check_bust(self.playerHand):
return dealerWin
# if player gets a Blackjack, then player wins right away
if self._check_blackjack(self.playerHand):
return playerWin
# second simulate the dealer's decisions if player isn't a bust
self._simulate_player_2_no_visuals()
# if dealer busts, then player wins automatically
if self._check_bust(self.dealerHand):
return playerWin
# if dealer gets a Blackjack, then dealer wins right away
if self._check_blackjack(self.playerHand):
return dealerWin
# compare hands to declare winner
winningPlayer = self._compare_hand_values(self.playerHand, self.dealerHand)
return winningPlayer
# simulates the player and their actions without game visuals
def _simulate_player_1_no_visuals(self):
# current bust probability
bustProb = self._compute_bust_probability(self.playerHand)
# continue to hit if bust proability is below 50%
while bustProb < self.confidence:
self._deal_player_1()
# update bust probability
bustProb = self._compute_bust_probability(self.playerHand)
# simulates the dealer and their actions without game visuals
def _simulate_player_2_no_visuals(self):
# the model assumes this player is the dealer, with actions to Hit until card value is >= 17
dealerSum = self._calculate_hand_sum(self.dealerHand)
while dealerSum < 17:
self._deal_player_2()
# update the new sum
dealerSum = self._calculate_hand_sum(self.dealerHand)
    # simulates a given number of games and records statistics of the player's wins
def simulate_multiple_games(self, numGames):
print("\n********** Begin simulation of", numGames, "games! **********")
playerWins = 0
dealerWins = 0
tieCount = 0
# simulate the game for the specified number of times
for episode in range(numGames):
# every episode is a simulation of a single game
winningPlayer = self.simulate_game_no_visuals()
# reset reset the game for simulation of game with replacement cards
self._reset_game()
if winningPlayer == "Player":
playerWins += 1
elif winningPlayer == "Dealer":
dealerWins += 1
else:
tieCount += 1
playerWinrate = playerWins / numGames
dealerWinrate = dealerWins / numGames
tieRate = tieCount / numGames
print("\nPlayer winrate:", str(round(playerWinrate * 100, 1)) + "%")
print("Dealer winrate:", str(round(dealerWinrate * 100, 1)) + "%")
print("Tie rate:", str(round(tieRate * 100, 1)) + "%\n")
def calculate_valid_player_hands(self, combinationNum):
""" Computes all possible hands for the player. """
# ranks of cards in a deck of cards
cardRanks = ["A"] + [str(cardRank) for cardRank in range(2, 11)] + ["J", "Q", "K"]
# valid list of hands
validHands = []
# list of hands
currList = [[cardRank] for cardRank in cardRanks]
iterList = copy.deepcopy(currList)
'''
The largest hand is len([A, A, A, A, 2, 2, 2, 2, 3, 3, 3]) = 11.
However, that is not computationally friendly, so I am computing
the valid starting hands and not all valid hands.
'''
for iteration in range(combinationNum-1):
tempList = []
# for every hand in the growing iterList
for iterHandIndex in range(len(iterList)):
# for every rank of the cards
for rankIndex in range(len(cardRanks)):
currentIterHand = iterList[iterHandIndex]
currentCardRank = cardRanks[rankIndex]
tempList.append( currentIterHand + [currentCardRank] )
iterList = tempList
currList += iterList
# pruning hands that aren't valid
for hand in currList:
# largest hand is len([A, A, A, A, 2, 2, 2, 2, 3, 3, 3]) = 11
if len(hand) >= 2 and len(hand) <= 11:
# calculate total value of hand
handValue = self._calculate_hand_sum(hand)
if handValue <= 21:
validHands.append(hand)
# return sorted list of valid hands
return validHands
def calculate_hand_statistics(self, handCombinations):
""" calculates all valid player hands for statistical analysis in Math456 - Mathematical Modeling. """
# create 5 decks
fiveDeck = Deck(5)
# list of all valid starting hands
playerHandsList = self.calculate_valid_player_hands(handCombinations)
for playerHand in playerHandsList:
fiveDeckCards = fiveDeck.cards
# remove the cards in the hand
for card in playerHand:
fiveDeckCards.remove(card)
# calculate bust probability for the hand
bustProb = self._compute_bust_probability(playerHand)
# print the hand and bust probability
print("Hand:", playerHand, " - ", "Bust Probability:", bustProb)
# reset the cards in the deck
fiveDeck.reset_deck()
return
def simulate_varying_confidence(self, numGames):
""" Simulates multiple games of Blackjack with varying confidence levels. """
for confidence in range(1,10):
self.set_confidence(confidence/10)
print("\n********** ********** ********** ********** **********")
print("********** Simulating player with confidence:", confidence/10, "**********")
print("Player hits when probability of busting is below:", confidence/10)
self.simulate_multiple_games(numGames)
print("\n\n")
if __name__ == '__main__':
    # build one game object and run the batch experiments
    game = Blackjack()
    # single visual game / fixed-confidence batch runs, kept for reference:
    # game.simulate_game_with_visuals()
    # game.simulate_multiple_games(100000)
    # sweep the hit-confidence threshold across many simulated games
    gamesToPlay = 1000000
    game.simulate_varying_confidence(gamesToPlay)
    # print bust probability for all starting hands
    handCombinations = 2
    game.calculate_hand_statistics(handCombinations)
| true |
694e6c76d2dfec8d92fa4155efd6097ff3865096 | Python | peter-dinh/cryptography | /public key/mod.py | UTF-8 | 335 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | def power(x, b, n):
a = x
y = 1
while b > 0:
if b % 2 != 0:
y = (y * a) %n
b = b >> 1
a = (a * a) % n
return y
if __name__ == '__main__':
    # compute 7^21 mod 100
    x = power(7, 21, 100)
    # search for the smallest modulus m in [30, 2000) with 100^293 mod m == 21
    for m in range(30, 2000):
        if power(100, 293, m) == 21:
            print(m)
            break
    print (x) | true |
09961fd82fe02e0cc9960972ae821e3c53b4b20c | Python | drmrgd/matchbox_api_utils | /bin/map_msn_psn.py | UTF-8 | 6,105 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# TODO: We'll need to configure this for other MATCHBox systems once we get
# the all worked out. For now, just take the MATCHBox arg as a
# "placeholder", and use it for live connections. But later, need it to
# figure out which JSON file to load.
"""
Input a MSN, BSN, or PSN, and return the other identifiers. Useful when trying
to retrieve the correct dataset and you only know one piece of information.
Note: We are only working with internal BSN, MSN, and PSN numbers for now and
can not return Outside Assay identifiers at this time.
"""
import sys
import os
import json
import csv
import argparse
import re
from pprint import pprint as pp
from matchbox_api_utils import MatchData
version = '4.1.101218'
def get_args():
    """Build the command-line parser and return the parsed arguments.

    Bug fix: the short option for --json was registered as '-j,' (a stray
    comma inside the option string), so the flag could only be invoked as
    literally '-j,'; it is now '-j'.
    """
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument(
        'matchbox',
        metavar='<matchbox>',
        help='Name of MATCHBox to which we make the connection. Valid systems '
            'are: "adult", "adult-uat", "ped".'
    )
    parser.add_argument(
        'ids',
        metavar='<IDs>',
        nargs='?',
        help='MATCH IDs to query. Can be single or comma separated list. '
            'Must be used with PSN or MSN option.'
    )
    parser.add_argument(
        '-t', '--type',
        choices=['psn','msn','bsn'],
        required=True,
        type=str.lower,
        help='Type of query string input. Can be MSN, PSN, or BSN'
    )
    parser.add_argument(
        '-j', '--json',
        metavar='<mb_obj.json>',
        default='sys_default',
        help='MATCHBox JSON database to use for the lookup if one does not '
            'want to use the system default. If one would like to make a '
            'live call instead, input `None`.'
    )
    parser.add_argument(
        '-f', '--file',
        metavar="<input_file>",
        help='Load a batch file of all MSNs or PSNs to proc.'
    )
    parser.add_argument(
        '-o', '--outfile',
        metavar='<outfile>',
        help='File to which output should be written. Default: STDOUT.'
    )
    parser.add_argument(
        '-v','--version',
        action='version',
        version = '%(prog)s - ' + version
    )
    args = parser.parse_args()
    return args
def read_batchfile(input_file):
    """Return the lines of `input_file` with trailing newlines stripped."""
    entries = []
    with open(input_file) as fh:
        for line in fh:
            entries.append(line.rstrip('\n'))
    return entries
def map_id(mb_data, id_list, qtype):
    """
    Look up the matching PSN/BSN/MSN identifiers for each ID in `id_list`.

    `qtype` names the type of the input IDs ('psn', 'msn', or 'bsn'); each
    result is a (psn, bsn, msn) tuple. Multi-valued BSN/MSN lookups return
    lists and are collapsed to ';'-joined strings for display; a missing
    MSN is shown as '---'.
    """
    results = []
    for pt in id_list:
        if qtype == 'psn':
            # unknown patients are reported to stderr and skipped
            if pt not in mb_data.data.keys():
                sys.stderr.write('WARN: No such patient with ID: %s.\n' % pt)
                continue
            bsn = mb_data.get_bsn(psn=pt)
            if bsn:
                bsn = cat_list(bsn)
            msn = mb_data.get_msn(psn=pt)
            msn = cat_list(msn) if msn else '---'
            results.append(('PSN' + pt.lstrip('PSN'), bsn, msn))
        elif qtype == 'msn':
            psn = mb_data.get_psn(msn=pt)
            if psn:
                bsn = cat_list(mb_data.get_bsn(msn=pt))
                results.append((psn, bsn, 'MSN' + pt.lstrip('MSN')))
        elif qtype == 'bsn':
            psn = mb_data.get_psn(bsn=pt)
            if psn:
                msn = mb_data.get_msn(bsn=pt)
                msn = cat_list(msn) if msn else '---'
                results.append((psn, pt, msn))
    return results
def cat_list(l):
    """Join the given values with semicolons for compact display."""
    separator = ';'
    return separator.join(l)
def print_results(data, outfh):
    """Write (psn, bsn, msn) rows as CSV to `outfh`, sorted by BSN.

    A header row is emitted only when there is data; a leading newline is
    printed to stdout to pad any earlier stderr output.
    """
    if data:
        sys.stdout.write('\n')  # pad output if results print to stdout
        outfh.write('PSN,BSN,MSN\n')
    for row in sorted(data, key=lambda entry: entry[1]):
        outfh.write(','.join(row) + '\n')
def validate_list(id_list, qtype):
    """
    Return the IDs from `id_list` that match the pattern for `qtype`, with
    any 'PSN'/'MSN' prefix stripped. Invalid entries are reported and
    skipped. Only internal MATCH identifiers are supported for now — not
    Outside Assay IDs.

    Bug fix: the skip warning is now written to stderr (matching every
    other warning in this tool) instead of stdout, so it cannot pollute
    piped CSV output.
    """
    valid_list = []
    type_regex = {
        'obsn' : re.compile(r'^(FMI|MDA|CARIS|MSKCC)-(.*?)$'),
        'bsn' : re.compile(r'^(T-[0-9]{2}-[0-9]{6})$'),
        'msn' : re.compile(r'^(?:MSN)?([0-9]+)$'),
        'psn' : re.compile(r'^(?:PSN)?([0-9]+)$'),
    }
    for elem in id_list:
        try:
            trimmed = re.search(type_regex[qtype], elem).group(1)
            valid_list.append(trimmed)
        except AttributeError:
            # re.search returned None: the ID doesn't match the expected form
            sys.stderr.write("WARN: id '{}' is not valid. Skipping "
                "entry!\n".format(elem))
    return valid_list
if __name__=='__main__':
    args = vars(get_args())
    # collect the query IDs either from a batch file or the CLI argument
    query_list = []
    if args['file']:
        query_list = read_batchfile(args['file'])
    else:
        query_list = args['ids'].split(',')
    valid_ids = validate_list(query_list, args['type'])
    if not valid_ids:
        sys.stderr.write("ERROR: No valid IDs input!\n")
        sys.exit(1)
    json_db = args['json']
    # Make a call to MATCHbox to get a JSON obj of data.
    # Bug fix: this comparison used `is "None"` (identity against a string
    # literal), which only worked by accident of interning; use equality.
    if json_db == "None":
        sys.stdout.write('Retrieving a live MATCHBox data object. This may '
            'take a few minutes...\n')
        sys.stdout.flush()
        json_db=None
    data = MatchData(matchbox=args['matchbox'], method='mongo', json_db=json_db,
        quiet=True)
    sys.stdout.write('\n')
    print('Getting MSN / PSN mapping data (Database date: %s)...'
        % data.db_date)
    results = map_id(data, valid_ids, args['type'])
    # write to the requested file, or fall back to stdout
    if args['outfile']:
        sys.stderr.write("Writing output to %s.\n" % args['outfile'])
        outfh = open(args['outfile'], 'w')
    else:
        outfh = sys.stdout
    print_results(results, outfh)
| true |
c142619f0a40fcac6db1f557cfcd215bae16e2cf | Python | Eustaceyi/Leetcode | /120. Triangle.py | UTF-8 | 826 | 3.125 | 3 | [] | no_license | class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
'''
Modify original
'''
if not triangle:
return 0
for i in range(1, len(triangle)):
for j in range(1,i):
triangle[i][j] += min(triangle[i-1][j], triangle[i-1][j-1])
triangle[i][0] += triangle[i-1][0]
triangle[i][-1] += triangle[i-1][-1]
return min(triangle[-1])
class Solution:
    # Bottom-up DP over the triangle using one rolling row (O(n) extra space).
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        '''
        O(n) extra space
        '''
        # empty triangle has no path cost
        if not triangle:
            return 0
        # start from a copy of the bottom row and fold rows upward
        res = list(triangle[-1])
        for i in range(len(triangle)-2, -1, -1):
            for j in range(i+1):
                # cheaper of the two continuations below, plus this cell
                res[j] = min(res[j], res[j+1]) + triangle[i][j]
        # the apex now holds the minimum total path sum
        return res[0] | true |
9852fa95bd7468b8aef5662c33cee827829d03ed | Python | IntroTextMining-GU/Reuters | /COSC586.py | UTF-8 | 6,887 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""
Imports
"""
import numpy
import pandas
import sklearn
import nltk
import sklearn
import matplotlib.pyplot as pyplot
# nltk.download()
from nltk.corpus import reuters
from nltk.corpus import stopwords
#from nltk import word_tokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Function Definitions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Ugly, but gets the job done
# Takes the NLTK data and creates a binary indicator array for the categories
def Y_Trainer():
    """
    Build the binary category-indicator matrix for the *training* documents.

    Returns a (num_training_docs, num_categories) numpy array where entry
    [d, c] is 1 when training document d is tagged with categories[c].

    Improvements: rows are appended dynamically instead of preallocating a
    hard-coded 7769x90 table (brittle if the corpus split changes), and each
    document's category list is fetched once instead of once per category.
    Reads the module globals `doc_list`, `categories`, and `reuters`.
    """
    y_train = []
    for doc in doc_list:
        if doc.startswith('training'):
            doc_categories = reuters.categories(doc)
            y_train.append([1 if cat in doc_categories else 0 for cat in categories])
    return numpy.asarray(y_train)
def Y_Tester():
    """
    Build the binary category-indicator matrix for the *test* documents.

    Returns a (num_test_docs, num_categories) numpy array where entry
    [d, c] is 1 when test document d is tagged with categories[c].

    Improvements: rows are appended dynamically instead of preallocating a
    hard-coded 3019x90 table (brittle if the corpus split changes), and each
    document's category list is fetched once instead of once per category.
    Reads the module globals `doc_list`, `categories`, and `reuters`.
    """
    y_test = []
    for doc in doc_list:
        if doc.startswith('test'):
            doc_categories = reuters.categories(doc)
            y_test.append([1 if cat in doc_categories else 0 for cat in categories])
    return numpy.asarray(y_test)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Show Basic Stats for Reuters (Mod Apte Split)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Corpus overview: tag and document counts for the Mod-Apte split.
print("The reuters corpus has {} tags".format(len(reuters.categories())))
print("The reuters corpus has {} documents".format(len(reuters.fileids())))
# create counter to summarize
# NOTE(review): this `categories` list is also read later by Y_Trainer/Y_Tester.
categories = []
file_count = []
documents = reuters.fileids()
# count each tag's number of documents
for i in reuters.categories():
    file_count.append(len(reuters.fileids(i)))
    categories.append(i)
# create a dataframe out of the counts, sorted by document count descending
df = pandas.DataFrame( {'categories': categories, "file_count": file_count}).sort_values('file_count', ascending = False)
print(df.head())
# For later if we decide to do some filtering
# category_filter = df.iloc[1:4, 0].values.tolist()
# Examining the distribution of categories
# This plot is realllllllly busy
CategoryPlot = pyplot.barh(df.loc[:,"categories"], df.loc[:,"file_count"])
# Reduce the number temporarily: keep only categories with >= 50 documents
category_filter2 = []
for index, row in df.iterrows():
    if row['file_count'] >= 50:
        category_filter2.append(row['categories'])
df2 = df[df.categories.isin(category_filter2)].sort_values('file_count', ascending = False)
CategoryPlot2 = df2.plot(x = df2.loc[:,"categories"], kind = 'barh', title = 'Reduced Reuters (Only Categories >= 50 instances)')
CategoryPlot2.invert_yaxis()
# Create lists of test and training docs
# Document ids look like 'test/14826' / 'training/9865'; a substring test
# is enough since neither prefix contains the other.
doc_list = numpy.array(reuters.fileids())
test_doc = doc_list[['test' in x for x in doc_list]]
train_doc = doc_list[['training' in x for x in doc_list]]
print("test_doc is created with following document names: {} ...".format(test_doc[0:5]))
print("train_doc is created with following document names: {} ...".format(train_doc[0:5]))
# Create the corpus for later use: each document becomes one space-joined string
test_corpus = [" ".join([t for t in reuters.words(test_doc[t])]) for t in range(len(test_doc))]
train_corpus = [" ".join([t for t in reuters.words(train_doc[t])]) for t in range(len(train_doc))]
print("test_corpus is created, the first line is: {} ...".format(test_corpus[0][:100]))
print("train_corpus is created, the first line is: {} ...".format(train_corpus[0][:100]))
# Create a vectorizer (NOT CURRENTLY USING. PLAY WITH IN A BIT)
count_vect = CountVectorizer()
# Fit the vocabulary on the training corpus only, then reuse it for the test
# corpus. Bug fix: the test corpus was previously *refit* with
# fit_transform, which produced a different vocabulary/feature space than
# the training matrix.
X_train_counts = count_vect.fit_transform(train_corpus)
X_test_counts = count_vect.transform(test_corpus)
print("Reuters Training BOW Matrix shape:", X_train_counts.shape)
# Bug fix: this line previously said "Training" for the test matrix.
print("Reuters Test BOW Matrix shape:", X_test_counts.shape)
# Following in the footsteps of Sean
# Warning: Very long
# Dump the fitted vocabulary (token -> column index mapping).
print(count_vect.vocabulary_)
# Creating the output shapes for use: binary category-indicator matrices
Y_Train = Y_Trainer()
Y_Test = Y_Tester()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Creating some Baseline Accuracies
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Let's vectorize this stuff. WOOHOO!!!
# HashingVectorizer is stateless, so transform() needs no prior fit;
# alternate_sign=False keeps all feature values non-negative.
stop_words = stopwords.words("english")
vectorizer = HashingVectorizer(stop_words = stop_words, alternate_sign = False)
X_Train = vectorizer.transform(train_corpus)
X_Test = vectorizer.transform(test_corpus)
# Now, we're going throw in a little training on KNN
MrRogers = KNeighborsClassifier(n_neighbors = 5)
MrRogers.fit(X_Train, Y_Train)
pred = MrRogers.predict(X_Test)
# Exact-match accuracy over the multilabel indicator matrices.
myScore = accuracy_score(Y_Test, pred)
print("KNN accuracy score was: " + str(myScore))
# One vs Rest Classifier
OVR = OneVsRestClassifier(LinearSVC(random_state=0))
OVR.fit(X_Train, Y_Train)
pred = OVR.predict(X_Test)
myScore = accuracy_score(Y_Test, pred)
print("OVR accuracy score was: " + str(myScore))
# Try out a Neural Network. This one takes a while.
NN = MLPClassifier()
NN.fit(X_Train, Y_Train)
# Bug fix: predictions were previously taken from the OVR model, so the MLP
# was trained but never actually evaluated.
pred = NN.predict(X_Test)
myScore = accuracy_score(Y_Test, pred)
# Bug fix: the label said "NB" (copy/paste) for the neural-network score.
print("MLP accuracy score was: " + str(myScore))
# Try out a decision tree.
Tree = DecisionTreeClassifier()
Tree.fit(X_Train, Y_Train)
pred = Tree.predict(X_Test)
myScore = accuracy_score(Y_Test, pred)
# Bug fix: the label said "NB" (copy/paste) for the decision-tree score.
print("Decision tree accuracy score was: " + str(myScore))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Random Working Area
""""""""""""""""""""""""""""""""""""""""""""""""""
# Sample words from a few categories (result is discarded).
reuters.words(categories = ['acq', 'money-fx', 'grain'])
# Based on NLTK tutorials
all_words = nltk.FreqDist(w.lower() for w in reuters.words())
# keep the 2000 most frequent words as the feature vocabulary
word_features = list(all_words)[:2000]
print(word_features)
def document_features(document):
    """Return a bag-of-words feature dict for `document`: one boolean
    'contains(word)' entry per word in the global `word_features` list."""
    present = set(document)
    return {'contains({})'.format(word): (word in present) for word in word_features}
def target_vars(document):
    """Print the Reuters category tags of every test document.

    Bug fixes: the original iterated `len(test_doc)` directly (a TypeError,
    since an int is not iterable) and indexed the bound method
    `reuters.categories[...]` instead of calling it with one document id.
    The unused `document` parameter and local dict are kept so the
    signature stays compatible.
    """
    categories = {}
    for i in range(len(test_doc)):
        print(reuters.categories(test_doc[i]))
# Spot-check a single training document: tokens, raw text, and tags.
print(reuters.words('training/3482'))
print(reuters.raw('training/3482'))
print(reuters.categories('training/3482'))
"""
| true |
4d1bdaf8f451837725a78f9a34f543c96a0ae3f4 | Python | ariadn3/Athena | /Medium puzzles/marsLanderEp2.py | UTF-8 | 2,356 | 3.609375 | 4 | [] | no_license | import sys
surface_n = int(input()) # the number of points used to draw the surface of Mars.
surfaceNodeList = []
# (start_x, end_x) of the flat landing segment; (-1, -1) until one is found.
# NOTE(review): if the surface has several flat segments, only the LAST one
# is kept — confirm that matches the puzzle's single-landing-zone guarantee.
flatSurface = (-1, -1)
for i in range(surface_n):
    # land_x: X coordinate of a surface point. (0 to 6999)
    # land_y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
    land_x, land_y = [int(j) for j in input().split()]
    # first point has no predecessor to compare against
    if len(surfaceNodeList) == 0:
        surfaceNodeList.append((land_x, land_y))
        continue
    # a segment is flat when consecutive points share the same Y
    if land_y == surfaceNodeList[len(surfaceNodeList)-1][1]:
        flatSurface = (surfaceNodeList[len(surfaceNodeList)-1][0], land_x)
    surfaceNodeList.append((land_x, land_y))
print(flatSurface, file=sys.stderr)
# game loop
while True:
    # h_speed: the horizontal speed (in m/s), can be negative.
    # v_speed: the vertical speed (in m/s), can be negative.
    # fuel: the quantity of remaining fuel in liters.
    # rotate: the rotation angle in degrees (-90 to 90).
    # power: the thrust power (0 to 4).
    x, y, h_speed, v_speed, fuel, rotate, power = [int(i) for i in input().split()]
    # left of the landing zone: push right (negative rotate tilts thrust right)
    if x < flatSurface[0]:
        if h_speed < 50:
            rotate = -45
            power = 4
        elif h_speed > 50:
            # already fast enough rightward: lean back to brake a little
            rotate = 25
            power = 4
        else:
            rotate = 0
            power = 4
    # right of the landing zone: mirror image, push left
    elif x > flatSurface[1]:
        if h_speed > -50:
            rotate = 45
            power = 4
        elif h_speed < -50:
            rotate = -25
            power = 4
        else:
            rotate = 0
            power = 4
        # cut thrust while climbing to save fuel
        # NOTE(review): this v_speed check only exists on the right-hand
        # branch, not the left — confirm that asymmetry is intentional.
        if v_speed > 4:
            power = 0
    # over the landing zone: bleed off horizontal speed, then descend
    else:
        if h_speed > 50:
            rotate = 45
            power = 4
        elif 50 >= h_speed > 10:
            rotate = 30
            power = 4
        elif 10 >= h_speed > 0:
            rotate = 22
            power = 4
        elif 0 > h_speed >= -10:
            rotate = -22
            power = 4
        elif -10 > h_speed >= -50:
            rotate = -30
            power = 4
        elif -50 > h_speed:
            rotate = -45
            power = 4
        elif h_speed < 0:
            rotate = -30
            power = 4
        else:
            # horizontally stable: thrust only when falling too fast
            # (landing is safe up to 40 m/s vertical)
            rotate = 0
            if v_speed < -35:
                power = 4
            else:
                power = 0
    print('{} {}'.format(rotate, power))
| true |
970c91e5772ac2cdf1fff4ba19f0ee7c8af6caa3 | Python | Codestined/sledge | /scripts/main/escentity/_entity.py | UTF-8 | 1,707 | 3.15625 | 3 | [
"MIT"
] | permissive | # Copyright 2019 Frame Studios. All rights reserved.
# Frame v1.0 python implementation by some Pane-in-the-Frame developers.
# pyFrame v1.0
# Project Manager: Caleb Adepitan
# The Frame specifications that govern this implementation can be found at:
# https://frame.github.io/spec/v1/
# Developers Indulgent Program (DIP)
# Use of this source code is licensed under the GPL 2.0 LICENSE
# which can be found in the LICENSE file.
# In attribution to Realongman, Inc.
import re
# escape entities
# # = #
# . = .
# $ = $
# : = :
# :: = &dblcln; (non-standard entity)
# ! = !
# % = %
# @ = @
# & = &
"""These are the html entities for characters that may
have special meanings in the frame markup.
These characters cannot be written as they are in a frame markup;
as they may be parsed with respect to their meanings in a frame.
How to escape:
(1) `#` will print `#`;
(2) `&num;` will print `#`
So if you want a real `#` to appear in your compiled markup
for the client or browser to parse, you'd write as (2).
The escape is only needed for those characters that have special
meanings in frame -- others don't need to be escaped
As in:
`&num;` will print `#`
`#` will print `#`
but
`"` remains as `"`
It is not frame's business."""
# Map of frame-special characters to their escaped entity form. Most
# entries map a literal character to its entity; '&dblcln;' is the one
# non-standard entity and is stored inverted (entity -> '::').
ENTITY_MAP = {
    "#": "#",
    ".": ".",
    "$": "$",
    ":": ":",
    "&dblcln;": "::",
    "!": "!",
    "%": "%",
    "@": "@",
    "&": "&"
}
def escape(text):
    """Replace frame-special characters in `text` with their HTML entities.

    Bug fixes relative to the original:
    - Characters were fed to re.sub as regex *patterns*, so '.' matched
      every character and '$' matched end-of-string; plain string
      replacement is used instead.
    - '&' is escaped first so the ampersands of freshly inserted entities
      are not themselves re-escaped.
    - '::' is escaped (to '&dblcln;') before single ':' so the double colon
      is not consumed by the single-colon rule.
    """
    text = text.replace("&", "&")
    text = text.replace("::", "&dblcln;")
    for char, entity in ENTITY_MAP.items():
        # '&' and '::' were handled above; skip their map entries
        if char in ("&", "&dblcln;"):
            continue
        text = text.replace(char, entity)
    return text
| true |
c027d37ecd58a21903428862199f02a1c014807f | Python | cdelahousse/Kindle-Display-And-Server | /server/server.py | UTF-8 | 897 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
from http.server import HTTPServer, BaseHTTPRequestHandler
from gen_image import gen_png_byte_stream
from config import PORT
import re
class KindleDisplayRequestHandler(BaseHTTPRequestHandler):
    """Serves generated PNG images for the Kindle display.

    GET paths ending in digits return a 200 with the PNG for that index;
    /favicon.ico and anything unmatched return 404.
    """
    def do_GET(client):
        path = client.path
        # trailing digits in the path select which image to render
        m = re.search(r'\d+$', path)
        if path == '/favicon.ico':
            client.send_response(404)
            client.end_headers()
        elif m is not None:
            client.send_response(200)
            client.send_header("Content-type", "image/png")
            client.end_headers()
            png_index = int(m.group(0))
            b = gen_png_byte_stream(png_index)
            client.wfile.write(b)
        else:
            # Bug fix: unmatched paths previously got no response at all,
            # leaving the client hanging until timeout.
            client.send_response(404)
            client.end_headers()
if __name__ == "__main__":
    # stand up the HTTP server on the configured port and serve forever
    print('Starting Kindle Display Image server on port %s' % PORT)
    server = HTTPServer(('', PORT), KindleDisplayRequestHandler)
    server.serve_forever()
| true |
d5a9590fd248e748071d25d5067e519b2813b794 | Python | stricoff92/freecodecamp-challenges | /python/Basic-Algorith-Scripting/sliceAndSplice.py | UTF-8 | 432 | 4 | 4 | [
"MIT"
] | permissive | '''
You are given two arrays and an index.
Use the array methods slice and splice to copy each element of the first array into the second array, in order.
Begin inserting elements at index n of the second array.
Return the resulting array. The input arrays should remain the same after the function runs.
'''
def frankenSplice(arr1, arr2, n):
    """Return a new sequence: arr2 with every element of arr1 inserted at
    index n. Neither input is modified."""
    head, tail = arr2[:n], arr2[n:]
    return head + arr1 + tail
frankenSplice([1, 2, 3], [4, 5, 6], 1) | true |
fbef012b038412f46dc5c69a97c1d2dddc6ad631 | Python | alexshank/capstone-dsp | /Initial_DSP_Analysis/Tuner.py | UTF-8 | 3,106 | 2.78125 | 3 | [] | no_license | # needed libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from math import log10, inf
import Helpers as h
# controllable sampling parameters
human_resolution = 3.6 # humans can notice 3.6 Hz differences
N = 512 # N = M (no zero padding for interpolation)
notes = [82, 110, 147, 196, 247, 330] # standard tuning frequencies
# calculated FFT characteristics
Fs = human_resolution / 1.2 * N
NyQuist = max(notes) * 2
binFreqs = np.fft.fftfreq(N, d=1 / Fs)
samplingPeriod = 1 / Fs
sampling_time = samplingPeriod * N
binSize = Fs / N
worstError = binSize / 2
# display FFT chracteristics
h.printVal('NyQuist (Hz)', NyQuist)
h.printVal('Fs (Hz)', Fs)
h.printVal('Sampling period (sec)', samplingPeriod)
h.printVal('Samples (N)', N)
h.printVal('Total sample time (sec)', sampling_time)
h.printVal('Bin size (Hz)', binSize)
h.printVal('Worst Error (+-Hz)', worstError)
h.printVal('Rect -3dB Res (Hz)', human_resolution)
print()
# print bins that note frequencies should fall in
criticalIndices = []
for note in notes:
closestIndex = h.closestBin(note, binFreqs)
criticalIndices.append(closestIndex)
print('Closest bins to {}: '.format(note), end='')
for i in range(-1, 2, 1):
index = closestIndex + i
print('[({}) {}]'.format(index, round(binFreqs[index], 1)), end='')
print()
print()
# design least-squared error LP FIR filter [0 - 500 Hz]
taps = 31
bands = [0, 500, 550, Fs / 2]
desired = [1, 1, 0, 0]
coeffs = signal.firls(taps, bands, desired, fs=Fs)
freq, response = signal.freqz(coeffs)
plt.figure(1)
plt.plot(freq*Fs/2/np.pi, 20*np.log10(abs(response) / max(abs(response))))
plt.title('FIR Filter Response (N={})'.format(taps))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude Response (dB)')
# print filter coefficients
print('FIR LP Filter Coefficients')
for coeff in coeffs:
print(coeff)
print()
# create test samples and filter them
t = np.arange(0, sampling_time, samplingPeriod)
samples = h.createTones(t, [20, 500, 700])
filteredSamples = signal.lfilter(coeffs, 1, samples)
# plot filtering effects
[x_freq, y_freq] = h.getOneSidedFFT(samples, N, Fs)
[x_freq_filtered, y_freq_filtered] = h.getOneSidedFFT(filteredSamples, N, Fs)
fig, ax = plt.subplots(2)
ax[0].plot(x_freq, y_freq)
ax[1].plot(x_freq_filtered, y_freq_filtered)
fig.suptitle('Test Signal Filtering Effect')
for ax in ax.flat:
ax.set(xlabel='Frequency (Hz)', ylabel='Magnitude Response (dB)')
ax.label_outer()
# create test samples that are close together
t = np.arange(0, sampling_time, samplingPeriod)
samples = h.createTones(t, [80.5, 82.5])
[x_freq, y_freq] = h.getOneSidedFFT(samples, N, Fs)
# plot frequency resolution
plt.figure(3)
y_freq = y_freq[25:round(N/2) - 225]
x_freq = x_freq[25:round(N/2) - 225]
plt.plot(x_freq, y_freq, 'b-', label='FFT Result')
plt.plot([79, 86], [-3, -3], label='-3 dB Cutoff')
plt.title('Frequency Resolution of FFT (81.5 & 82.5 Hz Tones)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude Response (dB)')
plt.legend(loc='upper left')
plt.show()
| true |
1994a8cddec109493e1038a035a74f40991dddb8 | Python | gistable/gistable | /all-gists/2244911/snippet.py | UTF-8 | 4,681 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
#
# Copyright (c) 2012 Dave Pifke.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# This is a simple performance test of different methods for counting the
# number of occurrences of a series of values.
def values():
"""
Returns a tuple containing four random values: an integer between 0 and
512, a boolean, an integer between 0 and 256, and a boolean, respectively.
"""
from random import randint
return (randint(0, 512),
bool(randint(0, 1)),
randint(0, 256),
bool(randint(0 , 1)))
def nested_defaultdict(n):
"""
Returns a series of nested defaultdict objects, four deep. The value of
the innermost dict is the number of occurrences of the keys that got us
there.
"""
from collections import defaultdict
from functools import partial
counts = defaultdict(
partial(defaultdict,
partial(defaultdict,
partial(defaultdict, int))))
for i in range(n):
a, b, c, d = values()
counts[a][b][c][d] += 1
return counts
def tuple_defaultdict(n):
"""
Returns a defaultdict where the key is a tuple of the input values and
the value is the number of occurrences.
"""
from collections import defaultdict
counts = defaultdict(int)
for i in range(n):
a, b, c, d = values()
counts[(a, b, c, d)] += 1
return counts
def namedtuple_defaultdict(n):
"""
Returns a defaultdict where the key is a namedtuple of the input values and
the value is the number of occurrences.
"""
from collections import namedtuple, defaultdict
counts = defaultdict(int)
Key = namedtuple('Key', 'a b c d')
for i in range(n):
a, b, c, d = values()
counts[Key(a, b, c, d)] += 1
return counts
def tuple_counter_update(n):
    """
    Returns a Counter, keyed using a tuple. Uses Counter.update().

    Note: Counter.update() iterates its argument, so the key tuple must
    be wrapped in a list.  The original passed the tuple directly, which
    counted a, b, c and d individually instead of the 4-tuple key the
    docstring (and the sibling tuple_counter_incr) describe.
    """
    from collections import Counter
    counts = Counter()
    for i in range(n):
        a, b, c, d = values()
        counts.update([(a, b, c, d)])
    return counts
def tuple_counter_incr(n):
    """
    Count occurrences in a Counter keyed by the (a, b, c, d) tuple,
    incremented one key at a time with counts[key] += 1.
    """
    from collections import Counter
    counts = Counter()
    for _ in range(n):
        counts[values()] += 1
    return counts
def namedtuple_counter_update(n):
    """
    Returns a Counter, keyed using a namedtuple. Uses Counter.update()

    Note: Counter.update() iterates its argument, so the Key must be
    wrapped in a list.  The original passed the namedtuple directly,
    which counted its four fields individually instead of the key.
    """
    from collections import namedtuple, Counter
    counts = Counter()
    Key = namedtuple('Key', 'a b c d')
    for i in range(n):
        a, b, c, d = values()
        counts.update([Key(a, b, c, d)])
    return counts
def namedtuple_counter_incr(n):
    """
    Count occurrences in a Counter keyed by a namedtuple, incremented
    one key at a time with counts[key] += 1.
    """
    from collections import namedtuple, Counter
    Key = namedtuple('Key', 'a b c d')
    counts = Counter()
    for _ in range(n):
        counts[Key(*values())] += 1
    return counts
if __name__ == '__main__':
    from timeit import Timer
    # All counting strategies to benchmark against each other.
    funcs = [nested_defaultdict,
             tuple_defaultdict,
             namedtuple_defaultdict,
             tuple_counter_update,
             tuple_counter_incr,
             namedtuple_counter_update,
             namedtuple_counter_incr]
    # Credit to Raymond Hettinger for the following:
    # timeit executes in a fresh namespace, so every function must be
    # imported from __main__ inside the setup string.
    setup = 'from __main__ import %s' % ', '.join([x.__name__ for x in funcs])
    for func in funcs:
        stmt = '%s(%d)' % (func.__name__, 1000)
        # Best of 7 repeats of 20 runs; min() is the least noisy estimate.
        print(func.__name__, min(Timer(stmt, setup).repeat(7, 20)))
# eof
| true |
860d133a7e6662436b6359e45963c4e41e7505d2 | Python | DiegoC386/Algoritmos_Diego | /Taller Estructuras de Control Selectivas/Ejercicio_2.py | UTF-8 | 477 | 4.3125 | 4 | [
"MIT"
] | permissive | """
Escriba un algoritmo, que dado como dato el sueldo de un trabajador,
le aplique un aumento del 15% si su salario bruto
es inferior a $900.000 COP y 12% en caso contrario.
Imprima el nuevo sueldo del trabajador.
Entradas
salariobruto-->float--sb
Salidas
Salarioneto-->float--sn
"""
# Apply a 15% raise when the gross salary (sb) is under $900,000 COP,
# 12% otherwise, and print the net salary.  The result variable is
# named sn to match the module docstring ("Salarioneto-->float--sn");
# the original used the stray name cn.
sb=float(input("Digite salario bruto: "))
if(sb<900000):
    sn=sb+(sb*0.15)
    print("Salario neto es igual: "+str(sn))
else:
    sn=sb+(sb*0.12)
    print("Salario neto es igual: "+str(sn))
| true |
def pr038():
    """
    Project Euler 38: largest 1-9 pandigital number formed by
    concatenating some x with x*1, x*2, ..., until 9 digits are built.
    """
    def push(s):
        # Record s only when it is exactly the nine digits 1..9.
        if len(s) > 9:
            return None
        if sum(1 for c in '123456789' if c in s) == 9:
            ret.append(int(s))
    ret = []
    for x in range(1, 10000):
        concat = ''
        multiplier = 1
        # Concatenate successive multiples until at least 9 digits.
        while len(concat) < 9:
            concat += str(x * multiplier)
            multiplier += 1
        push(concat)
    return sorted(ret, reverse=True)[0]
def run():
    # Entry point used by the __main__ guard: return the problem answer.
    return pr038()
# Print the Project Euler 38 answer when executed as a script.
if __name__ == "__main__":
    print(run())
| true |
9a7fee7558d929916ee0867bb9b9b4c79a9f2d9b | Python | crlesage/cs373-collatz | /SphereCollatz.py | UTF-8 | 3,734 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
# ------------------------------
# projects/collatz/SphereCollatz.py
# Copyright (C) 2015
# Glenn P. Downing
# ------------------------------
# -------
# imports
# -------
import sys
# combined Collatz.py and RunCollatz.py
# from Collatz import collatz_solve
# ---
# Global Cache
# ---
"""
Cache dictionary for reference.
Includes eager cache represented by powers of 2.
"""
# n -> Collatz cycle length of n.
# FIX: the original literal was {1:1, 1:2}; the duplicate key collapsed
# to {1: 2}, wrongly reporting cycle length 2 for n == 1.  The intended
# seed is cycle_length(1) == 1 and cycle_length(2) == 2.
cache = {1: 1, 2: 2}
# Eager cache: a power of two halves straight down to 1, so
# cycle_length(2**k) == k + 1.  Pre-fill 2**2 .. 2**19.
for _power in range(2, 20):
    cache[2 ** _power] = _power + 1
#Meta Cache
# meta_cache = {1000: 179, 2000: 182, 3000: 217}
# ------------
# collatz_read
# ------------
def collatz_read (s) :
    """
    Parse one input line.

    s -- a string holding two whitespace-separated ints
    return [i, j], the inclusive endpoints of a range
    """
    tokens = s.split()
    return [int(tokens[0]), int(tokens[1])]
# ------------
# collatz_eval
# ------------
def collatz_eval (i, j) :
    """
    i the beginning of the range, inclusive
    j the end of the range, inclusive
    return the max cycle length of the range [i, j]

    Cycle lengths are memoized in the module-level cache.
    FIX: the original recomputed collatz_helper(n) even when n was
    already cached (the cache-hit branch fell through to the helper
    call), defeating the memoization; now the helper runs only on a miss.
    """
    assert i > 0
    assert j > 0
    assert i < 1000000
    assert j < 1000000
    # Normalize so that i <= j.
    if (i > j) :
        i, j = j, i
    assert j >= i
    max_cycle_length = 0
    for n in range (i, j + 1) :
        # Compute and memoize only on a cache miss.
        if n not in cache :
            cache[n] = collatz_helper(n)
        if cache[n] > max_cycle_length :
            max_cycle_length = cache[n]
    assert max_cycle_length > 0
    return max_cycle_length
def collatz_helper (n) :
    """
    Computes and returns cycle length of n + cycle lengths found in cache
    """
    assert n > 0
    # Cycle length less than 3 is itself
    # (cycle_length(1) == 1, cycle_length(2) == 2).
    if (n < 3) :
        return n
    # Compute cycle length otherwise
    cycle_length = 0
    # If not in cache already, find cycle length
    # Walk the Collatz sequence until we land on a memoized value.
    while (n not in cache) :
        if (n % 2) == 0 :
            n = (n // 2)
            cycle_length += 1
            assert cycle_length > 0
        else :
            # For odd n, 3n + 1 is even, so fold two steps into one:
            # (3n + 1) // 2 == n + (n >> 1) + 1, counting 2 steps.
            n += (n >> 1) + 1
            cycle_length += 2
            assert cycle_length > 0
    # Return the current cycle length plus if there is a cache value
    return cycle_length + cache[n]
# -------------
# collatz_print
# -------------
def collatz_print (w, i, j, v) :
    """
    Write one result line "i j v" to the writer.

    w a writer
    i the beginning of the range, inclusive
    j the end of the range, inclusive
    v the max cycle length
    """
    w.write("{} {} {}\n".format(i, j, v))
# -------------
# collatz_solve
# -------------
def collatz_solve (r, w) :
    """
    Read "i j" lines from reader r, write "i j max_cycle_length" to w.

    r a reader
    w a writer
    """
    for line in r :
        i, j = collatz_read(line)
        collatz_print(w, i, j, collatz_eval(i, j))
# ----
# main
# ----
# Read ranges from stdin and write results to stdout.
if __name__ == "__main__" :
    collatz_solve(sys.stdin, sys.stdout)
"""
% cat RunCollatz.in
1 10
100 200
201 210
900 1000
% RunCollatz.py < RunCollatz.in > RunCollatz.out
% cat RunCollatz.out
1 10 1
100 200 1
201 210 1
900 1000 1
% pydoc3 -w Collatz
# That creates the file Collatz.html
""" | true |
21942087a838d1f55b4e613d7a4d4eb8fbec6720 | Python | nansencenter/django-geo-spaas-argo-floats | /argo_floats/management/commands/ingest_argo.py | UTF-8 | 1,318 | 2.5625 | 3 | [] | no_license | from django.core.management.base import BaseCommand, CommandError
from argo_floats.utils import crawl
class Command(BaseCommand):
    """Crawl a THREDDS catalog URL and ingest Argo float metadata."""
    args = '<url> <select>'
    help = '''
    Add Argo float to archive.
    Args:
        <url>: the url to the thredds server
        <select>: You can select datasets based on their THREDDS ID using
        the 'select' parameter
    url = http://tds0.ifremer.fr/thredds/catalog/CORIOLIS-ARGO-GDAC-OBS/kordi/catalog.html
    '''
    def add_arguments(self, parser):
        # Accept any number of positional urls; handle() enforces exactly one.
        parser.add_argument('url', nargs='*', type=str)
        # parser.add_argument('--platnum',
        #         action='store',
        #         default='',
        #         help='''Argo float number''')
        # parser.add_argument('--filename',
        #         action='store',
        #         default='',
        #         help='''Filename of a specific dataset''')
    def handle(self, *args, **options):
        # FIX: raise Django's CommandError (imported above but previously
        # unused) instead of IOError, so manage.py reports a proper
        # command failure; also use a direct != comparison.
        if len(options['url']) != 1:
            raise CommandError('Please provide a url to the data')
        url = options.pop('url')[0]
        print(url)
        added = crawl(url)
        self.stdout.write(
            'Successfully added metadata of %s Argo float profiles' % added)
| true |
76599733ca332184e175186a502a057e2e69d4e9 | Python | dongweiming/mp | /2016-12-03/pipe.py | UTF-8 | 217 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | from multiprocessing import Process, Pipe
def f(conn):
    # Child process: send a brand-new (parent_end, child_end) Pipe pair
    # back through conn, then close this end.
    conn.send(Pipe())
    conn.close()
# NOTE(review): the statement-style print below makes this Python 2 code.
# Sending Connection objects through a pipe relies on multiprocessing's
# pickling of connections and is platform-dependent — verify before reuse.
parent_conn, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
p.start()
print parent_conn.recv()
p.join()
| true |
00f910cc01d8c54f2baae14d59f378ca3c553fbf | Python | MaxCoder360/tensorboi | /regularizers.py | UTF-8 | 1,936 | 2.859375 | 3 | [] | no_license | from layers import Module
import numpy as np
class Dropout(Module):
    """Inverted dropout layer.

    During training each feature column is kept with probability p and
    the survivors are scaled by 1/p, so the expected activation already
    matches evaluation mode.  Evaluation is therefore the identity.
    FIX: the original additionally multiplied by p at eval time (and
    scaled gradients by p in backward), double-applying the compensation
    that the 1/p training scaling already performs.
    """
    __slots__ = 'p', 'mask', '_train', 'grad_input'

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p          # keep-probability for each feature
        self.mask = None    # last sampled mask, reused in backward()

    def forward(self, input):
        if self._train:
            # One mask per feature, shared across the batch; dividing by
            # p rescales surviving activations (inverted dropout).
            self.mask = np.random.binomial(1, self.p, input.shape[1]) / self.p
            # Broadcasting replaces the original per-row Python loop.
            self.output = input * self.mask
        else:
            # Identity at evaluation time.
            self.output = input
        return self.output

    def backward(self, input, grad_output):
        if self._train:
            # Gradient flows only through the kept (rescaled) units.
            self.grad_input = np.multiply(grad_output, self.mask)
        else:
            self.grad_input = grad_output
        return self.grad_input
class BatchNorm(Module):
    # Simplified normalization layer: normalizes each row and scales by a
    # fixed gamma (gamma is not learned here).
    # NOTE(review): forward() normalizes along axis=1 (per sample, across
    # features) while backward() reduces along axis=0 (across the batch).
    # These axes look inconsistent — confirm which normalization is
    # intended before trusting the gradients.
    __slots__ = 'gamma', 'mu', 'sigma'
    def __init__(self, gamma=0.1):
        super().__init__()
        self.gamma = gamma
    def forward(self, input):
        if self._train:
            # Per-sample statistics (axis=1), kept for backward().
            self.mu = np.mean(input, axis=1, keepdims=True)
            self.sigma = np.var(input, axis=1, keepdims=True)
            input_norm = (input - self.mu) / np.sqrt(self.sigma + 1e-9)
            self.output = self.gamma * input_norm
        else:
            # Identity at evaluation time (no running statistics kept).
            self.output = input
        return self.output
    def backward(self, input, grad_output):
        if self._train:
            n, d = input.shape
            input_mu = input - self.mu
            # NOTE(review): epsilon here (1e-8) differs from forward's 1e-9.
            std_inv = 1. / np.sqrt(self.sigma + 1e-8)
            grad_input_norm = grad_output * self.gamma
            grad_sigma = np.sum(grad_input_norm * input_mu, axis=0) * -.5 * std_inv ** 3
            grad_mu = np.sum(grad_input_norm * -std_inv, axis=0) + grad_sigma * np.mean(-2. * input_mu, axis=0)
            grad_input = (grad_input_norm * std_inv) + (grad_sigma * 2 * input_mu / n) + (grad_mu / n)
        else:
            grad_input = grad_output
        return grad_input
47e29f7fd618e5a8d68724c7e19a5e998ccff9d9 | Python | amotzkau/docker | /MPDAutoQueue/autoqueue.py | UTF-8 | 4,499 | 2.546875 | 3 | [] | no_license | #! /usr/bin/python3
import musicpd
import argparse, time, socket, random
# Command-line interface.  add_help=False frees -h for --host; a long
# --help option is re-registered explicitly below.
parser = argparse.ArgumentParser(description="Automatically add songs to the MPD queue", add_help=False)
parser.add_argument("--help", help="show this help message and exit", action="help")
parser.add_argument("-h", "--host", help="MPD host name (default: localhost)", default="localhost")
parser.add_argument("-p", "--port", help="MPD port (default: 6600)", type=int, default=6600)
parser.add_argument("-q", "--quiet", help="Silent mode", action="store_true", default=False)
parser.add_argument("-l", "--playlist", metavar="PLAYLIST[:WEIGHT]", help="Takes songs from this playlist (default: Auto)", action="append", default=[])
parser.add_argument("-b", "--before", help="How many played songs leave in the playlist (default: 10)", type=int, default=10)
parser.add_argument("-a", "--ahead", help="How many songs plan ahead (default: 10)", type=int, default=10)
args = parser.parse_args()
client = musicpd.MPDClient()
# Module-level state shared by the functions below.
currentlists = {} # { 'playlist', 'last-modified', 'weight' }
currentsongs = [] # [ (id, name) ]
playedsongs = [] # [ id ]
if not len(args.playlist):
    args.playlist.append('Auto')
def msg(*parts):
    """Print *parts* unless --quiet was requested."""
    if args.quiet:
        return
    print(*parts)
def parsePlaylist(name):
    """Split 'NAME[:WEIGHT]' into (NAME, WEIGHT); weight defaults to 1."""
    head, sep, tail = name.rpartition(":")
    if sep:
        try:
            return (head, int(tail))
        except ValueError:
            # Keep the playlist, fall back to the default weight.
            msg("Could not parse '%s'" % (name,))
            return (head, 1)
    return (name, 1)
def waitForAutoMode():
    # Block until random/repeat/single modes are all off; appending songs
    # while one of them is active would interfere with the user's choice.
    while True:
        status = client.status()
        if int(status['random']):
            msg("Random mode is activated, waiting for deactivation...")
        elif int(status['repeat']):
            msg("Repeat mode is activated, waiting for deactivation...")
        elif int(status['single']):
            msg("Single mode is activated, waiting for deactivation...")
        else:
            break
        # Sleep until MPD reports an options change, then re-check.
        client.idle('options')
def updateSongList():
    # Rebuild the candidate song pool when any configured playlist changed
    # on the server (detected via its last-modified timestamp).
    global currentsongs
    global currentlists
    global playedsongs
    lists = { entry['playlist']: entry for entry in client.listplaylists() }
    dirty = False
    newlists = {}
    for (listname, weight) in args.playlist:
        if listname in lists:
            dirty = (not listname in currentlists or currentlists[listname]['last-modified'] != lists[listname]['last-modified'])
            newlists[listname] = lists[listname]
            newlists[listname]['weight'] = weight
        else:
            msg("Unknown playlist '%s'." % listname)
    if dirty:
        newsongs = []
        for playlist in newlists.values():
            # A playlist with weight w contributes each song w times,
            # making it w times as likely to be picked.
            newsongs.extend(playlist['weight'] * client.listplaylist(playlist['playlist']))
        currentlists = newlists
        # Pair each entry with a unique id so duplicates stay distinct.
        currentsongs = list(zip(range(len(newsongs)), newsongs))
        playedsongs = []
def chooseSong():
    # Pick a random song that was not played recently; playedsongs keeps
    # up to half the pool to avoid quick repeats.
    global currentsongs
    global playedsongs
    # Forget the oldest plays once more than half the pool is blocked.
    while 2*len(playedsongs) > len(currentsongs):
        playedsongs.pop(0)
    # Draw a random rank among the not-yet-played songs...
    pos = random.randrange(len(currentsongs) - len(playedsongs))
    i = 0
    # ...then walk the pool, counting only unplayed entries, until that
    # rank is reached.
    while True:
        if not currentsongs[i][0] in playedsongs:
            pos -= 1
            if pos < 0:
                break
        i += 1
    playedsongs.append(currentsongs[i][0])
    return currentsongs[i][1]
def updatePlaylist():
    # Trim already-played songs from the queue head (keeping args.before
    # of them) and top the queue up so args.ahead songs are planned.
    status = client.status()
    if 'song' in status:
        pos = int(status['song'])
    else:
        pos = 0
    length = int(status['playlistlength'])
    # Batch all queue edits into one MPD command list.
    client.command_list_ok_begin()
    if pos > args.before:
        client.delete((0,pos - args.before))
    for i in range(length - pos - 1, args.ahead):
        client.add(chooseSong())
    client.command_list_end()
args.playlist = [ parsePlaylist(entry) for entry in args.playlist ]
try:
    # Main loop: reconnect forever; on every relevant MPD event refresh
    # the song pool and keep the queue topped up.
    while True:
        try:
            msg("Connecting to %s:%d..." % (args.host, args.port))
            client.connect(args.host, args.port)
            msg("Connected to MPD %s." % (client.mpd_version,))
            while True:
                waitForAutoMode()
                updateSongList()
                if len(currentsongs):
                    updatePlaylist()
                # Wake up when playlists, the queue or playback change.
                client.idle('stored_playlist', 'playlist', 'player')
        except socket.error as e:
            msg('Error:', e)
            time.sleep(1)
        except musicpd.ConnectionError as e:
            msg('Error:', e)
            time.sleep(1)
except KeyboardInterrupt:
    # Clean shutdown on Ctrl-C.
    client.close()
    client.disconnect()
| true |
1631295b9aaf6cecc893551b8161df117d0d4c29 | Python | code440/translate_pptx | /translate_pptx.py | UTF-8 | 1,163 | 2.890625 | 3 | [] | no_license | '''
Created on 2018/09/22
@author: 440
'''
import requests
from pptx import Presentation
from time import sleep
# my api key
api_key=""
def translate(str_in, source="ja", target="en"):
    """Translate *str_in* via the Google Apps Script endpoint.

    source/target are language codes; returns the response body (the
    translated text) as a string.
    """
    url = "https://script.google.com/macros/s/" + api_key + "/exec"
    # FIX: let requests build and URL-encode the query string.  The
    # original concatenated the raw text into the URL, which broke on
    # spaces, '&', '#' and non-ASCII characters.
    params = {"text": str_in, "source": source, "target": target}
    rr = requests.get(url, params=params)
    return rr.text
if __name__ == '__main__':
    path_to_presentation = "test.pptx"
    prs = Presentation(path_to_presentation)
    print("start")
    # Walk every text run in the deck and replace it in place with its
    # translation, preserving each run's formatting.
    for ns, slide in enumerate(prs.slides):
        for nsh, shape in enumerate(slide.shapes):
            if not shape.has_text_frame:
                continue
            for np, paragraph in enumerate(shape.text_frame.paragraphs):
                for rs, run in enumerate(paragraph.runs):
                    str_in = run.text
                    str_out = translate(str_in)
                    prs.slides[ns].shapes[nsh].text_frame.paragraphs[np].runs[rs].text = str_out
                    # Throttle requests to avoid hammering the endpoint.
                    sleep(1.5)
                    print(np)
    # Write the translated copy alongside the original.
    prs.save('test_trans.pptx')
    print("end")
| true |
9ea2f22622717eeb193c8f874df87e6875f2430c | Python | n0t-a-b0t/packet-sniffer | /sniffer_v2.py | UTF-8 | 5,614 | 2.640625 | 3 | [] | no_license | import socket
import struct
import binascii
def printer(data):
    """Append *data* to the trace file.

    FIX: use a context manager so the handle is closed even when the
    write raises; the original left the file open on error.
    """
    with open('trace_file.txt', 'a') as file_obj:
        file_obj.write(data)
    return
def tcp(data):
    """Unpack and log the 20-byte fixed TCP header; return the bytes
    that follow it (options, if any, plus payload)."""
    # !2H2I4H: src/dst ports, seq, ack, offset+flags word, window,
    # checksum, urgent pointer — all big-endian.
    sniff = struct.unpack('!2H2I4H', data[:20])
    data = data[20:]
    printer("==================TCP Header=================\n")
    printer("Source port: \t" + str(sniff[0]) + '\n')
    printer("Destination port: \t" + str(sniff[1]) + '\n')
    printer("Sequence number: \t" + str(sniff[2]) + '\n')
    printer("Acknowledgement number: \t" + str(sniff[3]) + '\n')
    # Data offset is the top 4 bits of word 4; > 5 words means TCP
    # options follow the fixed header.
    if (sniff[4] >> 12) == 5:
        printer("Data Offset: \t" + str(sniff[4] >> 12) + '\n')
    else:
        printer("Data Offset: \t" + str(sniff[4] >> 12) + '\n')
        printer("This Header has Option Field\n")
    printer("Reserved: \t" + str((sniff[4] >> 6) & 0x03f) + '\n')
    printer("Urgent Flag: \t" + str((sniff[4] >> 5) & 0x001) + '\n')
    printer("Acknowledgement flag: \t" + str((sniff[4] >> 4) & 0x001) + '\n')
    printer("Push Flag: \t" + str((sniff[4] >> 3) & 0x0001) + '\n')
    printer("Reset flag: \t" + str((sniff[4] >> 2) & 0x0001) + '\n')
    printer("SYN flag: \t" + str((sniff[4] >> 1) & 0x0001) + '\n')
    printer("FIN flag: \t" + str(sniff[4] & 0x0001) + '\n')
    printer("Window: \t" + str(sniff[5]) + '\n')
    printer("Checksum: \t" + str(sniff[6]) + '\n')
    printer("Urgent Pointer: \t" + str(sniff[7]) + '\n')
    printer("=============================================\n")
    return data
def udp(data):
    """Unpack and log the 8-byte UDP header; return the payload."""
    src_port, dst_port, length, checksum = struct.unpack('!4H', data[:8])
    payload = data[8:]
    printer("==================UDP Header=================\n")
    printer("Source port: \t" + str(src_port) + '\n')
    printer("Destination port: \t" + str(dst_port) + '\n')
    printer("Length: \t" + str(length) + '\n')
    printer("Checksum: \t" + str(checksum) + '\n')
    printer("=============================================\n")
    return payload
def ipv4(data):
    """Unpack and log the 20-byte fixed IPv4 header.

    Returns (remaining_bytes, protocol_number) so the caller can
    dispatch to the TCP/UDP parsers.
    """
    sniff = struct.unpack('!6H4s4s', data[:20])
    data = data[20:]
    printer("=================IPv4 Header===================\n")
    printer("Version: \t" + str(sniff[0] >> 12) + '\n')
    # IHL > 5 words means IP options are present after the fixed header.
    if ((sniff[0] >> 8) & 0x0f) > 5:
        printer("This Header has Options field attached to it\n")
    else:
        printer("Internet Header Length: \t" + str((sniff[0] >> 8) & 0x0f) + '\n')
    printer("DSCP: \t" + str((sniff[0] >> 2) & 0x003f) + '\n')
    printer("ECN: \t" + str(sniff[0] & 0x0003) + '\n')
    printer("Total length: \t" + str(sniff[1]) + '\n')
    printer("Identification: \t" + str(sniff[2]) + '\n')
    # Flags occupy the top 3 bits of word 3; the rest is the offset.
    printer("Reserved Flag: \t" + str(sniff[3] >> 15) + '\n')
    printer("Don't Fragment Flag: \t" + str((sniff[3] >> 14) & 0x1) + '\n')
    printer("More Fragments Flag: \t" + str((sniff[3] >> 13) & 0x1) + '\n')
    printer("Fragment Offset: \t" + str(sniff[3] & 0x1fff) + '\n')
    printer("Time To Live: \t" + str(sniff[4] >> 8) + '\n')
    # IANA protocol numbers: 6 = TCP, 17 = UDP.
    if (sniff[4] & 0x00ff) == 6:
        printer("Protocol: \tTCP " + str(sniff[4] & 0x00ff) + '\n')
    elif (sniff[4] & 0x00ff) == 17:
        printer("Protocol: \tUDP " + str(sniff[4] & 0x00ff) + '\n')
    else:
        printer("Protocol: \t" + str(sniff[4] & 0x00ff) + '\n')
    printer("Header Checksum: \t" + str(sniff[5]) + '\n')
    printer("Source IPv4 Address: \t" + str(socket.inet_ntoa(sniff[6])) + '\n')
    printer("Destination IPv4 Address: \t" + str(socket.inet_ntoa(sniff[7])) + '\n')
    printer("===============================================\n")
    return data, (sniff[4] & 0x00ff)
def arp(data):
    """Unpack and log a 28-byte Ethernet/IPv4 ARP packet."""
    sniff = struct.unpack('!4H6s4s6s4s', data)
    printer("========Address Resolution Protocol===========\n")
    printer("Hardware Type: \t" + str(sniff[0]) + '\n')
    printer("Protocol Type: \t" + str(hex(sniff[1])) + '\n')
    # Word 2 packs hardware size (high byte) and protocol size (low byte).
    printer("Hardware Size: \t" + str((sniff[2] >> 8)) + '\n')
    printer("Protocol Size: \t" + str(sniff[2] & 0x00ff) + '\n')
    # operation: 1 = request, 2 = reply.
    printer("operation: \t" + str(sniff[3]) + '\n')
    printer("Sender MAC Address: \t" + str(binascii.hexlify(sniff[4])) + '\n')
    printer("Sender IPv4 Address: \t" + str(socket.inet_ntoa(sniff[5])) + '\n')
    printer("Target MAC Address: \t" + str(binascii.hexlify(sniff[6])) + '\n')
    printer("Target IPv4 Address: \t" + str(socket.inet_ntoa(sniff[7])) + '\n')
    printer("==============================================\n")
    return
def l2_analyser(data):
    """Unpack and log the 14-byte Ethernet II frame header.

    Returns (remaining_bytes, ethertype_hex_string) for dispatching.
    """
    sniff = struct.unpack('!6s6sH', data[:14])
    data = data[14:]
    printer("==========Layer 2 Ethernet Frame===========\n")
    printer("Destination MAC Address: \t" + str(binascii.hexlify(sniff[0])) + '\n')
    printer("Source MAC Address: \t" + str(binascii.hexlify(sniff[1])) + '\n')
    # EtherType: 0x8100 = 802.1Q VLAN tag, 0x88a8 = 802.1ad tag,
    # 0x800 = IPv4, 0x806 = ARP.
    if hex(sniff[2]) == '0x8100':
        printer("This frame has 802.1Q Tag\n")
    elif hex(sniff[2]) == '0x88a8':
        printer("This frame has 802.1ad Tag\n")
    elif hex(sniff[2]) == '0x800':
        printer("Type: \tIPv4 " + str(hex(sniff[2])) + '\n')
    elif hex(sniff[2]) == '0x806':
        printer("Type: \tARP " + str(hex(sniff[2])) + '\n')
    else:
        printer("Type: " + str(hex(sniff[2])) + '\n')
    printer("===========================================\n")
    return data, hex(sniff[2])
def main():
    """Capture and log packets forever from a Linux raw socket.

    Requires root (PF_PACKET/SOCK_RAW, ETH_P_ALL).
    FIX: the socket is created once, outside the loop; the original
    opened a fresh raw socket on every iteration without closing it,
    leaking a file descriptor per packet.
    """
    sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x003))
    while True:
        printer("\n\n\n")
        data, l3_proto = l2_analyser(sock.recv(2048))
        # Dispatch on EtherType, then on the IPv4 protocol number.
        if l3_proto == '0x806':
            arp(data)
        elif l3_proto == '0x800':
            data, l4_proto = ipv4(data)
            if l4_proto == 17:
                data = udp(data)
            elif l4_proto == 6:
                data = tcp(data)
    return
main()
| true |
fa7ea9f91c145ebd863cd61e0956e22fdb525807 | Python | tdev131287/PythonCode | /mysqlconnect_Final.py | UTF-8 | 1,523 | 2.734375 | 3 | [] | no_license | ##!/usr/bin/python
#import MySQLdb
#
## Connect
#db = MySQLdb.connect(host="172.22.0.16",
# user="root",
# passwd="Sc@1234",
# db="sma")
#
#cursor = db.cursor()
#
## Execute SQL select statement
#cursor.execute("select NAICS_Titles,NAICS from Shipment_NAICS_code")
#
## Commit your changes if writing
## In this case, we are only reading data
## db.commit()
#
## Get the number of rows in the resultset
#numrows = cursor.rowcount
#
## Get and display one row at a time
#for x in range(0, numrows):
# row = cursor.fetchone()
# print (row[0], "-->", row[1])
#
## Close the connection
#db.close()
#import datetime
import mysql.connector as sql
import pandas as pd
db_connection = sql.connect(host="172.28.0.11", user='root', database='sma',passwd="Sc@1234")
# NOTE(review): credentials are hard-coded in source; move them to a
# config file or environment variables before sharing this script.
#
#cursor = cnx.cursor()
#cursor.execute("select * from cost_data")
#row = cursor.fetchone()
#pro_info = pd.DataFrame(cursor.fetchall())
#pro_info.to_csv('sample.csv')
df = pd.read_sql('select * from dummy', con=db_connection)
# Strip thousands separators from every column.
# NOTE(review): the .str accessor assumes every column is string-typed;
# confirm the 'dummy' table has no numeric columns, or this will raise.
df=df.apply(lambda x: x.str.replace(',',''))
#df=df.loc[df['time'] == 2012]
#df1=df.loc[df['time'].isin([2012,2014, 2013])]
#df1=df.describe()
#df['Amount']=df['Amount'].astype('float')
# Unparseable amounts become NaN and are skipped by sum().
df['Amount'] = pd.to_numeric(df['Amount'],errors='coerce')
Total = df['Amount'].sum()
print (Total)
df.to_csv('sample.csv')
#
#while row is not None:
#    print(row)
#    row = cursor.fetchone()
#
#print (cursor)
825046a27cec6206fd1c290bb14d8f1733c346c0 | Python | felipeserna/holbertonschool-machine_learning | /pipeline/0x01-apis/2-user_location.py | UTF-8 | 833 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python3
"""
GitHub API
Script that prints the location of a specific user.
Your code should not be executed when the file is imported.
"""
import requests
import sys
import time
if __name__ == '__main__':
    url = sys.argv[1]
    # https://api.github.com/users/holbertonschool
    # FIX: fetch once and reuse the response.  The original called
    # requests.get(url) separately for the status, the body and the
    # headers — up to three API requests per run, which both wastes the
    # rate limit and can race against it.
    response = requests.get(url)
    my_status = response.status_code
    if my_status == 200:
        print(response.json()["location"])
    elif my_status == 404:
        print("Not found")
    elif my_status == 403:
        # Rate-limited: report minutes from now until the value of the
        # X-Ratelimit-Reset header (a Unix timestamp).
        Now = int(time.time())
        Reset = int(response.headers['X-Ratelimit-Reset'])
        seconds = Reset - Now
        X = seconds / 60
        print("Reset in {} min".format(int(X)))
| true |