blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
530e78cfaf61329b81acdad0309247af27335469
|
Python
|
RafidaZaman/Lab-3-solution
|
/Problem 4.py
|
UTF-8
| 367
| 2.828125
| 3
|
[] |
no_license
|
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics

# Load the classic iris dataset (150 samples, 4 features, 3 classes).
iris = load_iris()
# features = measurement matrix, labels = species response
features = iris.data
labels = iris.target
print(iris)

# Fit a 50-nearest-neighbour classifier and score it on its own training
# data (training accuracy, not a generalization estimate).
classifier = KNeighborsClassifier(n_neighbors=50)
classifier.fit(features, labels)
predictions = classifier.predict(features)
print("Accuracy:")
print(metrics.accuracy_score(labels, predictions))
| true
|
638419846c35eaf0d3885cf9bc09549afb7c2340
|
Python
|
lic-informatica-umet/Eibu-s-code
|
/Uni Programacion/IntroProg/Guia de ejercicios/Unidad 7/1 - Operaciones Condicionales/3.py
|
UTF-8
| 671
| 4.875
| 5
|
[] |
no_license
|
'''
Exercise 3: read two values and compute the product if the first is
greater than the second; if they are equal, only say so.
'''
# Author: Agustin Arce
# Date: 20/04/2019
# Program: product of two numbers

# Read both operands as floats so decimal input also works.
# (The original pre-initialized num1/num2/prod to 0; those assignments were
# immediately overwritten, so they have been removed.)
num1 = float(input("Ingrese primer numero: "))
num2 = float(input("Ingrese segundo numero: "))

# Operation
prod = num1 * num2

# Control flow and on-screen report. Equality is tested first, so the
# `elif num1 >= num2` branch effectively means num1 > num2.
if num1 == num2:
    print("Los dos numeros son iguales, no se mostrara el producto")
elif num1 >= num2:
    print("El producto entre los dos valores es:", prod)
else:
    print("Intente indicar el primer numero como mayor")
| true
|
35d654597e5a2d9de53510654e1c617d7417bcfd
|
Python
|
a8578062/store
|
/Day13/homework/sendemail.py
|
UTF-8
| 1,602
| 2.53125
| 3
|
[] |
no_license
|
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
from email.header import Header
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
import os,sys

# Sends a test-report HTML file as an e-mail attachment through QQ's SMTP
# server. (The original imported chdir from the private, Windows-only `nt`
# module; os.chdir below is the portable equivalent.)
print("正在发送...")

# Log in to the mail server and upgrade the connection with STARTTLS.
smtpObj=smtplib.SMTP('smtp.qq.com', 587)
smtpObj.ehlo()
smtpObj.starttls()
# Supply the account credentials.
# NOTE(review): the password is empty and the account is hard-coded —
# credentials should come from the environment, never from source code.
smtpObj.login('1021295302@qq.com', '')

# Sender / recipient information.
sender = '1021295302@qq.com'  # sender address
receivers = ['2431320433@qq.com']  # recipient addresses

# Fill in the From/To/Subject headers.
message=MIMEMultipart()
message['From'] = formataddr(["姚佳龙",sender])
# The original used '2431320433@qq.com'.join(receivers), which only yields
# the right value by accident for a single recipient; a comma join is the
# conventional way to build the To header (identical result here).
message['To'] = formataddr(["Jason",', '.join(receivers)])
subject = 'Day13'
message['Subject'] = Header(subject, 'utf-8')

# Body text.
textmessage = MIMEText('计算器的测试报告','html', 'utf-8')
message.attach(textmessage)

workLoc = os.path.join('E:\\', 'pythonProject','day13','homework')
# Check that the working directory exists before switching into it.
# (`and` replaces the original bitwise `&` on booleans.)
if os.path.exists(workLoc) and os.path.isdir(workLoc):
    # change the current working directory
    os.chdir(workLoc)
else:
    print('路径无效,请从新检查')
    sys.exit()

# Attach the report file.
File='计算器的测试报告.html'
print("附件文件名为:%s" %File)
FileLoc=os.path.join(workLoc,File)
with open(FileLoc,'rb') as report:  # context manager: no leaked handle
    FileAtt = MIMEApplication(report.read())
FileAtt.add_header('Content-Disposition', 'attachment', filename=File)
message.attach(FileAtt)

# Send the message and close the SMTP session.
smtpObj.sendmail(sender,receivers, message.as_string())
smtpObj.quit()
print("发送成功!")
| true
|
79de0fb745ad4c52ac84db5ebb3ca9e3a5685d87
|
Python
|
haruki37/keisan
|
/test/latexfile.py
|
UTF-8
| 1,818
| 2.890625
| 3
|
[] |
no_license
|
"""
latexfile.py
"""
import os
import subprocess
import textwrap
class LatexFile:
    """Collect LaTeX fragments and compile them into a PDF.

    Builds a jsarticle document from added text/equation fragments, writes
    it to <fname>.tex, then runs platex + dvipdfmx to produce <fname>.pdf.
    """

    # Document preamble; the single placeholder receives the font size.
    PROLOGUE = textwrap.dedent('''\
    \\documentclass[{}pt,dvipdfmx,a4paper]{{jsarticle}}
    \\usepackage{{amsmath}}
    \\usepackage{{tgpagella, euler}}
    \\begin{{document}}\
    ''')

    def __init__(self, fname, title=None, point=10):
        """
        :param fname: output base name (no extension)
        :param title: document title; title block is omitted when None
        :param point: font size in points
        """
        self.text = []      # accumulated body fragments, in insertion order
        self.fname = fname
        self.point = point
        # Render the title block once up front. (The original assigned
        # self.title twice; the first assignment was always overwritten.)
        if title is None:
            self.title = ''
        else:
            self.title = '\\title{{{}}} \n\\date{{}} \n\\maketitle'.format(title)

    def add_text(self, text):
        """Append a raw LaTeX fragment to the document body."""
        self.text.append(text)

    def add_eq(self, equation):
        """Append a single equation wrapped in an align* environment."""
        self.text.append('\\begin{align*}\n' + equation + '\n\\end{align*}')

    def add_eqs(self, eqs, leftalign=True):
        """Append several equations in one align* block.

        :param eqs: iterable of equation strings
        :param leftalign: when truthy, each line is prefixed with '&' so the
            block is left-aligned instead of centred on the alignment point
        """
        # Plain truthiness instead of the original `is True` identity test,
        # so any truthy/falsy value behaves as a caller would expect.
        if leftalign:
            self.text.append('\\begin{align*}\n' + '& ' + '\\\\ \n& '.join(eqs) + '\n\\end{align*}')
        else:
            self.text.append('\\begin{align*}\n' + '\\\\ \n'.join(eqs) + '\n\\end{align*}')

    def output(self):
        """Return the complete LaTeX source as one string."""
        return '\n'.join([
            self.PROLOGUE.format(self.point),
            self.title,
            '\n'.join(self.text),
            '\\end{document}'
        ])

    def compile(self, remove=True):
        """Write the .tex file and compile it to PDF.

        Runs `platex` then `dvipdfmx`. When `remove` is truthy the
        .aux/.log/.dvi intermediates are deleted afterwards.
        """
        with open(self.fname + '.tex', mode='wt') as fout:
            fout.write(self.output())
        command1 = 'platex {}.tex'.format(self.fname)
        print(command1.split(' '))
        subprocess.run(command1.split(' '))
        command2 = "dvipdfmx {}".format(self.fname)
        subprocess.run(command2.split(' '))
        if remove:
            rmv_list = [self.fname + x for x in ('.aux', '.log', '.dvi')]
            for rmv_fname in rmv_list:
                os.remove(rmv_fname)
| true
|
b7d68915559caff7605dbf2ed2c2505261fb60c4
|
Python
|
Bhargavisaikia219/Madlibs-Generator
|
/Madlibs_Generator.py
|
UTF-8
| 1,425
| 3.8125
| 4
|
[] |
no_license
|
# Mad Libs generator: collect a series of words from the user, splice them
# into a WandaVision-themed story template, and print the result.

# Gather the fill-in-the-blank words, in the order the story needs them.
charname1 = input("Give me a character name:")
charname2 = input("Give me another character name:")
place = input("Give me name of a place:")
yr = input("Mention a year:")
verb1 = input("Give me a verb (present tense):")
noun1 = input("Give me a noun:")
noun2 = input("Give me another noun:")
verb2 = input("Give me a verb (present tense, in third person):")
verb3 = input("Give me a verb (present tense, in third person):")
charname3 = input("Give me another character name:")
verb4 = input("Give me a verb (present tense):")
noun3 = input("Give me another noun:")
adj = input("Give me an adjective:")
showname = input("Mention name of a show:")
noun4 = input("Give me another noun:")

# Assemble the story from adjacent f-string pieces (text identical to the
# original single-line template).
madlib = (
    f"Newlywed couple {charname1} and {charname2} move into the town of "
    f"{place} in a black-and-white {yr}s setting.One day they {verb1} a "
    f"heart drawn on their {noun1}, but neither can remember what the "
    f"{noun2} is.\nWhile Vision {verb2} his job at Computational Services "
    f"Inc., Wanda decides that the heart {verb3} their anniversary.Their "
    f"neighbor {charname3} introduces herself to Wanda and helps her "
    f"prepare to {verb4} that night.Vision amazes his co-workers with his "
    f"{noun3} but is unsure what his company actually does.All of this "
    f"takes place in the {adj} sitcom {showname} which someone is watching "
    f"on a {noun4}."
)

# Show the finished story.
print(madlib)
| true
|
b8783374f1caca681dfa97860e2348055ed575e6
|
Python
|
TheDubliner/RedArmy-Cogs
|
/stig/stig.py
|
UTF-8
| 4,873
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
from pathlib import Path
import asyncio
import discord
import random
import re
import yaml
from redbot.core import (
Config,
commands,
data_manager
)
from redbot.core.utils import (
chat_formatting
)
UNIQUE_ID = 5140937153389558
class Stig(commands.Cog):
    """Spam random The Stig quotes."""
    # NOTE: the original placed the docstring above *after* the class
    # attributes, where it was a stray string literal and never became
    # Stig.__doc__; it has been moved to the top so Red can display it.

    __version__ = "0.1.0"
    DATAFILENAME = "stigquotes.yaml"            # quotes bundled with the cog
    CUSTOMFILENAME = "custom-stig-quotes.yaml"  # quotes added by users
    IMGFILENAME = "stig.jpeg"

    def __init__(self, bot):
        super().__init__()
        self.bot = bot
        self.name = "stig"
        self.config = Config.get_conf(
            self,
            identifier=UNIQUE_ID,
            force_registration=True
        )

    async def red_delete_data_for_user(self, **kwargs):
        """No data to delete."""
        return

    @commands.group(invoke_without_command=True)
    async def stig(self, ctx: commands.Context,
                   name: str = None, gender: str = "m"):
        """
        Add The Stig to your channel.

        Post a random Stig quote to the channel. You can also mention another
        user, optionally adding **f** to change the gender.
        """
        async with ctx.channel.typing():
            await asyncio.sleep(1)  # brief pause so the typing indicator shows
            quote = await self.get_random_stig_quote()
            # A mention overrides any positional name argument.
            if ctx.message.mentions:
                name = ctx.message.mentions[0].display_name
            if name:
                quote = self.replace_name(quote, name)
                if gender == "f":
                    quote = self.replace_pronouns(quote)
            if quote:
                embed = self.build_embed(quote)
                return await ctx.channel.send(embed=embed)

    @stig.command(name="addquote", rest_is_raw=True)
    async def add_stig_quote(self, ctx: commands.Context, *, quote: str):
        """
        Add a Stig quote to the rotation.

        Ensure your sentence includes the name _Stig_ somewhere!
        """
        if quote is not None:
            if not re.search(r"Stig", quote):
                message = ("Your message needs more Stig!")
                return await ctx.send(message)
            quote = quote.strip()
            # remove quotes but only if symmetric
            if quote.startswith('"') and quote.endswith('"'):
                quote = quote[1:-1]
            fname = data_manager.cog_data_path(self) / self.CUSTOMFILENAME
            data = [quote]
            if fname.exists():
                with open(fname, "r") as f:
                    existing_quotes = yaml.safe_load(f)
                data = existing_quotes + data
            with open(fname, "w") as f:
                f.write(yaml.dump(data))
            return await ctx.channel.send(
                chat_formatting.info("Added your quote!")
            )
        else:
            # Fixed: the original called self.bot.say(), which was removed in
            # discord.py 1.0 / Red v3; ctx.send() matches the success path.
            await ctx.send(
                chat_formatting.warning(
                    "Cannot add a quote with no text, "
                    "attachments or embed images."
                ))

    @stig.command()
    async def version(self, ctx: commands.Context):
        """
        Display the current cog version.
        """
        await ctx.reply(
            f"This cog is on version {self.__version__}.",
            mention_author=False
        )

    async def get_random_stig_quote(self):
        """
        Returns a random Stig quote.

        Merges the bundled quote file with any user-added custom quotes
        before picking one at random.
        """
        bundledquotes = Path.joinpath(data_manager.bundled_data_path(self),
                                      self.DATAFILENAME)
        customquotes = Path.joinpath(data_manager.cog_data_path(self),
                                     self.CUSTOMFILENAME)
        with open(bundledquotes, "r", encoding="utf8") as source:
            quotes = yaml.safe_load(source)
        if customquotes.exists():
            with open(customquotes, "r", encoding="utf8") as source:
                extraquotes = yaml.safe_load(source)
            quotes = quotes + extraquotes
        return random.choice(quotes)

    @staticmethod
    def replace_pronouns(sentence):
        """Swap masculine pronouns (he/him/his) for feminine ones."""
        sentence = re.sub(r"\b([Hh])is\b", r"\1er", sentence)
        sentence = re.sub(r"\bhe\b", "she", sentence)
        sentence = re.sub(r"\bHe\b", "She", sentence)
        sentence = re.sub(r"\b([Hh])im\b", r"\1er", sentence)
        return sentence

    @staticmethod
    def replace_name(sentence, name):
        """Substitute the whole word 'Stig' with the given display name."""
        sentence = re.sub(r"\bStig\b", name, sentence)
        return sentence

    def build_embed(self, quote):
        """
        Builds an embed message based on the provided quote.
        """
        embed = discord.Embed(
            title='The Stig',
            colour=discord.Colour.from_rgb(0, 0, 245),
            description=quote)
        embed.set_thumbnail(url="https://github.com/TheDubliner/RedArmy-Cogs/"
                                "blob/master/stig/data/stig.jpeg?raw=true")
        return embed
| true
|
01bef5c36db1d849c01f5202669baf49cf430f49
|
Python
|
chiragjindal/Competitive-programming
|
/Codechef/APRIL12/DUMPLING.py
|
UTF-8
| 278
| 2.75
| 3
|
[] |
no_license
|
# Codechef APRIL12 / DUMPLING, ported from Python 2 (raw_input / print
# statement) to Python 3 and factored into testable functions.
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm (a, b >= 0)."""
    while b > 0:
        a, b = b, a % b
    return a


def solve(a, b, c, d, k):
    """Answer for one test case.

    Each dumpling's step size is gcd of its pair; both land on multiples of
    the lcm of the two step sizes. Positions within [-k, k] plus the origin
    give 2*(k // lcm) + 1 meeting points.
    """
    g1, g2 = gcd(a, b), gcd(c, d)
    lcm = (g1 * g2) // gcd(g1, g2)
    positions = k // lcm
    return positions * 2 + 1


if __name__ == "__main__":
    # One line per test case: five space-separated integers a b c d k.
    for _ in range(int(input())):
        a, b, c, d, k = map(int, input().split(' '))
        print(solve(a, b, c, d, k))
| true
|
9f03c323c4617f000ed4e8f98be04dd18a629f35
|
Python
|
sven91swe/CarND-AdvancedLaneLines
|
/code/development.py
|
UTF-8
| 5,871
| 2.71875
| 3
|
[] |
no_license
|
import cv2
import os
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are only saved, never shown
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from undistort import undistort
from transformPerspective import transformPerspective, inverseTransformPerspective
from imageFilters import threshold, sobelX, sobelY, sobelDirection, sobelMagnitude, toHLS
from findLines import findLineAndPlot

# Development driver for the lane-finding pipeline: runs every stage
# (undistort, gradient/colour thresholds, perspective transform, lane fit)
# on each test image and saves all intermediate outputs for inspection.
pathToTestImages = "../test_images"
outputFolder = "../output_images"
listTestImages = os.listdir(pathToTestImages)

# Sanity-check the camera calibration on a chessboard image first.
image = cv2.imread("../camera_cal/calibration1.jpg")
undistortedImage = undistort(image)
cv2.imwrite(outputFolder + "/" + "undist-chessboard.jpg", undistortedImage)

for imageName in listTestImages:
    #image = cv2.imread(pathToTestImages + "/" + imageName)
    # NOTE(review): mpimg.imread returns RGB (and floats for PNGs) while
    # cv2 works in BGR; the cvtColor below converts back to BGR so the
    # cv2.imwrite calls stay correct — presumably the inputs are JPEGs,
    # confirm.
    image = mpimg.imread(pathToTestImages + "/" + imageName)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    pathAndNameForOutput = outputFolder + "/"
    nameWithoutFormat = imageName.split(".")[0]
    cv2.imwrite(pathAndNameForOutput + "original-" + imageName, image)
    # Stage 1: remove lens distortion.
    undistortedImage = undistort(image)
    cv2.imwrite(pathAndNameForOutput + "undist-" + imageName, undistortedImage)
    # Stage 2: bird's-eye transform of the raw image (for visual inspection;
    # the thresholded mask is transformed separately further below).
    transformed = transformPerspective(undistortedImage)
    cv2.imwrite(pathAndNameForOutput + "transformed-" + imageName, transformed)
    # Stage 3: gradient filters — x, y, direction and magnitude — each saved
    # raw and after binary thresholding.
    scaledSobelX = sobelX(undistortedImage, k = 9)
    cv2.imwrite(pathAndNameForOutput + "sobelX-" + imageName, scaledSobelX)
    thresholdSobelX = threshold(scaledSobelX, min = 8, max=100)
    #cv2.imwrite(pathAndNameForOutput + "sobelX - threshold - " + imageName, thresholdSobelX)
    plt.imsave(pathAndNameForOutput + "sobelX-threshold-" + nameWithoutFormat + ".png", thresholdSobelX,
               cmap = 'gray', format='png')
    scaledSobelY = sobelY(undistortedImage, k = 9)
    cv2.imwrite(pathAndNameForOutput + "sobelY-" + imageName, scaledSobelY)
    thresholdSobelY = threshold(scaledSobelY, min = 8, max=100)
    plt.imsave(pathAndNameForOutput + "sobelY-threshold-" + nameWithoutFormat + ".png", thresholdSobelY,
               cmap = 'gray', format='png')
    sobelDir = sobelDirection(undistortedImage, k = 9)
    plt.imsave(pathAndNameForOutput + "sobelDirection-" + nameWithoutFormat + ".png", sobelDir,
               cmap='gray', format='png')
    thresholdSobelDirection = threshold(sobelDir, min=0.7, max=1.3)
    plt.imsave(pathAndNameForOutput + "sobelDirection-threshold-" + nameWithoutFormat + ".png", thresholdSobelDirection,
               cmap='gray', format='png')
    sobelMag = sobelMagnitude(undistortedImage, k = 5)
    plt.imsave(pathAndNameForOutput + "sobelMagnitude-" + nameWithoutFormat + ".png", sobelMag,
               cmap='gray', format='png')
    thresholdSobelMag = threshold(sobelMag, min=12, max=15)
    plt.imsave(pathAndNameForOutput + "sobelMagnitude-threshold-" + nameWithoutFormat + ".png", thresholdSobelMag,
               cmap='gray', format='png')
    # Stage 4: HLS colour-space channels and their thresholds.
    H, L, S = toHLS(undistortedImage)
    cv2.imwrite(pathAndNameForOutput + "hue-" + imageName, H)
    cv2.imwrite(pathAndNameForOutput + "lightness-" + imageName, L)
    cv2.imwrite(pathAndNameForOutput + "saturation-" + imageName, S)
    H_threshold = threshold(H, min=10, max=30)
    plt.imsave(pathAndNameForOutput + "hue-threshold-" + nameWithoutFormat + ".png",
               H_threshold,
               cmap='gray', format='png')
    H_threshold_yellow = threshold(H, min=17, max=30)
    plt.imsave(pathAndNameForOutput + "hue_yellow-threshold-" + nameWithoutFormat + ".png",
               H_threshold_yellow,
               cmap='gray', format='png')
    S_threshold = threshold(S, min=100, max=255)
    plt.imsave(pathAndNameForOutput + "saturation-threshold-" + nameWithoutFormat + ".png",
               S_threshold,
               cmap='gray', format='png')
    L_threshold = threshold(L, min=100, max=255)
    plt.imsave(pathAndNameForOutput + "lightness-threshold-" + nameWithoutFormat + ".png",
               L_threshold,
               cmap='gray', format='png')
    # Stage 5: majority vote — keep a pixel when at least 6 of the 7 binary
    # masks agree (L_threshold is saved above but not part of the vote).
    combined = np.zeros_like(thresholdSobelX)
    combined[(S_threshold +
              H_threshold +
              H_threshold_yellow +
              thresholdSobelMag +
              thresholdSobelDirection +
              thresholdSobelY +
              thresholdSobelX)>=6] = 1
    plt.imsave(pathAndNameForOutput + "combined-" + nameWithoutFormat + ".png",
               combined,
               cmap='gray', format='png')
    # Stage 6: bird's-eye view of the combined mask, then lane fitting.
    combinedAndTransformed = transformPerspective(combined)
    plt.imsave(pathAndNameForOutput + "combinedAndTransformed-" + nameWithoutFormat + ".png",
               combinedAndTransformed,
               cmap='gray', format='png')
    print(imageName)
    # Reset findLines' module-level fit cache so each test image is fitted
    # from scratch rather than warm-started from the previous image.
    import findLines
    findLines.old_left_fit = None
    findLines.old_right_fit = None
    lines, radius, offMiddleBy = findLineAndPlot(pathAndNameForOutput + "foundLine-" + nameWithoutFormat + ".png", combinedAndTransformed)
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = inverseTransformPerspective(lines)
    # Combine the result with the original image
    result = cv2.addWeighted(undistortedImage, 1, newwarp, 0.3, 0)
    cv2.imwrite(pathAndNameForOutput + "resultWithLines-" + imageName, result)
    # Annotate radius and lateral offset onto the final frame.
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(result, 'Radius: ' + str(radius) + "m", (10, 500), font, 1, (255, 255, 255), 2)
    if offMiddleBy < 0:
        cv2.putText(result, 'Left of middle by: ' + str(offMiddleBy) + "m", (10, 100), font, 1, (255, 255, 255), 2)
    else:
        cv2.putText(result, 'Right of middle by: ' + str(offMiddleBy) + "m", (10, 300), font, 1, (255, 255, 255), 2)
    # Save an RGB copy so the colours display correctly outside OpenCV.
    convertedRGB = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
    plt.imsave(pathAndNameForOutput + "convertedRGB-" + nameWithoutFormat + ".png",
               convertedRGB, format='png')
| true
|
aa94564cc3e854f4d61fd37cb7d80d71ba5f3b00
|
Python
|
TangYiChing/A-method-to-discover-combinatorial-variants-within-transcription-factors-associated-with-gene-expres
|
/script/parse_randomSelection_result.py
|
UTF-8
| 6,956
| 2.703125
| 3
|
[] |
no_license
|
"""
Run Wilconsin Ranked Sum for each outliers that passed background models
Report outliers passing Wilconsin paired test (p<0.01)
"""
import os
import sys
import glob
import argparse
import numpy as np
import pandas as pd
import scipy.stats as scistats
def read_as_df(input_path):
    """
    Read a tab-delimited flat file (first row is the header) into a DataFrame.

    :param input_path: file name str; '.gz' files are decompressed on the fly
    :return df: pandas dataframe
    """
    if input_path.endswith('.gz'):
        # on_bad_lines='skip' replaces error_bad_lines=False, which was
        # deprecated in pandas 1.3 and removed in 2.0; malformed rows are
        # silently dropped, matching the original intent.
        df = pd.read_csv(input_path, compression='gzip', header=0, sep='\t',
                         quotechar='"', on_bad_lines='skip')
    else:
        df = pd.read_csv(input_path, sep="\t", header=0)
    return df
def has_headers(df, header_list):
    """
    Verify that every name in header_list appears among df's columns.

    Prints a diagnostic and terminates the program (exit status 1) when any
    header is missing; otherwise returns None.

    :param df: pd.dataframe
    :param header_list: list of str
    """
    present = set(df.columns.tolist())
    missing_header = [name for name in header_list if name not in present]
    if missing_header:
        print("header should have:{:}, missing={:}".format(header_list, missing_header))
        sys.exit(1)
    return
def collect_workingFiles(resultFolder, chrList):
    """
    Collect per-chromosome 'working*.txt' result files from a folder.

    :param resultFolder: directory containing the working*.txt files
    :param chrList: chromosome numbers (ints 1-22) whose files to keep
    :return dict: {'chr1': [paths], ..., 'chr22': [paths]}; chromosomes not
        in chrList keep an empty list
    """
    # os.path.join works with or without a trailing slash on resultFolder
    # (the original string concatenation silently required one).
    files = glob.glob(os.path.join(resultFolder, 'working*.txt'))
    # `n` instead of the original loop name `chr`, which shadowed a builtin.
    resultDict = {'chr' + str(n): [] for n in range(1, 23)}
    for f in files:
        # file names end in '...chr<N>.txt'; recover N from the basename
        nchr = os.path.basename(f).split('.txt')[0].split('chr')[-1]
        # append to dict only for requested chromosomes
        if int(nchr) in chrList:
            chr_key_str = 'chr' + str(nchr)
            resultDict[chr_key_str].append(f)
    return resultDict
def chrDict2runDict(baselineDict, tfDict, chrStr, run=100):
    """
    Regroup one chromosome's baseline and TF-model files by run label.

    File names must look like '..._<tag>.run<N>_<suffix>': the run label is
    the part after the last '.' of the second-to-last '_'-separated token.

    :param baselineDict: {'chrN': [paths]} from collect_workingFiles
    :param tfDict: {'chrN': [paths]} from collect_workingFiles
    :param chrStr: chromosome key such as 'chr1'
    :param run: number of runs; keys 'run1' .. 'run<run>' are pre-created
    :return dict: {'runK': [baseline paths..., tf paths...]}
    """
    # Distinct loop variable `i`: the original reused the name `run` inside
    # the comprehension, shadowing the parameter it was iterating over.
    runDict = {'run' + str(i + 1): [] for i in range(int(run))}
    # Baseline files first, then TF-model files, preserving the original
    # per-key ordering.
    for f in baselineDict[chrStr] + tfDict[chrStr]:
        run_key_str = os.path.basename(f).split('_')[-2].split('.')[-1]
        runDict[run_key_str].append(f)
    return runDict
if __name__ == "__main__":
    # Command-line driver: keep outliers that passed the background model,
    # then retain only those whose baseline-vs-TF-model R2 values differ
    # significantly under a Wilcoxon signed-rank test.
    parser = argparse.ArgumentParser(description = "Parse random fold selection result.")
    parser.add_argument("--outlierFile",
                        required = True,
                        help = "result file from parse_backgroundmodel.py Must have headers = [adj_p-value]")
    parser.add_argument("--baseline_resultFolder",
                        required = True,
                        help = "Location to results from random fold selection model. (e.g., ./baseline/randomSelection/)")
    parser.add_argument("--tfmodel_resultFolder",
                        required = True,
                        help = "Location to results from random fold selection model.")
    parser.add_argument("--model_name",
                        choices = ['tf-binding', 'tf-both', 'tf-regulation'],
                        help = "Choice of TF-model: [ tf-binding | tf-both | tf-regulation ]")
    parser.add_argument("--threshold",
                        required = False,
                        type = float,
                        default = 0.01,
                        help = "threshold to filter outliers. default: 0.01")
    parser.add_argument("--run",
                        type = int,
                        help = "the number of files from N runs.")
    parser.add_argument("--out_prefix",
                        nargs = "?",
                        default = "result",
                        help = "Prefix of output file.")
    args = parser.parse_args()
    # initialize
    outlierFile = args.outlierFile
    baselineFolder = args.baseline_resultFolder
    tfFolder = args.tfmodel_resultFolder
    model = args.model_name
    threshold = args.threshold
    runs = args.run  # NOTE(review): parsed but never used; see run=100 below
    prefix = args.out_prefix
    # get significant outliers (those passed background model)
    outliers = read_as_df(outlierFile)
    has_headers(outliers, ['chr', 'gene', 'genename', 'adj_p-value'])
    sig_outliers = outliers.loc[outliers['adj_p-value'] < threshold]
    print( 'model={:}, outliers={:}, threshold={:}, significant outliers={:}'.format(model, outliers.shape[0], threshold, sig_outliers.shape[0]) )
    # retrieve result from baseline and tfmodel
    chrList = list(sig_outliers['chr'].unique())
    baseline_fileDict = collect_workingFiles(baselineFolder, chrList)
    tfmodel_fileDict = collect_workingFiles(tfFolder, chrList)
    # retrieve paired-result from all runs, for each outliers
    qualifiedGeneList = []
    wilcoxinPvalue = []
    for chrom in chrList:
        # get chromsome and corresponding run files
        chromDf = sig_outliers.loc[sig_outliers['chr']==chrom]
        # NOTE(review): run=100 is hard-coded here although --run was parsed
        # into `runs` above — presumably the two should agree; confirm.
        runDict = chrDict2runDict(baseline_fileDict, tfmodel_fileDict, 'chr'+str(chrom), run=100)
        # for each gene/outlier, combine run files to calculate Wilconsin ranked sum
        if chromDf.shape[0]>0:
            print( 'chr={:}, #outliers={:}'.format(chrom, chromDf.shape[0]) )
            genes = list( chromDf['gene'].unique() )
            # NOTE(review): these accumulators are never reset inside the
            # gene loop, so later genes are tested against a mixture of R2
            # values from earlier genes on the same chromosome — confirm
            # whether that is intended.
            baselineR2 = []
            tfR2 = []
            for i in range(len(genes)):
                for key, value in runDict.items(): # loop through all runs
                    bdf = read_as_df(value[0]) # files from baseline
                    tdf = read_as_df(value[1]) # files from tfmodel
                    if bdf.shape[0] > 0 and tdf.shape[0] > 0:
                        if genes[i] in bdf['gene'].values.tolist() and genes[i] in tdf['gene'].values.tolist():
                            #print( ' {:}/{:} outlier(s), gene={:}'.format(i+1, len(genes), genes[i]) )
                            bdf.set_index('gene', inplace=True)
                            tdf.set_index('gene', inplace=True)
                            # get R2 from each run
                            bR2 = bdf.loc[genes[i], 'R2']
                            tR2 = tdf.loc[genes[i], 'R2']
                            baselineR2.append(bR2)
                            tfR2.append(tR2)
                # filter by threshold to obtain qualified outlier(s)
                if len(baselineR2) > 0 or len(tfR2) >0:
                    stats, pvalue = scistats.wilcoxon(baselineR2, tfR2)
                    if pvalue < threshold:
                        qualifiedGeneList.append(genes[i])
                        wilcoxinPvalue.append(pvalue)
                        print( ' gene={:}, #bR2={:}, #tR2={:}, wilcoxin test: p-value={:}'.format(genes[i], len(baselineR2), len(tfR2), pvalue) )
    # combine back to outliers table
    df = sig_outliers.loc[sig_outliers['gene'].isin(qualifiedGeneList)]
    # NOTE(review): assumes one selected row per entry in wilcoxinPvalue;
    # a duplicated gene name would break this alignment — confirm.
    df.loc[:, 'wilcoxon p-value'] = wilcoxinPvalue
    print( 'threshold={:}, #pass={:}'.format(threshold, df.shape[0]))
    # save to file
    df.to_csv(prefix+'.'+model+'.outliers.pass.randomFoldSelection.txt', header=True, index=False, sep="\t")
| true
|
d67fae4939f9f191b16c1abab7f587eccf014c75
|
Python
|
ericzhai918/Python
|
/My_Python_Test/Higher_Order_Function/map_02.py
|
UTF-8
| 216
| 3.265625
| 3
|
[] |
no_license
|
def f(x):
    """Return the square of x."""
    return x * x


# Build the list of squares with a comprehension (same result as the
# original append loop).
l = [f(n) for n in [1, 2, 3, 4, 5, 6, 7, 8, 9]]
print(l)

# map() applies f lazily; list() materialises the results.
r = map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])
print(list(r))

# Any callable works as the mapper — str yields the digits as strings.
s = map(str, [1, 2, 3, 4, 5, 6, 7, 8, 9])
print(list(s))
| true
|
c92f3a1ad062f2fd87a14bb5e5d42d2d2be55d63
|
Python
|
eabdiel/python_playground
|
/Russian Peasant Algorithm - Multi Module Flow/sim_database.Py
|
UTF-8
| 938
| 3.359375
| 3
|
[] |
no_license
|
"""
Simulation of Web App Architecture | Case study based on assignment from
https://www.udemy.com/share/103nlaAEMZdllWTHg=/
Run flow_controller.py to start
"""
import time
def russian(a, b):
    """
    Multiply a by b with the Russian peasant algorithm.

    Repeatedly halves x (initially a) and doubles y (initially b),
    accumulating y whenever x is odd; the accumulated sum equals a * b for
    non-negative a. Also prints a banner simulating a database hit, as part
    of the mock web-app architecture this module belongs to.

    :param a: non-negative multiplier
    :param b: multiplicand
    :return: a * b
    """
    # Separate statements replace the original semicolon compound
    # statement (`x = a; y = b`).
    x = a
    y = b
    z = 0  # accumulator
    while x > 0:
        if x % 2 == 1:
            z = z + y  # x is odd: fold the current y into the result
        y = y << 1  # double y (shift binary left)
        x = x >> 1  # halve x (shift binary right)
    print("/////////////////////////////")
    print("**********Hit DB*************")
    print("/////////////////////////////")
    return z


def test_russian():
    """Smoke-test russian() against a known product and time the call."""
    start_time = time.time()
    print(russian(357, 16))
    print("Russian Algorithm took %f seconds" % (time.time() - start_time))
    assert russian(357, 16) == 5712


if __name__ == "__main__":
    test_russian()
# End of code - github.com/eabdiel
| true
|
c3f103854b4b9091fb1cc17747b72dd0c1c0559d
|
Python
|
JeetShetty/Blackjack
|
/tests/test_round.py
|
UTF-8
| 9,916
| 2.625
| 3
|
[] |
no_license
|
import unittest
import mock
from blackjack import round
class TestRound(unittest.TestCase):
    """Unit tests for blackjack.round.Round, driven entirely by mocks.

    The time source, shoe, bankroll, player input, hands and hand generator
    are all mock objects, so only Round's own control flow is exercised.
    """

    def setUp(self):
        # Collaborators shared by every test; they only satisfy Round's
        # constructor/API and are not asserted on directly.
        self.mock_time = mock.Mock()
        self.mock_shoe = mock.Mock()
        self.mock_bankroll = mock.Mock()

    def test_deal_hands_dealer_natural(self):
        """Dealer's hand (dealt first) totals 21 -> dealer_natural set."""
        mock_player_input = mock.Mock()
        mock_hand_generator = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_two = mock.Mock()
        # The generator yields the dealer's hand first, then the player's.
        mock_hand_generator.side_effect = [mock_hand_one, mock_hand_two]
        mock_hand_one.hand_value.return_value = 21
        mock_hand_two.hand_value.return_value = 15
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round.deal_hands(self.mock_shoe, mock_hand_generator)
        self.assertEqual(bj_round._dealer_hand, mock_hand_one)
        self.assertEqual(bj_round._player_hands, [mock_hand_two])
        self.assertTrue(bj_round.dealer_natural)
        self.assertFalse(bj_round.player_natural)

    def test_deal_hands_player_natural(self):
        """Player's hand (dealt second) totals 21 -> player_natural set."""
        mock_player_input = mock.Mock()
        mock_hand_generator = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_two = mock.Mock()
        mock_hand_generator.side_effect = [mock_hand_one, mock_hand_two]
        mock_hand_one.hand_value.return_value = 15
        mock_hand_two.hand_value.return_value = 21
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round.deal_hands(self.mock_shoe, mock_hand_generator)
        self.assertEqual(bj_round._dealer_hand, mock_hand_one)
        self.assertEqual(bj_round._player_hands, [mock_hand_two])
        self.assertFalse(bj_round.dealer_natural)
        self.assertTrue(bj_round.player_natural)

    def test_double_down(self):
        """Doubling down doubles the placed bet (250 -> 500)."""
        mock_player_input = mock.Mock()
        mock_player_input.bet.return_value = 250
        mock_hand = mock.Mock()
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._player_hands = [mock_hand]
        bj_round.place_bet()
        bj_round._double_down()
        self.assertEqual(bj_round._bet, 500)

    def test_play_through_split_hand(self):
        """'sp' splits the hand; each half is then played independently."""
        mock_player_input = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_two = mock.Mock()
        mock_hand_three = mock.Mock()
        mock_hand_one.split_aces = mock_hand_two.split_aces = (
            mock_hand_three.split_aces) = False
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._player_hands = [mock_hand_one]
        # Actions: split, then hit+stand on hand two, stand on hand three.
        mock_player_input.action.side_effect = ['sp', 'h', 'st', 'st']
        mock_hand_one.split.return_value = mock_hand_two, mock_hand_three
        mock_hand_two.hand_value.return_value = 15
        mock_hand_three.hand_value.return_value = 15
        mock_hand_one.display_hand.return_value = ''
        mock_hand_two.display_hand.return_value = ''
        mock_hand_three.display_hand.return_value = ''
        bj_round._bet = 1
        bj_round.play_through_player_hands(self.mock_shoe)
        self.assertEqual(bj_round._player_hands,
                         [mock_hand_two, mock_hand_three])
        self.assertEqual(mock_hand_two.hit.call_count, 1)
        self.assertFalse(mock_hand_three.hit.called)

    def test_play_through_stand(self):
        """'st' leaves the hand untouched: no hit, no split."""
        mock_player_input = mock.Mock()
        mock_hand = mock.Mock()
        mock_hand.split_aces = False
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._player_hands = [mock_hand]
        mock_hand.hand_value.return_value = 15
        mock_hand.display_hand.return_value = ''
        mock_player_input.action.return_value = 'st'
        bj_round.play_through_player_hands(self.mock_shoe)
        self.assertFalse(mock_hand.hit.called)
        self.assertFalse(mock_hand.split.called)

    def test_play_through_hit_once_and_bust(self):
        """'h' with a value over 21 hits exactly once, then busts out."""
        mock_player_input = mock.Mock()
        mock_hand = mock.Mock()
        mock_hand.split_aces = False
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._player_hands = [mock_hand]
        mock_hand.hand_value.return_value = 22
        mock_hand.display_hand.return_value = ''
        mock_player_input.action.return_value = 'h'
        bj_round.play_through_player_hands(self.mock_shoe)
        self.assertFalse(mock_hand.split.called)
        self.assertEqual(mock_hand.hit.call_count, 1)

    def test_play_through_split_aces(self):
        """Split aces draw exactly one card each, with no player choices."""
        mock_hand_one = mock.Mock()
        mock_hand_two = mock.Mock()
        mock_hand_one.split_aces = mock_hand_two.split_aces = True
        bj_round = round.Round(self.mock_time, None, self.mock_bankroll)
        bj_round._player_hands = [mock_hand_one, mock_hand_two]
        bust = bj_round.play_through_player_hands(self.mock_shoe)
        self.assertFalse(bust)
        mock_hand_one.hit.assert_called_once()
        mock_hand_two.hit.assert_called_once()

    def increase_hand_value(self, x):
        # Not a test: used as a Mock.hit side effect so every "hit" bumps
        # the fake hand value stored on this test case by 5.
        self.hand_value += 5

    def test_play_through_dealer_hand_dealer_hits(self):
        """Dealer hits on 15 and stands once the value reaches 17+."""
        mock_player_input = mock.Mock()
        mock_hand = mock.Mock()
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand
        # hand_value reads the mutable test-case attribute so hits show up.
        self.hand_value = 15
        mock_hand.hand_value.side_effect = lambda: self.hand_value
        mock_hand.display_hand.return_value = ''
        mock_hand.hit.side_effect = self.increase_hand_value
        bj_round.play_through_dealer_hand(self.mock_shoe)
        self.assertEqual(mock_hand.hit.call_count, 1)

    def test_play_through_dealer_stands(self):
        """Dealer stands on 19: no hit is taken."""
        mock_player_input = mock.Mock()
        mock_hand = mock.Mock()
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand
        mock_hand.hand_value.return_value = 19
        mock_hand.display_hand.return_value = ''
        bj_round.play_through_dealer_hand(self.mock_shoe)
        self.assertFalse(mock_hand.hit.called)

    def test_amount_won_player_busts(self):
        """Player bust loses the bet even when the dealer also busts."""
        mock_player_input = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_one.hand_value.return_value = 25
        mock_hand_two = mock.Mock()
        mock_hand_two.hand_value.return_value = 22
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand_one
        bj_round._player_hands = [mock_hand_two]
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), -250)

    def test_amount_won_player_natural_no_dealer_natural(self):
        """A lone player natural pays 3:2 (250 -> 375)."""
        mock_player_input = mock.Mock()
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round.player_natural = True
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), 375)

    def test_amount_won_player_and_dealer_naturals(self):
        """Both naturals push: net winnings are zero."""
        mock_player_input = mock.Mock()
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round.player_natural = True
        bj_round.dealer_natural = True
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), 0)

    def test_amount_won_dealer_busts(self):
        """Dealer bust with a live player hand wins the bet."""
        mock_player_input = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_one.hand_value.return_value = 25
        mock_hand_two = mock.Mock()
        mock_hand_two.hand_value.return_value = 15
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand_one
        bj_round._player_hands = [mock_hand_two]
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), 250)

    def test_amount_won_player_wins(self):
        """Higher player total (20 vs 19) wins the bet."""
        mock_player_input = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_one.hand_value.return_value = 19
        mock_hand_two = mock.Mock()
        mock_hand_two.hand_value.return_value = 20
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand_one
        bj_round._player_hands = [mock_hand_two]
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), 250)

    def test_amount_won_dealer_wins(self):
        """Higher dealer total (20 vs 19) loses the bet."""
        mock_player_input = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_one.hand_value.return_value = 20
        mock_hand_two = mock.Mock()
        mock_hand_two.hand_value.return_value = 19
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand_one
        bj_round._player_hands = [mock_hand_two]
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), -250)

    def test_amount_won_two_player_hands(self):
        """Two winning split hands each pay the full bet (2 x 250)."""
        mock_player_input = mock.Mock()
        mock_hand_one = mock.Mock()
        mock_hand_one.hand_value.return_value = 19
        mock_hand_two = mock.Mock()
        mock_hand_three = mock.Mock()
        mock_hand_two.hand_value.return_value = 20
        mock_hand_three.hand_value.return_value = 20
        bj_round = round.Round(self.mock_time, mock_player_input,
                               self.mock_bankroll)
        bj_round._dealer_hand = mock_hand_one
        bj_round._player_hands = [mock_hand_two, mock_hand_three]
        bj_round._bet = 250
        self.assertEqual(bj_round.showdown(), 500)
| true
|
69159eed6aa0e9a1b97fbfda47a9d8d5d0658cd7
|
Python
|
0x5eba/Dueling-DQN-SuperMarioBros
|
/environment/frame_stack_env.py
|
UTF-8
| 2,145
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
"""An environment wrapper to stack observations into a tensor."""
from collections import deque
import numpy as np
import gym
class FrameStackEnv(gym.Wrapper):
    """An environment wrapper to stack observations into a tensor."""

    def __init__(self, env, k):
        """Keep the k most recent frames.

        Observations come back as a lazy array, which is much more memory
        efficient than eagerly concatenating on every step.
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)  # maxlen auto-drops the oldest
        obs_shape = env.observation_space.shape
        # Channels are multiplied by k since frames stack on the last axis.
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(obs_shape[0], obs_shape[1], obs_shape[2] * k),
            dtype=np.uint8
        )

    def reset(self):
        """Reset the env and prime the buffer with k copies of the frame."""
        first_frame = self.env.reset()
        for _ in range(self.k):
            self.frames.append(first_frame)
        return self._get_ob()

    def step(self, action):
        """Step the wrapped env and return the stacked observation."""
        frame, reward, done, info = self.env.step(action)
        self.frames.append(frame)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # The deque is always full once reset() has run.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class LazyFrames(object):
    """A memory efficient buffer for frame tensors."""

    def __init__(self, frames):
        """Hold references to the per-step frames without copying.

        Common frames between consecutive observations are therefore stored
        only once; concatenation is deferred until the object is first used
        as a numpy array.
        """
        self._frames = frames
        self._out = None

    def _force(self):
        # Concatenate lazily on first access, then release the frame list.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=2)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        arr = self._force()
        return arr if dtype is None else arr.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]
| true
|
07566fa9877faf0ed693fee24f643a38d308d6b2
|
Python
|
chance-murphy/national-parks-website-scraper
|
/SI507_project4.py
|
UTF-8
| 4,388
| 3.09375
| 3
|
[] |
no_license
|
import requests, json
from bs4 import BeautifulSoup
from advanced_expiry_caching import Cache
import pandas as pd
import csv
# "crawling" -- generally -- going to all links from a link ... like a spiderweb
# its specific def'n varies, but this is approximately the case in all situations
# and is like what you may want to do in many cases when scraping
######
# A "simple" example (without much fancy functionality or data processing)
# Constants
START_URL = "https://www.nps.gov/"
FILENAME = "national_sites.json"
# So I can use 1 (one) instance of the Cache tool -- just one for my whole program, even though I'll get data from multiple places
PROGRAM_CACHE = Cache(FILENAME)
# assuming constants exist as such
# use a tool to build functionality here
def access_page_data(url):
    """Return the page text for *url*, serving from PROGRAM_CACHE when possible.

    On a cache miss the page is fetched with requests and stored; the Cache
    tool's default expiry (about 7 days) applies to the stored entry.
    """
    data = PROGRAM_CACHE.get(url)
    if not data:
        data = requests.get(url).text
        PROGRAM_CACHE.set(url, data)
    return data
#######
# Fetch and parse the NPS homepage (served from the cache after the first run).
main_page = access_page_data(START_URL)
main_soup = BeautifulSoup(main_page, features="html.parser")

# The per-state links live in the search bar's dropdown menu.
dropdown_list = main_soup.find('ul', {'class': 'dropdown-menu SearchBar-keywordSearch'})
states_links = dropdown_list.find_all('a')

states_pages = []        # one BeautifulSoup document per state page
site_states = []
site_names = []
site_types = []
site_descriptions = []
site_locations = []

# Download (or load from cache) every state page.
for link in states_links:
    page_data = access_page_data(START_URL + link['href'])
    states_pages.append(BeautifulSoup(page_data, features="html.parser"))

# Scrape the per-site fields from each state page; empty text becomes 'N/A'.
for state in states_pages:
    site_state = state.find("h1", class_="page-title")
    parks_list = state.find_all("div", class_="col-md-9 col-sm-9 col-xs-12 table-cell list_left")
    for _ in parks_list:
        site_states.append(site_state.text)
    for park in parks_list:
        # `x or 'N/A'` replaces the empty string exactly like the original
        # `if value == '': value = 'N/A'` checks.
        site_names.append(park.h3.text or 'N/A')
    for park in parks_list:
        site_types.append(park.h2.text or 'N/A')
    for park in parks_list:
        site_descriptions.append(park.p.text or 'N/A')
    for park in parks_list:
        site_locations.append(park.h4.text or 'N/A')

# Assemble the scraped records and persist them as CSV.
site_info = pd.DataFrame({'State': site_states,
                          'Location': site_locations,
                          'Name': site_names,
                          'Type': site_types,
                          'Description': site_descriptions,
                          })
csv_file_name = "national_sites.csv"
site_info.to_csv(csv_file_name)
##################
# START IGNORING #
##################
# #Location of each site
# site_location = []
# for i in parks_list:
# location = i.h4.text
# site_location.append(location)
# # print(site_location)
| true
|
74e9391f8214ef30216a35cd7eeebc8cd191bed9
|
Python
|
caideyang/python2018
|
/Python全栈学习/第二模块 函数、装饰器、迭代器、内置方法/practise/map-test.py
|
UTF-8
| 119
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# @Author: CaiDeyang
# @Time: 2018/9/6 15:57
# Square each element of L with map() and print the resulting list.
L = [1, 2, 3, 4, 5]
squares = map(lambda n: n * n, L)
print(list(squares))
| true
|
654ed1d6cd8c2baffb6837df0179c95a3c0a24bd
|
Python
|
scress78/Module7Try2
|
/sort_and_search_list.py
|
UTF-8
| 652
| 3.984375
| 4
|
[] |
no_license
|
"""
Program: sort_and_search_list.py
Author: Spencer Cress
Date: 06/21/2020
This program contains the functions sort_list and search_list for
Search and Sort List Assignment
"""
def sort_list(x):
    """
    Sort the given list in ascending order.

    :parameter x: a list to be sorted
    :returns: the same list object, sorted in place
    """
    list.sort(x)
    return x
def search_list(x, y):
    """
    Locate an item in a list.

    :parameter x: a list to be searched
    :parameter y: the item to look for
    :returns: the index of *y* in *x*, or -1 when it is absent
    """
    if y in x:
        return x.index(y)
    return -1
| true
|
aff22dcaf92ac61c374a7ede56a19c8afc39cd13
|
Python
|
murakami10/atc_python
|
/solved/05/abc106_b.py
|
UTF-8
| 575
| 2.9375
| 3
|
[] |
no_license
|
import collections
from typing import Dict
# Count the odd numbers i in [1, N] with exactly eight divisors.
# The divisor count is the product of (exponent + 1) over i's prime
# factorisation, collected here by trial division up to sqrt(i).
N = int(input())
ans: int = 0
for i in range(1, N + 1):
    if i % 2 == 0:
        continue
    tmp_i: int = i
    table: Dict[int, int] = collections.defaultdict(lambda: 0)
    for j in range(2, int(pow(i, 0.5)) + 1):
        while tmp_i % j == 0:
            tmp_i //= j
            table[j] += 1
    if tmp_i != 1:
        # Whatever remains is a single prime factor greater than sqrt(i).
        table[tmp_i] += 1
    count: int = 1
    for value in table.values():
        count *= value + 1
    if count == 8:
        ans += 1
print(ans)
# https://atcoder.jp/contests/abc106/tasks/abc106_b
| true
|
49160f697f87c3fe4c9ba55b46c0e24e35e5b99c
|
Python
|
mudkip201/distributions
|
/dist/src/dists/chi2.py
|
UTF-8
| 1,222
| 2.640625
| 3
|
[] |
no_license
|
'''
Created on Jul 15, 2017
@author: matthewcowen-green
'''
import dists.Distribution.Distribution as Distribution
import math
import dists.normal.normal as normal
class chi2(Distribution):  # Chi-squared distribution with k degrees of freedom
    @staticmethod
    def random(k):
        """Draw one chi-squared variate as the sum of k squared N(0,1) draws."""
        total = 0
        for _ in range(k):
            total += math.pow(normal.random(0, 1), 2)
        return total

    @staticmethod
    def pdf(k, x):
        """Probability density at x: x^(k/2-1) e^(-x/2) / (2^(k/2) Gamma(k/2))."""
        return 1 / (math.pow(2, k / 2) * math.gamma(k / 2)) * math.pow(x, k / 2 - 1) * math.exp(-x / 2)

    @staticmethod
    def kurtosis(k):
        """Excess kurtosis: 12/k."""
        return 12 / k

    @staticmethod
    def mean(k):
        return k

    @staticmethod
    def median(k):
        """Wilson-Hilferty approximation of the median: k(1 - 2/(9k))^3."""
        return k * math.pow(1 - 2 / (9 * k), 3)

    @staticmethod
    def mode(k):
        return max(k - 2, 0)

    @staticmethod
    def variance(k):
        return 2 * k

    @staticmethod
    def stddev(k):
        return math.sqrt(2 * k)

    @staticmethod
    def skewness(k):
        return math.sqrt(8 / k)

    @staticmethod
    def mle(x):
        """Maximum-likelihood fit of k (disabled).

        NOTE(review): the active part only defines the objective; the
        optimisation below is commented out and references an optimiser
        `op` (presumably scipy.optimize) that is never imported — confirm
        before re-enabling.
        """
        def mlefunc(args_):
            tomin = 1
            for i in x:
                tomin *= chi2.pdf(args_[0], i)
            return -tomin
        '''
        ret=op.differential_evolution(mlefunc,[(0.01,50)]).x.tolist()
        return {'k':ret[0]}
        '''
| true
|
e1955dd78da70ac6ea18500c927d0f117562e6f7
|
Python
|
leoprover/ltb
|
/leo3ltb/data/problem.py
|
UTF-8
| 3,929
| 2.765625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
from ..tptp.szsStatus import SZS_STATUS
class Problem:
    '''
    LTB Problem.

    * filePattern: pattern of the problem files, e.g. 'Problems/HL400001*.p',
      where * is a placeholder for the variant (see ProblemVariant)
    * output: name of the outfile for the problem
    * variants: dict mapping variant identifiers ('^1', '^3', '1', ...) to
      ProblemVariant instances
    * successfulVariant: the first variant which proved the problem, if any
    '''

    def __init__(self, filePattern, output):
        self.filePattern = filePattern
        self.output = output
        self.variants = {}
        self.successfulVariant = None
        self._finished = False

    def isSuccessful(self):
        '''Whether some variant has proven this problem.'''
        return self.successfulVariant is not None

    def isFinished(self):
        return self._finished

    def setFinished(self):
        self._finished = True

    def getOutfile(self):
        '''Output filename of the problem.'''
        return self.output

    def __str__(self):
        solvedBy = ''
        tried = ''
        if self.isSuccessful():
            solvedBy = ', solvedBy: {}'.format(self.successfulVariant)
            tried = ', {} variants tried'.format(len(self.variants))
        return '{}{}{}'.format(self.filePattern, solvedBy, tried)
class ProblemVariant:
    '''
    LTB problem variant.

    * problem: the Problem this variant belongs to
    * variant: the variant identifier (e.g. '^1', '^3')
    * szsStatus: SZS status of the prove attempt
    * schedulerStatus: scheduler state, None until the variant is scheduled
    * stdout / stderr: captured output of the prove attempt
    * process: the process which is/was used to prove the variant
    '''

    def __init__(self, problem, *, variant):
        self.problem = problem
        # back reference: register this variant on its problem
        self.problem.variants[variant] = self
        self.variant = variant
        self.ltbVariant = variant[0:2]
        self.szsStatus = 'NotTriedYet'
        # BUGFIX: previously never initialised, so isScheduled() and
        # __str__() raised AttributeError before a scheduler assigned it.
        self.schedulerStatus = None
        self.stdout = []
        self.stderr = []
        self.process = None

    def getProblemFile(self):
        '''
        The actual problem definition file, e.g. filePattern
        'Problems/HL400001*.p' with variant '^3' -> 'Problems/HL400001^3.p'.
        '''
        return self.problem.filePattern.replace('*', self.ltbVariant)

    def getOutfile(self):
        '''Output filename of the problem variant.'''
        return self.problem.output + self.ltbVariant + '.out'

    def getErrfile(self):
        '''Error-output filename of the problem variant.'''
        return self.problem.output + self.ltbVariant + '.err'

    def isSuccessful(self):
        '''Whether the prove attempt for this variant was successful.'''
        return SZS_STATUS.isSuccess(self.szsStatus)

    def isScheduled(self):
        '''Whether the variant is scheduled.  (Was defined twice verbatim;
        the duplicate definition has been removed.)'''
        if self.schedulerStatus:
            return True
        return False

    def __str__(self):
        return '{name} [{szsStatus}, {schedulerStatus}, {processState}] {stdout} {stderr}'.format(
            name=self.getProblemFile(),
            szsStatus=self.szsStatus,
            schedulerStatus=self.schedulerStatus if self.schedulerStatus else '-',
            processState=self.process.stateStr() if self.process else 'no process',
            stdout=self.stdout[-3:],
            stderr=self.stderr[-3:],
        )
| true
|
f94661e500810391bbc385fd65c9c3da84024cff
|
Python
|
cameronmcphail/RAPID
|
/rapid/robustness/analysis/comparisons.py
|
UTF-8
| 4,736
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
"""Compares robustness values
Contains (1) a function for showing how a different set of scenarios
affects the robustness values and robustness rankings; and (2) a
function for showing how different robustness metrics affects the
robustness values and robustness rankings.
Also contains a helper function for creating basic visualisations of
the effects of scenarios and robustness metrics.
"""
import copy
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
def scenarios_similarity(R):
    """Determine similarity in robustness from multiple scenario sets.

    Two sets of scenarios can lead to different robustness values.  The
    difference is measured in two ways: the average relative difference,
    delta (%), and Kendall's Tau-b correlation, tau (unitless, [-1, 1]).

    Parameters
    ----------
    R : numpy.ndarray, shape=(m, n)
        Robustness values for m decision alternatives and n scenario sets.

    Returns
    -------
    delta : numpy.ndarray, shape=(n, n)
        Average relative difference (%) for each pair of scenario sets;
        symmetric, i.e. [0, 3] equals [3, 0].
    tau : numpy.ndarray, shape=(n, n)
        Kendall's Tau-b correlation for each pair of scenario sets;
        symmetric, i.e. [0, 3] equals [3, 0].
    """
    n = R.shape[1]
    deltas = np.zeros((n, n))
    taus = np.zeros((n, n))
    for a in range(n):
        for b in range(a, n):
            # Relative difference against the pairwise mean, as a percentage.
            rel = np.abs(R[:, a] - R[:, b]) / (np.abs(R[:, a] + R[:, b]) / 2.)
            avg_delta = np.average(rel) * 100.0
            deltas[a, b] = deltas[b, a] = avg_delta
            tau, _ = stats.kendalltau(R[:, a], R[:, b], nan_policy='omit')
            taus[a, b] = taus[b, a] = tau
    return deltas, taus
def R_metric_similarity(R):
    """Determine similarity in robustness from multiple robustness metrics.

    Two robustness metrics can rank alternatives differently; the difference
    is measured with Kendall's Tau-b correlation (unitless, [-1, 1]).

    Parameters
    ----------
    R : numpy.ndarray, shape=(m, n)
        Robustness values for m decision alternatives and n robustness
        metrics.

    Returns
    -------
    tau : numpy.ndarray, shape=(n, n)
        Kendall's Tau-b correlation for each pair of robustness metrics;
        symmetric, i.e. [0, 3] equals [3, 0].
    """
    n = R.shape[1]
    taus = np.zeros((n, n))
    for a in range(n):
        for b in range(a, n):
            tau, _ = stats.kendalltau(R[:, a], R[:, b], nan_policy='omit')
            taus[a, b] = taus[b, a] = tau
    return taus
def delta_plot(delta):
    """Show the pairwise relative-difference matrix as a log-scaled heatmap.

    Parameters
    ----------
    delta : numpy.ndarray, shape=(n, n)
        Average relative difference (%) in robustness for each pair of
        scenario sets (symmetric).
    """
    ax = plt.subplot(111)
    cmap = copy.copy(mpl.cm.get_cmap('rainbow'))
    # Invalid cells (e.g. non-positive values under LogNorm) are drawn in
    # the colormap's lowest colour instead of being left blank.
    cmap.set_bad(cmap(0.0))
    norm = mpl.colors.LogNorm(vmin=1.0, vmax=100.0)
    im = ax.imshow(delta, cmap=cmap, norm=norm)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.2)
    plt.colorbar(im, cax=cax)
    plt.show()
def tau_plot(tau):
    """Show the pairwise Kendall's Tau-b matrix as a diverging heatmap.

    Parameters
    ----------
    tau : numpy.ndarray, shape=(n, n)
        Kendall's Tau-b correlation for each pair of scenario sets
        (symmetric, values in [-1, 1]).
    """
    ax = plt.subplot(111)
    im = ax.imshow(tau, cmap='RdBu', vmin=-1.0, vmax=1.0)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.2)
    plt.colorbar(im, cax=cax)
    plt.show()
| true
|
324ed4c429db05389bc6764b4aef19248d12f492
|
Python
|
rasake/MPCAS
|
/FFR135 Artificial Neural Networks/Assignment 1/pattern_utilities.py
|
UTF-8
| 565
| 2.75
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 19 13:19:57 2016
@author: Rasmus
"""
import numpy as np
def create_random_pattern(pattern_length):
    """Return a (pattern_length, 1) column vector of random spins.

    Each entry is sign(2*(U - 0.5)) for U ~ Uniform[0, 1), i.e. ±1
    (a value of exactly 0 is possible only when U == 0.5), matching the
    original formulation.
    """
    spins = [np.sign(2 * (np.random.rand() - 0.5)) for _ in range(pattern_length)]
    return np.reshape(np.array(spins), [pattern_length, 1])
def store_random_patterns(hopfield_network, nbr_of_patterns):
    """Create and store random patterns in a Hopfield network.

    Generates nbr_of_patterns random ±1 patterns sized to the network's
    cell count, stores each via hopfield_network.store_pattern, and
    returns the list of generated patterns.
    """
    patterns = [create_random_pattern(hopfield_network._NBR_OF_CELLS)
                for _ in range(nbr_of_patterns)]
    for pattern in patterns:
        hopfield_network.store_pattern(pattern)
    return patterns
| true
|
c582d85e80fe750df50762b9b11632bafea06ccd
|
Python
|
TuomoNieminen/BrewStat
|
/data/ratebeer_python_old/format_beer_json.py
|
UTF-8
| 1,055
| 3.484375
| 3
|
[] |
no_license
|
import json
# Reads a json file containting beer information, adds missing value tags to all the beers that are
# missing features so that all beers have all features. Saves the new formatted json to output file
def add_missing_value_tags(input_file: str = "first5beers.json", output_file: str = "formatted_beers.json", missing_value_tag: str = "NA") -> None:
    """Normalise a JSON dict of beers so every beer has every feature.

    Reads {url: {feature: value}} from *input_file*, fills absent features
    with *missing_value_tag*, moves each beer's url into a 'url' feature,
    and writes the resulting list of beer dicts to *output_file*.
    """
    with open(input_file, "r") as fh:
        beer_dict = json.load(fh)

    # Union of every feature seen on any beer.
    all_features = {feature for beer in beer_dict.values() for feature in beer}

    # Fill in missing features.
    for features in beer_dict.values():
        for name in all_features:
            features.setdefault(name, missing_value_tag)

    # Add each beer's url as a feature and flatten the dict to a list.
    output_list = []
    for url, features in beer_dict.items():
        features["url"] = url
        output_list.append(features)

    with open(output_file, "w") as fh:
        json.dump(output_list, fh)
if __name__ == "__main__":
add_missing_value_tags()
| true
|
e8a942c7afa534bf4de19c9e78dee3bf730f4d94
|
Python
|
igormorgado/nlp
|
/writes/asd.py
|
UTF-8
| 758
| 2.515625
| 3
|
[] |
no_license
|
import numpy as np

# Build a one-hot (bag-of-words) matrix for four selected tokens of the
# sentence: tokens 0-1 and 3-4 (token 2, 'hoje', is skipped).
S = "Será que hoje vai chover, eu não sei não"
S = S.lower()
S = S.replace(',', '')
S = S.split()

# Vocabulary: sorted unique tokens mapped to column indices 0..n-1.
V = list(set(S))
V.sort()
V = dict(zip(V, range(0, len(V))))

n = len(V)
M = np.zeros((4, n), dtype=int)
for k, w in enumerate(S[0:2] + S[3:5]):
    row = np.zeros(n)
    row[V[w]] = 1
    M[k] = row
# V
# V
# S
# list(set(S))
# list(set(S)).sorted()
# V = list(set(S))
# V
# V.sort()
# V
# for w in S[0:2] + S[3:5]:
# print(V[w])
# S
# S[0:2]
# S[0:2] + S[3:5]
# V
# V = dict(zip(V,range(1,len(V))))
# V
# V = dict(zip(V,range(1,len(V+1))))
# V = "Será que hoje vai chover, eu não sei não"
# V = V.lower()
# V = V.split()
# S
# V
# V = dict(zip(V,range(1,len(V+1))))
# V
# V = dict(zip(V,range(1,len(V)+1)))
# V
# %history
| true
|
4228b5b992f1c5bef25c7ed809deecca1a2f51d6
|
Python
|
JuliaMaria/Algorytmy-Kombinatoryczne
|
/3/Ex2.py
|
UTF-8
| 440
| 3.296875
| 3
|
[] |
no_license
|
import numpy as np
def rank(subset, n):
    """Print and return the binary-reflected Gray-code rank of *subset*.

    *subset* contains integers from 1..n; its characteristic bit vector is
    interpreted as a Gray code and converted to its rank (0 .. 2**n - 1)
    by accumulating the running parity of the bits.

    Fixes: the Python-2-only ``print r`` statement is replaced with the
    function-call form, and the computed rank is now also returned
    (previously the function only printed it and returned None).
    """
    bits = np.repeat(0, n)
    for x in range(n):
        if x + 1 in subset:
            bits[x] = 1
    b = 0
    r = 0
    for x in range(0, n):
        b = (b + bits[x]) % 2          # running parity = binary digit
        if b == 1:
            r = r + np.power(2, n - (x + 1))
    print(r)
    return r
rank([], 3)
rank([3], 3)
rank([2, 3], 3)
rank([2], 3)
rank([1, 2], 3)
rank([1, 2, 3], 3)
rank([1, 3], 3)
rank([1], 3)
| true
|
2f93d29ad388fce9d51c3149bf4cb86d537b6b06
|
Python
|
Csonic90/python_example_PL
|
/lista 7/zad5.py
|
UTF-8
| 316
| 3.34375
| 3
|
[] |
no_license
|
# Count how many times a user-supplied word occurs in p.txt.
# The file is now opened with a context manager (automatic close).
with open("p.txt", "r") as fo:
    ls = list(fo.read().split())
element = input('podaj szukane słowo')
iloscElem = ls.count(element)
if iloscElem > 0:
    print('słowo "'
          + element
          + '" znajduje sie w szukanym tekscie : '
          + str(iloscElem) + ' razy')
else:
    # BUGFIX: the message was garbled ('występujeala'); corrected spelling.
    print('slowo nie występuje')
| true
|
4f497581351344a20f972e7759843ace5f39e93d
|
Python
|
Kolbk17/cs660aia-voip-flood-detect
|
/sip_generator.py
|
UTF-8
| 5,495
| 2.609375
| 3
|
[] |
no_license
|
import random
import graph_pps
import make_sketch as ms
"""
Assigns a specific range of values within the maximum and minumum packets per second to a percent.
The percent represents the number of packets within a range of the normal distribution.
The percents used are: 0.1, 0.5, 1.7, 3.4, 8.2, 13.0, 23.1
"""
def get_distance(min, max, avg, percents):
    """Split [min, avg] and [avg, max] into len(percents) equal-width bands.

    Each half-range is divided by 7 (the number of probability buckets used
    by gen_pps).  Returns a list of 2*len(percents) (low, high) tuples:
    first the bands below the average, then the bands above it.
    """
    lo_step = (avg - min) / 7
    hi_step = (max - avg) / 7
    bands = [(min + lo_step * k, min + lo_step * (k + 1))
             for k in range(len(percents))]
    bands += [(avg + hi_step * k, avg + hi_step * (k + 1))
              for k in range(len(percents))]
    return bands
"""
Uses psuedo random number generators to calculate a value between the minimum and maximum values given,
Then uses a set of probabilities to create a normal distribution of packets per second.
"""
def gen_pps(avg_pack, min_pack=0, max_pack=0):
    """Draw a packets-per-second value following a rough normal shape.

    Candidate pps values are drawn uniformly from [min_pack, max_pack] and
    accepted only when a random percentile falls into the band (from
    get_distance) matching the candidate's distance from the average.
    When min_pack and max_pack are both 0, avg_pack is returned unchanged.
    """
    percents = [0.1, 0.5, 1.7, 3.4, 8.2, 13.0, 23.1]
    ranges = get_distance(min_pack, max_pack, avg_pack, percents)
    do_pps = True
    pps = avg_pack
    if min_pack == 0 and max_pack == 0:
        do_pps = False
    while do_pps:
        pps = random.randint(min_pack, max_pack)
        percent = random.randrange(0, 231, 1) / 10
        # Find the first probability bucket that covers the drawn percentile.
        loc = 0
        for i in range(0, len(percents)):
            if percent <= percents[i]:
                loc = i
                break
        # Accept when the candidate falls in the band that matches the
        # bucket; below-average values use the lower bands, above-average
        # values use the mirrored upper bands.
        if pps < avg_pack:
            if pps >= ranges[loc][0] and pps < ranges[loc][1]:
                break
        elif pps > avg_pack:
            if pps > ranges[13 - loc][0] and pps <= ranges[13 - loc][1]:
                break
        elif pps == avg_pack:
            if pps >= ranges[13 - loc][0] and pps <= ranges[13 - loc][1]:
                break
    return pps
"""
A pseudo packet generator that takes a time second, a csv file to write traces to, the total number of packets to write,
the max, min, and average number of packets to write per second, and a number of attack packets.
First gets the packets per second, while the pps is greater than 0, continue selecting packets for users.
Random numbers are used to mimic network errors, timeouts, and no responses from callee in normal SIP traffic.
Returns the next time second and the number of packets left, so they can be used as parameters for the next iteration.
"""
def gen_packets(timer, tempfile, packets, avg_pack, min_pack=0, max_pack=0, atk=0):
    """Generate one second's worth of pseudo SIP packet traces.

    Writes INVITE/ACCEPT/ACK/BYE lines for randomly built calls to
    *tempfile* (truncating it first), optionally appends a burst of attack
    INVITEs, logs the second's packet total to pps.txt, and returns the
    advanced timer plus the number of packets still to generate.

    NOTE(review): nesting reconstructed from a whitespace-mangled source;
    the BYE is assumed to be sent only for answered calls and the attack
    burst to fire once per second — confirm against the original traces.
    """
    with open(tempfile, 'w+') as f:
        f.write('')                      # truncate the trace file
    pps = gen_pps(avg_pack, min_pack, max_pack)
    total = pps
    timer += 1
    packets -= pps
    while pps > 0:
        sipid = get_sipid()
        accept = False
        gen_inv(tempfile, timer, sipid)
        pps -= 1
        # Possible INVITE retransmissions (mimics lossy networks/timeouts).
        for i in range(0, 3):
            if random.random() < .5:
                gen_inv(tempfile, timer, sipid)
                pps -= 1
        # Chance the callee answers (possibly with a duplicate ACCEPT).
        for i in range(0, 2):
            if random.random() < .7:
                gen_acc(tempfile, timer, sipid)
                pps -= 1
                accept = True
        if accept == True:
            gen_ack(tempfile, timer, sipid)
            pps -= 1
            for i in range(0, 2):
                if random.random() < .5:
                    gen_ack(tempfile, timer, sipid)
                    pps -= 1
            gen_bye(tempfile, timer, sipid)
            pps -= 1
    if atk > 0:
        # Flood burst from a single attacker id.
        atk_sipid = get_sipid(True)
        count = random.randint(int(atk / 2), atk)
        total += count
        for i in range(0, count):
            gen_inv(tempfile, timer, atk_sipid)
    with open('pps.txt', 'a') as f:
        f.write(str(total) + '\n')
    return (timer, packets)
"""
Set of functions used to write a single packet to a csv file of traces
"""
def gen_inv(tempfile, timer, sipid):
    """Append one INVITE trace line: '<timer>\\t<sipid>\\tINV'."""
    with open(tempfile, 'a') as f:
        f.write(str(timer) + '\t' + str(sipid) + '\tINV\n')


def gen_acc(tempfile, timer, sipid):
    """Append one ACCEPT trace line."""
    with open(tempfile, 'a') as f:
        f.write(str(timer) + '\t' + str(sipid) + '\tACC\n')


def gen_ack(tempfile, timer, sipid):
    """Append one ACK trace line."""
    with open(tempfile, 'a') as f:
        f.write(str(timer) + '\t' + str(sipid) + '\tACK\n')


def gen_bye(tempfile, timer, sipid):
    """Append one BYE trace line."""
    with open(tempfile, 'a') as f:
        f.write(str(timer) + '\t' + str(sipid) + '\tBYE\n')
"""
Generates a string in the standard SIP ID form of 'name@example.com' by randomly selecting a name, a set of numbers,
and a hostname from the defined arrays.
"""
def get_sipid(attack=False):
    """Build a pseudo SIP id of the form 'name[digits]@host'.

    Names and hostnames are drawn from fixed pools; up to four random
    digits may be appended to the name.  Attack ids always use the
    attacker hostname instead of a pool hostname.
    """
    names = ['alice', 'bob', 'carol', 'dave', 'evan', 'fin', 'gale', 'hana', 'ingres', 'jake', 'kely', 'lilly', 'mark',
             'nora', 'ozpin', 'pira', 'quest', 'rose', 'stella', 'tim', 'umbra', 'vic', 'will', 'xeno', 'yarl', 'zest']
    extentions = ['@gmail.com', '@yahoo.com', '@skype.com', '@discord.com', '@hotmail.com', '@sketchyvoip.net']
    n = random.randint(0, len(names) - 1)
    e = random.randint(0, len(extentions) - 1)
    sipid = str(names[n])
    for i in range(0, 4):
        if random.random() > .7:
            sipid += str(random.randint(0, 9))
    if attack == True:
        sipid += '@attackers_haven.tor'
    else:
        sipid += str(extentions[e])
    return sipid
"""
Sets an array of time seconds of when an attack will occure based on a maximum value and an interval
"""
def set_attack_seconds(max, interval):
    """Return the time seconds at which attacks occur: every *interval*
    seconds, strictly below *max*."""
    attacks = []
    i = interval
    while i < max:
        attacks.append(i)
        i += interval
    return attacks
| true
|
06d5e97a8ac55f266c525989bba877831f8c48a5
|
Python
|
Chelton-dev/ICTPRG-Python
|
/file10readnum.py
|
UTF-8
| 215
| 3.671875
| 4
|
[] |
no_license
|
# Read three integers (one per line) from numbers2.txt and print their sum.
with open('numbers2.txt') as infile:
    num1 = int(infile.readline())
    num2 = int(infile.readline())
    num3 = int(infile.readline())
total = num1 + num2 + num3
print("numbers: ", num1, num2, num3)
print("total: ", total)
| true
|
ee2e020fd78a07716d0f6566181e5fee5426cf40
|
Python
|
RuiSONG1117/SD201
|
/kNN.py
|
UTF-8
| 3,130
| 2.984375
| 3
|
[] |
no_license
|
# coding: utf-8
# In[178]:
import os
import sys
import random
os.chdir("/Users/songsophie/Documents/SD/SD201 DataMining/TP2/data")
import sklearn as sk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
def splitFileName(fileNames, labels):
    """Randomly split parallel file/label lists ~2:1 into train and test sets.

    Each item goes to the training set with probability 0.6667.  If the two
    input lists differ in length, "Dimension wrong" is printed and four
    empty lists are returned.

    Fixes: the Python-2-only ``<>`` operator is replaced with ``!=`` and the
    ``print`` statement with the function-call form.
    """
    trainingFileNames = []
    trainingLabels = []
    testFileNames = []
    testLabels = []
    if len(fileNames) != len(labels):
        print("Dimension wrong")
        return [], [], [], []
    for i in range(0, len(fileNames)):
        k = random.random()
        if k < 0.6667:
            trainingFileNames.append(fileNames[i])
            trainingLabels.append(labels[i])
        else:
            testFileNames.append(fileNames[i])
            testLabels.append(labels[i])
    return trainingFileNames, trainingLabels, testFileNames, testLabels
# Toy corpus: 15 'apple' documents labelled 0 (company) / 1 (fruit),
# randomly split ~2:1 into training and test sets.
FileNames = ["apple1.txt", "apple2.txt", "apple3.txt", "apple4.txt", "apple5.txt",
             "apple6.txt", "apple7.txt", "apple8.txt", "apple9.txt", "apple10.txt",
             "apple11.txt", "apple12.txt", "apple13.txt", "apple14.txt", "apple15.txt"]
Labels = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
trainingFileNames, trainingLabels, testFileNames, testLabels = splitFileName(FileNames, Labels)
print(trainingFileNames, trainingLabels, testFileNames, testLabels)

# Read the text of the training documents.
trainingFiles = []
for name in trainingFileNames:
    with open(name, 'r') as of:
        trainingFiles.append(of.read().strip())

# Read the text of the test documents.
testFiles = []
for name in testFileNames:
    with open(name, 'r') as of:
        testFiles.append(of.read().strip())

# Bag-of-words representation (English stop words removed); the vectoriser
# is fitted on the training set only and reused for the test set.
count_vect = CountVectorizer(stop_words='english')
X_train_counts = count_vect.fit_transform(trainingFiles)
training = X_train_counts.toarray()
trainingCl = np.array(trainingLabels)
X_test_counts = count_vect.transform(testFiles)
test = X_test_counts.toarray()
testCl = np.array(testLabels)

print("Results using count_vect:")
for k in range(1, 5):
    neigh = KNeighborsClassifier(n_neighbors=k)
    neigh.fit(training, trainingCl)
    print("neighbors: %d, score: %f" % (k, neigh.score(test, testCl)))

# Improve the classifier with a tf-idf representation of the same counts.
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
training_tfidf = X_train_tfidf.toarray()
trainingCl_tfidf = np.array(trainingLabels)
X_test_tfidf = tfidf_transformer.transform(X_test_counts)
test_tfidf = X_test_tfidf.toarray()
testCl_tfidf = np.array(testLabels)

print("Results using tf-idf:")
for k in range(1, 5):
    neigh = KNeighborsClassifier(n_neighbors=k)
    neigh.fit(training_tfidf, trainingCl_tfidf)
    print("neighbors: %d, score: %f" % (k, neigh.score(test_tfidf, testCl_tfidf)))
| true
|
b0f1dbd50ee77ba589182a299b32b8ca1739f087
|
Python
|
janewjy/Leetcode
|
/UniqueBinarySearchTrees.py
|
UTF-8
| 803
| 3.359375
| 3
|
[] |
no_license
|
class Solution(object):
    def numTrees(self, n):
        """
        Number of structurally unique BSTs storing values 1..n (Catalan number).

        Memoised recurrence: trees(i) = sum_j trees(j) * trees(i-1-j).
        Fixes: removed a leftover Python-2 debug ``print i`` statement and
        replaced the Python-2-only ``xrange`` with ``range``.

        :type n: int
        :rtype: int
        """
        subtree = {0: 1, 1: 1, 2: 2}
        for i in range(1, n + 1):
            if i not in subtree:
                num = 0
                for j in range(i):
                    num += subtree[i - 1 - j] * subtree[j]
                subtree[i] = num
        return subtree[n]
a = Solution()
print a.numTrees(7)
# 2-6
class Solution(object):
    def numTrees(self, n):
        """
        Number of structurally unique BSTs for values 1..n, via bottom-up DP.

        dp[i] = sum_{j<i} dp[j] * dp[i-j-1]  (Catalan recurrence).
        Fixes: Python-2-only ``xrange`` replaced with ``range``; guarded the
        dp[1] write, which raised IndexError for n == 0.

        :type n: int
        :rtype: int
        """
        dp = [0] * (n + 1)
        dp[0] = 1
        if n >= 1:
            dp[1] = 1
        for i in range(2, n + 1):
            for j in range(i):
                dp[i] += dp[j] * dp[i - j - 1]
        return dp[n]
| true
|
69f8f2afdc30b7b377eaf9dc2cb81154d0090048
|
Python
|
AlaixComet/AnnotationSlamPTut
|
/traitement donnees/randomizeList.py
|
UTF-8
| 1,181
| 3.421875
| 3
|
[] |
no_license
|
from random import randint
"""
file used to generate random list of texts
"""
def randommizeList(l):
    """
    Return a new list with the elements of *l* in random order.

    Elements are pop()ed from *l*, so the argument list is emptied as a
    side effect.  Raises an Exception when *l* is empty.
    """
    if not l:
        raise Exception("list can't be empty")
    shuffled = []
    while len(l) > 1:
        shuffled.append(l.pop(randint(0, len(l) - 1)))
    shuffled.append(l.pop())
    return shuffled
def chooseOneFromList(l):
    """
    Remove and return a random element of *l* (mutates the list).

    Raises an Exception when *l* is empty.
    """
    if not l:
        raise Exception("list can't be empty", "list len = " + str(len(l)), l)
    return l.pop(randint(0, len(l) - 1))
# Build 150 annotator rows: one random text from each pool, written out
# in a randomised order (one CSV row per annotator).
temoin = ["volley", "concours"]
text1 = ["nord", "florence"]
text2 = ["sauveur", "provocation"]
with open("Liste_Textes_Passation.txt", 'w', encoding="utf-8") as csvFile:
    csvFile.write("Id Annotateur,Texte 1,Texte 2,Texte 3\n")
    for i in range(0, 150):
        # Copies are passed because chooseOneFromList/randommizeList
        # consume their argument lists.
        l = []
        l.append(chooseOneFromList(temoin.copy()))
        l.append(chooseOneFromList(text1.copy()))
        l.append(chooseOneFromList(text2.copy()))
        finalL = randommizeList(l.copy())
        csvFile.write("," + str(finalL[0]) + "," + str(finalL[1]) + "," + str(finalL[2]) + "\n")
| true
|
a9ec96f9620c5e8275f3a8a9fbb5c82f9fb20028
|
Python
|
digitalWestie/curlybot
|
/curlybot.py
|
UTF-8
| 3,412
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
import os
import time
import json
from slackclient import SlackClient
import pycurl
from io import BytesIO
# curlybot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
AT_BOT = "<@" + BOT_ID + ">"
# instantiate Slack & Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
HOST = 'https://api.github.com'
PATH = '/'
#'users/digitalWestie/repos'
def index_cmd(resource):
    """Fetch *resource* from the API and format it for Slack, or apologise."""
    result = make_request(resource)
    if result is None:
        return "Sorry! Couldn't return a list of " + resource + "."
    else:
        return "Here's a list of " + resource + " you wanted: \n```\n" + result + "\n```"
def api_info_cmd():
    """Return a one-line description of the configured API endpoint."""
    return "Here's some details:\nENDPOINT: " + HOST + PATH
def quit_cmd():
    """Say goodbye in the current channel and terminate the bot process."""
    # NOTE(review): `channel` is only bound at module level by the main
    # loop; calling this outside the script entry point raises NameError.
    slack_client.api_call("chat.postMessage", channel=channel, text="Bye!", as_user=True)
    exit()
FULLCOMMANDS = {
"tell me about the API you're using": api_info_cmd,
"what API?": api_info_cmd,
"config info?": api_info_cmd,
"that'll do pig": quit_cmd,
"disconnect": quit_cmd
}
PARAMCOMMANDS = {
"list all the ": index_cmd,
"give me all the ": index_cmd,
"give me a list of the ": index_cmd
}
#IDEA: split at the first 'the' to find the object
def make_request(resource):
    """GET HOST+PATH+resource with pycurl and return pretty-printed JSON.

    Returns None (implicitly) when the response body is not valid JSON.
    """
    url = HOST + PATH + resource
    print('Making a request to ' + url)
    data = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(c.WRITEFUNCTION, data.write)
    c.perform()
    c.close()
    r = data.getvalue()
    data.close()
    try:
        jsondata = json.loads(r)
        return json.dumps(jsondata, sort_keys=True, indent=2, separators=(',', ': '))
    except ValueError:
        # json.loads raises ValueError on malformed bodies; the previous
        # bare `except:` also swallowed unrelated errors.
        print("couldn't parse json from request to " + url)
def handle_command(command, channel):
    """
    Receives commands directed at the bot and determines if they
    are valid commands. If so, then acts on the commands. If not,
    returns back what it needs for clarification.
    """
    # NOTE(review): encoding to bytes and then comparing against str
    # command keys only works under Python 2 — confirm target runtime.
    command = command.encode('utf-8')
    response = "Not sure what you mean. Use the *list all the " + \
               "* command with a resource, delimited by spaces."
    # Parameterised commands: the text after the command phrase is the
    # resource argument.
    for cmd in PARAMCOMMANDS.keys():
        if command.startswith(cmd.lower()):
            split = command.split(cmd)
            response = PARAMCOMMANDS[cmd](split[1])
    # Exact-match commands take no arguments.
    for cmd in FULLCOMMANDS.keys():
        if command == cmd.lower():
            response = FULLCOMMANDS[cmd]()
    slack_client.api_call("chat.postMessage", channel=channel, text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
    """
        The Slack Real Time Messaging API is an events firehose.
        this parsing function returns None unless a message is
        directed at the Bot, based on its ID.
    """
    # NOTE(review): AT_BOT is not defined in this file as shown -- it must be
    # provided elsewhere (typically "<@BOT_ID>") or this raises NameError.
    output_list = slack_rtm_output
    if output_list and len(output_list) > 0:
        for output in output_list:
            if output and 'text' in output and AT_BOT in output['text']:
                # return text after the @ mention, whitespace removed
                return output['text'].split(AT_BOT)[1].strip().lower(), \
                       output['channel']
    return None, None
if __name__ == "__main__":
    READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
    if slack_client.rtm_connect():
        print("CurlyBot connected and running!")
        while True:
            # Poll the RTM firehose and dispatch any command aimed at the bot.
            command, channel = parse_slack_output(slack_client.rtm_read())
            if command and channel:
                handle_command(command, channel)
            time.sleep(READ_WEBSOCKET_DELAY)
    else:
        print("Connection failed. Invalid Slack token or bot ID?")
| true
|
63ea8b7bff029f6829f902e9843e9faa6715145d
|
Python
|
antarcticalgebra/antarcticalgebra
|
/play_level.py
|
UTF-8
| 4,933
| 3.109375
| 3
|
[] |
no_license
|
#! /usr/bin/env python
import pygame
import random
import time
from equation import Equation
class play_level:
    """One playable level: squares drop from the top and the player slides a
    see-saw along the bottom to catch them before the drop window elapses."""

    def draw(self, level, event):
        # Render one frame, advance the falling square, and process input.
        self.__screen.fill([0, 0, 0])
        pygame.font.init()
        font = pygame.font.Font(None, 100)
        ren = font.render("This is level " + str(level), 1, [0, 255, 0])
        square_count_text = font.render("Squares caught: " + str(self.__square_count), 1, [0, 255, 0])
        eq_text = font.render(self.__eq, 1, [0, 255, 0])
        self.__screen.blit(ren, [25, 50])
        self.__screen.blit(square_count_text, [25, 200])
        self.__screen.blit(eq_text, [25, 350])
        self.__see_saw.draw()
        self.__cur_time = time.time()
        # While the square is still inside its drop window, test for a catch:
        # horizontally between the see-saw's corners, and vertically within
        # the see-saw's band (per the corner getters' coordinate convention).
        if (self.__cur_time - self.__start_time) < self.__drop_time:
            if self.__square.get_x() > self.__see_saw.get_upper_left_corner()[0] and self.__square.get_x() < self.__see_saw.get_upper_right_corner()[0]:
                if self.__square.get_height_constraint() < self.__see_saw.get_upper_left_corner()[1] and self.__square.get_height_constraint() > self.__see_saw.get_lower_right_corner()[1]:
                    self.__square_count += 1
                    self.__generate_square()
            self.__square.draw((self.__cur_time - self.__start_time), self.__drop_time, self.__drop_height)
        else:
            # Drop window elapsed without a catch: spawn a fresh square.
            self.__generate_square()
        pygame.display.flip()
        # Arrow keys toggle see-saw movement; Enter exits the level (returns 0).
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT:
                self.__see_saw.move_right(True)
            if event.key == pygame.K_LEFT:
                self.__see_saw.move_left(True)
            if event.key == pygame.K_RETURN:
                return 0
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_RIGHT:
                self.__see_saw.move_right(False)
            if event.key == pygame.K_LEFT:
                self.__see_saw.move_left(False)
        return level

    def __generate_square(self):
        # Replace the current square and restart its drop timer.
        self.__square = Droppable_Square(self.__screen)
        self.__start_time = time.time()

    def __init__(self, screen):
        self.__screen = screen
        self.__seesaw_gap = 10
        self.__see_saw = See_Saw(self.__screen, self.__seesaw_gap)
        self.__start_time = time.time()
        self.__cur_time = None
        self.__drop_time = 5 #in seconds
        self.__square = Droppable_Square(self.__screen)
        # Distance a square falls before reaching the see-saw's top edge.
        self.__drop_height = self.__screen.get_height() - (self.__see_saw.get_height() + self.__seesaw_gap)
        self.__square_count = 0
        self.__eq = Equation().generateEquation(2)
class Droppable_Square:
    """A randomly coloured square that falls from the top of the screen."""

    def __init__(self, parent_screen):
        # Random colour (red, green or blue) and random horizontal position
        # that keeps the whole square on screen.
        palette = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]
        self.__color = random.choice(palette)
        self.__size = 50
        self.__x = random.randint(0, parent_screen.get_width() - self.__size)
        self.__y = 0
        self.__screen = parent_screen

    def draw(self, time_elapsed, drop_time, drop_height):
        # Vertical position is linear in elapsed time over the drop window.
        self.__y = (time_elapsed / drop_time) * drop_height
        pygame.draw.rect(self.__screen, self.__color,
                         [self.__x, self.__y, self.__size, self.__size])

    def get_size(self):
        return self.__size

    def get_x(self):
        return self.__x

    def get_y(self):
        return self.__y

    def get_height_constraint(self):
        # Bottom edge minus a 10px grace margin, used for catch detection.
        return self.__y + (self.__size - 10)
class See_Saw:
    """Player-controlled paddle sliding along the bottom of the screen."""

    def __init__(self, parent_screen, gap):
        self.__color = [255, 0, 0]
        self.__length = 200
        self.__height = 25
        # Start offset left of centre, resting `gap` px above the bottom edge.
        self.__x = parent_screen.get_width()/2 - self.__length
        self.__y = parent_screen.get_height() - (self.__height + gap)
        self.__screen = parent_screen
        self.__move_right = False
        self.__move_left = False

    def draw(self):
        # Clamp at the right edge, otherwise honour a pending right-move;
        # then clamp at the left edge, otherwise honour a pending left-move.
        right_limit = self.__screen.get_width() - self.__length
        if self.__x > right_limit:
            self.__x = right_limit
        elif self.__move_right:
            self.__x += 5
        if self.__x < 0:
            self.__x = 0
        elif self.__move_left:
            self.__x -= 5
        pygame.draw.rect(self.__screen, self.__color,
                         [self.__x, self.__y, self.__length, self.__height])

    def move_right(self, bool):
        self.__move_right = bool

    def move_left(self, bool):
        self.__move_left = bool

    def get_height(self):
        return self.__height

    def get_upper_left_corner(self):
        return [self.__x, self.__y]

    def get_upper_right_corner(self):
        return [self.__x + self.__length, self.__y]

    def get_lower_right_corner(self):
        # NOTE(review): pygame's y axis grows downward, so y - height is
        # visually *above* the top edge; kept as-is -- confirm intent.
        return [self.__x + self.__length, self.__y - self.__height]

    def get_lower_left_corner(self):
        return [self.__x, self.__y - self.__height]
| true
|
c88c83b2f36cf02bc51971358cf2f88143dd00a4
|
Python
|
dipalpatel77/mypythonpractice
|
/collagechallange.py
|
UTF-8
| 70
| 3.59375
| 4
|
[] |
no_license
|
def digit_sum(number):
    """Return the sum of the decimal digits of ``number``.

    Negative inputs are handled by summing digits of the absolute value;
    0 returns 0.
    """
    number = abs(number)
    total = 0
    while number != 0:
        total += number % 10
        number //= 10
    return total


if __name__ == "__main__":
    # Original was Python 2 (`input()` evaluated to int, `print n`, and `/`
    # was integer division); this is the Python 3 equivalent.
    a = int(input("enter the number"))
    print(digit_sum(a))
| true
|
27313219c97489391301dae072fa241336b8dfb9
|
Python
|
SweetSnack/unipg-twitter-stream
|
/twitter/listeners.py
|
UTF-8
| 1,371
| 2.53125
| 3
|
[] |
no_license
|
import json
import config
from tweepy import OAuthHandler, API, Stream
from tweepy.streaming import StreamListener
from server import MessageHandler
class StdOutListener(StreamListener):
    """
    Handles tweets from the received Twitter stream.

    When a tweet carries one of the tracked hashtags, that hashtag is pushed
    to every connected websocket client.
    """

    def on_data(self, data):
        payload = json.loads(data)
        if 'entities' not in payload or 'hashtags' not in payload.get('entities'):
            return True
        for tag_entry in payload['entities']['hashtags']:
            hashtag = '#{}'.format(tag_entry['text'])
            if hashtag not in config.HASHTAGS:
                continue
            # Broadcast the matched hashtag to every open connection.
            for connection in MessageHandler.connections:
                connection.write_message(hashtag)
            return True
        return True

    def on_error(self, status):
        print(status)
def TwitterListener():
    """
    Initializes the twitter listener
    """
    # Authenticate with credentials from the config module.
    l = StdOutListener()
    auth = OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)
    auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
    stream = Stream(auth, l)
    api = API(auth_handler=auth)
    # Seed the tracked hashtags with the trending topics for WOEID 44418
    # (presumably a fixed location -- confirm which one is intended).
    config.HASHTAGS = [x['name'] for x in api.trends_place(id=44418)[0]['trends']]
    print("Stream listener is up and running")
    # Blocks: filter() runs the long-lived streaming connection.
    stream.filter(track=config.HASHTAGS)
| true
|
f2b5055d146abfc7020290361e759ab7683da7d6
|
Python
|
Bandwidth/python-sdk
|
/bandwidth/voice/bxml/verbs/start_stream.py
|
UTF-8
| 2,559
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
"""
start_stream.py
Representation of Bandwidth's start stream BXML verb
@copyright Bandwidth INC
"""
from lxml import etree
from .base_verb import AbstractBxmlVerb
START_STREAM_TAG = "StartStream"
class StartStream(AbstractBxmlVerb):

    def __init__(self, destination, name=None, tracks=None, streamEventUrl=None, streamEventMethod=None, username=None, password=None, streamParams=None):
        """
        Initializes the StartStream class with the following parameters

        :param str destination: A websocket URI to send the stream to
        :param str name: A name to refer to this stream by
        :param str tracks: The part of the call to send a stream from. `inbound`, `outbound` or `both`.
        :param str streamEventUrl: URL to send the associated Webhook events to during this stream's lifetime
        :param str streamEventMethod: The HTTP method to use for the request to `streamEventUrl`. `GET` or `POST`
        :param str username: The username to send in the HTTP request to `streamEventUrl`
        :param str password: The password to send in the HTTP request to `streamEventUrl`
        :param list streamParams: Optional nested stream-parameter objects (each must provide to_etree_element())
        """
        self.destination = destination
        self.name = name
        self.tracks = tracks
        self.streamEventUrl = streamEventUrl
        self.streamEventMethod = streamEventMethod
        self.username = username
        self.password = password
        self.stream_params = streamParams

    def to_etree_element(self):
        """
        Converts the class into an etree element. Used for other verb classes to build xml

        :return etree.Element: The etree Element representing this class
        """
        root = etree.Element(START_STREAM_TAG)
        root.set("destination", self.destination)
        # Optional attributes are emitted only when a value was supplied.
        for attr_name, attr_value in (
            ("name", self.name),
            ("tracks", self.tracks),
            ("streamEventUrl", self.streamEventUrl),
            ("streamEventMethod", self.streamEventMethod),
            ("username", self.username),
            ("password", self.password),
        ):
            if attr_value is not None:
                root.set(attr_name, attr_value)
        if self.stream_params is not None:
            for stream_param in self.stream_params:
                root.append(stream_param.to_etree_element())
        return root

    def to_bxml(self):
        # Serialize the element tree to its XML string form.
        return etree.tostring(self.to_etree_element()).decode()
| true
|
d9b638ef42f0811466dc59195f5d89a5e6178d08
|
Python
|
tomfisher/tradingtool
|
/trash/create_stock_history.py
|
UTF-8
| 786
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
from yahoo_finance import Share
import sys
import json
import csv

# Create the list of stock symbol s
# Python 2 script: print statements, and the CSV opened in binary mode.
symbols = []
with open('data/companylist.csv', 'rb') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='|')
    header_skip = False
    for row in reader:
        # Symbols are wrapped in quote characters; strip first and last char.
        symbol = row[0][1:-1]
        if not header_skip:
            # Skip the CSV header row.
            header_skip = True
            continue
        if symbol!='VIIX':
            symbols.append(symbol)

# Get the history for each stock symbol
data = {}
for symbol in symbols:
    print 'Processing ' + symbol
    try:
        share = Share(symbol)
        data[symbol] = share.get_historical('2013-01-01', '2016-12-31')
    except:
        # NOTE(review): bare except silently drops a symbol on any error.
        continue

# Dump the stock history
output_path = 'data/stock_history.json'
with open(output_path, "wb") as f:
    f.write(json.dumps(data, indent=4))
| true
|
4df9b7340aa208f10b929f1e7bc8ad07da768cab
|
Python
|
gurumitts/lumens
|
/lumens/lumens.py
|
UTF-8
| 1,429
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
import RPi.GPIO as GPIO
from random import randint
import logging
from apscheduler.schedulers.background import BackgroundScheduler
# Background scheduler drives the periodic random-mode job (see Lumens).
scheduler = BackgroundScheduler()
# Colour letter -> BCM pin number.
led_pins = {'r': 26, 'b': 20, 'g': 21, 'w': 19}
#set up gpio
print GPIO.VERSION
GPIO.setmode(GPIO.BCM)
class Lumens:
    """Controls four GPIO-driven LEDs with an optional random blink mode."""

    def __init__(self):
        logging.getLogger('lumens').info('lumens is starting...')
        self.random_mode = False
        # All pins are driven to 1 initially; _all_off() also writes 1, so
        # the LEDs appear to be wired active-low (1 == off) -- confirm wiring.
        for led in led_pins:
            pin = led_pins[led]
            GPIO.setup(pin, GPIO.OUT)
            GPIO.output(pin, 1)
        scheduler.start()
        # Re-randomise the lit LED every 30 seconds while random_mode is on.
        scheduler.add_job(self._randomize, 'interval', seconds=30)

    def toggle(self, led):
        # Flip the output state of the pin mapped to `led` ('r'/'b'/'g'/'w').
        pin = led_pins[led]
        logging.getLogger('lumens').info('toggle %s %s' % (led, pin))
        current_status = GPIO.input(pin)
        if current_status == 1:
            GPIO.output(pin, 0)
        else:
            GPIO.output(pin, 1)

    def random(self):
        # Toggle random mode on/off.
        if self.random_mode:
            self.random_mode = False
        else:
            self.random_mode = True
        logging.getLogger('lumens').info('random mode is %s' % self.random_mode)

    def _randomize(self):
        # Scheduler callback: turn everything off, then light one random LED.
        if self.random_mode:
            self._all_off()
            # NOTE(review): indexing dict.keys() only works on Python 2;
            # Python 3 needs list(led_pins)[randint(0, 3)] here.
            pin = led_pins[led_pins.keys()[randint(0, 3)]]
            GPIO.output(pin, 0)

    def _all_off(self):
        # Drive every pin to 1 (the "off" level used throughout this class).
        for led in led_pins:
            pin = led_pins[led]
            GPIO.output(pin, 1)
| true
|
eef88e05a36ee6c3c58708569a13d1f704a716af
|
Python
|
xbliss/photons-core
|
/modules/photons_tile_paint/twinkles.py
|
UTF-8
| 4,508
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
from photons_tile_paint.animation import Animation, coords_for_horizontal_line, Finish
from photons_tile_paint.options import AnimationOptions
from photons_themes.theme import ThemeColor as Color
from photons_themes.canvas import Canvas
from delfick_project.norms import dictobj, sb
import random
# Palettes from https://www.color-hex.com/ (Using HSL)
# Palettes from https://www.color-hex.com/ (Using HSL)
# Each entry is a (hue-in-degrees, saturation, brightness, kelvin) tuple --
# the argument order used when constructing colours in place_random below.
palettes = {
    "twilight": [
        (0.65 * 360, 0.53, 0.33, 3500),
        (0.76 * 360, 0.42, 0.38, 3500),
        (0.79 * 360, 0.42, 0.73, 3500),
    ],
    "summertime": [
        (0.11 * 360, 1, 0.65, 3500),
        (0.51 * 360, 0.86, 0.38, 3500),
        (0.06 * 360, 0.81, 0.54, 3500),
        (0.58 * 360, 0.82, 0.27, 3500),
    ],
    "rainbow_dash": [
        (0.01 * 360, 0.84, 0.57, 3500),
        (0.06 * 360, 0.89, 0.58, 3500),
        (0.15 * 360, 0.96, 0.79, 3500),
        (0.26 * 360, 0.50, 0.51, 3500),
        (0.55 * 360, 0.97, 0.41, 3500),
    ],
}
class choose_palette(sb.Spec):
    """Spec resolving a palette name to its colour list (None when unset)."""

    def normalise_empty(self, meta):
        # No palette configured.
        return None

    def normalise_filled(self, meta, val):
        # Validate the name against the known palettes, then look it up.
        chosen = sb.string_choice_spec(list(palettes)).normalise(meta, val)
        return palettes[chosen]
class TileTwinklesOptions(AnimationOptions):
    """Options controlling twinkle count, palette and fade speeds."""

    num_iterations = dictobj.Field(sb.integer_spec, default=-1)
    palette = dictobj.Field(choose_palette())
    num_twinkles = dictobj.Field(sb.integer_spec, default=20)
    fade_in_speed = dictobj.Field(sb.float_spec, default=0.125)
    fade_out_speed = dictobj.Field(sb.float_spec, default=0.078)

    def final_iteration(self, iteration):
        # -1 means run forever; otherwise stop once the cap is reached.
        return self.num_iterations != -1 and self.num_iterations <= iteration
class TwinklesState:
    """Mutable animation state: the live twinkles and their fade directions.

    ``twinkles`` maps an (x, y) canvas point to its current Color;
    ``directions`` maps the same point to 1 (brightening phase pending)
    or 0 (dimming phase).
    """

    def __init__(self, coords, options):
        self.options = options
        self.twinkles = {}
        self.directions = {}

        # Bounding box over all tiles, so twinkles can appear anywhere on
        # the combined canvas.
        self.left = coords[0][0][0]
        self.right = coords[0][0][0]
        self.top = coords[0][0][1]
        self.bottom = coords[0][0][1]

        for (left, top), (width, height) in coords:
            self.left = min(left, self.left)
            self.right = max(left + width, self.right)
            self.bottom = min(top - height, self.bottom)
            self.top = max(top, self.top)

        # Seed with up to half the target number of twinkles.
        # Fix: use integer division -- random.randrange rejects float
        # arguments on Python >= 3.12 (deprecated since 3.10).
        self.place_random(random.randrange(0, self.options.num_twinkles // 2))

    def random_coord(self):
        """Pick a uniformly random point inside the bounding box."""
        left = random.randrange(self.left, self.right)
        top = random.randrange(self.bottom, self.top)
        return left, top

    def place_random(self, amount):
        """Try to add ``amount`` twinkles at random points.

        Points that already host a twinkle are skipped, so fewer than
        ``amount`` may actually be added.
        """
        for _ in range(amount):
            if self.options.palette:
                hue, saturation, brightness, kelvin = random.choice(self.options.palette)
            else:
                hue = random.randrange(0, 360)
                saturation = random.randrange(5, 10) / 10
                brightness = random.randrange(1, 10) / 10
                kelvin = random.randrange(2500, 9000)

            point = self.random_coord()
            if point not in self.twinkles:
                # Dim twinkles start in direction 1, bright ones in 0.
                self.directions[point] = 1 if brightness < 0.6 else 0
                self.twinkles[point] = Color(hue, saturation, brightness, kelvin)

    def tick(self):
        """Advance one frame: top up, cull dark twinkles, step brightness."""
        diff = self.options.num_twinkles - len(self.twinkles)
        if diff > 0:
            self.place_random(random.randrange(0, diff))

        for pos, color in list(self.twinkles.items()):
            if color.brightness == 0:
                del self.twinkles[pos]

        for (x, y), color in self.twinkles.items():
            # NOTE(review): direction 0 applies fade_in_speed while
            # *decreasing* brightness -- the speed names look swapped,
            # behaviour kept exactly as before.
            if self.directions[(x, y)] == 0:
                color.brightness -= self.options.fade_in_speed
                if color.brightness < 0:
                    color.brightness = 0
            else:
                color.brightness += self.options.fade_out_speed
                if color.brightness > 1:
                    color.brightness = 1
                    self.directions[(x, y)] = 0

        return self
class TileTwinklesAnimation(Animation):
    """Tile animation rendering randomly appearing and fading twinkles."""

    coords = coords_for_horizontal_line

    def setup(self):
        # Count of completed ticks, compared against options.num_iterations.
        self.iteration = 0

    def next_state(self, prev_state, coords):
        # First call: build fresh state; afterwards advance one tick, raising
        # Finish once the configured iteration cap is reached.
        if prev_state is None:
            return TwinklesState(coords, self.options)

        self.iteration += 1
        if self.options.final_iteration(self.iteration):
            raise Finish("Reached max iterations")

        return prev_state.tick()

    def make_canvas(self, state, coords):
        # Paint each live twinkle onto a fresh canvas.
        canvas = Canvas()
        for point, color in state.twinkles.items():
            canvas[point] = color
        return canvas
| true
|
6d59ba5ea4e06d23720d090ebac95bbc65529383
|
Python
|
omar20261/python-examples
|
/Http_Req.py
|
UTF-8
| 206
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/python
import urllib3


def main():
    """Fetch the Google Egypt homepage and print the response object."""
    MyUrl = "https://www.google.com.eg/"
    pool = urllib3.PoolManager()
    data = pool.request('GET', MyUrl)
    print(data)


if __name__ == "__main__":
    main()
| true
|
967284cdbcf86d61d77409e8ca2d97e04443901b
|
Python
|
Pluto-Zmy/Python-OJ
|
/5/2. Prime.py
|
UTF-8
| 196
| 3.84375
| 4
|
[] |
no_license
|
def isPrime(num):
    """Return True when ``num`` is a prime number.

    Fixes the original, which returned True for 0, 1 and negative numbers.
    Trial division only needs to run while i*i <= num (sqrt bound), which
    also turns the original O(n) scan into O(sqrt(n)).
    """
    if num < 2:
        return False
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
def primeSum(m, n):
    """Return the sum of all primes in the inclusive range [m, n]."""
    return sum(value for value in range(m, n + 1) if isPrime(value))
| true
|
9ff3f79ad99496d030abb533b6e1d9b3bd58cbdd
|
Python
|
rookzeno/kprlibrary
|
/葉からdfs.py
|
UTF-8
| 649
| 2.546875
| 3
|
[] |
no_license
|
import sys
sys.setrecursionlimit(200000)

# Competitive-programming solution.  a[i] is the 1-based "parent" of node i;
# ans counts how many entries of a must be rewritten.
n,k = map(int,input().split())
a = list(map(int,input().split()))
ans = 0
# Node 1 must point at itself; fix it up front if it does not.
if a[0] != 1:
    a[0] = 1
    ans += 1
# b[x]: children of node x in the tree induced by a (rooted at node 1).
b = [[]for i in range(n)]
for i in range(n):
    b[a[i]-1].append(i)
b[0].remove(0)
huka = 0  # unused leftover
# kyo[x]: depth of x from the root (filled by dfs below).
kyo = [float("inf")] * n
def dfs(x,y):
    # Record depth y for node x, then recurse into its children.
    kyo[x] = y
    for i in b[x]:
        dfs(i,y+1)
dfs(0,0)
def dfs2(x,y):
    # Walk upward from x (via a[x]-1), marking nodes as handled (kyo = 0);
    # after k-1 upward hops the reached node is rewired (ans += 1).
    if kyo[x] <=k-y:
        return
    if y == k-1 and x != 0:
        kyo[x] = 0
        global ans
        ans += 1
        return
    kyo[x] = 0
    dfs2(a[x]-1,y+1)
# Process nodes deepest-first so one rewrite covers many ancestors.
hukai = []
for i in range(n):
    hukai.append([kyo[i],i])
hukai.sort(key = lambda x:-x[0])
for j,i in hukai:
    if j <= k:
        continue
    dfs2(i,0)
print(ans)
| true
|
132e94eb729cc02437a85444f005647c1a940bb6
|
Python
|
bravesoftdz/delphi-epidata
|
/src/acquisition/cdcp/cdc_dropbox_receiver.py
|
UTF-8
| 4,411
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
"""
===============
=== Purpose ===
===============
Downloads CDC page stats stored in Delphi's dropbox.
This program:
1. downloads new files within dropbox:/cdc_page_stats
2. moves the originals to dropbox:/cdc_page_stats/archived_reports
3. zips the downloaded files and moves that to delphi:/common/cdc_stage
4. queues cdc_upload.py, cdc_extract.py, and other scripts to run
See also:
- cdc_upload.py
- cdc_extract.py
"""
# standard library
import argparse
import datetime
from zipfile import ZIP_DEFLATED, ZipFile
# third party
import dropbox
import mysql.connector
# first party
import delphi.operations.secrets as secrets
# location constants
DROPBOX_BASE_DIR = '/cdc_page_stats'
DELPHI_BASE_DIR = '/common/cdc_stage'
def get_timestamp_string():
  """
  Return the current local date and time as a string.

  The format is "%Y%m%d_%H%M%S".
  """
  local_now = datetime.datetime.now()
  return local_now.strftime('%Y%m%d_%H%M%S')
def trigger_further_processing():
  """Add CDCP processing scripts to the Automation run queue.

  Connects to the `automation` MySQL database with the automation
  credentials and enqueues step 46 ("Process CDCP Data").
  """
  # connect
  u, p = secrets.db.auto
  cnx = mysql.connector.connect(user=u, password=p, database='automation')
  cur = cnx.cursor()
  # add step "Process CDCP Data" to queue
  cur.execute('CALL automation.RunStep(46)')
  # disconnect
  cur.close()
  cnx.commit()
  cnx.close()
def fetch_data():
  """
  Check for new files on dropbox, download them, zip them, cleanup dropbox, and
  trigger further processing of new data.

  Raises an Exception on a non-200 download, on any size mismatch between
  dropbox metadata and the received payload, or when archiving a file fails.
  """
  # initialize dropbox api
  dbx = dropbox.Dropbox(secrets.cdcp.dropbox_token)

  # look for new CDC data files (only .csv and .zip entries are taken)
  print('checking dropbox:%s' % DROPBOX_BASE_DIR)
  save_list = []
  for entry in dbx.files_list_folder(DROPBOX_BASE_DIR).entries:
    name = entry.name
    if name.endswith('.csv') or name.endswith('.zip'):
      print(' download "%s"' % name)
      save_list.append(name)
    else:
      print(' skip "%s"' % name)

  # determine if there's anything to be done
  if len(save_list) == 0:
    print('did not find any new data files')
    return

  # download new files, saving them inside of a new zip file
  timestamp = get_timestamp_string()
  zip_path = '%s/dropbox_%s.zip' % (DELPHI_BASE_DIR, timestamp)
  print('downloading into delphi:%s' % zip_path)
  with ZipFile(zip_path, 'w', ZIP_DEFLATED) as zf:
    for name in save_list:
      # location of the file on dropbox
      dropbox_path = '%s/%s' % (DROPBOX_BASE_DIR, name)
      print(' %s' % dropbox_path)

      # start the download
      meta, resp = dbx.files_download(dropbox_path)

      # check status and length (Content-Length may be absent -> -1)
      if resp.status_code != 200:
        raise Exception(['resp.status_code', resp.status_code])
      dropbox_len = meta.size
      print('  need %d bytes...' % dropbox_len)
      content_len = int(resp.headers.get('Content-Length', -1))
      if dropbox_len != content_len:
        info = ['dropbox_len', dropbox_len, 'content_len', content_len]
        raise Exception(info)

      # finish the download, holding the data in this variable
      filedata = resp.content

      # check the length again
      payload_len = len(filedata)
      print('  downloaded')
      if dropbox_len != payload_len:
        info = ['dropbox_len', dropbox_len, 'payload_len', payload_len]
        raise Exception(info)

      # add the downloaded file to the zip file
      zf.writestr(name, filedata)
      print('  added')

  # At this point, all the data is stored and awaiting further processing on
  # the delphi server.
  print('saved all new data in %s' % zip_path)

  # on dropbox, archive downloaded files so they won't be downloaded again
  archive_dir = 'archived_reports/processed_%s' % timestamp
  print('archiving files...')
  for name in save_list:
    # source and destination
    dropbox_src = '%s/%s' % (DROPBOX_BASE_DIR, name)
    dropbox_dst = '%s/%s/%s' % (DROPBOX_BASE_DIR, archive_dir, name)
    print(' "%s" -> "%s"' % (dropbox_src, dropbox_dst))

    # move the file
    meta = dbx.files_move(dropbox_src, dropbox_dst)

    # sanity check that the move landed inside the archive directory
    if archive_dir not in meta.path_lower:
      raise Exception('failed to move "%s"' % name)

  # finally, trigger the usual processing flow
  print('triggering processing flow')
  trigger_further_processing()
  print('done')
def main():
  """Entry point: parse (empty) CLI arguments, then fetch new dropbox data."""
  # Parsing still rejects unexpected command-line arguments.
  argparse.ArgumentParser().parse_args()
  fetch_data()


if __name__ == '__main__':
  main()
| true
|
37b14e5317ef55843148ef4abf807fad94391660
|
Python
|
FedericoBaron/my-portfolio
|
/cop3223H/Code/turtleeee.py
|
UTF-8
| 112
| 2.6875
| 3
|
[] |
permissive
|
import turtle

# Trace three sides with a left turn after each, then the closing side
# (same seven calls as before: fd, lt, fd, lt, fd, lt, fd).
for _ in range(3):
    turtle.fd(50)
    turtle.lt(90)
turtle.fd(50)
| true
|
a1d57bec8e0f2b7fdd6df0305c650d438f7be081
|
Python
|
Cenation2812/Python-projects
|
/Ladder.py
|
UTF-8
| 147
| 3.109375
| 3
|
[] |
no_license
|
def ladder_lines(n):
    """Build the ladder for ``n`` as a list of text rows.

    The ladder is n + (2*n + 2) rows tall; every third row is a rung
    ("*****"), the remaining rows are the two side rails ("* *").
    """
    height = n + (n * 2 + 2)
    return ["*****" if row % 3 == 0 else "* *" for row in range(1, height + 1)]


if __name__ == "__main__":
    n = int(input())
    for line in ladder_lines(n):
        print(line)
| true
|
df6d53a56f9cac59357a42f50b79bb03b4cbba6c
|
Python
|
Hott-J/Preparing-Programming-Interviews
|
/[03]전화 예비 면접/중첩 괄호.py
|
UTF-8
| 378
| 3.765625
| 4
|
[] |
no_license
|
# Determine whether parentheses are properly nested (balanced).
s="(())"
s1="()()"
s2="(()()"
s3=")("
flag=True  # unused leftover
def solution(s):
    """Return True when ``s`` is a balanced parenthesis string."""
    depth = 0
    for ch in s:
        depth += 1 if ch == "(" else -1
        if depth < 0:
            # A closer appeared before its matching opener.
            return False
    return depth == 0
print(solution(s3))  # ")(" -> False
| true
|
09f4b19824eeea29ad339245ef1adf81e7376b21
|
Python
|
Adelina360/Python_Projects
|
/Problem Solver/Problem Solver.py
|
UTF-8
| 925
| 3.734375
| 4
|
[] |
no_license
|
# People have many problems but they can solver their problems with this
print('WRITE ALL WITH LOWERCASE')

def funcion():
    # Prompt for the problem; the answer is read but never stored or used.
    word = input('Your problem: ')

funcion()

def funcion_2():
    # Prompt for the cause; likewise the answer is discarded.
    word_2 = input('Cause of the problem: ')

funcion_2()
def funcion_3():
    # Ask whether the problem has a solution and branch on yes/no.
    word_3 = input('Solution: ')
    if word_3 == 'yes':
        input('Which one: ')
        input('Resources for the solution: ')
        print('Your problem have solution. Congratulations!')
    elif word_3 == 'no':
        print('Let it happen.')
    else:
        # Re-prompt once when the first answer was neither yes nor no.
        word_3 = input('Please input yes or no: ')
        if word_3 == 'yes':
            input('Which one: ')
            input('Resources for the solution: ')
            print('Your problem have solution. Congratulations! You can!')
            # NOTE(review): funcion_4 is never defined in this file, so
            # reaching this line raises NameError -- likely meant to recurse
            # or should be removed.
            funcion_4()
        elif word_3 == 'no':
            print('Let it happen.')

funcion_3()
| true
|
b8c03bd29ef349eb0b876b4634032b19e8ad0ced
|
Python
|
afilipch/nrlbio
|
/tasks/targets_ligated_not_to_perfect.py
|
UTF-8
| 2,135
| 2.546875
| 3
|
[] |
no_license
|
#! /usr/lib/python
'''Script answers to the question: How many clusters have a perfect seed match for one of the top N expressed miRNAs families, but were actually found ligated to another miRNA?'''
import argparse
import os
import sys

from pybedtools import BedTool
from collections import defaultdict

from nrlbio.mirna import fasta2mirnas, assign_expression, mirnas2families, find_family
from nrlbio.pyplot_extension import histogram

parser = argparse.ArgumentParser(description='Script answers to the question: How many clusters have a perfect seed match for one of the top N expressed miRNAs families, but were actually found ligated to another miRNA?');
parser.add_argument('path', metavar = 'N', nargs = '?', type = str, help = "path to interaction.bed file");
parser.add_argument('--mir', nargs = '?', required=True, type = str, help = "path to the file with miRNAs in fasta format");
parser.add_argument('--expr', nargs = '?', required=True, type = str, help = "path to the expression file (mirid expression tsv format)");
args = parser.parse_args();

# Seed positions (1-based start/stop) passed to fasta2mirnas.
seed_start = 1;
seed_stop = 7

def bound2other(mirid, families, tseq):
    # Return 1 when tseq carries a perfect seed match for a family OTHER than
    # the family of the ligated miRNA (mirid), else 0.
    # NOTE(review): the for/else layout is significant -- as written, 0 is
    # returned only after *all* families were checked; confirm against the
    # original indentation (an if/else here would exit on the first family).
    lfam=find_family(mirid, families)
    for fam in families:
        if(fam!=lfam and fam.match in tseq):
            return 1;
    else:
        return 0;

# Build families sorted by expression (most expressed first).
mirdict = fasta2mirnas(args.mir, seed_start, seed_stop);
assign_expression(args.expr, mirdict, sep="\t");
families = mirnas2families(mirdict.values())
families.sort(key=lambda x: x.expression, reverse = True)

# result[i]: count of interactions matched against the top-i families.
result = defaultdict(int);
total_int=0;
for interval in BedTool(args.path):
    total_int += 1;
    # Column 7 holds "mirid,...", column 9 the target sequence.
    mirid, tseq = interval[6].split(",")[0], interval[8];
    for i in range(1,11):
        result[i] += bound2other(mirid, families[:i], tseq)

histogram(result, title='interactions that have perfect seed match to another miRNA from top expressed families', ylabel='number of interactions(total %d)' % total_int, xlabel='number of top expressed families', xticks=range(1, 11), xticklabels=None, xticksrotation = 0, output='targets_ligated_not_to_perfect.pdf', color='skyblue', align=u'left', rwidth=0.5)

#if(not find_family(mirid, families)):
#print mirid;
| true
|
7ea1202d8394ba17b4431990a0565eb57db7d169
|
Python
|
dlrgy22/Boostcamp
|
/2주차/2021.01.25/예제/vector.py
|
UTF-8
| 647
| 3.4375
| 3
|
[] |
no_license
|
import numpy as np
def l1_norm(x):
    """L1 (Manhattan) norm: sum of absolute component values."""
    return np.sum(np.abs(x))
def l2_norm(x):
    """L2 (Euclidean) norm: square root of the sum of squared components."""
    return np.sqrt(np.sum(x * x))
def angle(x, y):
    """Angle in radians between vectors x and y (inverse-cosine formula)."""
    cos_theta = np.inner(x, y) / (l2_norm(x) * l2_norm(y))
    return np.arccos(cos_theta)
x = np.array([0, 1])
y = np.array([0, 2])
# Parallel vectors -> angle 0.
print(angle(x, y))

print(np.inner([1, -1, 1, -1], [4, -4, 4, -4]))

x = np.array([[1, 0, 1],
              [0, 1, 0],
              [1, 1, 0]])
# Exact inverse of a square, non-singular matrix.
print(np.linalg.inv(x))

x = np.array([[1, 0, 1],
              [0, 1, 0]])
# Moore-Penrose pseudo-inverse for the non-square case.
print(np.linalg.pinv(x))

x= np.array([[1, 2],
             [3, 4]])
y = np.array([[2, 4]])
# Broadcasting: the 1x2 row y is added to every row of x.
print(x + y)
| true
|
4edc5ce01e03f5f30d81b63477f8485b618d9870
|
Python
|
enessitki/wikiHow
|
/qt-modelviewcontroller-example/classes/Views.py
|
UTF-8
| 794
| 2.71875
| 3
|
[] |
no_license
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
class FilesView(QWidget):
    """View widget: a Scan button, a label listing files, and a Delete button."""

    def __init__(self, parent=None):
        super(FilesView, self).__init__(parent=parent)
        self.scanButton = QPushButton("Scan")
        self.filesLabel = QLabel("")
        self.deleteButton = QPushButton("Delete")
        # Horizontal layout: [Scan] [file list] [Delete]
        layout = QHBoxLayout()
        layout.addWidget(self.scanButton)
        layout.addWidget(self.filesLabel)
        layout.addWidget(self.deleteButton)
        self.setLayout(layout)

    def update_view(self, files_array):
        # HACK: renders the array one tuple per line by stripping list/tuple
        # punctuation from str(files_array); assumes no element's own text
        # contains these characters -- confirm upstream format.
        self.filesLabel.setText(str(files_array).replace(" ", "").replace("]", "").replace("[", "").replace(")", "\n").replace("(", "").replace("'", ""))

    def clear(self):
        # Empty the file listing.
        self.filesLabel.setText("")
| true
|
e5c3fbda41fab0f9e7d8979145a580ee414c1d89
|
Python
|
nakagawaneal/my-new-repo1
|
/test_employee.py
|
UTF-8
| 4,150
| 3.109375
| 3
|
[] |
no_license
|
import unittest
from employee import Employee

# NOTE(review): tutorial/notes file -- the test class is defined twice (the
# second, "With Prints" variant wins on import) and loose snippets follow at
# module level.  Annotated only; structure kept as-is.


class TestEmployee(unittest.TestCase):

    def test_email(self): #we're creating 2 employees
        emp_1 = Employee('Corey', 'Schafer', 50000)
        emp_2 = Employee('Sue', 'Smith', 60000)

        self.assertEqual(emp_1.email, 'Corey.Schafer@email.com')
        self.assertEqual(emp_2.email, 'Sue.Smith@email.com')

        # email is derived from the name, so it must track a rename.
        emp_1.first = 'John'
        emp_2.first = 'Jane'

        self.assertEqual(emp_1.email, 'John.Schafer@email.com')
        self.assertEqual(emp_2.email, 'Jane.Smith@email.com')

    def test_fullname(self):
        emp_1 = Employee('Corey', 'Schafer', 50000)
        emp_2 = Employee('Sue', 'Smith', 60000)

        self.assertEqual(emp_1.fullname, 'Corey Schafer')
        self.assertEqual(emp_2.fullname, 'Sue Smith')

        emp_1.first = 'John'
        emp_2.first = 'Jane'

        self.assertEqual(emp_1.fullname, 'John Schafer')
        self.assertEqual(emp_2.fullname, 'Jane Smith')

    def test_apply_raise(self):
        emp_1 = Employee('Corey', 'Schafer', 50000)
        emp_2 = Employee('Sue', 'Smith', 60000)

        # apply_raise multiplies pay by the raise amount (1.05 expected here).
        emp_1.apply_raise()
        emp_2.apply_raise()

        self.assertEqual(emp_1.pay, 52500)
        self.assertEqual(emp_2.pay, 63000)


if __name__ == '__main__':
    unittest.main()


###### With Prints ######

import unittest
from employee import Employee


class TestEmployee(unittest.TestCase):

    # setUp/tearDown run before/after EVERY test method.
    def setUp(self):
        print('setUp')
        self.emp_1 = Employee('Corey', 'Schafer', 50000)
        self.emp_2 = Employee('Sue', 'Smith', 60000)

    def tearDown(self):
        print('tearDown\n')

    def test_email(self):
        print('test_email')
        self.assertEqual(self.emp_1.email, 'Corey.Schafer@email.com')
        self.assertEqual(self.emp_2.email, 'Sue.Smith@email.com')

        self.emp_1.first = 'John'
        self.emp_2.first = 'Jane'

        self.assertEqual(self.emp_1.email, 'John.Schafer@email.com')
        self.assertEqual(self.emp_2.email, 'Jane.Smith@email.com')

    def test_fullname(self):
        print('test_fullname')
        self.assertEqual(self.emp_1.fullname, 'Corey Schafer')
        self.assertEqual(self.emp_2.fullname, 'Sue Smith')

        self.emp_1.first = 'John'
        self.emp_2.first = 'Jane'

        self.assertEqual(self.emp_1.fullname, 'John Schafer')
        self.assertEqual(self.emp_2.fullname, 'Jane Smith')

    def test_apply_raise(self):
        print('test_apply_raise')
        self.emp_1.apply_raise()
        self.emp_2.apply_raise()

        self.assertEqual(self.emp_1.pay, 52500)
        self.assertEqual(self.emp_2.pay, 63000)


if __name__ == '__main__':
    unittest.main()


###### setUpClass and tearDownClass ######

# NOTE(review): snippet -- these belong inside a TestCase class; they run
# once per class, not per test.
@classmethod
def setUpClass(cls):
    print('setupClass')

@classmethod
def tearDownClass(cls):
    print('teardownClass')


##### Mocking #####

# NOTE(review): snippets -- `self` methods meant for Employee/TestEmployee;
# `requests` and `patch` are not imported in this file as shown.
def monthly_schedule(self, month):
    response = requests.get(f'http://company.com/{self.last}/{month}')
    if response.ok:
        return response.text
    else:
        return 'Bad Response!'

def test_monthly_schedule(self):
    with patch('employee.requests.get') as mocked_get:
        mocked_get.return_value.ok = True
        mocked_get.return_value.text = 'Success'

        schedule = self.emp_1.monthly_schedule('May')
        mocked_get.assert_called_with('http://company.com/Schafer/May') #this is to make sure that the corect url was called
        self.assertEqual(schedule, 'Success')

        # To test the opposite...
        mocked_get.return_value.ok = False #make this False

        schedule = self.emp_2.monthly_schedule('June')
        mocked_get.assert_called_with('http://company.com/Smith/June') #this is to make sure that the corect url was called
        self.assertEqual(schedule, 'Bad Response!')

"""
notes
- tests don't run in order that they're written. So it's important to include a print statement so you canb identify which test was tested
- @classmethod
	- this is working with the class instead of the 'instance of the class' (HUH???) -- look at the link in the yt description
- MOCKING
	- this is useful for identifying if the API is down or the website is down
	example: line 109
		- this is testing whether the response is OK or not OK
	- import unittest.mock import patch
"""
| true
|
8721479f068078dd85e3a7619f6387e6c5633e94
|
Python
|
Rock1311/Python_Practise
|
/unique_list_from_two_diff_list.py
|
UTF-8
| 311
| 3.953125
| 4
|
[] |
no_license
|
##print unique list from 2 different lists
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2,2, 3, 4,5, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# Every pairwise match, duplicates included (same output as the nested loop).
c = [x for x in a for y in b if x == y]
print(c)
# First-occurrence, order-preserving de-duplication of the matches.
d = list(dict.fromkeys(c))
print(d)
| true
|
7794390e1c780b7069d0c03de4dce6d0384566d8
|
Python
|
TonyBoy22/gittest
|
/Python/displaying data/learn_venv/Lib/site-packages/ground/core/angular.py
|
UTF-8
| 890
| 2.796875
| 3
|
[] |
no_license
|
from .enums import (Kind,
Orientation)
from .hints import (Point,
QuaternaryPointFunction,
Scalar)
def kind(vertex: Point,
         first_ray_point: Point,
         second_ray_point: Point,
         dot_producer: QuaternaryPointFunction[Scalar]) -> Kind:
    """Classify the angle at ``vertex`` by the sign of the dot product of
    its two rays."""
    dot = dot_producer(vertex, first_ray_point, vertex, second_ray_point)
    return Kind(to_sign(dot))
def orientation(vertex: Point,
                first_ray_point: Point,
                second_ray_point: Point,
                cross_producer: QuaternaryPointFunction[Scalar]
                ) -> Orientation:
    """Classify the turn at ``vertex`` by the sign of the cross product of
    its two rays."""
    cross = cross_producer(vertex, first_ray_point, vertex, second_ray_point)
    return Orientation(to_sign(cross))
def to_sign(value: Scalar) -> int:
    """Map ``value`` onto -1, 0 or +1 according to its sign."""
    if not value:
        return 0
    return 1 if value > 0 else -1
| true
|
8b1f8e3fe31be65a1292f710628e9cbe6cdb4284
|
Python
|
allenlipeng47/AlgorithmPy
|
/sort/QuickSelect.py
|
UTF-8
| 745
| 3.328125
| 3
|
[] |
no_license
|
class Solution(object):
    """Quickselect: find the k-th largest element of a list (in place)."""

    def select(self, arr, k):
        # The k-th largest sits at sorted index len(arr) - k.
        return self.partition(arr, 0, len(arr) - 1, len(arr) - k)

    def partition(self, arr, low, high, k):
        # Recursive partition around arr[low]; returns the value that lands
        # at index k, or -1 when the range is empty.
        if low > high:
            return -1
        l, h, pivot = low, high, arr[low]
        while l < h:
            # Shrink from the right past elements >= pivot, fill the hole at
            # l, then shrink from the left past elements <= pivot.
            while l < h and pivot <= arr[h]:
                h = h - 1
            arr[l] = arr[h]
            while l < h and pivot >= arr[l]:
                l = l + 1
            arr[h] = arr[l]
        # Drop the pivot into its final sorted position l.
        arr[l] = pivot
        if l == k:
            return arr[l]
        elif l > k:
            return self.partition(arr, low, l - 1, k)
        else:
            return self.partition(arr, l + 1, high, k)


# Demo: 2nd largest of [4, 3, 5, 2, 1] -> 4.
arr = [4, 3, 5, 2, 1]
s = Solution()
print (s.select(arr, 2))
| true
|
976f7e66565a36cbec543f0fa08419542edb0381
|
Python
|
e-v-mst/cpp_code-kata
|
/Python_Code_Kata/ClassGraph/ClassGraph.py
|
UTF-8
| 843
| 3.078125
| 3
|
[] |
no_license
|
# Adjacency map: class name -> list of directly reachable class names.
classMap = {'object': list()}


def find_path(start, end, path=None):
    """Depth-first search for a path from ``start`` to ``end`` in classMap.

    Returns the path as a list of class names, or None when no path exists.
    Fixes two defects in the original: the visited-node guard was commented
    out (infinite recursion on cyclic input), and ``path`` used a mutable
    default argument.
    """
    if path is None:
        path = []
    if start not in classMap:
        return None
    path = path + [start]
    if start == end:
        return path
    for node in classMap[start]:
        # Skip nodes already on the current path: prevents infinite
        # recursion on cycles, and is a no-op on acyclic graphs.
        if node not in path:
            newpath = find_path(node, end, path)
            if newpath:
                return newpath
    return None
# First pass: read class declarations and record, for every base class,
# which classes derive from it (reverse edges of the inheritance graph).
for _ in range(int(input())):
    className, *clss = str(input()).split()
    if className not in classMap:
        classMap[className] = list()
    if len(clss) > 0:
        # clss[0] appears to be a separator token; the base-class names
        # start at clss[1:] — TODO confirm the exact input format.
        for i in (clss[1:]):
            if i not in classMap:
                classMap[i] = list()
            classMap[i].append(className)
# Second pass: answer queries "is there a derivation chain a -> b?".
for _ in range(int(input())):
    classes = str(input()).split()
    if find_path(classes[0], classes[1]) == None:
        print('No')
    else:
        print('Yes')
| true
|
d13aa7b84b80cca9ff6ae0563a1e7405fe3ae0aa
|
Python
|
hyusterr/Text-Mining
|
/hw3/hw3-b05702095.py
|
UTF-8
| 9,167
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[46]:
import os
import sys
import nltk
nltk.download('stopwords') # download stopwords lexion
nltk.download('punkt') # download tokenize related tools
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from num2words import num2words
from collections import Counter
# In[23]:
# load data
# Each line of training.txt is "<class_label> <doc_id> <doc_id> ...".
with open('./training.txt', 'r') as f:
    training = f.readlines()
f.close()  # redundant: the with-block already closed the file
# In[24]:
# prepare training data
classes = [i.strip().split()[0] for i in training]   # class labels, one per line
docs = [i.strip().split()[1:] for i in training]     # doc ids per class
class_N = np.array([len(i) for i in docs])           # number of docs per class
training_N = 0
for cls in docs:
    training_N += len(cls)                           # total training docs
prior = class_N / training_N                         # P(class) prior vector
# In[95]:
# read stopwords
stopwordset = set( stopwords.words( 'english' ) )
# update english numbers to stopwords list:
# "zero" .. "nineteen" are treated as stopwords as well
stopwordset.update( { num2words( i ) for i in range( 0, 20 ) } )
# initialize porter stemmer (shared by Preprocessing below)
stemmer = PorterStemmer()
def Preprocessing(sentence):
    """Tokenize ``sentence``, drop stopwords and non-alphabetic tokens,
    and Porter-stem the rest.

    Uses the module-level ``stemmer`` and ``stopwordset``.
    Returns a list of lowercased stems.
    """
    result = []
    for token in word_tokenize(sentence):
        lowered = token.lower()
        # Keep only purely alphabetic tokens that are not stopwords.
        if token.isalpha() and lowered not in stopwordset:
            result.append(stemmer.stem(lowered))
    return result
def TrainMultiNB( docs ):
    """Train a multinomial Naive Bayes model over the IRTM corpus.

    ``docs`` is a list (one entry per class) of lists of document ids; each
    document lives in 'IRTM/<id>.txt'. Returns a tuple of
    (class-conditional probability matrix, smoothed term-count matrix,
    term -> column-index dict).
    """
    # get the dimension of all words (vocabulary over the training corpus)
    Dictionary = set()
    # read through training corpus
    for cls in docs:
        for doc in cls:
            with open( 'IRTM/' + doc + '.txt', 'r' ) as f:
                text = set( Preprocessing( f.read() ) )
                f.close()  # redundant inside the with-block
            Dictionary.update( text )
    # sort the vocabulary so column indices are deterministic
    Dictionary = sorted(list(Dictionary))
    print( 'Already built MutliNomialNB Dictionary!' )
    print('length of dictionary: ', len(Dictionary))
    # build word position dictionary (term -> column index)
    pos_dict = { Dictionary[i]: i for i in range(len(Dictionary)) }
    term_N = len(Dictionary)
    # build class-TF matrix; starting every count at 1 is Laplace
    # (add-one) smoothing, so no probability below is ever zero
    cls_tf_matrix = []
    for cls in docs:
        cls_tf_vec = [1] * term_N
        for doc in cls:
            with open('IRTM/' + doc + '.txt', 'r') as f:
                text = Preprocessing( f.read() )
                f.close()
            for term in text:
                cls_tf_vec[ pos_dict[term] ] += 1
        cls_tf_matrix.append( cls_tf_vec )
    # normalise each class row into P(term | class)
    cls_posterior_matrix = np.array( [ np.array(vec) / sum(vec) for vec in cls_tf_matrix ] )
    print( 'the MultiNomial NB model is built, the shape is:', cls_posterior_matrix.shape )
    # return probability matrix, term frequency matrix, feature position matrix
    return cls_posterior_matrix, np.array( cls_tf_matrix ), pos_dict
# In[136]:
def NBPredict(cls_posterior_matrix, pos_dict, text):
    """Predict the class label ('1'..'13') of ``text`` with Naive Bayes.

    Scores each class as log P(class) plus the sum of log P(term | class)
    over the known terms of the preprocessed document, and returns the
    argmax label. Relies on the module-level ``prior`` vector and
    ``Preprocessing``.
    """
    classes = [str(i) for i in range(1, 14)]
    doc = Preprocessing(text)
    cls_scores = []
    for cls in range(len(prior)):
        cls_score = np.log(prior[cls])
        for term in doc:
            if term in pos_dict:
                # BUG FIX: accumulate log-likelihoods, not raw probabilities.
                # Adding raw P(term|class) to a log prior is not a valid
                # Naive Bayes score. Laplace smoothing in training
                # guarantees the probability is strictly positive.
                cls_score += np.log(cls_posterior_matrix[cls][pos_dict[term]])
        cls_scores.append(cls_score)
    return classes[np.argmax(np.array(cls_scores))]
# In[109]:
def TrainBernoulliNB( docs ):
    """Train a Bernoulli-style Naive Bayes model over the IRTM corpus.

    Identical to TrainMultiNB except each document contributes a *set* of
    terms, so the counts are document frequencies rather than term
    frequencies. NOTE(review): absence of terms is never modelled, so this
    is not a full Bernoulli NB — confirm that this matches the assignment.
    Returns (probability matrix, smoothed document-frequency matrix,
    term -> column-index dict).
    """
    # get the dimension of all words (vocabulary over the training corpus)
    Dictionary = set()
    # read through training corpus
    for cls in docs:
        for doc in cls:
            with open( 'IRTM/' + doc + '.txt', 'r' ) as f:
                text = set( Preprocessing( f.read() ) )
                f.close()  # redundant inside the with-block
            Dictionary.update( text )
    # sort the vocabulary so column indices are deterministic
    Dictionary = sorted(list(Dictionary))
    print( 'Already built Bernoulli Dictionary!' )
    print('length of dictionary: ', len(Dictionary))
    # build word position dictionary (term -> column index)
    pos_dict = { Dictionary[i]: i for i in range(len(Dictionary)) }
    term_N = len(Dictionary)
    # build class-DF matrix; the initial 1 is add-one smoothing
    cls_tf_matrix = []
    for cls in docs:
        cls_tf_vec = [1] * term_N
        for doc in cls:
            with open('IRTM/' + doc + '.txt', 'r') as f:
                text = set( Preprocessing( f.read() ) )
                f.close()
            # each document counts a term at most once (set above)
            for term in text:
                cls_tf_vec[ pos_dict[term] ] += 1
        cls_tf_matrix.append( cls_tf_vec )
    # normalise each class row into a probability vector
    cls_posterior_matrix = np.array( [ np.array(vec) / sum(vec) for vec in cls_tf_matrix ] )
    print( 'the Bernoulli NB model is built, the shape is:', cls_posterior_matrix.shape )
    return cls_posterior_matrix, np.array( cls_tf_matrix ), pos_dict
# In[97]:
# prepare testing set: every doc id in 1..1095 not used for training
training_set = []
for i in docs:
    training_set += i
print( 'getting testing data' )
testing_set = []
for i in range(1, 1096):
    if str(i) not in training_set:
        testing_set.append(str(i))
# In[103]:
# training
print('Training MultiNomial NB...')
cls_condi_matrix, cls_tf_matrix, pos_dict = TrainMultiNB( docs )
# In[99]:
# predict on testing set
print('Predicting...')
out = []
for txt in testing_set:
    with open('./IRTM/' + txt + '.txt') as f:
        t = f.read()
        f.close()  # redundant inside the with-block
    out.append(NBPredict( cls_condi_matrix, pos_dict, t ))
# In[100]:
# write Kaggle-style CSV: one "<doc_id>,<predicted_class>" row per test doc
outcsv = 'id,Value\n'
for idx, val in zip( testing_set, out ):
    outcsv += idx + ',' + val + '\n'
with open('MultiNB-out.csv', 'w') as f:
    f.write(outcsv)
    f.close()
print('MultiNB predicting is done! output is MultiNB-out.csv')
# In[110]:
print('Training Bernoulli NB...')
# Bernoulli Training
ber_cls_prob_matrix, ber_cls_df_matrix, ber_pos_dict = TrainBernoulliNB( docs )
# In[117]:
print('Predicting...')
# predict on testing set (reuses NBPredict with the Bernoulli matrices)
ber_out = []
for txt in testing_set:
    with open('./IRTM/' + txt + '.txt') as f:
        t = f.read()
        f.close()
    ber_out.append(NBPredict( ber_cls_prob_matrix, ber_pos_dict, t ))
ber_outcsv = 'id,Value\n'
for idx, val in zip( testing_set, ber_out ):
    ber_outcsv += idx + ',' + val + '\n'
with open('BerNB_out.csv', 'w') as f:
    f.write(ber_outcsv)
    f.close()
print('Bernoulli NB predicting is done! output is BerNB_out.csv')
# In[124]:
def CalculateChiScore( tf_vec ):
    """Chi-square-style score of one term, summed over all classes.

    ``tf_vec`` holds, per class, the number of training documents of that
    class containing the term. Uses the module globals ``docs`` (per-class
    doc-id lists) and ``training_set`` (all training doc ids).
    """
    chi_score = 0
    for cls in range( len( tf_vec ) ): # 13 classes
        present_ontopic = tf_vec[cls]
        absent_ontopic = len(docs[cls]) - present_ontopic
        present_offtopic = sum( tf_vec ) - present_ontopic
        # NOTE(review): absent_offtopic is computed but never used.
        absent_offtopic = len( training_set ) - present_ontopic - present_offtopic - absent_ontopic
        present = present_offtopic + present_ontopic
        ontopic = present_ontopic + absent_ontopic
        # Expected count N * P(present) * P(ontopic); the expression
        # simplifies to present * ontopic / N.
        Ne = len( training_set ) * present / len( training_set ) * ontopic / len( training_set )
        chi_score += ( present_ontopic - Ne ) ** 2 / Ne
    return chi_score
# print( len(ber_cls_df_matrix[0,]))
print('Applying Chi-Score Feature Selection...')
# Score every vocabulary column with the chi-square statistic.
chi_score_list = []
for i in range( len(ber_cls_df_matrix[0]) ):
    # return to original tf matrix: the training step added 1 to every
    # cell for smoothing, so subtract it back out before scoring
    # vector shape (13, 1)
    ori_df_vec = ber_cls_df_matrix[:,i] - 1
    # print( ori_df_vec )
    chi_score_list.append( (i, CalculateChiScore(ori_df_vec) ) )
# chi
# absent = len(training_set) - present
# In[130]:
# Keep the 500 highest-scoring feature indices.
chi_select = sorted( chi_score_list, key= lambda x: x[1], reverse=True )[:500]
pos_chi_list = [ x[0] for x in chi_select ]
print('Get top 500 important features!')
# In[131]:
print('Predicting on model after feature selection')
def ChiNBPredict(cls_posterior_matrix, pos_dict, pos_chi_list, text):
    """Naive Bayes prediction restricted to chi-square-selected features.

    Scores each class as log P(class) plus the sum of log P(term | class)
    over terms whose feature index appears in ``pos_chi_list``, and returns
    the argmax label. Relies on the module-level ``prior`` vector and the
    ``classes`` label list.
    """
    doc = Preprocessing(text)
    # Hoist list membership into a set: O(1) lookups instead of an O(500)
    # list scan per term. Purely a performance change.
    selected = set(pos_chi_list)
    cls_scores = []
    for cls in range(len(prior)):
        cls_score = np.log(prior[cls])
        for term in doc:
            if term in pos_dict and pos_dict[term] in selected:
                # BUG FIX: accumulate log-likelihoods, not raw
                # probabilities — adding raw P(term|class) to a log prior
                # is not a valid Naive Bayes score. Smoothing in training
                # keeps the probability strictly positive.
                cls_score += np.log(cls_posterior_matrix[cls][pos_dict[term]])
        cls_scores.append(cls_score)
    return classes[np.argmax(np.array(cls_scores))]
# In[133]:
# predict on testing set with the chi-selected multinomial model
chi_out = []
for txt in testing_set:
    with open('./IRTM/' + txt + '.txt') as f:
        t = f.read()
        f.close()  # redundant inside the with-block
    chi_out.append( ChiNBPredict( cls_condi_matrix, pos_dict, pos_chi_list, t) )
chi_outcsv = 'id,Value\n'
for idx, val in zip( testing_set, chi_out ):
    chi_outcsv += idx + ',' + val + '\n'
with open('chi-mul-out.csv', 'w') as f:
    f.write(chi_outcsv)
    f.close()
# In[135]:
# predict on testing set with the chi-selected Bernoulli model
chi_ber_out = []
for txt in testing_set:
    with open('./IRTM/' + txt + '.txt') as f:
        t = f.read()
        f.close()
    chi_ber_out.append( ChiNBPredict( ber_cls_prob_matrix, ber_pos_dict, pos_chi_list, t) )
chi_ber_outcsv = 'id,Value\n'
for idx, val in zip( testing_set, chi_ber_out ):
    chi_ber_outcsv += idx + ',' + val + '\n'
with open('chi-ber-out.csv', 'w') as f:
    f.write(chi_ber_outcsv)
    f.close()
print( 'Prediction is done! Files are chi-* files!' )
| true
|
0f930a30941d0ade8bae4a7c417ed4f22b2c1f2b
|
Python
|
luochonglie/tf
|
/open_cv/cv_02_visit_bits.py
|
UTF-8
| 1,382
| 3.21875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 16:09:58 2016
按位操作图片
@author: chonglie
"""
import cv2
import numpy as np
from open_cv import cv_01_read_copy_write as img_utils
def salt(img, num):
    """Sprinkle white (salt) noise onto an image, in place.

    :param img: grayscale (2-D) or 3-channel (3-D) image array
    :param num: number of noise pixels to draw (positions may repeat)
    :return: the same image object, modified in place
    """
    print(img.shape)
    for _ in range(num):
        # Pick a uniformly random pixel coordinate.
        row = int(np.random.random() * img.shape[0])
        col = int(np.random.random() * img.shape[1])
        print("x =", col, "y =", row)
        if img.ndim == 2:
            img[row, col] = 255
        elif img.ndim == 3:
            # Set all three channels so the pixel turns white.
            img[row, col, 0] = 255
            img[row, col, 1] = 255
            img[row, col, 2] = 255
    return img
def split_channel(img):
    """Split a BGR image into its individual color channels.

    :param img: BGR image (OpenCV channel order)
    :return: (red, green, blue) single-channel images
    """
    # cv2.split yields the channels in storage order: blue, green, red.
    blue, green, red = cv2.split(img)
    return red, green, blue
def show_add_silt():
    """Read the sample image, add 500 salt-noise pixels and display it.

    NOTE: "silt" looks like a typo for "salt"; the name is kept for
    API compatibility.
    """
    noisy = salt(img_utils.read_img(), 500)
    cv2.imshow("Salt", noisy)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def show_split_channel():
    """Read the sample image and display each color channel in its own
    correctly-titled window."""
    img = img_utils.read_img()
    r, g, b = split_channel(img)
    # BUG FIX: the window titles were shuffled (the red channel was shown
    # under "Blue", green under "Red", blue under "Green"); label each
    # channel with its actual name.
    cv2.imshow("Red", r)
    cv2.imshow("Green", g)
    cv2.imshow("Blue", b)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Script entry point: demo the channel-splitting visualisation.
if __name__ == '__main__':
    show_split_channel()
| true
|
14232a52b58c06bafd7a1084d458eca490b98674
|
Python
|
SebastianThomas1/coding_challenges
|
/hackerrank/algorithms/implementation/designer_pdf_viewer.py
|
UTF-8
| 648
| 3.21875
| 3
|
[] |
no_license
|
# Sebastian Thomas (coding at sebastianthomas dot de)
# https://www.hackerrank.com/challenges/designer-pdf-viewer
#
# Designer PDF Viewer
# Lowercase alphabet and a lookup table from letter to its 0-based index.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
IDX_OF_CHAR = dict(zip(ALPHABET, range(len(ALPHABET))))


def designer_pdf_viewer(h, word):
    """Return the area of the highlight rectangle for ``word``.

    ``h`` lists the heights of letters 'a'..'z'; the rectangle is as tall
    as the tallest letter in ``word`` and one unit wide per letter.
    """
    heights = [h[IDX_OF_CHAR[letter]] for letter in word]
    return max(heights) * len(word)
if __name__ == '__main__':
    # Sample cases from the problem statement (expected output: 9 and 28).
    print(designer_pdf_viewer(
        [1, 3, 1, 3, 1, 4, 1, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
         5, 5, 5], 'abc'))  # 9
    print(designer_pdf_viewer(
        [1, 3, 1, 3, 1, 4, 1, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
         5, 5, 7], 'zaba'))  # 28
| true
|
c511034c6195a3d9a9c89e897e4881714f5dac1c
|
Python
|
chrishart0/GildedRose-Refactoring-Kata
|
/python/test_gilded_rose.py
|
UTF-8
| 7,104
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/python3
# -*- coding: utf-8 -*-
import unittest
from gilded_rose import Item, GildedRose
class GildedRoseTest(unittest.TestCase):
    # Test suite for GildedRose.update_quality: normal degradation,
    # expiration behaviour, quality bounds, and backstage-pass rules.

    def item_are_valid_tests(self, items):
        '''
        A series of constraints an item should always abide by.
        Use this on every test.
        '''
        valid = True
        for item in items:
            #Verify item quality is not negative
            if ( item.quality < 0 ):
                valid = False
            #Verify item quality is not greater than 50
            if ( item.quality > 50 ):
                valid = False
        return valid

    def test_EOD_SellIn_value_lowered_1(self):
        #Define test item
        items = [
            Item(name="+5 Dexterity Vest", sell_in=10, quality=20),
        ]
        #Save sell_in value for use in the assert
        day0_sell_in = items[0].sell_in
        #Iterate one day
        GildedRose(items).update_quality()
        #Verify sell_in has changed by -1
        day1_sell_in = items[0].sell_in
        self.assertTrue(day0_sell_in == (day1_sell_in + 1))
        self.assertTrue(self.item_are_valid_tests(items))
        #TODO: also test what happens when value is at 1 then iterated

    def test_EOD_Quality_value_lowered(self):
        #Define test item
        items = [
            Item(name="+5 Dexterity Vest", sell_in=10, quality=2),
        ]
        #save quality value for use in the assert
        day0_quality = items[0].quality
        #Iterate one day
        GildedRose(items).update_quality()
        #Verify quality has been reduced by any amount
        day1_quality = items[0].quality
        self.assertGreater(day0_quality, day1_quality)
        self.assertTrue(self.item_are_valid_tests(items))

    def test_item_past_expiration_quality_degrades_double(self):
        #Define test item
        items = [
            Item(name="+5 Dexterity Vest", sell_in=1, quality=20),
        ]
        #Save quality value for use in the assert
        day0_quality = items[0].quality
        #Iterate 2 days, saving quality data for testing
        GildedRose(items).update_quality() #Iterate one day
        day1_quality = items[0].quality
        GildedRose(items).update_quality() #Iterate one day
        day2_quality = items[0].quality
        #Calculated differences between days
        diff_day0_day1 = day0_quality - day1_quality
        diff_day1_day2 = day1_quality - day2_quality
        #Verify when item has "Expired" it loses value twice as fast
        #Diff between day1 and day2 should be double diff between day0 and day1 because sell_in is then less than 1
        self.assertEqual(diff_day1_day2, (diff_day0_day1*2))
        self.assertTrue(self.item_are_valid_tests(items))

    def test_quality_does_not_decrease_under_0(self):
        #Define test item
        items = [
            Item(name="+5 Dexterity Vest", sell_in=10, quality=0),
        ]
        #Iterate one day
        GildedRose(items).update_quality()
        #Verify quality stays at or above zero
        final_quality = items[0].quality
        self.assertGreaterEqual(final_quality, 0)
        self.assertTrue(self.item_are_valid_tests(items))

    def test_quality_never_over_50(self):
        '''
        We are assuming that item quality never input as over 50
        Most items go down in quality, we are using Backstage passes amd Brie because they go up
        Aged Brie will go: 49 > 50 > 51
        Backstage Passes will go: 49 > 52 > 55
        '''
        params = [
            Item(name="Aged Brie", sell_in=10, quality=49),
            Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=3, quality=49),
        ]
        for param in params:
            with self.subTest(param=param):
                items = [
                    param,
                ]
                #Iterate two days
                GildedRose(items).update_quality()
                GildedRose(items).update_quality()
                # NOTE(review): final_quality is collected but never
                # asserted directly; the 50-cap is only checked via
                # item_are_valid_tests below.
                final_quality = items[0].quality
                self.assertTrue(self.item_are_valid_tests(items))

    def test_exception_items_increase_quality_over_time(self):
        '''
        'Aged Brie' and 'Backstage passes to a TAFKAL80ETC concert' are exception to the rule that quality goes down over time
        'Aged Brie' and 'Backstage passes to a TAFKAL80ETC concert' quality value goes up over time
        '''
        params = [
            Item(name="Aged Brie", sell_in=10, quality=20),
            Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=10, quality=20),
        ]
        for param in params:
            with self.subTest(param=param):
                items = [
                    param,
                ]
                #save quality value for use in the assert
                day0_quality = items[0].quality
                #Iterate one day
                GildedRose(items).update_quality()
                #Verify quality has raised by any amount
                day1_quality = items[0].quality
                self.assertGreater(day1_quality, day0_quality)
                self.assertTrue(self.item_are_valid_tests(items))

    def test_backstage_pass_increase_by_2_days_10_to_6(self):
        # Passes gain +2 quality per day when 6 <= sell_in <= 10.
        for i in range(6, 11):
            with self.subTest(i=i):
                items = [
                    Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=i, quality=20),
                ]
                #save quality value for use in the assert
                day0_quality = items[0].quality
                #Iterate one day
                GildedRose(items).update_quality()
                #Verify quality has raised by exactly 2
                day1_quality = items[0].quality
                self.assertEqual(day1_quality, (day0_quality + 2))
                self.assertTrue(self.item_are_valid_tests(items))

    def test_backstage_pass_increase_by_3_days_5_to_0(self):
        # Passes gain +3 quality per day when 1 <= sell_in <= 5.
        for i in range(1, 6):
            with self.subTest(i=i):
                items = [
                    Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=i, quality=20),
                ]
                #save quality value for use in the assert
                day0_quality = items[0].quality
                #Iterate one day
                GildedRose(items).update_quality()
                #Verify quality has raised by exactly 3
                day1_quality = items[0].quality
                self.assertEqual(day1_quality, (day0_quality + 3))
                self.assertTrue(self.item_are_valid_tests(items))

    def test_backstage_pass_quality_0_past_sell_in_date(self):
        # Passes are worthless once the concert has happened.
        items = [
            Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=0, quality=20),
        ]
        #Iterate one day
        GildedRose(items).update_quality()
        #Verify quality dropped to zero
        final_quality = items[0].quality
        self.assertEqual(final_quality, 0)
# Allow running this test module directly: `python test_gilded_rose.py`.
if __name__ == '__main__':
    unittest.main()
| true
|
7cfb5d2a8c824bc0e22cbc66f9be0bc0e1fc966a
|
Python
|
ffhan/lingua
|
/automata/fa.py
|
UTF-8
| 14,304
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
"""
Defines finite automata abstract class.
In other words, it defines an interface that all derived classes have to follow.
"""
import abc, copy
import automata.state as st
import automata.packs as pk
class FiniteAutomaton(abc.ABC):
    """
    Finite automata base abstract class. It isn't aware of transition functions.
    This is not an initialisable class.
    It serves exclusively as a template for more defined derived classes such as DFA and NFA.
    """

    def __init__(self, states, inputs, start_state):
        """
        Initialises a finite state automaton.
        Direct use is highly discouraged. Use factory method instead.
        :param states: all State objects, keyed by StateName
        :param inputs: all inputs
        :param State start_state: starting State object
        """
        self.states = states
        self.inputs = set(inputs)
        self.records = pk.Records()
        # The start state must be one of the registered states.
        if self.states.get(start_state.name, 0):
            self.start_state = start_state
            assert isinstance(self.start_state, st.State)
            self.current = {start_state}
        else:
            raise TypeError(self._state_error(start_state.name))
        # Validate the key/value types of the states mapping.
        for name, state in self.states.items():
            try:
                assert isinstance(name, st.StateName)
            except AssertionError:
                raise TypeError('Type {} is NOT StateName (Name: {})'.format(name.__class__.__name__, name))
            try:
                assert isinstance(state, st.State)
            except AssertionError:
                raise TypeError('Type {} is NOT State (Object {})'.format(state.__class__.__name__, state))
        assert isinstance(start_state, st.State)
        assert isinstance(self.start_state, st.State)
        self._check_structure()
        self._alias = dict()  # used to ensure backwards compatibility after FA minimization.

    @abc.abstractmethod
    def _check_structure(self) -> bool:
        """
        Checks if inner structure is correct. Raises a ValueError if not correct.
        :return bool: True
        """
        pass

    @property
    @abc.abstractmethod
    def accepted(self) -> bool:
        """
        Defines if current automaton state is accepted.
        :return bool: True if accepted, False if not
        """
        pass

    @property
    def accepted_states(self):
        """
        Returns states that contain value different than 0.
        :return: Accepted States
        """
        final = set()
        for state in self.states.values():
            if state.accepted:
                final.add(state)
        return final

    def _set_alias(self, state, alias):
        """
        Stores an alias for a removed State.
        :param StateName state: State that has replaced a state
        :param StateName alias: The replaced State
        :return:
        """
        self._alias[alias] = state

    def _get_alias(self, alias):
        """
        Find current State associated with a removed State.
        Returns the same State if it doesn't exist (case when State hasn't been removed from FA).
        :param StateName alias: Removed State
        :return StateName: Current State identical to the old State
        """
        # print(alias, type(alias))
        # assert isinstance(alias, StateName)
        if isinstance(alias, st.State):
            alias = alias.name
        found = self._alias.get(alias, alias)
        # Follow chains of aliases until the live state is reached.
        if found in self._alias.keys() and found != alias:
            found = self._get_alias(found)
        return found

    def reachable(self):
        """
        Removes all unreachable states.
        :return:
        """
        visited = self.start_state.indirect_reach
        states = dict()
        for state in visited:
            states[state.name] = state
        self.states = states

    def reset(self):
        """
        Resets the current FA state.
        NOTE(review): the docstring originally claimed step records are
        cleared, but the clear call is commented out — confirm intent.
        :return:
        """
        # self.records.clear()
        self.current = {self.start_state}

    def _not_defined_substring(self):
        """
        Method used to follow DRY principle in error reporting.
        May be moved to custom Error classes.
        :return str: Returns 'is not defined in this "name of the class"' string for an error.
        """
        return ' is not defined in this {}.'.format(type(self).__name__)

    def _state_error(self, state, prefix=''):
        """
        Returns a default error string with a possible prefix.
        Example:
            print(self.__state_error("q0", "Hey, this state"))
            [Out]: Hey, this state "q0" is not defined in this FA.
        :param str state: state name
        :param str prefix: Possible beginning of the error.
        :return str: state error string
        """
        start_prefix = 'S' if prefix == '' else ' s'
        # NOTE(review): type(self).__name__ is interpolated here *and* again
        # inside _not_defined_substring(), so messages contain the class
        # name twice (e.g. 'State "q0" DFA is not defined in this DFA.').
        # Looks unintended — confirm before changing the format.
        return '{}{}tate "{}" {}'.format(prefix, start_prefix, state,
                                         type(self).__name__) + self._not_defined_substring()

    def _input_error(self, inp):
        """
        Defines an input error string.
        :param str inp: value of the input
        :return str: input error string
        """
        return 'Input "{}"'.format(inp) + self._not_defined_substring()

    def __repr__(self):
        assert isinstance(self.start_state, st.State)

        def wrap_in_braces(string, last_brace_newline=False):
            """
            Wraps up a string in {}.
            :param str string: string to be wrapped
            :param bool last_brace_newline: defines if newline will be put after braces
            :return str: wrapped string
            """
            return '{' + string + ('\n}' if last_brace_newline else '}')

        def tab(string):
            """
            Puts tabs in front of all lines in a string.
            :param str string: input string
            :return str: tabbed strings
            """
            return '\t' + string.replace('\n', '\n\t')

        def newline(*lines):
            """
            Returns string composed of all line arguments with newline added between them.
            :param str lines: lines of text that need to be newlined.
            :return: full string composed of individual lines concatenated with newline in-between
            """
            res = '\n'
            for line in lines:
                res += line + '\n'
            return res[:-1]

        # Build the formal-definition sections: Q (states) and F (finals).
        states = ''
        final = ''
        for state, state_object in sorted(self.states.items(), key=lambda t : t[0]):
            states += str(state) + ','
            if state_object.value:
                # print(final, state)
                final += str(state.name) + ','
        final = 'F=' + wrap_in_braces(final[:-1])
        states = 'Q=' + wrap_in_braces(states[:-1])
        inputs = ''
        for inp in sorted(self.inputs):
            inputs += str(inp) + ','
        # Sigma (alphabet) and delta (transition functions).
        inputs = u'\u03A3=' + wrap_in_braces(inputs[:-1])
        funcs = u'\u03B4=' + wrap_in_braces(self.functions)
        try:
            assert isinstance(self.start_state, st.State)
        except AssertionError as error:
            print("Start state is not a state, it's {}".format(type(self.start_state)), error)
            raise error
        start = 'q0=' + str(self.start_state.name)
        try:
            assert isinstance(self.start_state, st.State)
        except AssertionError as error:
            print("Start state is not a state, it's {}".format(type(self.start_state)), error)
            raise error
        return '{} '.format(type(self).__name__) + wrap_in_braces(tab(
            newline(states, inputs, funcs, start, final)
        ), True)

    def __contains_helper(self, item):
        """
        Internal wrapper for states dict getter.
        :param StateName item: State name
        :return bool: True if State exists
        """
        assert type(item) is st.StateName
        return item in self.states

    def __contains__(self, item):
        """
        Wrapper allowing 'in self' notation.
        :param item: State name
        :return:
        """
        assert not isinstance(item, str)
        if isinstance(item, st.StateName):
            return self.__contains_helper(item)
        elif isinstance(item, st.State):
            return self.__contains_helper(item.name)
        return False

    # exists to precisely define how entries are handled. Enter is just interface endpoint
    def _process(self, *entry):
        """
        Processes the entry arguments, recording state/acceptance after
        every consumed input.
        :param entry: entries that have to be handled.
        :return:
        """
        records = pk.Records()
        records.add_record(pk.RecordPack(self.current, self.accepted))
        for inp in entry:
            self._access(inp)
            records.add_record(pk.RecordPack(self.current, self.accepted))
        self.records.add_record(records)

    @abc.abstractmethod
    def _access(self, value):
        """
        A method that handles the individual input passing through FA.
        :param value: input
        :return:
        """
        pass

    def enter(self, *entry):
        """
        Reads all inputs from entry and puts them through the FA.
        :param entry: All entries.
        :return: result states
        """
        self._process(*entry)
        return self.current

    def record(self, *entry):
        """
        See enter method; returns the accumulated records instead.
        :param entry: All entries.
        :return:
        """
        self._process(*entry)
        return self.records

    def output(self, *entry):
        """
        Outputs end state acceptance.
        :param entry: Inputs
        :return bool: Outputs True if end state is acceptable, False if not
        """
        self.enter(*entry)
        return self.accepted

    def distinguish(self):
        """
        Distinguishes identical states from non-identical and updates the automatum.
        :return:
        """
        raise NotImplementedError()

    def minimize(self):
        """
        Minimizes an automaton.
        :return:
        """
        raise NotImplementedError()

    @property
    def functions(self) -> str:
        """
        Returns functions for repr() function.
        :return str: string representation of transition functions
        """
        result = ''
        for state in sorted(self.states.values()):
            for event, end_states in state.transitions.items():
                # extremely bad code, but it's a part of an interface
                result += '{},{}->'.format(self._get_alias(state.name), event)
                for end in end_states:
                    # print(end, type(end))
                    result += '{},'.format(self._get_alias(end.name))
                result = result[:-1] + '\n'
        return result.strip()

    @staticmethod
    def factory(input_text, lexer):
        """
        Encapsulates FA object creation through a string and a lexer.
        Use this method for FA object creation instead of direct __init__
        :param str input_text: text that defines the FA
        :param Generator lexer: a concrete Generator implementation
        :return FA: a concrete FA object
        """
        # NOTE(review): __class__ is the lexically enclosing class
        # (FiniteAutomaton), which is abstract — calling this factory would
        # raise TypeError. Subclasses presumably need their own factory or
        # this should be a classmethod; confirm.
        lexer.scan(input_text)
        return __class__(lexer.states, lexer.inputs, lexer.start_state)

    def _create_copy(self, *args):
        """
        Handles copy creation. Enables easy extension of deepcopy.
        Internal method. Do not use directly.
        :param args: arguments for creation.
        :return: FA object
        """
        return self.__class__(*args)

    def _create_state(self, *args):
        """
        Creates a State object.
        Exists solely for easy deepcopy extensions.
        Do NOT use directly.
        :param args: arguments for State creation.
        :return State: a State object
        """
        return st.State(*args)

    def deepcopy(self):
        """
        Deep copies an instance of FA.
        :return: FA object not bound by any references to the original object
        """
        # copying states has to be done inside FAs' because references have to be the same.
        copied_states = set()
        for state in self.states.values():
            name = copy.deepcopy(state.name)
            copied_states.add(self._create_state(name.name, copy.deepcopy(state.value)))
        inputs = copy.deepcopy(self.inputs)
        states = dict()
        for state in copied_states:
            states[state.name] = state
        # Re-link all transitions so they point at the copied State objects.
        for name, state in self.states.items():
            for event, transition_states in state.transitions.items():
                transitions = set()
                for transition_state in transition_states:
                    try:
                        transitions.add(states[transition_state.name])
                    except KeyError as err:
                        raise err
                states[name].transitions[event] = transitions
        start_state = states[self.start_state.name]
        assert len(self.states) == len(states)
        for state in self.states:
            assert state in states
        return self._create_copy(states, inputs, start_state)

    def rename_state(self, old_name: st.StateName, new_name: str):
        """
        Renames a state inside the FA.
        It's critical to use this method to rename a state.
        If renaming states through iterator contain the keys in a list or a set,
        otherwise renaming will be incorrect (the dictionary changes size
        during iteration).
        For example:
            for name in list(automaton.states):
                ...
                automaton.rename_state(name, 'new_name')
                ...
        and NOT
            for name in automaton.states:
                ...
        :param StateName old_name: state name to be renamed
        :param str new_name: new state name
        :return:
        """
        state = self.states.pop(old_name)
        state.name.name = new_name
        self.states[state.name] = state
| true
|
db3b6fc01878b2b90179a7ff027dcbdc6eaebd23
|
Python
|
ArvidLandmark/Twitter-sentiment-analyzer
|
/twitter.py
|
UTF-8
| 5,024
| 2.609375
| 3
|
[] |
no_license
|
import tweepy as tw
from textblob import TextBlob
from openpyxl import Workbook
from openpyxl.styles import Font
def paste_cells(ws_feed):
    # Write one summary block per hashtag into the feed worksheet:
    # 7 rows apart, labels in column 1 and counts/amounts in column 2.
    # Relies on module-level globals: excel_pos, excel_neg, excel_neu,
    # excel_pos_amt, excel_neg_amt, and search_list().
    for il in range(len(excel_pos)):
        ws_feed.cell(il*7+2, 1).value = search_list()[il]
        ws_feed.cell(il * 7 + 2, 1).font = Font(bold=True)
        # NOTE(review): the two lines below duplicate the one above with
        # no effect — presumably leftover copy/paste; confirm and remove.
        ws_feed.cell(il * 7 + 2, 1).font = Font(bold=True)
        ws_feed.cell(il * 7 + 2, 1).font = Font(bold=True)
        ws_feed.cell(il*7+3, 1).value = "Positive tweets"
        ws_feed.cell(il*7+4, 1).value = "Negative tweets"
        ws_feed.cell(il*7+5, 1).value = "Neutral tweets"
        ws_feed.cell(il*7+6, 1).value = "Positive amount"
        ws_feed.cell(il*7+7, 1).value = "Negative amount"
        ws_feed.cell(il*7+3, 2).value = excel_pos[il]
        ws_feed.cell(il*7+4, 2).value = excel_neg[il]
        ws_feed.cell(il*7+5, 2).value = excel_neu[il]
        ws_feed.cell(il*7+6, 2).value = excel_pos_amt[il]
        ws_feed.cell(il*7+7, 2).value = excel_neg_amt[il]
def paste_tweets(ws_tweets):
    # Write the raw tweets and their dates into the worksheet: three
    # columns per hashtag (tweet text, date, spacer). Relies on the
    # module-level globals excel_all_tweets, excel_date, tweet_amount.
    for iu in range(len(excel_all_tweets)):
        ws_tweets.cell(1, iu*3+1).value = search_list()[iu]
        ws_tweets.cell(1, iu*3+1).font = Font(bold=True)
        ws_tweets.cell(1, iu*3+2).value = "Date"
        ws_tweets.cell(1, iu*3+2).font = Font(bold=True)
        for iy in range(tweet_amount):
            try:
                ws_tweets.cell(iy+2, iu*3+1).value = excel_all_tweets[iu][iy]
                ws_tweets.cell(iy+2, iu*3+2).value = excel_date[iu][iy]
            except:
                # Fewer tweets were collected than requested; bare except
                # silently skips the missing rows (also hides other errors).
                pass
def paste_excel():
    # Populate the workbook (module-level global `wb`): rename the active
    # sheet to the summary feed and add a sheet with all raw tweets.
    ws = wb.active
    ws.title = "Twitter feed"
    ws_feed = wb["Twitter feed"]
    ws_tweets = wb.create_sheet("All_tweets")
    paste_tweets(ws_tweets)
    paste_cells(ws_feed)
def search_list():
    """Build '#tag' search terms from the comma-separated module-level
    string ``b_user_hashtags``."""
    return ["#" + tag for tag in b_user_hashtags.split(",")]
def twitter_search():
    # NOTE(review): this looks like a dead stub — it is never called and
    # the actual search logic runs at module level below. Presumably it was
    # meant to wrap that logic; confirm intent before removing.
    x = 2
# Twitter API credentials (placeholders — supply real keys before running).
consumer_key = "Use your key here"
consumer_secret = "Use your key here"
access_token = "Use your key here"
access_token_secret = "Use your key here"
auth = tw.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tw.API(auth, wait_on_rate_limit=True)
# Interactive setup: project name, save location, hashtags and tweet count.
user_save = input("Enter project name: ")
loc_save = input(r"Enter where to save the project: [C:\Users\..]")
backslash_sol = "\\"
print(f"Project will be saved as {loc_save + backslash_sol + user_save}.xlsx")
wb = Workbook()
b_user_hashtags = input("Enter the hashtags you would like to search for [separated by , and no spaces]: ")
date_since = "2015-09-13"
print_list = []
print(search_list())
# Per-hashtag accumulators consumed later by paste_cells/paste_tweets.
excel_neg, excel_neu, excel_pos, excel_neg_amt, excel_pos_amt, excel_all_tweets, excel_date = [], [], [], [], [], [], []
tweet_amount = int(input("How many tweets would you like to search: "))
for hashes in range(len(search_list())):
    #new_search = search_list() + " -filter:retweets"
    tweet_list, tweet_location, sentiment_list = [], [], []
    excel_all_tweets.append([])
    excel_date.append([])
    tweet_count = 0
    neg_count, pos_count, neu_count = 0, 0, 0
    neg_amt, pos_amt, neu_amt = 0, 0, 0
    # Collect tweets
    tweets = tw.Cursor(api.search, q=search_list()[hashes] + "-filter:retweets", lang="en", since=date_since).items(tweet_amount)
    for tweet in tweets:
        try:
            tweet_list.append(tweet.text)
            tweet_location.append(tweet.user.location)
            print(f"[{tweet_count+1}/{tweet_amount}] {tweet_location[tweet_count]} \n {tweet.text}")
            tweet_count += 1
            # TextBlob sentiment: index 0 is polarity, index 1 subjectivity.
            analysis = TextBlob(tweet.text)
            print(analysis.sentiment)
            sentiment_list.append(analysis.sentiment)
            excel_all_tweets[hashes].append(tweet.text)
            excel_date[hashes].append(tweet.created_at)
            if analysis.sentiment[0] == 0:
                neu_amt += analysis.sentiment[1]
                neu_count += 1
            elif analysis.sentiment[0] > 0:
                pos_count += 1
                pos_amt += analysis.sentiment[1]
            else:
                neg_count += 1
                neg_amt += analysis.sentiment[1]
        except:
            # NOTE(review): bare except aborts the whole hashtag on any
            # error (API, encoding, ...) — consider narrowing and logging.
            break
    print_list.append(f"\nFor {search_list()[hashes]}: \nNegative tweets count: {neg_count}\nPositive tweets count: {pos_count}\nNeutral tweets count: {neu_count}\nNegative tweets amount: {neg_amt}\nPositive tweets amount: {pos_amt}")
    excel_neg.append(neg_count)
    excel_neu.append(neu_count)
    excel_pos.append(pos_count)
    excel_neg_amt.append(neg_amt)
    excel_pos_amt.append(pos_amt)
# NOTE(review): loc_save[:-1] drops the last character of the entered path
# (presumably expecting a trailing backslash) — confirm the intended format.
wb.save(loc_save[:-1] + user_save + ".xlsx")
for number_hashes in range(len(print_list)):
    print(print_list[number_hashes])
paste_excel()
wb.save(loc_save[:-1] + user_save + ".xlsx")
print(f"Project saved as {loc_save + backslash_sol + user_save}.xlsx")
| true
|
a6dc0dabf394bc92b470153a342775880d35c108
|
Python
|
hudsonchromy/kattis
|
/t9spelling.py
|
UTF-8
| 608
| 2.65625
| 3
|
[] |
no_license
|
# Multi-tap (T9) keypad encoding: for each character, which digit key to
# press and how many times; a space maps to '0'.
trans = {'a': '2', 'b': '22', 'c':'222', 'd':'3', 'e':'33', 'f':'333', 'g':'4', 'h':'44', 'i':'444', 'j':'5', 'k':'55', 'l':'555', 'm':'6', 'n':'66', 'o':'666', 'p':'7', 'q':'77', 'r':'777', 's':'7777', 't':'8', 'u':'88', 'v':'888', 'w':'9', 'x':'99', 'y':'999', 'z':'9999', ' ':'0'}
cases = int(input())
for j in range(cases):
    inp = list(input())
    outp = trans[inp[0]]
    for i in range(1, len(inp)):
        #print(outp[-1])
        # A pause (space) is needed when the next letter uses the same key
        # as the previous keypress.
        if(trans[inp[i]][0] == outp[-1]):
            outp += " " + trans[inp[i]]
        else:
            outp += trans[inp[i]]
    print("Case #{}: ".format(j+1) + outp)
| true
|
b9fa03d24b0ebde9538db4f3df24680076079862
|
Python
|
redelste/CS559
|
/hw1/Assignment1.py
|
UTF-8
| 1,078
| 3.203125
| 3
|
[] |
no_license
|
# coding: utf-8
# In[8]:
import numpy as np
import math
# In[149]:
#non custom input
def observations():
    """Draw standard-normal samples for N in {10, 100, 1000} and, for each
    size, print the sample mean followed by the sample variance.

    Output order matches the original (mean, variance per size); values are
    pseudo-random so they vary run to run. Returns None.
    """
    for n in (10, 100, 1000):
        samples = np.random.normal(0, 1, (n, 1))
        # ndarray.mean() yields a scalar; the previous builtin-sum form
        # (sum(samples) / n) printed a 1-element array because the samples
        # are shaped (n, 1).
        print(samples.mean())
        print(samples.var())
# In[150]:
# Run the fixed-size demo; prints six pseudo-random statistics.
observations()
# In[266]:
def observations2(mean, var, N):
    """Draw N samples from a normal distribution with the given mean and
    variance, print the sample mean and variance, and return the samples."""
    # np.random.normal takes a standard deviation, hence the square root.
    samples = np.random.normal(mean, var ** 0.5, N)
    print(samples.mean())
    print(samples.var())
    return samples
# In[276]:
# Combine two samples drawn with different parameters and inspect the pooled
# statistics (values are pseudo-random, so output varies run to run).
print("TESTBOI2 = mean = 1, var = 4, N = 2000")
testboi2 = observations2(1,4,2000)
print("TESTBOI3 = mean =4, var = 9, N = 1000" )
testboi3 = observations2(4,9,1000)
# np.append flattens and concatenates the two sample vectors into one array.
x = newestArr = np.append(testboi2, testboi3)
print("X")
print(x.mean())
print(x.var())
# In[268]:
# Re-run the parameterised sampler once more.
observations2(1,4,2000)
| true
|
ca480580333ed78d63e1c313be9550ba5426b761
|
Python
|
wschmitt/pynes
|
/emulator.py
|
UTF-8
| 3,303
| 2.703125
| 3
|
[] |
no_license
|
import cpu_opcodes
from cpu import CPU
from ppu import PPU
from ram import RAM
from rom import ROM
class Emulator:
    """NES-style system glue: owns the CPU, PPU, RAM and cartridge ROM and
    routes every bus access to the right device based on the address."""

    def __init__(self):
        # 2 kB of internal work RAM, mirrored across the CPU address space below.
        self.MEMORY_SIZE = 0x800  # 2kB
        self.rom = None
        self.ram = RAM(self.MEMORY_SIZE)
        # CPU/PPU get callbacks into this object so all addressing stays here.
        self.cpu = CPU(self.cpu_read, self.cpu_write)
        self.ppu = PPU(self.ppu_read, self.ppu_write)
        self.system_clock = 0

    def set_rom(self, rom: ROM):
        """Insert a cartridge and reset the CPU to its boot vector."""
        self.rom = rom
        self.cpu.reset()

    def has_rom(self):
        """Return True once a cartridge has been inserted."""
        return self.rom is not None

    def tick_clock(self):
        """Advance the system one master clock: the PPU ticks every cycle,
        the CPU every third cycle (3:1 ratio)."""
        self.ppu.clock()

        if self.system_clock % 3 == 0:
            self.cpu.clock()

        self.system_clock += 1

    # ----------------------------------- PPU BUS ADDRESSING - 16 bits range - 0x0000 to 0xFFFF
    def ppu_write(self, addr, value):
        # PPU bus is 14-bit; fold the address into 0x0000-0x3FFF.
        addr &= 0x3FFF

        # pattern memory
        # if addr <= 0x1FFF:
        #     return self.rom.get_chr_data(addr)  # self.ppu.tbl_pattern[int(addr >= 0x1000)][addr & 0x0FFF]
        # elif addr <= 0x3EFF:
        #     pass
        # palette memory
        if 0x3F00 <= addr <= 0x3FFF:
            addr &= 0x001F
            # mirroring
            # NOTE(review): these four entries alias the backdrop entries 0x10
            # lower — presumably standard NES palette mirroring; confirm.
            if addr == 0x0010 or addr == 0x0014 or addr == 0x0018 or addr == 0x001C:
                addr -= 0x10
            self.ppu.tbl_palette[addr] = value
            return value

    def ppu_read(self, addr, byte=1):
        # PPU bus is 14-bit; fold the address into 0x0000-0x3FFF.
        addr &= 0x3FFF

        # pattern memory
        if addr <= 0x1FFF:
            # Pattern tables come straight from cartridge CHR data.
            return self.rom.get_chr_data(addr)  # self.ppu.tbl_pattern[int(addr >= 0x1000)][addr & 0x0FFF]
        elif addr <= 0x3EFF:
            # Nametable range: not implemented yet.
            pass
        # palette memory
        elif 0x3F00 <= addr <= 0x3FFF:
            addr &= 0x001F
            # mirroring
            if addr == 0x0010 or addr == 0x0014 or addr == 0x0018 or addr == 0x001C:
                addr -= 0x10
            return self.ppu.tbl_palette[addr]

    def __ppu_memory_access(self, write, addr, value, word=0):
        # Placeholder for a unified PPU access path (unused).
        pass

    # ----------------------------------- CPU BUS ADDRESSING - 16 bits range - 0x0000 to 0xFFFF
    # CPU write to memory
    def cpu_write(self, addr, value):
        return self.__cpu_memory_access(True, addr, value)

    # CPU read from memory
    def cpu_read(self, addr, byte=1):
        # byte == 2 requests a little-endian 16-bit word read.
        if byte == 2:
            return self.__cpu_memory_access(False, addr, None, 1)
        return self.__cpu_memory_access(False, addr, None)

    def __cpu_memory_access(self, write, addr, value, word=0):
        """Dispatch a CPU bus access to RAM, PPU registers or cartridge ROM
        based on the address range."""
        # RAM ranges from 0x0000 to 0x2000 and uses mirroring each 0x800
        if addr <= 0x1FFF:
            if write:
                return self.ram.cpu_write(addr, value)
            else:
                if word:
                    return self.ram.get_word(addr)  # pop 2 bytes from memory
                else:
                    return self.ram.cpu_read(addr)  # pop 1 byte from memory
        # PPU Ranges from 0x2000 to 0x3FFF
        elif addr <= 0x3FFF:
            if write:
                return self.ppu.cpu_write(addr, value)
            else:
                return self.ppu.cpu_read(addr)

        # access rom otherwise
        if word:
            return self.rom.get_word(addr)
        return self.rom.get(addr)
| true
|
ae21c861cb3a7f5ea86d7e25b5e38a696ec9d47e
|
Python
|
MJSahebnasi/SearchOnGrid
|
/Main.py
|
UTF-8
| 1,136
| 3.0625
| 3
|
[] |
no_license
|
from bfs import bfs
from matrix_stuff import read_matrix, find_index, draw_path
from dfs import dfs
from A_star import a_star # , a_star_wikiVersion
from collections import deque
# Read the grid map and run three search algorithms (DFS, BFS, A*) from the
# start cell 'S'; A* additionally needs the goal cell 'A'. Each section prints
# the move sequence found, or a failure message.
matrix = read_matrix()

# print('primary map:')
# for row in matrix:
#     print(row)
# print()

(start_y, start_x) = find_index(matrix, 'S')

print('dfs:')
path = deque()
# Visited grid: one row per matrix row. (Fixed: previously both dimensions
# used len(matrix[0]), which breaks on non-square maps.)
if dfs(matrix, start_y, start_x, [[False] * len(matrix[0]) for _ in range(len(matrix))], path):
    for dir in path:
        print(dir, end=' ')
    # draw_path(matrix, path, start_y, start_x)
else:
    print('no path found!')

print('\nbfs:')
result = bfs(matrix, start_y, start_x)
if len(result) > 0:
    for dir in result:
        print(dir, end=' ')
    # draw_path(matrix, result, start_y, start_x)
else:
    print('no path found!')

print('\nA*:')
(goal_y, goal_x) = find_index(matrix, 'A')
# result = a_star_wikiVersion(matrix, start_y, start_x, goal_y, goal_x)
result = a_star(matrix, start_y, start_x, goal_y, goal_x)
if len(result) > 0:
    for dir in result:
        print(dir, end=' ')
    # draw_path(matrix, result, start_y, start_x)
else:
    print('no path found!')
| true
|
05f41c3ee0dc4b326d19ea9c29ddd10db66f311f
|
Python
|
Yu-Igarashi-aiiit/SelfStudying
|
/atcoder/test.py
|
UTF-8
| 822
| 3.015625
| 3
|
[] |
no_license
|
"map int input": {
"prefix": "mpi",
"body": [
"map(int,input().split())"
],
"description": "map int"
}
"list map int": {
"prefix": "lmpi",
"body": [
"list(map(int,input().split()))"
],
"description": "list map int"
}
"resolve": {
"prefix": "res",
"body": [
"def resolve():"
],
"description": "resolve"
}
"int input": {
"prefix": "ii",
"body": [
"int(input())"
],
"description": "int input"
}
"dfs": {
"prefix": "dfs",
"body": [
"def dfs(A):"
"# 数列の長さが N に達したら打ち切り"
"if len(A) == N:"
"# 処理"
"return"
"for v in range(M):"
"A.append(v)"
"dfs(A)"
"A.pop()"
],
"description": "dfs"
}
| true
|
73f4870a30f7f87c0db8082a910090624cdac8c0
|
Python
|
davzha/DESP
|
/datasets/polygons.py
|
UTF-8
| 1,667
| 2.828125
| 3
|
[] |
no_license
|
import math
import random
import torch
TWO_PI = 2 * math.pi
class Polygons(torch.utils.data.Dataset):
    """Synthetic dataset of regular n-gons inscribed on a circle of the given
    radius around (0.5, 0.5), zero-padded to a fixed number of points."""

    def __init__(self, n_points, n_poly, radius=0.35, noise=True, length=60000, mem_feat=False, mode=None):
        # n_points: fixed output row count (polygons are zero-padded up to it).
        # n_poly:   sequence of vertex counts to sample uniformly from.
        # noise:    if True, apply a random rotation to each polygon.
        # mem_feat: if True, append a 0/1 membership column marking real points.
        # mode:     unused here — presumably reserved for callers; confirm.
        self.n_points = n_points
        self.length = length
        self.center = torch.tensor((0.5,0.5))
        self.radius = radius
        self.noise = noise
        self.n_poly = n_poly
        self.mem_feat = mem_feat
        self.mode = mode

    def _get_n_polygon(self, n):
        """Return an (n_points, 2) — or (n_points, 3) with mem_feat — tensor
        holding a regular n-gon followed by zero padding."""
        # Evenly spaced vertex angles; a random global rotation adds "noise".
        angles = torch.linspace(0., TWO_PI - (TWO_PI / n), n)
        radius = self.radius
        if self.noise:
            angles += torch.empty(1).uniform_(0., TWO_PI)
        # target = torch.randint(self.centers.shape[0], (1,))
        center = self.center
        x = torch.cos(angles) * radius
        y = torch.sin(angles) * radius
        coo = torch.stack((x,y), dim=1)
        coo = coo + center
        # Pad with zero rows so every sample has exactly n_points rows.
        # NOTE(review): assumes n <= n_points, otherwise this raises — confirm.
        padding = torch.zeros(self.n_points - n, 2, dtype=coo.dtype)
        padding_len = padding.shape[0]
        nonpadding_len = coo.shape[0]
        coo = torch.cat([coo, padding], dim=0)
        if self.mem_feat:
            # Membership column: 1 for real vertices, 0 for padding rows.
            membership = torch.zeros(self.n_points, 1, dtype=coo.dtype)
            membership[:n].fill_(1.)
            coo = torch.cat([coo, membership], dim=-1)
        if self.n_points != coo.shape[0]:
            # Diagnostic for an unexpected shape mismatch.
            print(coo.shape, n, padding_len, nonpadding_len)
        return coo

    def __getitem__(self, item):
        """Return (n, coords) for a polygon with a randomly chosen vertex
        count; `item` is ignored because samples are generated on the fly."""
        # angles = torch.empty(self.n_points).uniform_(0., 2 * np.pi)
        n = random.choice(self.n_poly)
        coo = self._get_n_polygon(n)
        return n, coo

    def __len__(self):
        return self.length
| true
|
35b261afadd3c90e030a814fd0c274b7849ce29e
|
Python
|
ninjaihero/GeoPix
|
/GP/Assets/code/extensions/SaveLoad.py
|
UTF-8
| 6,787
| 2.640625
| 3
|
[
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
"""
SAVE LOAD is an extension for making a component or it's sub components saveable.
"""
import SaveLoadGlobal
class SaveLoad:
    """
    SaveLoad description

    TouchDesigner extension that serialises a component subtree (operators,
    custom parameters, storage, wiring) into a plain dict and can later
    recreate/restore that subtree from such a dict.
    """
    def __init__(self, ownerComp):
        # The component to which this extension is attached
        self.ownerComp = ownerComp

    def SaveLoad_GET( self ,
        root_op ,
        include_self ,
        find_children_expression ,
        sub_operators ,
        extra_page_names ,
        extra_parameter_attributes ,
        ignore_defaults ,
        find_children_expression_secondary ,
        panel_values ):
        '''
        returns a dictionary of save data from the saveable items/parameters.
        effectively this is the SAVE function.

        root_op = is the root operator this function and all sub functions uses as an origin for this iteration of saving.
        include_self = if True, will save parameters and attributes etc of the root, and not just the children.
        find_children_expression = this should be a findChildren() funcion, that evaluates properly relative to the root_op. the returned children, are saved. ie children of geoHOLDER.
        sub_operators = this should be a comma separated list of sub components to manually save data about, as well. ie. pix, hull, etc.
        extra_page_names = specify the other page names of params you wish to save, other than the default uppercase ones.
        find_children_expression_secondary = this is the search expression that searches relative to the children found from find_children_expression, ie. for Macro's
        panel_values = comma separated panel value names whose current values are captured per operator.
        '''
        # Arguments arrive as strings (typically from table cells), hence the
        # eval() calls below to turn them into live objects/booleans.
        root_op = op( root_op )
        include_self = eval( include_self )
        ignore_defaults = eval( ignore_defaults )

        try:
            root_objects = eval( find_children_expression ) if find_children_expression != '' else []
        except:
            root_objects = []
            debug('could not eval the expression:', find_children_expression)

        sub_operators = sub_operators.split(',')
        extra_page_names = [ each for each in extra_page_names.split(',') if each != '' ]
        extra_parameter_attributes = [ each for each in extra_parameter_attributes.split(',') if each != '' ]
        panel_value_names = panel_values.split(',')

        save_data = {}

        # if we have include_self flag True, we want to include the top level operator.
        if include_self == True:
            root_objects.append( root_op )

        for obj in root_objects:
            obj_data = {}

            #### save operator level attributes.
            obj_data = SaveLoadGlobal.SaveLoad_get_clone_op_attribute( obj_data, obj )
            obj_data = SaveLoadGlobal.SaveLoad_get_general_op_data( obj_data, obj )
            obj_data = SaveLoadGlobal.SaveLoad_get_panel_data( obj_data, panel_value_names, obj )

            #### save operator and sub operator level node storage.
            obj_data = SaveLoadGlobal.SaveLoad_get_op_node_storage( obj_data, obj, sub_operators )

            #### save operator hierarchy parent.
            obj_data = SaveLoadGlobal.SaveLoad_get_comp_hierearchy_inputs( obj_data, obj )
            obj_data = SaveLoadGlobal.SaveLoad_get_comp_node_inputs( obj_data, obj )

            #### save custom parameters
            pageNames = SaveLoadGlobal.SaveLoad_get_uppercase_custom_parameter_pages( obj )
            pageNames += extra_page_names # add some others we have in scene objects.
            pageNames = list(set(pageNames))

            parAttrs = SaveLoadGlobal.SaveLoad_get_typical_parameter_attributes_to_save()
            parAttrs += extra_parameter_attributes
            parAttrs = list(set(parAttrs))

            # ignoreDefault = False # setting to true, will not save params already set to default value.
            obj_data = SaveLoadGlobal.SaveLoad_get_parameter_data( obj_data, obj, pageNames, parAttrs, ignore_defaults )

            # store the primary save data.
            save_data[obj.path] = obj_data

        # Second pass: per root object, capture a "secondary layer" of
        # operators found relative to it (e.g. Macros).
        for root_object in root_objects:
            try:
                secondaryEvalExpr = "op('%s')%s"%( root_object.path,find_children_expression_secondary)
                secondaryResults = eval(secondaryEvalExpr) if find_children_expression_secondary != '' else []
            except:
                debug('could not eval the expression:', find_children_expression_secondary)
                secondaryResults = []

            all_secondary_obj_data = {}
            for each in secondaryResults:
                secondary_obj_data = {}
                # print(each)

                #### save operator level attributes.
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_clone_op_attribute( secondary_obj_data, each )
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_general_op_data( secondary_obj_data, each )
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_panel_data( secondary_obj_data, panel_value_names, each )

                #### save operator and sub operator level node storage.
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_op_node_storage( secondary_obj_data, each, sub_operators )

                #### save operator hierarchy parent.
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_comp_hierearchy_inputs( secondary_obj_data, each )
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_comp_node_inputs( secondary_obj_data, each )

                #### save custom parameters
                pageNames = SaveLoadGlobal.SaveLoad_get_uppercase_custom_parameter_pages( each )
                pageNames += extra_page_names # add some others we have in scene objects.

                parAttrs = SaveLoadGlobal.SaveLoad_get_typical_parameter_attributes_to_save()
                ignoreDefault = False
                secondary_obj_data = SaveLoadGlobal.SaveLoad_get_parameter_data( secondary_obj_data, each, pageNames, parAttrs, ignoreDefault )

                all_secondary_obj_data[each.path] = secondary_obj_data
            # print('---',root_object)
            # NOTE(review): primary data above is keyed by obj.path (a string),
            # but this indexes save_data with the operator object itself —
            # looks like it should be root_object.path; confirm.
            save_data[root_object]['__secondarylayer__'] = all_secondary_obj_data

        return save_data

    def SaveLoad_SET(self, loadDict, exact_match, isImport):
        '''
        attempts to recreate or set the operators up..
        effectively this is the LOAD function.

        exact_match: False -> keep entries whose path starts with this comp's
        path; True -> keep only an exact path match; None -> no filtering.
        '''
        # print( list(loadDict.keys()) )

        # get the root path. we'll use this to filter out save data from other parts of the network.
        rootPath = self.ownerComp.path

        if exact_match == False:
            # filter out save data from other parts of the network.
            loadDict = { k:v for k,v in loadDict.items() if k.startswith(rootPath) }
        elif exact_match == True:
            # filter out save data from other parts of the network, stricter
            loadDict = { k:v for k,v in loadDict.items() if k == rootPath }
        elif exact_match == None:
            # if the exact match cell is None, we do no filtering at all good sir.
            pass

        # create or set the initial operaetors up, returning a translation dict for any name conflicts.
        translationDict = SaveLoadGlobal.SaveLoad_create_or_set_operators( rootPath , loadDict , isImport=isImport )

        # if isImport == True:
        #     SaveLoadGlobal.SaveLoad_uniquify_names_on_operators( translationDict )

        # parent objects to their intended parents.
        SaveLoadGlobal.SaveLoad_set_parent_operators( rootPath , loadDict , translationDict )

        # wire operators to their inputs.
        SaveLoadGlobal.SaveLoad_set_input_operators( rootPath , loadDict , translationDict )
        return
| true
|
f60ee09e83d4c6943db7e63f3e7db5a01d452174
|
Python
|
cxu60-zz/LeetCodeInPython
|
/longest_common_prefix.py
|
UTF-8
| 1,415
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# encoding: utf-8
"""
longest_common_prefix.py
Created by Shengwei on 2014-07-15.
"""
# https://oj.leetcode.com/problems/longest-common-prefix/
# tags: easy / medium, array, string, pointer, logest, D&C, edge cases
"""
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution:
    # @return a string
    def longestCommonPrefix(self, strs):
        """Return the longest prefix shared by every string in strs.

        Scans column by column against the first string; empty input
        yields ''. (Py3-compatible: replaces the Py2-only xrange.)
        """
        if not strs:
            return ''
        for index, char in enumerate(strs[0]):
            for s in strs:
                # Stop at the first string that is too short or disagrees.
                if index >= len(s) or s[index] != char:
                    return strs[0][:index]
        # The whole first string is a common prefix.
        return strs[0]
############### D&C ###############
class Solution:
    # @return a string
    def longestCommonPrefix(self, array):
        """Divide & conquer: LCP(all) = LCP(LCP(left half), LCP(right half)).

        Py3 fixes: integer floor division for the split point (plain '/'
        returns a float, which is not a valid slice index) and range()
        instead of the removed xrange().
        """
        if array is None or len(array) == 0:
            return ''
        if len(array) == 1:
            return array[0]

        half = len(array) // 2  # floor division: slice indices must be ints
        longest_left = self.longestCommonPrefix(array[:half])
        longest_right = self.longestCommonPrefix(array[half:])

        # The combined prefix is the common prefix of the two halves' results.
        min_length = min(len(longest_left), len(longest_right))
        for index in range(min_length):
            if longest_left[index] != longest_right[index]:
                return longest_left[:index]
        return longest_left[:min_length]
| true
|
2cde7e1e6f52c53d37d4329de6b184439548aaed
|
Python
|
mvanveen/musicDB
|
/encounter.py
|
UTF-8
| 2,055
| 3.296875
| 3
|
[] |
no_license
|
# Encounter.py
# ==============================================================================
# Michael Van Veen
# 03/08/10
# ==============================================================================
# Checks to see if a file exists in DB.
# ==============================================================================
import sys
import os
import json
import hashlib
# Storage location: ~/.encounter/duplicateHashes holds a JSON dict of
# SHA-1 hashes of every file content already encountered.
encounterDir = os.environ['HOME'] + "/.encounter"
encounterFile = "duplicateHashes"
class Encounter():
    """Tracks SHA-1 hashes of file contents already seen, persisted as a JSON
    dict under the module-level encounterDir/encounterFile location."""

    def __init__(self):
        # Path to duplicate hash file, determined via global vars above
        sep = "" if (encounterDir[-1] == "/") else "/"
        self.__filePath = sep.join((encounterDir, encounterFile))

        self.__duplicateHashes = self.__loadHashes()
        if (self.__duplicateHashes == {}):
            self.__writeHashes()

    def check(self, fileName):
        """Return True if fileName's contents were seen before; otherwise
        record them and return False. Also False if the file is missing."""
        return(self.__checkEntity(fileName))

    def __writeHashes(self):
        # Persist the hash dictionary as JSON ('with' closes the handle,
        # which the previous explicit open/close left dangling on error).
        with open(self.__filePath, "w") as fileObj:
            fileObj.write(json.dumps(self.__duplicateHashes))

    def __loadHashes(self):
        """Load the persisted hash dict, creating the directory (and an empty
        dict) on first run."""
        if(not (os.access(encounterDir, os.F_OK))):
            os.mkdir(encounterDir)
            print("Created directory " + encounterDir)
        if(not (os.access(self.__filePath, os.F_OK))):
            print("Created hash dictionary " + self.__filePath)
            return({})
        with open(self.__filePath) as fileObj:
            return(json.loads(fileObj.read()))

    def __checkEntity(self, fileName):
        if (not os.access(fileName, os.F_OK)):
            print("Error: No such file found")
            return(False)

        hashObj = hashlib.sha1()
        # Binary mode: hashlib.update() requires bytes (Python 3) and this
        # also avoids platform newline translation skewing the hash.
        with open(fileName, "rb") as fileObj:
            hashObj.update(fileObj.read())
        fileHash = hashObj.hexdigest()

        # See if the file has been encountered
        # ('in' replaces dict.has_key(), which was removed in Python 3).
        if (fileHash in self.__duplicateHashes):
            return(True)

        # File has not been encountered. Add it to dictionary.
        self.__duplicateHashes[fileHash] = True
        return(False)

    def close(self):
        """Flush the in-memory hash dictionary back to disk."""
        self.__writeHashes()
if (__name__ == "__main__"):
    # CLI mode: check the file given as argv[1] against the hash store.
    encounter = Encounter()
    if (encounter.check(sys.argv[1])):
        # Already known: report and exit without persisting anything new.
        print("File has already been encountered")
        sys.exit()
    print("Adding file....")
    # Persist the newly recorded hash.
    encounter.close()
| true
|
0696a1eb1b199494d64f5336db4eb68bf2cfa217
|
Python
|
mrtonks/PythonCourses
|
/Python Programming/Chapter 6/Exercise2.py
|
UTF-8
| 170
| 4.03125
| 4
|
[] |
no_license
|
#Sample for loop program
#First create a list
this_list = [45, 14, 65, 42, 34]
#Create the for loop: print each element, then a footer line.
for content in this_list:
    print(content)  # call form works under both Python 2 and Python 3
print("It's the end.")
| true
|
5819198dd49c436c6c0bb999ebf234b9fb2f48f7
|
Python
|
hawksong/pythontest
|
/pylearn/src/test/qing.py
|
UTF-8
| 233
| 3.3125
| 3
|
[] |
no_license
|
'''
Created on 2017年12月8日
@author: user
'''
def printinfo(arg1, *vartuple):
    """Print a header line, the first argument, then every extra positional
    argument on its own line. Returns None."""
    print("Output is: ")
    print(arg1)
    for extra in vartuple:
        print(extra)
    return
| true
|
b01d1a24c338a720426a7ef8678d32f0d0435b76
|
Python
|
harshit-ladia/Excel-to-SQL
|
/myfile.py
|
UTF-8
| 349
| 2.859375
| 3
|
[] |
no_license
|
import pandas as pd

# Load the spreadsheet exported as CSV; rows are turned into INSERT statements.
my_data = pd.read_csv(r"file.csv")
# Target SQL table name, read interactively from stdin.
table = input()
query=[]
# my file had id as a column
# NOTE(review): 'Id'.count() skips NaN entries — presumably Id is never null,
# otherwise trailing rows are dropped; verify against the data.
for j in range(my_data['Id'].count()):
    # Builds "Insert into <table> values(col0,col1);" from the first two columns.
    query.append("Insert into " + table + " values({},{});".format(my_data.iloc[j,0],my_data.iloc[j,1]))
# Append ("a") the generated statements to the output script file.
with open("insert_query.sql","a") as sql:
    for i in query:
        sql.write(i+"\n")
| true
|
d4d721f5c3788b68f1d027480612572bcf999352
|
Python
|
DimaAnsel/GraphicsFinal-3DRenderer
|
/src/model_creator.py
|
UTF-8
| 29,476
| 2.84375
| 3
|
[] |
no_license
|
################################
# model_creator.py
# Noah Ansel
# nba38
# 2016-11-17
# ------------------------------
# Generates models of different resolutions
# for use in main rendering program.
################################
# import validation
# Import each required module individually so a missing dependency can be
# reported by name before the window closes (script is run by double-click).
fail = False
try:
  from numpy import *
except Exception:
  print("ERROR: Could not import 'numpy' module.")
  fail = True
try:
  from math import *
except Exception:
  print("ERROR: Could not import 'math' module.")
  fail = True
try:
  from copy import copy
except Exception:
  print("ERROR: Could not import 'copy' module.")
  fail = True
try:
  from time import clock
except Exception:
  print("ERROR: Could not import 'time' module.")
  fail = True
try:
  import warnings
except Exception:
  print("ERROR: Could not import 'warnings' module.")
  fail = True
# Keep the console window open so the user can read the error list.
if fail:
  input("Press ENTER to close this window.")
  exit()

# ignore unnecessary warnings
warnings.simplefilter(action = 'ignore', category = FutureWarning)
# If set to true, models will use minimal number of points. This slows down
# model generation exponentially.
USE_MINIMAL_POINTS = False

# output file (if run standalone)
OUTPUT_FILE = "models.txt"

# Flags used for cube_intersect() preliminary intersection tests.
# POS_*/NEG_*: endpoint lies at-or-beyond the positive/negative face on that
# axis; IN_*: endpoint is strictly inside the slab on that axis.
POS_X = 0x1
POS_Y = 0x2
POS_Z = 0x4
NEG_X = 0x8
NEG_Y = 0x10
NEG_Z = 0x20
IN_X = 0x40
IN_Y = 0x80
IN_Z = 0x100
# Mask of all "outside" flags (any axis, either direction).
EXT = POS_X | POS_Y | POS_Z | NEG_X | NEG_Y | NEG_Z
########
# Determines if the line between 2 points intersects a cube (centered at origin).
# Loosely based on Cohen–Sutherland clipping algorithm discussed in lecture.
# Params:
# p1, p2 : Endpoints of line to test for intersection.
# Returns: True if detects intersection, False otherwise
def cube_intersect(size, p1, p2):
  """Return True if the segment p1->p2 crosses the axis-aligned cube of edge
  length `size` centered at the origin (Cohen-Sutherland-style outcode
  trivial accept/reject, then per-face slab tests)."""
  minBound = -size / 2
  maxBound = size / 2
  # Outcodes: classify each endpoint per axis as beyond +face, beyond -face,
  # or inside the slab.
  p1sides = POS_X if (p1.x >= maxBound) else (NEG_X if (p1.x <= minBound) else IN_X)
  p1sides |= POS_Y if (p1.y >= maxBound) else (NEG_Y if (p1.y <= minBound) else IN_Y)
  p1sides |= POS_Z if (p1.z >= maxBound) else (NEG_Z if (p1.z <= minBound) else IN_Z)
  p2sides = POS_X if (p2.x >= maxBound) else (NEG_X if (p2.x <= minBound) else IN_X)
  p2sides |= POS_Y if (p2.y >= maxBound) else (NEG_Y if (p2.y <= minBound) else IN_Y)
  p2sides |= POS_Z if (p2.z >= maxBound) else (NEG_Z if (p2.z <= minBound) else IN_Z)
  comb = p1sides & p2sides
  if comb & EXT or p1sides == p2sides: # both on same side
    return False
  elif ((comb & IN_X and comb & IN_Y) or
        (comb & IN_Y and comb & IN_Z) or
        (comb & IN_Z and comb & IN_X)): # on opposite sides
    return True
  else: # check each face to see if it intersects
    # Parametrize the segment as p1 + d * lVec (lVec unit length); for each
    # axis compute the distances d to both face planes and test whether the
    # hit point lies within the face rectangle and inside the segment.
    lVec = Point(matrix = p2.mat() - p1.mat())
    maxD = lVec.mag()
    lVec.normalize() # make unit vect
    if lVec.x != 0:
      dBot = (minBound - p1.x) / lVec.x
      dTop = (maxBound - p1.x) / lVec.x
      if 0 < dBot and dBot < maxD:
        res = Point(matrix = p1.mat() + dBot * lVec.mat())
        if (minBound < res.y and res.y < maxBound and
            minBound < res.z and res.z < maxBound):
          return True
      if 0 < dTop and dTop < maxD:
        res = Point(matrix = p1.mat() + dTop * lVec.mat())
        if (minBound < res.y and res.y < maxBound and
            minBound < res.z and res.z < maxBound):
          return True
    if lVec.y != 0:
      dBot = (minBound - p1.y) / lVec.y
      dTop = (maxBound - p1.y) / lVec.y
      if 0 < dBot and dBot < maxD:
        res = Point(matrix = p1.mat() + dBot * lVec.mat())
        if (minBound < res.x and res.x < maxBound and
            minBound < res.z and res.z < maxBound):
          return True
      if 0 < dTop and dTop < maxD:
        res = Point(matrix = p1.mat() + dTop * lVec.mat())
        if (minBound < res.x and res.x < maxBound and
            minBound < res.z and res.z < maxBound):
          return True
    if lVec.z != 0:
      dBot = (minBound - p1.z) / lVec.z
      dTop = (maxBound - p1.z) / lVec.z
      if 0 < dBot and dBot < maxD:
        res = Point(matrix = p1.mat() + dBot * lVec.mat())
        if (minBound < res.x and res.x < maxBound and
            minBound < res.y and res.y < maxBound):
          return True
      if 0 < dTop and dTop < maxD:
        res = Point(matrix = p1.mat() + dTop * lVec.mat())
        if (minBound < res.x and res.x < maxBound and
            minBound < res.y and res.y < maxBound):
          return True
    return False
########
# Determines if the line between 2 points intersects a sphere (centered at origin).
# Formula from: https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
# Params:
# p1, p2 : Endpoints of line to test for intersection.
# Returns: True if detects intersection, False otherwise
def sphere_intersect(size, p1, p2):
  """Return True if the segment p1->p2 crosses the origin-centered sphere
  (note: here `size` is used directly as the sphere radius)."""
  # Unit direction of the segment plus its original length.
  direction = Point(matrix = p2.mat() - p1.mat())
  seg_len = direction.mag()
  direction.normalize()
  # Discriminant of the standard ray/sphere quadratic for a ray starting at p1.
  proj = direction.dot(p1)
  center_dist_sq = p1.x * p1.x + p1.y * p1.y + p1.z * p1.z
  disc = proj * proj - center_dist_sq + size ** 2
  if disc < 0:
    return False
  # Distance along the ray to the nearer intersection; it must fall strictly
  # inside the segment for a hit.
  hit_dist = -proj - sqrt(disc)
  return 0 < hit_dist < seg_len
################
# Point: Container class for a single 3-D point.
# Members:
# x, y, z : Coordinates of point in 3-space.
# phi : Declination from positive z-axis. None if not provided on init
# theta : Clockwise rotation from positive x-axis. None if not provided on init
# radius : Distance from origin. None if not provided on init.
class Point:
  # Constants used by att() to compute the lighting attenuation factor.
  C1 = 0.4
  C2 = 0.3
  C3 = 0.3

  ########
  # Initializes the point. Precedence of parameters is: point, matrix,
  # spherical coords, Cartesian coords.
  # Params:
  #   point   : Point to generate copy of. Supercedes other parameters.
  #   x, y, z : Coordinates of point in 3-space.
  #   phi     : Declination from positive z-axis. Supercedes x,y,z.
  #   theta   : Clockwise rotation from positive x-axis. Supercedes x,y,z.
  #   radius  : Distance from origin. Supercedes x,y,z.
  #   matrix  : Matrix to set point from. Supersedes spherical and Cartesian coordinates.
  def __init__(self,
               point  = None,
               x      = 0,
               y      = 0,
               z      = 0,
               theta  = None,
               phi    = None,
               radius = None,
               matrix = None):
    # Identity checks ('is not None') are required here: '!= None' on a numpy
    # matrix compares elementwise and raises when truth-tested.
    if point is not None:
      self.x = point.x
      self.y = point.y
      self.z = point.z
      self.phi = point.phi
      self.theta = point.theta   # fixed: previously copied point.phi
      self.radius = point.radius
      return
    elif matrix is not None:
      self.x = matrix.item((0,0))
      self.y = matrix.item((1,0))
      self.z = matrix.item((2,0))
      self.phi = None
      self.theta = None
      self.radius = None
    elif theta is not None and phi is not None and radius is not None:
      # Spherical -> Cartesian conversion.
      self.x = radius * sin(phi) * cos(theta)
      self.y = radius * sin(phi) * sin(theta)
      self.z = radius * cos(phi)
    else:
      self.x = x
      self.y = y
      self.z = z
    # always save for reference
    self.phi = phi
    self.theta = theta
    self.radius = radius

  ########
  # Generates a string representation of the point.
  # Coordinates are comma-separated 9-decimal-precision floats.
  # Returns: String representation of the point.
  def __str__(self):
    return "{:.9f},{:.9f},{:.9f}".format(self.x, self.y, self.z)

  ########
  # Determines if two points are identical (componentwise equality).
  # Params:
  #   other : Point to compare with
  # Returns: True if identical, False otherwise
  # Raises:  TypeError when compared against a non-Point, non-None value.
  def __eq__(self, other):
    if other is None:
      return False
    elif isinstance(other, Point):
      return (self.x == other.x and self.y == other.y and self.z == other.z)
    else:
      raise TypeError("Can only compare of type Point.")

  ########
  # Returns a 4x1 homogeneous vector matrix representation for translations.
  def mat(self):
    return mat([[self.x], [self.y], [self.z], [1]])

  ########
  # Computes the attenuation factor of this point for shading,
  # clamped to a maximum of 1.
  def att(self):
    return min(1 / (Point.C1 + Point.C2 * self.mag() + Point.C3 * (self.x * self.x + self.y * self.y + self.z * self.z)),
               1)

  ########
  # Computes the magnitude (distance from origin) of this vector.
  def mag(self):
    return sqrt(self.x * self.x + self.y * self.y + self.z * self.z)

  ########
  # Computes the spherical coordinates of this point.
  # Returns: θ, ϕ, and radius of point in spherical coordinates
  def to_spherical(self):
    r = self.mag()
    phi = acos(self.z/r)
    theta = atan2(self.y, self.x)
    return (theta, phi, r)

  ########
  # Converts the given point into a unit vector (mutates in place).
  def normalize(self):
    mag = self.mag()
    self.x /= mag
    self.y /= mag
    self.z /= mag

  ########
  # Sets a point from a vector matrix.
  # Params:
  #   matrix : Numpy matrix to set given point from.
  def set(self, matrix = None):
    # 'is not None' avoids numpy's elementwise '!=' comparison (see __init__).
    if matrix is not None:
      self.x = matrix.item((0,0))
      self.y = matrix.item((1,0))
      self.z = matrix.item((2,0))

  ########
  # Computes the dot product of two points.
  # Params:
  #   other : Point to compute dot product with.
  # Returns: Computed dot product of two points.
  def dot(self, other):
    return self.x * other.x + self.y * other.y + self.z * other.z
################
################
# Model: Class containing all information about a single 3-D model.
# Members:
# name : A string name for the model
# points : List containing all points in model
# norms : List containing all surface normals in model
# tris : List containing all triangles in model
# offset : Offset of this model from the origin, stored as a Point.
# rotation : Rotation of this model, stored as a Point.
# scale : Scaling of this model, stored as a Point.
# color : 3-tuple of RGB format representing model's color
# specular : Specular coefficient. Higher means specular lighting is brighter.
# diffuse : Diffuse coefficient. Higher means diffuse lighting is brighter.
# _size : Size parameter used when calling intersectFcn
# _intersectFcn : Function to be used when determining if given line segment
# intersects with this model. See intersects().
class Model:
DEFAULT_NAME = "unnamedModel"
DEFAULT_COLOR = (1.0, 1.0, 1.0)
DEFAULT_SPECULAR = 0.6
DEFAULT_DIFFUSE = 0.5
################
# Triangle: Interior class to Model that must refer to its parent.
# Should not be instantiated outside of Model's methods.
# Members:
# _parent : Reference to model used for points and norms lists
# p1, p2, p3 : Indexes to parent's points list
# norm : Index to parent's norms list
# color : 3-tuple of RGB format representing triangle's color
# If None or not provided, uses parent's color
class Triangle:
########
# Generates a new triangle.
# Params:
# parent : Reference to model used for points and norms lists
# p1, p2, p3 : Indexes to parent's points list
# norm : Index to parent's norms list
# color : 3-tuple of RGB format representing triangle's color
# If None or not provided, uses parent's color
# triangle : Triangle to copy information from.
def __init__(self, parent, p1 = None, p2 = None, p3 = None, norm = None, color = None, triangle = None):
self._parent = parent
if triangle != None:
self.p1 = triangle.p1
self.p2 = triangle.p2
self.p3 = triangle.p3
self.norm = triangle.norm
self.color = triangle.color
else:
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.norm = norm
if color != None:
self.color = color
else:
self.color = parent.color
########
# Generates a string representation of this triangle.
# Points are separated by spaces, with the last point being the normal.
# Returns: String representation of the triangle
def __str__(self):
return "{} {} {} {}".format(str(self._parent.points[self.p1]),
str(self._parent.points[self.p2]),
str(self._parent.points[self.p3]),
str(self._parent.norms[self.norm]))
# Triangle
################
########
# Generates a model with the given name and parameters.
# Params:
# name : A string name for this model
# color : A 3-tuple of RGB format representing model's color
# points : List of points to associate with this model.
# norms : List of normals to associate with this model.
# tris : List of triangles to associate with this model.
# offset : Offset of this model from the origin, stored as a Point.
# rotation : Rotation of this model, stored as a Point.
# scale : Scaling of this model, stored as a Point.
# specular : Specular coefficient. Higher means specular lighting is brighter.
# diffuse : Diffuse coefficient. Higher means diffuse lighting is brighter.
# size : Size parameter used when calling intersectFcn
# intersectFcn : Function to be used when determining if given line segment
# intersects with this model. See intersects().
def __init__(self,
name = DEFAULT_NAME,
color = None,
points = None,
norms = None,
tris = None,
offset = None,
rotation = None,
scale = None,
specular = DEFAULT_SPECULAR,
diffuse = DEFAULT_DIFFUSE,
size = 1,
intersectFcn = cube_intersect):
self.name = name
self.points = []
if points != None:
for p in points:
self.points.append(Point(p))
self.norms = []
if norms != None:
for n in norms:
self.norms.append(Point(n))
self.tris = []
if tris != None:
for t in tris:
self.tris.append(Model.Triangle(self, triangle = t))
if offset != None:
self.offset = Point(offset)
else:
self.offset = Point(x = 0, y = 0, z = 0)
if rotation != None:
self.rotation = Point(rotation)
else:
self.rotation = Point(phi = 0, theta = 0, radius = 1)
if scale != None:
self.scale = Point(rotation)
else:
self.scale = Point(x = 1, y = 1, z = 1)
if color != None:
self.color = color
else:
self.color = copy(Model.DEFAULT_COLOR)
self.specular = specular
self.diffuse = diffuse
self._size = size
self._intersectFcn = intersectFcn
########
# Generates a multi-line string representation of the model.
# Each line represents a triangle in the model.
# Returns: String representation of the model
def __str__(self):
retStr = "{} {} ({},{},{})\n".format(self.name,
len(self.tris),
self.color[0],
self.color[1],
self.color[2])
for tri in self.tris:
retStr += str(tri) + "\n"
return retStr
########
# Creates a new normal and appends it to the model's list. If normal already exists
# in list and USE_MINIMAL_POINTS set to True, does not create new normal.
# Params:
# x, y, z : Cartesian coordinates of the point.
# phi : Declination from positive z-axis.
# theta : Rotation clockwise from positive x-axis.
# radius : Distance from the origin.
# Returns: Index of the normal
def add_norm(self, x = None, y = None, z = None, phi = None, theta = None, radius = None):
if x != None and y != None and z != None:
if USE_MINIMAL_POINTS:
for i in range(len(self.norms)): # search for already existing point
if x == self.norms[i].x and y == self.norms[i].y and z == self.norms[i].z:
return i
p = Point(x = x, y = y, z = z)
self.norms.append(p)
return len(self.norms) - 1
elif phi != None and theta != None and radius != None:
if USE_MINIMAL_POINTS:
for i in range(len(self.norms)):
if phi == self.norms[i].phi and theta == self.norms[i].theta and radius == self.norms[i].radius:
return i
p = Point(phi = phi, theta = theta, radius = radius)
self.norms.append(p)
return len(self.norms) - 1
raise ValueError("Unrecognized params: x={} y={} z = {} phi={} theta={} radius={}".format(x,y,z,phi,theta,radius))
########
# Creates a new point and appends it to the model's list. If point already exists
# in list and USE_MINIMAL_POINTS set to True, does not create new point.
# Params:
# x, y, z : Cartesian coordinates of the point.
# phi : Declination from positive z-axis.
# theta : Rotation clockwise from positive x-axis.
# radius : Distance from the origin.
# Returns: Index of the point
def add_point(self, x = None, y = None, z = None, phi = None, theta = None, radius = None):
if x != None and y != None and z != None:
if USE_MINIMAL_POINTS:
for i in range(len(self.points)): # search for already existing point
if x == self.points[i].x and y == self.points[i].y and z == self.points[i].z:
return i
p = Point(x = x, y = y, z = z)
self.points.append(p)
return len(self.points) - 1
elif phi != None and theta != None and radius != None:
if USE_MINIMAL_POINTS:
for i in range(len(self.points)):
if phi == self.points[i].phi and theta == self.points[i].theta and radius == self.points[i].radius:
return i
p = Point(phi = phi, theta = theta, radius = radius)
self.points.append(p)
return len(self.points) - 1
raise ValueError("Unrecognized params: x={} y={} z = {} phi={} theta={} radius={}".format(x,y,z,phi,theta,radius))
########
# Creates a new triangle and appends it to the model's list.
# Params:
# p1, p2, p3 : Indexes of triangle corners in the model's points array.
# norm : Index of triangle normal in the model's norms array.
# Returns: Index of the new triangle
def add_tri(self, p1, p2, p3, norm):
tri = Model.Triangle(parent = self, p1 = p1, p2 = p2, p3 = p3, norm = norm)
self.tris.append(tri)
return len(self.tris) - 1
    ########
    # Divides all triangles in model into 4 smaller triangles. Used to generate
    # higher resolution cube.
    # Each triangle is replaced by three corner triangles plus the central
    # triangle formed by the edge midpoints; the original Triangle object is
    # reused for the corner at p1 (its p2/p3 are rewritten at the end).
    def subdivide_triangles(self):
        for i in range(len(self.tris)-1, -1, -1): # from back to not interfere when inserting
            tri = self.tris[i]
            # Midpoint of edge p1-p2.
            midX = (self.points[tri.p1].x + self.points[tri.p2].x) / 2
            midY = (self.points[tri.p1].y + self.points[tri.p2].y) / 2
            midZ = (self.points[tri.p1].z + self.points[tri.p2].z) / 2
            mid12idx = self.add_point(x = midX, y = midY, z = midZ)
            # Midpoint of edge p2-p3.
            midX = (self.points[tri.p2].x + self.points[tri.p3].x) / 2
            midY = (self.points[tri.p2].y + self.points[tri.p3].y) / 2
            midZ = (self.points[tri.p2].z + self.points[tri.p3].z) / 2
            mid23idx = self.add_point(x = midX, y = midY, z = midZ)
            # Midpoint of edge p3-p1.
            midX = (self.points[tri.p3].x + self.points[tri.p1].x) / 2
            midY = (self.points[tri.p3].y + self.points[tri.p1].y) / 2
            midZ = (self.points[tri.p3].z + self.points[tri.p1].z) / 2
            mid31idx = self.add_point(x = midX, y = midY, z = midZ)
            # Central triangle plus the corners at p3 and p2; all four
            # sub-triangles share the parent triangle's normal.
            newTri = Model.Triangle(parent = self, p1 = mid23idx, p2 = mid31idx, p3 = mid12idx, norm = tri.norm)
            self.tris.insert(i + 1, newTri)
            newTri = Model.Triangle(parent = self, p1 = mid31idx, p2 = mid23idx, p3 = tri.p3, norm = tri.norm)
            self.tris.insert(i + 1, newTri)
            newTri = Model.Triangle(parent = self, p1 = mid12idx, p2 = tri.p2, p3 = mid23idx, norm = tri.norm)
            self.tris.insert(i + 1, newTri)
            # must occur after above
            # (shrinks the original triangle to the corner at p1)
            tri.p2 = mid12idx
            tri.p3 = mid31idx
    ########
    # Determines if the line segment intersects the given model using the
    # intersect function provided on initialization.
    # Params:
    #   p1, p2 : Line endpoints.
    # Returns: True if segment intersects model, False otherwise
    def intersects(self, p1, p2):
        # Delegates to the pluggable intersect function (e.g. cube_intersect
        # or sphere_intersect), passing the model's size parameter through.
        return self._intersectFcn(self._size, p1, p2)
# Model
################
################
# Light: a point light source in world space.
# Members:
#   loc   : Location of light in the world space (Point).
#   color : Light color as a 3-tuple of RGB values between 0 and 1.
class Light:
    DEFAULT_COLOR = (1.0, 1.0, 1.0)
    ########
    # Stores the light position and color.
    # Params:
    #   x, y, z : Location of light in the world space.
    #   color   : RGB 3-tuple with components in [0, 1].
    def __init__(self, x = 0, y = 0, z = 0, color = None):
        self.loc = Point(x = x, y = y, z = z)
        self.color = color if color != None else copy(Light.DEFAULT_COLOR)
    ########
    # Returns a vector matrix representation of location for translations.
    def mat(self):
        return self.loc.mat()
    # Light
################
########
# Generates an approximation of a sphere with provided radius.
# Params:
#   radius       : Radius of sphere
#   numLaterals  : Number of lateral divisions
#   numVerticals : Number of vertical (longitudinal) divisions
#   color        : Model color as a 3-tuple of RGB values between 0 and 1.
# Returns: Model object approximation of sphere
# (Cleanup: removed unused `norm = m.norms[normIdx]` bindings and a dead
# pre-loop `rightTheta` assignment; generated geometry is unchanged.)
def generate_sphere(radius = 1, numLaterals = 4, numVerticals = 6, color = Model.DEFAULT_COLOR):
    m = Model(color = color, size = radius * 2, intersectFcn = sphere_intersect)
    thetaStep = 2 * pi / numVerticals # clockwise rot from x
    thetaInc = thetaStep * 2 / numLaterals # each ring is shifted by 2/numLaterals rotations
    phiStep = pi / numLaterals # declination from pos z
    startTheta = 0
    # goes clockwise from x-axis
    for i in range(numVerticals): # for each vertical strip
        botPhi = 0
        leftTheta = startTheta
        rightTheta = startTheta + thetaStep
        startTheta += thetaStep
        botLeftIdx = m.add_point(phi = 0, theta = leftTheta, radius = radius)
        botLeft = m.points[botLeftIdx]
        botRightIdx = m.add_point(phi = 0, theta = rightTheta, radius = radius)
        botRight = m.points[botRightIdx]
        # goes down from z-axis
        for j in range(numLaterals):
            botPhi += phiStep
            leftTheta += thetaInc
            rightTheta += thetaInc
            topLeft = botLeft # copy previous data
            topLeftIdx = botLeftIdx
            topRight = botRight
            topRightIdx = botRightIdx
            botLeftIdx = m.add_point(phi = botPhi, theta = leftTheta, radius = radius)
            botLeft = m.points[botLeftIdx]
            if j > 0: # not the top, so include inverted triangle
                # Normal approximated by the triangle's spherical centroid.
                phi = (topLeft.phi + topRight.phi + botLeft.phi) / 3
                theta = (topLeft.theta + topRight.theta + botLeft.theta) / 3
                normIdx = m.add_norm(phi = phi, theta = theta, radius = radius)
                m.add_tri(p1 = topLeftIdx, p2 = topRightIdx, p3 = botLeftIdx, norm = normIdx)
            if j < numLaterals - 1: # not the bottom, so include upright triangle
                botRightIdx = m.add_point(phi = botPhi, theta = rightTheta, radius = radius)
                botRight = m.points[botRightIdx]
                phi = (topRight.phi + botLeft.phi + botRight.phi) / 3
                theta = (topRight.theta + botLeft.theta + botRight.theta) / 3
                normIdx = m.add_norm(phi = phi, theta = theta, radius = radius)
                m.add_tri(p1 = topRightIdx, p2 = botLeftIdx, p3 = botRightIdx, norm = normIdx)
    return m
########
# Generates a cube with provided side length.
# Params:
#   size        : Side length
#   trisPerSide : Triangles to generate per side. Must be odd power of 2.
#   color       : Model color as a 3-tuple of RGB values between 0 and 1.
# Returns: Model object of cube
# Raises: ValueError when trisPerSide is not an odd power of 2.
# (Cleanup: removed unused `pX = m.points[...]` / `n = m.norms[...]`
# bindings; geometry generation order is unchanged.)
def generate_cube(size = 1, trisPerSide = 2, color = Model.DEFAULT_COLOR):
    # BUG FIX: the model's intersect size was hard-coded to 1; pass the
    # actual side length through (identical behavior for the default size).
    m = Model(color = color, size = size)
    halfSize = size / 2
    # number of times to recursively subdivide triangles
    if trisPerSide % 2 != 0 or int(log2(trisPerSide)) % 2 == 0: # input validation
        raise ValueError("Invalid number of triangles. Must be odd power of 2.")
    numDivisions = (int(log2(trisPerSide)) - 1) // 2
    # generate pos x-y and neg x-y separately; mul mirrors the coordinates
    for mul in (-1, 1):
        # upper A tris (face with normal along +/- x)
        p1idx = m.add_point(x = mul * halfSize, y = -mul * halfSize, z = halfSize)
        p2idx = m.add_point(x = mul * halfSize, y = -mul * halfSize, z = -halfSize)
        p3idx = m.add_point(x = mul * halfSize, y = mul * halfSize, z = halfSize)
        nIdx = m.add_norm(x = mul, y = 0, z = 0)
        m.add_tri(p1 = p1idx, p2 = p2idx, p3 = p3idx, norm = nIdx)
        # lower A tris
        p1idx = m.add_point(x = mul * halfSize, y = mul * halfSize, z = -halfSize)
        m.add_tri(p1 = p1idx, p2 = p3idx, p3 = p2idx, norm = nIdx)
        # lower B tris (face with normal along +/- y)
        p2idx = m.add_point(x = -mul * halfSize, y = mul * halfSize, z = -halfSize)
        nIdx = m.add_norm(x = 0, y = mul, z = 0)
        m.add_tri(p1 = p1idx, p2 = p2idx, p3 = p3idx, norm = nIdx)
        # upper B tris
        p1idx = m.add_point(x = -mul * halfSize, y = mul * halfSize, z = halfSize)
        m.add_tri(p1 = p1idx, p2 = p3idx, p3 = p2idx, norm = nIdx)
        # top tris (normal along +z)
        p2idx = m.add_point(x = mul * halfSize, y = -mul * halfSize, z = halfSize)
        nIdx = m.add_norm(x = 0, y = 0, z = 1)
        m.add_tri(p1 = p3idx, p2 = p1idx, p3 = p2idx, norm = nIdx)
        # bot tris (normal along -z)
        p1idx = m.add_point(x = mul * halfSize, y = -mul * halfSize, z = -halfSize)
        p2idx = m.add_point(x = -mul * halfSize, y = -mul * halfSize, z = -halfSize)
        p3idx = m.add_point(x = mul * halfSize, y = mul * halfSize, z = -halfSize)
        nIdx = m.add_norm(x = 0, y = 0, z = -1)
        m.add_tri(p1 = p1idx, p2 = p2idx, p3 = p3idx, norm = nIdx)
    # Refine: each subdivision pass quadruples the triangle count.
    while numDivisions > 0:
        m.subdivide_triangles()
        numDivisions -= 1
    return m
########
# Generates a torus with provided size.
# Params:
#   outRadius    : Outer radius
#   inRadius     : Inner radius
#   numStripes   : Number of segments per cross-section
#   numDivisions : Number of divisions around center (number cross-sections)
# Returns: Model object (currently an EMPTY model -- generation is a stub).
def generate_torus(outRadius = 1, inRadius = 0.5, numStripes = 6, numDivisions = 6):
    m = Model()
    # TODO: generate torus
    faceStep = 2 * pi / numDivisions # clockwise rot from posx (model view)
    stripeStep = 2 * pi / numStripes # clockwise rot from posx (division view)
    stripeInc = stripeStep * 2 / numDivisions # each stripe is shifted by 2/numDivisions rotations
    torusRad = (outRadius - inRadius) / 2 # actual radius used for calculations
    torusMid = (outRadius + inRadius) / 2 # center point of each cross-section
    # Loop skeleton for future stripe-by-stripe generation; currently a no-op.
    for i in range(numStripes):
        for j in range(numDivisions):
            pass
    # NOTE: Currently, this is an empty model. Torus generation not supported yet.
    return m
########
# Main code architecture if run standalone.
# Times generation of several models and saves models to file.
# NOTE(review): `clock` presumably comes from the time module; time.clock
# was removed in Python 3.8 -- confirm and migrate to time.perf_counter.
if __name__ == "__main__":
    # generate various models at three levels of detail
    print("Generating models... ", end = "")
    start = clock()
    sphereLowModel = generate_sphere(radius = 1, numLaterals = 6, numVerticals = 9)
    sphereLowModel.name = "sphereLow"
    cubeLowModel = generate_cube(size = 1, trisPerSide = 2)
    cubeLowModel.name = "cubeLow"
    torusLowModel = generate_torus(outRadius = 1, inRadius = 0.5, numStripes = 6, numDivisions = 6)
    torusLowModel.name = "torusLow"
    sphereMedModel = generate_sphere(radius = 1, numLaterals = 8, numVerticals = 12)
    sphereMedModel.name = "sphereMed"
    cubeMedModel = generate_cube(size = 1, trisPerSide = 8)
    cubeMedModel.name = "cubeMed"
    torusMedModel = generate_torus(outRadius = 1, inRadius = 0.5, numStripes = 8, numDivisions = 9)
    torusMedModel.name = "torusMed"
    sphereHighModel = generate_sphere(radius = 1, numLaterals = 16, numVerticals = 24)
    sphereHighModel.name = "sphereHigh"
    cubeHighModel = generate_cube(size = 1, trisPerSide = 32)
    cubeHighModel.name = "cubeHigh"
    torusHighModel = generate_torus(outRadius = 1, inRadius = 0.5, numStripes = 10, numDivisions = 12)
    torusHighModel.name = "torusHigh"
    elapsed = clock() - start
    print("Completed in {:.3f}s.".format(elapsed))
    # validate file okay (OUTPUT_FILE is a module-level constant)
    try:
        f = open(OUTPUT_FILE, 'w')
    except Exception:
        print("ERROR: Could not open output file.")
        f = None
    if f != None:
        # write objects to file, one Model.__str__ dump per model
        print("Writing models to file... ", end = "")
        start = clock()
        f.write(str(sphereLowModel))
        f.write(str(cubeLowModel))
        f.write(str(torusLowModel))
        f.write(str(sphereMedModel))
        f.write(str(cubeMedModel))
        f.write(str(torusMedModel))
        f.write(str(sphereHighModel))
        f.write(str(cubeHighModel))
        f.write(str(torusHighModel))
        f.close()
        elapsed = clock() - start
        print("Completed in {:.3f}s.".format(elapsed))
    input("Press ENTER to close this window.")
| true
|
27f1b357d40888e1fd534944aad45048e59e4035
|
Python
|
crudelens/prog_fr_art
|
/day03/todo.py
|
UTF-8
| 1,234
| 4.09375
| 4
|
[] |
no_license
|
# Shopping list
# Module-level shared state mutated by the add/remove helpers below.
shopping_list = []
# Menu
def menu_serve():
    """Display the interactive menu options."""
    options = '''
    Choose an option:
    1. Add Item
    2. Remove Item
    3. Show List
    4. Quit
    '''
    print(options)
# Add item
def add_item(item_name):
    # Appends to the module-level shopping_list and echoes the new state.
    shopping_list.append(item_name)
    print(f'{item_name} added to list\n {shopping_list}')
# remove item
def remove_item(item_name):
    # Removes the first occurrence; raises ValueError when absent, which the
    # caller's `except ValueError` handler catches.
    shopping_list.remove(item_name)
    print(f'{item_name} removed from list\n {shopping_list}')
# see all items
def print_list():
    """Print the current contents of the shared shopping list."""
    print(list(shopping_list))
# quit program
def quit_prog():
    """Print the farewell message shown when the user exits."""
    farewell = "Quiting Program"
    print(farewell)
if __name__ == "__main__":
    # Simple REPL: show the menu and dispatch choices until the user quits.
    while True:
        menu_serve()
        try:
            input_frm_user = int(input("Enter choice: "))
            if input_frm_user == 1:
                item = input("Enter item name: ")
                add_item(item)
            elif input_frm_user == 2:
                item = input("Enter item name: ")
                remove_item(item)
            elif input_frm_user == 3:
                print_list()
            elif input_frm_user == 4:
                quit_prog()
                break
            else:
                # Unknown number: redisplay the menu.
                continue
        except ValueError as e:
            # Non-integer input (or remove of a missing item) lands here.
            print("Bakwass Input")
| true
|
f20859d7c36966a687091744abc2a3c2b62114fe
|
Python
|
jkoser/euler
|
/work/p314.py
|
UTF-8
| 3,947
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
from fractions import Fraction
from math import sqrt
# Best area/perimeter ratio found so far (shared across recursive calls).
ratiomax = 0
# Path (list of (x, y) lattice points) achieving ratiomax.
bestpath = []
def step(r, pos, slope, path, area, perim):
    """Recursively extend a monotone lattice path from `pos` toward the
    diagonal, accumulating enclosed area and perimeter; updates the module
    globals `ratiomax`/`bestpath` whenever a better area/perimeter ratio
    is found. (Search for Project Euler problem 314.)
    """
    global ratiomax
    global bestpath
    x, y = pos
    # Close to the diagonal: finish with a diagonal staircase plus a final
    # half-step, then score the completed path.
    if y == x - 1 or (slope == 1 and (x + y) % 2 == 1):
        d = (x - y - 1) // 2
        pos1 = x1, y1 = (x - d), (y + d)
        perim1 = perim + d * sqrt(2)
        da = d * (d + 1)
        pos1 = x1, y1 = (x1 - 1), (y1 + 1)
        path1 = path + [pos1]
        perim1 += sqrt(2) / 2
        da += Fraction(1, 4)
        ratio = (area + da) / perim1
        if ratio > ratiomax:
            #print(path1, area + da, perim1)
            #print((area + da) / perim1)
            ratiomax = ratio
            bestpath = path1
        return
    # Otherwise try every convex-preserving segment (dx, dy); slope must
    # strictly increase to keep the path convex.
    for dy in range(x - y, 0, -1):
        if slope < 0:
            dxmin = 0
        else:
            dxmin = int(slope * dy) + 1
        for dx in range(dxmin, min(dy, x - y - dy) + 1):
            # Area contribution of the trapezoid swept by this segment.
            da = dy * (Fraction(1, 2) * dy + (x - y - dy) - Fraction(1, 2) * dx)
            x1, y1 = (x - dx, y + dy)
            pos1 = x1, y1
            path1 = path + [pos1]
            perim1 = perim + sqrt(dx * dx + dy * dy)
            if x1 == y1:
                # Reached the diagonal exactly: score the path.
                ratio = (area + da) / perim1
                if ratio > ratiomax:
                    #print(path1, area + da, perim1)
                    #print((area + da) / perim1)
                    ratiomax = ratio
                    bestpath = path1
            else:
                slope1 = dx / dy
                step(r, pos1, slope1, path1, area + da, perim1)
def show_path(r, path):
    """Print an (r+1) x (r+1) ASCII grid, top row first; cells on the path
    (or its mirror across the diagonal) are marked 'o', the rest '.'."""
    for y in range(r, -1, -1):
        row = ""
        for x in range(0, r + 1):
            on_path = (x, y) in path or (y, x) in path
            row += ' o' if on_path else ' .'
        print(row)
def p314(r, showpath=True):
    """Brute-force search driver: resets the global best, explores all
    paths starting at (r, 0) via step(), then prints the best path and
    ratio (optionally rendering the path as ASCII art)."""
    global ratiomax
    global bestpath
    pos = (r, 0)
    path = [pos]
    ratiomax = 0
    step(r, pos, -1, path, 0, 0)
    print(bestpath)
    print(ratiomax)
    if showpath:
        show_path(r, bestpath)
def p314_dp(r, showpath=True):
    """Dynamic-programming variant of the search.

    m[x][y] stores the best (area, perimeter, path) tuple for a path from
    the diagonal to lattice point (x, y); the answer is the maximum
    area/perimeter ratio over all (x, 0) endpoints. Prints the best path
    and ratio, and optionally renders the path with show_path.
    """
    # (area, pathlen, path) tuple matrix; each row is replaced in the loop.
    m = [[]] * (r + 1)
    for y in range(r, -1, -1):
        m[y] = [None] * (r + 1)
        m[y][y] = (Fraction(0, 1), 0, [(y, y)])
        if y < r:
            # Seed the half-step move off the diagonal to (y+1, y).
            x0, y0 = y + Fraction(1, 2), y + Fraction(1, 2)
            a = (y + 1) * y0 - Fraction(1, 8) - Fraction(y0 * x0, 2) - Fraction((y + 1) * y, 2)
            m[y+1][y] = (a, sqrt(2) / 2, [(y, y+1), (y+1, y)])
        for x in range(y + 2, r + 1):
            best_ratio = 0
            best_area = 0
            best_perim = 0
            best_path = []
            # Consider every incoming segment (dx, dy) from (x-dx, y+dy).
            for dy in range(x - y, 0, -1):
                for dx in range(0, min(dy, x - y - dy) + 1):
                    x0, y0 = x - dx, y + dy
                    a0, p0, path0 = m[x0][y0]
                    a = a0 + x * y0 - Fraction(dx * dy, 2) - Fraction(y0 * x0, 2) - Fraction(x * y, 2)
                    p = p0 + sqrt(dx * dx + dy * dy)
                    ratio = a / p
                    if ratio > best_ratio:
                        best_ratio = ratio
                        best_area = a
                        best_perim = p
                        best_path = path0 + [(x, y)]
            m[x][y] = (best_area, best_perim, best_path)
    best_ratio = 0
    # BUG FIX: best_path was initialized to the int 0; if no candidate ever
    # improved on it, show_path/print below would receive a non-list.
    best_path = []
    for x in range(1, r + 1):
        a, p, path = m[x][0]
        ratio = a / p
        if ratio > best_ratio:
            best_ratio = ratio
            best_path = path
    print(best_path)
    print(best_ratio)
    if showpath:
        show_path(r, best_path)
# Run the brute-force search for increasing grid sizes (no ASCII output).
# NOTE(review): step() enumerates paths recursively, so large r values here
# take a long time.
for r in range(3, 50):
    p314(r, False)
# show_path(250, [(214, 208), (220, 201), (226, 193), (230, 187), (234, 180), (238, 172), (242, 162), (244, 156), (246, 149), (247, 145), (248, 140), (249, 133), (250, 122)])
# p314(23, False)
#for r in range(6, 11):
#    print('--- r = ' + str(r) + ' ---')
#    p314(r, True)
#    p314_dp(r, True)
| true
|
b4d5b99fcd12af9a88112f9e0b24b41183055701
|
Python
|
DavidLohrentz/LearnPy3HardWay
|
/puppies.py
|
UTF-8
| 171
| 3.171875
| 3
|
[] |
no_license
|
# Ask how many paw prints to show, then emit them via string repetition.
prompt = "\n\tHow many times to display puppy tracks? "
reps = int(input(prompt))
print(f"\N{PAW PRINTS}" * reps)
| true
|
f5f250694bb03fa18efd2a28fa0640ca136176cf
|
Python
|
vpozdnyakov/fca_lazy_clf
|
/setup.py
|
UTF-8
| 1,967
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
import setuptools
setuptools.setup(
    name='fca_lazy_clf',
    packages=['fca_lazy_clf'],
    version='0.3',
    license='MIT',
    description='Lazy binary classifier based on Formal Concept Analysis',
    long_description="""
### Installation
```sh
$ pip install fca_lazy_clf
```
### Requirements
The train and test datasets must be represented as ```pandas.DataFrame```. The classifier uses only attributes of types ```numpy.dtype('O')```, ```np.dtype('int64')``` and attributes with 2 any values. Other attributes will not be used. The target attribute must be binary.
### Example
```python
>>> import fca_lazy_clf as fca
>>> import pandas as pd
>>> from sklearn import model_selection
>>> data = pd.read_csv('https://datahub.io/machine-learning/tic-tac-toe-endgame/r/tic-tac-toe.csv')
>>> data.head()
  TL TM TR ML MM MR BL BM BR class
0  x  x  x  x  o  o  x  o  o  True
1  x  x  x  x  o  o  o  x  o  True
2  x  x  x  x  o  o  o  o  x  True
3  x  x  x  x  o  o  o  b  b  True
4  x  x  x  x  o  o  b  o  b  True
>>> X = data.iloc[:, :-1] # All attributes except the last one
>>> y = data.iloc[:, -1] # Last attribute
>>> X_train, X_test, y_train, y_test
        = model_selection.train_test_split(X, y, test_size=0.33, random_state=0)
>>> clf = fca.LazyClassifier(threshold=0.000001, bias='false')
>>> clf.fit(X_train, y_train)
>>> clf.score(X_test, y_test)
0.9716088328075709
```
""",
    long_description_content_type="text/markdown",
    author='Vitaliy Pozdnyakov',
    author_email='pozdnyakov.vitaliy@yandex.ru',
    url='https://github.com/vpozdnyakov/fca_lazy_clf',
    keywords=['fca', 'formal-concept-analysis', 'lazy-learning', 'binary-classification'],
    install_requires=[
        'pandas',
        'numpy',
        # BUG FIX: depend on 'scikit-learn'; the bare 'sklearn' PyPI name is
        # a deprecated alias and is rejected by modern pip/PyPI.
        'scikit-learn',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
)
| true
|
70f3b5a7de972d6428a84012c41978696ddc2579
|
Python
|
Vinograd17/Python
|
/HW 5/hw5_task_1.py
|
UTF-8
| 230
| 3.75
| 4
|
[] |
no_license
|
# Task 1
# Append user-entered lines to my_file.txt until the user enters "q".
with open('my_file.txt', 'w') as f_obj:
    while True:
        answer = input('To stop enter "q", any other letter to continue: ')
        if answer == 'q':
            break
        print(input('Enter text: '), file=f_obj)
| true
|
bff71f635c0913bcb0bf79f6f5c7c9a172bd8b8a
|
Python
|
sdytlm/sdytlm.github.io
|
/downloads/code/LeetCode/Python/Reconstruct-Itinerary.py
|
UTF-8
| 560
| 3.359375
| 3
|
[] |
no_license
|
class Solution(object):
    def findItinerary(self, tickets):
        """
        Reconstruct the lexically-smallest itinerary starting at 'JFK'
        that uses every ticket exactly once (Eulerian path, Hierholzer's
        post-order construction).
        :type tickets: List[List[str]]
        :rtype: List[str]
        """
        import collections  # local import: the original file never imports collections
        # Map each airport to a reverse-sorted list of destinations, so
        # pop() always yields the lexically smallest remaining one.
        # (BUG FIX: the original assigned `target` but used `targets`.)
        targets = collections.defaultdict(list)
        for src, dst in sorted(tickets)[::-1]:
            targets[src].append(dst)
        route = []
        def visit(airport):
            # Exhaust outgoing tickets first, then record the airport.
            while targets[airport]:
                visit(targets[airport].pop())
            route.append(airport)
        visit('JFK')
        return route[::-1]
| true
|
804531a04e9c8fa5ac38f51243885917ba825bb2
|
Python
|
NicoJG/Anfaengerpraktikum
|
/V355/frequenzentheorie.py
|
UTF-8
| 1,413
| 2.6875
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import json
from scipy.optimize import curve_fit
def vMinus_theorie(L, C, Ck):
    """Theoretical v- resonance frequency of two LC circuits coupled by Ck."""
    denom = (1/C) + 2/(Ck)
    return 1/(2 * np.pi * np.sqrt(L/denom))
def vPlus_theorie(L, C):
    """Theoretical v+ resonance frequency (plain LC resonance, Ck-independent)."""
    product = L * C
    return 1/(2 * np.pi * np.sqrt(product))
# Experimental constants: inductance L (H) and capacitance C (F).
L = 23.9540 * 10**(-3)
C = 0.7932 *10**(-9)
# Read measured data: coupling capacitance (nF) and both frequencies (kHz),
# then convert to SI units.
Ck_gemessen,vPlus_gemessen,vMinus_gemessen = np.genfromtxt('frequenzen.csv',delimiter=',',unpack=True)
vPlus_gemessen = vPlus_gemessen*10**3 # Hz
vMinus_gemessen = vMinus_gemessen*10**3 # Hz
Ck_gemessen = Ck_gemessen*10**(-9) # F
Ck_linspace= np.linspace(0.5,12.5,100)
# Plot of the measured data (nF on x, kHz on y)
plt.plot(Ck_gemessen*10**(9), vMinus_gemessen*10**(-3), 'rx', label='Messwerte')
#plt.plot(Ck_gemessen*10**(9), vPlus_gemessen*10**(-3), 'kx', label='Messwerte (v+)')
# Plot of the theory curve
plt.plot(Ck_linspace, vMinus_theorie(L,C,Ck_linspace*10**(-9))*10**(-3), 'b-', label='Theoriekurve')
#plt.plot(Ck_linspace, (Ck_linspace/Ck_linspace)*vPlus_theorie(L,C)*10**(-3), 'y-', label='Theoriekurve (v+)')
# Axis labels (siunitx LaTeX macros -- require a matching usetex setup)
plt.xlabel(r'$C_\text{k} \:/\: \si{\nano\farad}$')
plt.ylabel(r'$\nu \:/\: \si{\kilo\hertz}$')
# unfortunately not (yet) possible in matplotlibrc
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.grid(True,which="both", linestyle='--')
# Output location
plt.savefig('build/plot_frequenzentheorie.pdf')
|
557378b33172f2e48e27db48be5f3e27144ff4fa
|
Python
|
unclebae/python3-data-analysis
|
/ch02/convertArray.py
|
UTF-8
| 439
| 3.703125
| 4
|
[] |
no_license
|
# _*_ coding: utf-8 _*_
import numpy as num
# Create a small complex-valued array and demonstrate conversions.
a = num.array([ 1. + 1.j, 3. + 2.j])
print("num.array([ 1. + 1.j, 3. + 2.j]) : ", a)
# Convert the array to a plain Python list using tolist().
print("a.tolist() : ", a.tolist())
# Convert the array to an array of a specific type (int cast discards the
# imaginary part, with a numpy ComplexWarning).
print("a.astype(int) : ", a.astype(int))
# Convert the array to the complex type.
print("a.astype('complex') : ", a.astype('complex'))
| true
|
a807f3f31808e7042d17b0a9611669a780b30b75
|
Python
|
FP1212/FP
|
/Test_QUEEN_ATTACK_Federico_Pinilla_Tarazona/Test_QueenAttack.py
|
UTF-8
| 2,500
| 2.8125
| 3
|
[] |
no_license
|
'''
Created on 17/10/2019
@author: FP
'''
import array
# Board size and obstacle count (n, k), queen position (rq, cq), and
# search bookkeeping. `p` packs the 8 direction-alive flags as bits.
n=0;
k=0;
rq=0;
cq=0;
ro=0;
co=0;
p=255;
limite_inferior=0;
limite_superior=100000;
direccion=8;
cuadros_restantes=0;
import re
# Parse prueba.txt: line 1 = "n k", line 2 = queen position (1-based),
# remaining lines = obstacle coordinates.
parametros_in=open("prueba.txt","r");
acumulador=0;
for lineas in parametros_in.readlines():
    temp = re.findall(r'\d+', lineas);
    res = list(map(int, temp));
    if acumulador==0:
        n=res[0];
        k=res[1];
        tablero=[[0 for x in range(n)]for y in range(n)];
        obstaculos=[[0 for x in range(2)]for y in range(k)];
    elif acumulador==1:
        # Convert queen position to 0-based indices and mark it on the board.
        rq=res[0]-1;
        cq=res[1]-1;
        tablero[rq][cq]=1;
    else:
        obstaculos.append(res);
    acumulador=acumulador+1;
parametros_in.close();
# Offsets from (rq, cq) by i squares in each of the queen's 8 directions,
# expressed purely in row/column terms:
#   A: row-i        B: col-i        C: row-i, col-i   D: row+i
#   E: col+i        F: row+i, col+i G: row-i, col+i   H: row+i, col-i
def Adir(rq, cq, i):
    return (rq - i, cq)
def Bdir(rq, cq, i):
    return (rq, cq - i)
def Cdir(rq, cq, i):
    return (rq - i, cq - i)
def Ddir(rq, cq, i):
    return (rq + i, cq)
def Edir(rq, cq, i):
    return (rq, cq + i)
def Fdir(rq, cq, i):
    return (rq + i, cq + i)
def Gdir(rq, cq, i):
    return (rq - i, cq + i)
def Hdir(rq, cq, i):
    return (rq + i, cq - i)
# Dispatch table mapping direction index 0-7 to its offset function.
direcciones={0:Adir,1:Bdir,2:Cdir,3:Ddir,4:Edir,5:Fdir,6:Gdir,7:Hdir};
# One "still open" flag per direction, stored as unsigned bytes.
detectados_direccion=array.array('B', [True] * direccion);
# Clamp n and k into [limite_inferior, limite_superior].
if n<limite_inferior:n=0;
elif n>=limite_superior:n=limite_superior;
if k<=limite_inferior:k=limite_inferior;
elif k>=limite_superior:k=limite_superior;
# Mark obstacles (input is 1-based) with the value 3.
for i in obstaculos:
    tablero[i[0]-1][i[1]-1]=3;
# Walk outward from the queen one ring at a time; a direction is closed
# once it hits an obstacle, the queen, or the board edge.
for i in range(n):
    # Pack the alive flags into an int; stop early when all are closed.
    p=int(''.join(map(str, map(int, detectados_direccion))), 2)
    if p==0: break;
    for j in range (direccion):
        try:
            resultado=direcciones[j](int(rq),int(cq),int(i+1));
            bandera=detectados_direccion[j];
            q=resultado[0];
            c=resultado[1];
            # NOTE(review): tablero[q][c] is evaluated before the bounds
            # test, so negative q/c wrap around Python-style -- confirm this
            # cannot count squares from the wrong side of the board.
            if tablero[q][c] !=3 and tablero[q][c] !=1 and (not(q>=n or c>=n or q<0 or c<0)) and bandera==True:
                tablero[q][c]=2;
                cuadros_restantes=cuadros_restantes+1;
            else:
                detectados_direccion[j]=False;
        except:
            # Bare except: out-of-range accesses simply close the direction.
            detectados_direccion[j]=False;
print("Se Encontraron ",cuadros_restantes,"Cuadros Por Los Cuales Se Puede Desplazar La Reina");
| true
|
aad1dba2d235dbcd830545d6e9e3f883a5b59bf0
|
Python
|
jaabee/daily_code
|
/Django_Code/mysite/blog/forms.py
|
UTF-8
| 962
| 2.5625
| 3
|
[] |
no_license
|
# @Time : 2020/11/17 21:38
# @Author : GodWei
# @File : forms.py
from django import forms
from .models import Comment
# A Django form created by subclassing the Form base class.
class EmailPostForm(forms.Form):
    # Rendered as an <input type='text'> HTML element.
    name = forms.CharField(max_length=25)
    email = forms.EmailField()
    to = forms.EmailField()
class CommentForm(forms.ModelForm):
    # 1. required=False makes the comments field optional.
    # 2. The default widget is overridden: Textarea renders the field as a
    #    <textarea> HTML element instead of the default <input>.
    comments = forms.CharField(required=False, widget=forms.Textarea)
    class Meta:
        # Build this form from the Comment model.
        model = Comment
        # Fields to include in the form; an 'exclude' list could instead
        # name fields to leave out.
        fields = ('comments',)
| true
|
f47adc889daa101d6697768e037a7c9e09a9ec2b
|
Python
|
mkt-Do/codewars
|
/python/tests/test_sum_of_numbers_from_0_to_N.py
|
UTF-8
| 424
| 3.6875
| 4
|
[] |
no_license
|
from codewars.sum_of_numbers_from_0_to_N import show_sequence
from codewars.test import Test
class TestSumOfNumbersFrom0ToN(Test):
    # Exercises show_sequence over positive, zero and negative inputs.
    def test_show_sequence(self):
        self.describe("Example Tests")
        # (input, expected string) pairs
        tests = (
            (6, "0+1+2+3+4+5+6 = 21"),
            (7, "0+1+2+3+4+5+6+7 = 28"),
            (0, "0=0"),
            (-1, "-1<0"),
            (-10, "-10<0"),
        )
        for inp, exp in tests:
            self.assert_equals(show_sequence(inp), exp)
| true
|
e15a108c13d3a66312478bd2159f90e39adda5e0
|
Python
|
DaianeFeliciano/python-fatec
|
/atv105.py
|
UTF-8
| 1,778
| 4.28125
| 4
|
[] |
no_license
|
import os
import sys
def lerNumero():
    """Prompt the user for an integer and return it."""
    return int(input('Digite um número: '))
def numeroParouImpar(N):
    """Report whether N is even; returns True for even, False for odd."""
    eh_par = N % 2 == 0
    if eh_par:
        print("Número {} é par".format(N))
    else:
        print("Número {} é impar".format(N))
    return eh_par
def numeroPrimo(N):
    """Return True when N has exactly two divisors in 1..N (i.e. is prime)."""
    divisores = sum(1 for i in range(1, N + 1) if N % i == 0)
    return divisores == 2
def somaPrimos(x, y):
    """Return the sum of all primes in the inclusive range [x, y]."""
    return sum(i for i in range(x, y + 1) if numeroPrimo(i))
def controle():
    """Interactive menu loop: read a number, classify it, test primality,
    or sum primes, until the user chooses to exit.

    NOTE(review): `numero` is only assigned by option 1, so choosing 2-4
    first raises UnboundLocalError -- confirm intended usage order.
    """
    itemMenu = 0
    # Build the menu text once (ANSI color codes for the title).
    linhasMenu = '\n\033[1;96m**Menu de Controle**\033[m'
    linhasMenu += '\n 1 Ler'
    linhasMenu += '\n 2 Par ou Impar'
    linhasMenu += '\n 3 Verificar Primo'
    linhasMenu += '\n 4 Somar Primo'
    linhasMenu += '\n 5 Sair'
    linhasMenu += '\n Item: '
    while True:
        itemMenu = int(input(linhasMenu))
        if itemMenu == 1:
            numero = lerNumero()
        elif itemMenu == 2:
            parouimpar = numeroParouImpar(numero)
        elif itemMenu == 3:
            # NOTE(review): numeroPrimo is called twice with the same input.
            primo = numeroPrimo(numero)
            status = numeroPrimo(numero)
            if status:
                print(f'O número {numero} é um número primo.')
            else:
                print(f'O número {numero} não é um número primo.')
        elif itemMenu == 4:
            x = somaPrimos(1,numero)
            print('A soma dos números primos de 1 até {} é {}'.format(numero, x))
        elif itemMenu == 5:
            print("Programa Finalizado")
            break
    # Reached only after the loop breaks (option 5).
    sys.exit()
# Start the interactive menu (runs on import as well as direct execution).
controle()
| true
|
f6c8f7a8b5ca78935d060f50f264ca2b83c177ea
|
Python
|
ketasaka/Python_study
|
/提出/sakamotokeita_1_10.py
|
UTF-8
| 199
| 4.09375
| 4
|
[] |
no_license
|
# Read a string, an integer, and a decimal from the user, then echo each.
a = input("文字の入力:")
b = int(input("整数の入力:"))
# NOTE(review): the "decimal" is read but never converted with float() --
# confirm whether keeping it as a string is intended.
c = input("小数の入力:")
print("入力された文字 =",a)
print("入力された整数 =",b)
print("入力された小数 =",c)
| true
|
938915d4fe69c7a61b5303d25f7aa2d6e060362c
|
Python
|
arunkumarpalaniappan/algorithm_tryouts
|
/arrays_matrices/compression.py
|
UTF-8
| 635
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
def compressString(string):
    """Run-length encode `string` (e.g. 'aaab' -> 'a3b1'); return the
    original string when the encoding is not strictly shorter."""
    pieces = []
    index = 0
    while index < len(string):
        run = 1
        # Count how far the current character repeats.
        while index + run < len(string) and string[index] == string[index + run]:
            run += 1
        pieces.append(string[index] + str(run))
        index += run
    compressed = ''.join(pieces)
    return compressed if len(compressed) < len(string) else string
# BUG FIX: the original used Python-2-only `print x` statements (a syntax
# error under Python 3); parenthesized calls work under both versions.
# The expected-output comments were also corrected.
print(compressString('test'))           # test (encoding not shorter), O(n)
print(compressString('thissusssssss'))  # t1h1i1s2u1s7, O(n)
print(compressString('compression'))    # compression (encoding not shorter), O(n)
| true
|
ec4d7e70f8b229d97fa0c8033f087a46f7d8ccf3
|
Python
|
kurtw29/algorithmPractice
|
/maxPalindrome.py
|
UTF-8
| 1,846
| 3.625
| 4
|
[] |
no_license
|
def findPalindromeLength(index, arr):
    """Length of the longest palindrome centered at `index` (odd length)
    or between `index` and `index + 1` (even length), whichever is longer."""
    # Odd-length palindrome centered exactly at `index`.
    odd = 1
    step = 1
    while index - step >= 0 and index + step < len(arr):
        if arr[index - step] != arr[index + step]:
            break
        odd += 2
        step += 1
    # Even-length palindrome centered between `index` and `index + 1`.
    even = 0
    if index < len(arr) - 1 and arr[index] == arr[index + 1]:
        even = 2
        step = 1
        while index - step >= 0 and index + 1 + step < len(arr):
            if arr[index - step] != arr[index + 1 + step]:
                break
            even += 2
            step += 1
    return odd if odd > even else even
def maxPalindrome(arr):
    """Return the longest palindromic substring of `arr`, reconstructed
    around the best center reported by findPalindromeLength; returns the
    message "no palindrome found" when no palindrome longer than one
    character exists.
    """
    # (renamed from `max`, which shadowed the builtin)
    best = {
        "index": 0,
        "length": 0
    }
    # Scan every center and keep the longest palindrome seen.
    for i in range(len(arr)):
        length = findPalindromeLength(i, arr)
        if length > best["length"]:
            best["index"] = i
            best["length"] = length
    if best["length"] > 1:
        center = best["index"]
        if best["length"] % 2 != 0:
            # Odd palindrome: grow symmetrically around the single center.
            output = arr[center]
            for j in range(1, int((best["length"] - 1) / 2) + 1):
                output = arr[center - j] + output + arr[center + j]
            return output
        else:
            # Even palindrome: center sits between `center` and `center + 1`.
            output = arr[center] + arr[center + 1]
            if best["length"] / 2 - 1 > 0:
                for j in range(1, int(best["length"] / 2)):
                    output = arr[center - j] + output + arr[center + 1 + j]
            return output
    else:
        # BUG FIX: the original left this string as a bare expression, so
        # the function silently returned None; return the message instead.
        return "no palindrome found"
arr = "jjjjjjbcajacbjabaj"
print("string:", arr, "\nlongest palindrom: ",maxPalindrome(arr))
arr = "dbcqbbabad"
print("string:", arr, "\nlongest palindrom: ",maxPalindrome(arr))
| true
|
35ed736b6d2ebedefb6eac7b916957a370efabe6
|
Python
|
knschuckmann/Modul-Learning-From-Images
|
/lfi-01/Dennis Baskan/lfi-01/filter.py
|
UTF-8
| 3,351
| 3.6875
| 4
|
[] |
no_license
|
"""
Kostja Comments:
- Can u explain the ravel() function and why you dont give it a order?? in im2double
- when using convolution2d is it fastser to create the variables and not claculate with calculated values?
- Very nice codestyle
- Is Gausian Vlur required before aplying the sobel?
- why do you use angle
"""
import numpy as np
import cv2
from time import time as t
import timeit
def im2double(im):
    """
    Converts uint image (0-255) to double image (0.0-1.0) and generalizes
    this concept to any range (min maps to 0.0, max to 1.0).
    :param im:
    :return: normalized image
    """
    lo = np.min(im.ravel())
    hi = np.max(im.ravel())
    return (im.astype('float') - lo) / (hi - lo)
def make_gaussian(size, fwhm = 3, center=None):
    """ Make a normalized square Gaussian kernel.
    size is the length of a side of the square; fwhm is
    full-width-half-maximum, which can be thought of as an effective
    radius; center defaults to the middle of the square.
    """
    xs = np.arange(0, size, 1, float)
    ys = xs[:, np.newaxis]
    if center is None:
        cx = cy = size // 2
    else:
        cx, cy = center[0], center[1]
    kernel = np.exp(-4 * np.log(2) * ((xs - cx) ** 2 + (ys - cy) ** 2) / fwhm ** 2)
    # Normalize so the kernel sums to 1.
    return kernel / np.sum(kernel)
def convolution_2d(img, kernel, mode='padding'):
    """
    Computes the convolution between kernel and image.
    NOTE(review): the kernel is applied without flipping, so this is
    cross-correlation (identical for symmetric kernels).
    :param img: grayscale image
    :param kernel: convolution matrix - 3x3, or 5x5 matrix
    :return: result of the convolution (np.matrix of the input's shape)
    """
    ksize = kernel.shape[0]
    pad = int(ksize / 2)
    rows, cols = img.shape
    if mode == 'padding':
        # Zero-pad so the output keeps the input's shape.
        img = np.pad(img, pad)
    sums = []
    for r in range(rows):
        for c in range(cols):
            sums.append(np.sum(img[r:r + ksize, c:c + ksize] * kernel))
    return np.matrix(sums).reshape((rows, cols))
if __name__ == "__main__":
    # 1. load image in grayscale+scale
    # NOTE(review): cv2.imread returns None when the file is missing, which
    # would make the .shape access below fail — assumes heart.jpg exists.
    frame = cv2.imread('heart.jpg',0)
    #frame = cv2.imread('Lenna.png',0)
    m,n = frame.shape[:2]
    s = 0.35  # downscale factor to keep the pure-Python convolutions fast
    frame = cv2.resize(frame,(int(n*s),int(m*s)))
    # 2. convert image to 0-1 image (see im2double)
    frame = im2double(frame)
    # image kernels: standard 3x3 Sobel masks for horizontal/vertical gradients
    sobelmask_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    sobelmask_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    gk = make_gaussian(11)  # 11x11 Gaussian used as a blur before edge detection
    # 3. use image kernels on normalized image
    frame = convolution_2d(frame,gk)
    print("Gaussian Blur Done!")
    sobel_x = convolution_2d(frame,sobelmask_x)
    print("Sobel X Done!")
    sobel_y = convolution_2d(frame,sobelmask_y)
    print("Sobel Y Done!")
    # 4. compute magnitude of gradients
    mog = np.sqrt(np.multiply(sobel_x,sobel_x)+np.multiply(sobel_y,sobel_y))
    print("MOG Done!")
    # Gradient direction; computed but not displayed below (kept for reference).
    angle = np.arctan2(sobel_y,sobel_x)
    print("Angle Done!")
    # Show resulting images (blocks until a key is pressed in the window)
    cv2.imshow("sobel_x", sobel_x)
    cv2.imshow("sobel_y", sobel_y)
    cv2.imshow("mog", mog)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
'''
References:
-https://lms.beuth-hochschule.de/pluginfile.php/781445/mod_resource/content/0/LFI-02-ImageProcessing.pdf
-https://docs.scipy.org/doc/numpy/reference/
-https://docs.python.org/2/library/timeit.html
-https://joblib.readthedocs.io/en/latest/parallel.html
'''
# NOTE(review): leftover scratch statement — np.matrix(4) has shape (1, 1),
# so reshaping it to (2, 2) raises ValueError whenever this module is
# executed; consider deleting this line.
np.matrix(4).reshape((2,2))
| true
|
52b1ed386f65bb237f6737a2b76febad05f5a0b3
|
Python
|
BnkColon/parallel
|
/mergeParallel.py
|
UTF-8
| 6,761
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
# Bianca I. Colon Rosado
# This file is: mergeParallel.py
# To compile $ python mergeParallel.py
# I make this program last semester after CCOM3034.
# Learning Parallel and learning Python for my own in a Coursera class.
# http://www.tutorialspoint.com/python/python_multithreading.htm
# https://docs.python.org/dev/library/multiprocessing.html
# http://sebastianraschka.com/Articles/2014_multiprocessing_intro.html
from multiprocessing import Process, Pipe
import time, random, sys
# why we use sys: The list of command line arguments passed to a Python script.
#https://docs.python.org/2/library/sys.html
#Dependencies defined below main()
def main():
    """
    This is the main method, where we:
    - generate a random list. (it's more easy)
    - time a sequential mergesort on the list.
    - time a parallel mergesort on the list.
    - time Python's built-in sorted on the list.
    """
    print('''
 #     #                                #####                            
 ##   ## ###### #####   ####  ######   #     #  ####  #####  #####       
 # # # # #      #    # #    # #        #       #    # #    #   #         
 #  #  # #####  #    # #      #####     #####  #    # #    #   #         
 #     # #      #####  #  ### #              # #    # #####    #         
 #     # #      #   #  #    # #        #     # #    # #   #    #         
 #     # ###### #    #  ####  ######    #####   ####  #    #   #         
                                                            ''')
    # Default problem size; can be overridden from the command line.
    N = 500000
    if len(sys.argv) > 1: #the user input a list size.
        N = int(sys.argv[1])
    #We want to sort the same list, so make a backup.
    lystbck = [random.random() for x in range(N)]
    #Sequential mergesort a copy of the list.
    lyst = list(lystbck)
    start = time.time() #start time
    lyst = mergesort(lyst)
    elapsed = time.time() - start #stop time
    if not isSorted(lyst):
        print('Sequential mergesort did not sort. oops.')
    print('Sequential mergesort: %f sec' % (elapsed))
    # Pause between benchmarks so the runs do not interfere with each other
    # (copied from other implementations; probably not strictly required).
    time.sleep(3)
    #Now, parallel mergesort.
    lyst = list(lystbck)
    start = time.time()
    # Depth of the parallel recursion: 2**3 = 8 leaf worker processes.
    n = 3
    #Instantiate a Process and send it the entire list,
    #along with a Pipe so that we can receive its response.
    pconn, cconn = Pipe()
    p = Process(target=mergeSortParallel, \
        args=(lyst, cconn, n))
    p.start()
    lyst = pconn.recv()
    #Blocks until there is something (the sorted list)
    #to receive.
    p.join()
    elapsed = time.time() - start
    print('''
 ######                                                                      ######
 #     # ## ##### ## # # ###### # # # ##### #### #### ##### ## # # # # # # # ####
 # # # # # # # # # # # # # # # # # # # # # # # # ## ## ## ## # ## # # #
 ###### # # # # # # ##### # ###### # # # # # # # # # # ## # # ## # # # # # #
 # ###### ##### ###### # # # # # ##### # # # ### ##### ###### # # # # # # # # # ###
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # ## # #
 # # # # # # # ###### ###### ###### ###### # # # #### #### # # # # # # # # # # # ####
    ''')
    if not isSorted(lyst):
        print('mergeSortParallel did not sort. oops sorry.')
    print('Parallel mergesort: %f sec' % (elapsed))
    time.sleep(3)
    #Built-in test: Timsort via the built-in sorted(), as a baseline.
    lyst = list(lystbck)
    start = time.time()
    lyst = sorted(lyst)
    elapsed = time.time() - start
    print('Built-in sorted: %f sec' % (elapsed))
def merge(left, right):
    """Merge two already-sorted lists into one sorted list (stable)."""
    merged = []
    i = j = 0
    n_left, n_right = len(left), len(right)
    # Repeatedly take the smaller head element; ties favour `left`,
    # which keeps the merge stable.
    while i < n_left and j < n_right:
        if right[j] < left[i]:
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    # At most one of the two tails is non-empty here; extend with both.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def mergesort(lyst):
    """
    Return a sorted copy of lyst via top-down mergesort.
    The argument itself is left untouched.
    """
    if len(lyst) <= 1:
        return lyst
    mid = len(lyst) // 2
    return merge(mergesort(lyst[:mid]), mergesort(lyst[mid:]))
def mergeSortParallel(lyst, conn, procNum):
    """mergSortParallel receives a list, a Pipe connection to the parent,
    and procNum. Mergesort the left and right sides in parallel, then
    merge the results and send over the Pipe to the parent.

    procNum is the remaining recursion depth: each level spawns two child
    processes, so a top-level call with procNum = n uses up to 2**n leaves.
    """
    #Base case, this process is a leaf or the problem is
    #very small: sort sequentially and report back to the parent.
    if procNum <= 0 or len(lyst) <= 1:
        conn.send(mergesort(lyst))
        conn.close()
        return
    ind = len(lyst)//2
    #Create processes to sort the left and right halves of lyst.
    #In creating a child process, we also create a pipe for that
    #child to communicate the sorted list back to us.
    pconnLeft, cconnLeft = Pipe()
    leftProc = Process(target=mergeSortParallel, \
                       args=(lyst[:ind], cconnLeft, procNum - 1))
    #Creat a process for sorting the right side.
    pconnRight, cconnRight = Pipe()
    rightProc = Process(target=mergeSortParallel, \
                        args=(lyst[ind:], cconnRight, procNum - 1))
    #Start the two subprocesses.
    leftProc.start()
    rightProc.start()
    #Recall that expression execution goes from first evaluating
    #arguments from inside to out. So here, receive the left and
    #right sorted sublists (each receive blocks, waiting to finish),
    #then merge the two sorted sublists, then send the result
    #to our parent via the conn argument we received.
    conn.send(merge(pconnLeft.recv(), pconnRight.recv()))
    conn.close()
    #Join the left and right processes.
    leftProc.join()
    rightProc.join()
def isSorted(lyst):
    """
    Return True when lyst is in non-decreasing order, False otherwise.
    Empty and single-element lists count as sorted.
    """
    # Compare each element with its successor; all() short-circuits on
    # the first out-of-order pair.
    return all(a <= b for a, b in zip(lyst, lyst[1:]))
#Execute the main method now that all the dependencies
#have been defined.
#The if __name is so that pydoc works and we can still run
#on the command line. It is also required for spawn-based
#multiprocessing (Windows/macOS): child processes re-import this
#module, and without the guard they would re-run main() recursively.
if __name__ == '__main__':
    main()
| true
|
55148641c7e637e19ad100e0aa596a304c74241a
|
Python
|
LuciraSilva/FreelaDev
|
/app/models/contractor_model.py
|
UTF-8
| 2,011
| 2.78125
| 3
|
[] |
no_license
|
from app.configs.database import db
from dataclasses import dataclass
from werkzeug.security import generate_password_hash, check_password_hash
import re
from flask import request, jsonify
@dataclass
class ContractorModel(db.Model):
    """SQLAlchemy model for a contractor account.

    The @dataclass decorator plus the three annotations below selects the
    fields exposed when instances are serialized (name, email, cnpj); the
    password hash is deliberately excluded.
    """
    name: str
    email: str
    cnpj: str
    __tablename__ = 'contractors'
    # NOTE(review): the db.Column assignments below rebind the class
    # attributes of the same name at class-definition time; the annotations
    # above appear to remain only to drive dataclass serialization — confirm.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    email = db.Column(db.String, nullable=False, unique=True)
    password_hash = db.Column(db.String, nullable=False)
    cnpj = db.Column(db.String, nullable=True, unique=True)
    @property
    def password(self):
        """Write-only attribute: reading the plain password always fails."""
        raise AttributeError("Password cannot be accessed!")
    @password.setter
    def password(self, password_to_hash):
        # Store only a salted hash; the plain-text password is never kept.
        self.password_hash = generate_password_hash(password_to_hash)
    def verify_password(self, password_to_compare):
        """Return True when the candidate password matches the stored hash."""
        return check_password_hash(self.password_hash, password_to_compare)
    @staticmethod
    def verify_pattern_email(email):
        """Return True when `email` matches a basic address pattern."""
        pattern_email = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
        if(re.fullmatch(pattern_email, email)):
            return True
        else:
            return False
    @staticmethod
    def verify_pattern_password(password):
        """Return True for 6-20 chars containing lower, upper, digit and symbol."""
        pattern_password = "^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!#%*?&]{6,20}$"
        if(re.search( pattern_password, password)):
            return True
        return False
    @staticmethod
    def verify_cnpj(cnpj):
        """Return True when `cnpj` matches the 00.000.000/0000-00 layout."""
        pattern_cnpj = r'(^\d{2}.\d{3}.\d{3}/\d{4}-\d{2}$)'
        if(re.fullmatch(pattern_cnpj, cnpj)):
            return True
        else:
            return False
    @staticmethod
    def unique_email(user_email):
        # Returns True when a contractor with this e-mail already exists;
        # falls through to an implicit None otherwise.
        user = ContractorModel.query.filter_by(email=user_email).first()
        if user:
            return True
    @staticmethod
    def unique_cnpj(user_cnpj):
        # Returns True when a contractor with this CNPJ already exists;
        # falls through to an implicit None otherwise.
        user = ContractorModel.query.filter_by(cnpj=user_cnpj).first()
        if user:
            return True
| true
|
8f21b4e9a89dc3e6605ec185a87bb98992fc667b
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03146/s788338217.py
|
UTF-8
| 195
| 3
| 3
|
[] |
no_license
|
def collatz_first_repeat(s):
    """
    Length of the sequence a_1 = s, a_{n+1} = a_n / 2 (a_n even) or
    3 * a_n + 1 (a_n odd), up to and including the first term that
    repeats an earlier value.

    Returns the 1-based index of that first repeated term.
    """
    seen = [s]        # terms generated so far, in order (indexed by step)
    members = {s}     # set mirror of `seen` for O(1) membership tests
    steps = 0
    while True:
        current = seen[steps]
        # // keeps every term an int; the original used /, which produced
        # floats in Python 3 (they still compared equal, but was fragile).
        nxt = current // 2 if current % 2 == 0 else 3 * current + 1
        steps += 1
        if nxt in members:
            break
        seen.append(nxt)
        members.add(nxt)
    return steps + 1


if __name__ == "__main__":
    # Guarding the I/O keeps the function importable and testable while
    # preserving the original stdin -> stdout behaviour when run directly.
    print(collatz_first_repeat(int(input())))
| true
|
4fcd5f7a94f65e8208038c8f3ad8ad80fbf84495
|
Python
|
zhipenglu/xist_structure
|
/pca2tracks.py
|
UTF-8
| 1,819
| 2.796875
| 3
|
[] |
no_license
|
"""
pca2tracks.py
This script converts the PCA analysis results for RIP/CLIP enrichment to a
minimal number of tracks for display on IGV. This approach provides more useful
information than the heatmap. The input file is *pca_array.pc.txt, and output
are the first few tracks that explain the most variance (e.g. *pc1.bedgraph).
Input format:
Interval NAME MEAN PC1 PC2 ...
hsXIST_0_100 hsXIST_0_100 value value value ...
Example:
cd /Users/lu/Documents/chang/eCLIP/fripsum
python ~/Documents/scripts/pca2tracks.py \
frip_gap_hsXIST_geometric_100nt_pca_array.pc.txt 7 \
frip_gap_hsXIST_geometric_100nt_pca_array
For the PCA results from gene level, need to transpose the matrix###############
python ~/Documents/scripts/pca2tracks.py \
frip_gap_hsXIST_geometric_100nt_pca_gene.pc.txt 7 array \
frip_gap_hsXIST_geometric_100nt_pca_gene
"""
import sys
if len(sys.argv) < 4:
print "Usage: python pca2tracks.py pca_file track_number dim output_prefix"
print "dim: gene or array"
sys.exit()
pcafile = open(sys.argv[1], 'r')
ntracks = int(sys.argv[2])
dimension = sys.argv[3]
outputprefix = sys.argv[4]
pcadata = pcafile.readlines()[1:] #input as a list, remove the header line
pcamatrix = [line.strip('\n').split() for line in pcadata]
meanbedgraph = open(outputprefix + "_mean.bedgraph", 'w') #output mean bedgraph
meanout = ''
for row in pcamatrix: meanout += ('\t'.join(row[0].split('_') + row[2:3]) +'\n')
meanbedgraph.write(meanout)
meanbedgraph.close()
for i in range(ntracks): #output major principal component tracks
pctrack = open(outputprefix + '_pc' + str(i+1) + '.bedgraph', 'w')
pctrackout = ''
for row in pcamatrix:
pctrackout += ('\t'.join(row[0].split('_') + row[3+i:4+i]) + '\n')
pctrack.write(pctrackout)
pctrack.close()
pcafile.close()
| true
|
41661be35d826e93188e0074f02d1248dcb4317f
|
Python
|
chang-change/9eqModel_KNFandLSTM
|
/Post-processing/compare_long_term_statistics.py
|
UTF-8
| 3,843
| 2.9375
| 3
|
[] |
no_license
|
"""
compare_long_term_statistics.py
---------------------
This file compares the performance of KNF, HDMD, and LSTM models in the
reproduction of the long-term statistics by producing Figures 5 of the paper.
Requires:
stats_KNF_model_name.npz - reproduction of the long-term statistics by KNF.
stats_HDMD_model_name.npz - reproduction of the long-term statistics by HDMD.
stats_LSTM_model_name.npz - reproduction of the long-term statistics by LSTM.
Each of these files contain a 2-D array of size (nGP, 6) where
nGP - number of grid points
Rows represent:
u_ref
u_pred
uu_ref
uu_pred
uv_ref
uv_pred
Creates:
Figures 5 of the paper.
The code has been used for the results in:
"Recurrent neural networks and Koopman-based frameworks for temporal
predictions in a low-order model of turbulence"
Hamidreza Eivazi, Luca Guastoni, Philipp Schlatter, Hossein Azizpour,
Ricardo Vinuesa
International Journal of Heat and Fluid Flow (accepted)
https://arxiv.org/abs/2005.02762
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.interpolate import make_interp_spline
# Global matplotlib styling for the paper figures (LaTeX text rendering).
plt.rc('text', usetex = True)
plt.rc('font', family = 'serif')
plt.rc('axes', labelsize = 16, linewidth = 1)
plt.rc('font', size = 14)
plt.rc('legend', fontsize = 12, handletextpad=0.3)
plt.rc('xtick', labelsize = 14)
plt.rc('ytick', labelsize = 14)
direc = './../Predictions/'
models = ['KNF_10000_5', 'HDMD_10000_5', 'LSTM1_t10000']
# Each stats file holds 6 rows: u_ref, u_pred, uu_ref, uu_pred, uv_ref, uv_pred.
# NOTE(review): the module header mentions stats_*.npz, but .txt files are
# loaded here — confirm which is current.
knf = np.loadtxt(direc + 'stats_' + models[0] + '.txt')
hdmd = np.loadtxt(direc + 'stats_' + models[1] + '.txt')
lstm = np.loadtxt(direc + 'stats_' + models[2] + '.txt')
# Reference profiles (identical in all three files; taken from the KNF file).
u_ref = knf[0]; uu_ref = knf[2]; uv_ref = knf[4]
u_knf = knf[1]; uu_knf = knf[3]; uv_knf = knf[5]
u_hdmd = hdmd[1]; uu_hdmd = hdmd[3]; uv_hdmd = hdmd[5]
u_lstm = lstm[1]; uu_lstm = lstm[3]; uv_lstm = lstm[5]
# Coarse grid the statistics live on (Np points) and the finer grid used
# only for smooth plotting.
Np = 21
Y = np.linspace(-1, 1, Np)
Yn = np.linspace(-1, 1, 51)
def spl(x):
    """Cubic-spline resample of x from the coarse grid Y onto the fine grid Yn."""
    return make_interp_spline(Y, x, k=3)(Yn)
# Resample every profile onto the fine grid for smooth curves.
u_ref = spl(u_ref)
u_knf = spl(u_knf)
u_lstm = spl(u_lstm)
u_hdmd = spl(u_hdmd)
uu_ref = spl(uu_ref)
uu_knf = spl(uu_knf)
uu_lstm = spl(uu_lstm)
uu_hdmd = spl(uu_hdmd)
uv_ref = spl(uv_ref)
uv_knf = spl(uv_knf)
uv_lstm = spl(uv_lstm)
uv_hdmd = spl(uv_hdmd)
lsiz = 18  # axis-label font size
# Three side-by-side panels sharing the wall-normal axis y:
# mean velocity, streamwise stress, shear stress.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex = False, sharey = True, figsize = (10, 4))
ax1.plot(u_ref, Yn, color = 'k', linestyle = '--', label = 'Reference', linewidth = 1)
ax1.plot(u_knf, Yn, color = 'tab:orange', linestyle = '-', label = 'KNF', linewidth = 1)
ax1.plot(u_lstm, Yn, color = 'tab:blue', label = 'LSTM', linewidth = 1)
ax1.plot(u_hdmd, Yn, color = 'tab:green', label = 'HDMD', linewidth = 1)
ax1.set_xlabel('$\overline{u}$', fontsize = lsiz)
ax1.set_ylabel('$y$', fontsize = lsiz)
ax1.set_xlim(-0.5, 0.5)
ax1.set_ylim(-1, 1)
ax1.set_yticks(np.linspace(1, -1, 5))
ax1.legend(frameon = False, loc = 0)
ax2.plot(uu_ref, Yn, color = 'k', linestyle = '--', linewidth = 1)
ax2.plot(uu_knf, Yn, color = 'tab:orange', linestyle = '-', linewidth = 1)
ax2.plot(uu_lstm, Yn, color = 'tab:blue', linewidth = 1)
ax2.plot(uu_hdmd, Yn, color = 'tab:green', linewidth = 1)
ax2.set_xlabel('$\overline{u^{\prime 2}}$', fontsize = lsiz)
ax2.set_xlim(-0.01, 0.06)
ax3.plot(uv_ref, Yn, color = 'k', linestyle = '--', linewidth = 1)
ax3.plot(uv_knf, Yn, color = 'tab:orange', linestyle = '-', linewidth = 1)
ax3.plot(uv_lstm, Yn, color = 'tab:blue', linewidth = 1)
ax3.plot(uv_hdmd, Yn, color = 'tab:green', linewidth = 1)
ax3.set_xlabel('$\overline{u^{\prime}v^{\prime}}$', fontsize = lsiz)
ax3.set_xlim(-0.007, 0.001)
plt.subplots_adjust(wspace = 0.3)
# Uncomment to export the figure for the paper.
# plt.savefig('statistics.pdf', bbox_inches='tight')
| true
|
dd955cbd954e5f4a914336c509e4a38b115f07c4
|
Python
|
mgzhao/test
|
/modelmanager/distributed.py
|
UTF-8
| 2,167
| 2.609375
| 3
|
[] |
no_license
|
import os
import random
import torch.distributed as td
from torch.multiprocessing import Process
def train(Model, model_args):
    """Spawn one worker process per GPU listed in model_args['gpus'] and
    block until all of them finish training."""
    # Run one worker node for each gpu; the distributed world size grows
    # accordingly so each GPU counts as its own worker.
    gpus = model_args['gpus']
    model_args["distributed"]["world_size"] *= len(gpus)
    processes = []
    for gpu in gpus:
        p = Process(target=launch_worker_thread, args=(gpu, Model, model_args))
        p.start()
        processes.append(p)
    # Wait for every worker before returning.
    for p in processes:
        p.join()
def launch_worker_thread(gpu, Model, model_args):
    """Worker entry point: pin this process to a single GPU, then build the
    model and run its training loop."""
    # Restricting CUDA_VISIBLE_DEVICES makes the chosen GPU appear as
    # device 0 inside this process, so the model needs no device plumbing.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
    model = Model(**model_args)
    model.train()
def init_processes(
    world_size, sharedfile,
    group_name, backend='gloo',
    initialized=False
):
    """ Initialize the distributed environment and return this worker's rank.

    Uses file-based rendezvous: every worker opens the same shared file to
    discover the other members of the process group. Pass initialized=True
    to skip initialization when the group already exists.
    """
    if not initialized:
        filepath = 'file://{}'.format(sharedfile)
        td.init_process_group(
            backend,
            init_method=filepath,
            group_name=group_name,
            world_size=world_size)
    # Rank of this worker within the (now initialized) process group.
    rank = td.get_rank()
    return rank
class Partition(object):
    """Read-only view of `data` restricted to the positions listed in `index`."""
    def __init__(self, data, index):
        self.data = data
        self.index = index
    def __len__(self):
        # The view is exactly as long as its index list.
        return len(self.index)
    def __getitem__(self, index):
        # Translate the local position into an index of the backing dataset.
        return self.data[self.index[index]]
class DataPartitioner(object):
    """Randomly split a dataset into fractional partitions.

    The split is reproducible: the same seed yields the same shuffle, so
    every distributed worker computes identical partitions.
    """
    def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234):
        self.data = data
        self.partitions = []
        random.seed(seed)
        shuffled = [pos for pos in range(0, len(data))]
        random.shuffle(shuffled)
        # Peel off a prefix of the shuffled indices for each fraction.
        for frac in sizes:
            take = int(frac * len(data))
            self.partitions.append(shuffled[0:take])
            shuffled = shuffled[take:]
    def use(self, partition):
        """Return a Partition view over the chosen split."""
        return Partition(self.data, self.partitions[partition])
def get_partition(dataset):
    """Split `dataset` evenly across all workers and return this worker's
    Partition (selected by its distributed rank)."""
    size = td.get_world_size()
    # Equal fractions, one per worker.
    partition_sizes = [1.0 / size for _ in range(size)]
    partition = DataPartitioner(dataset, partition_sizes)
    partition = partition.use(td.get_rank())
    return partition
| true
|
d244e73f778f596c0c9f213c7f83175c7079fff9
|
Python
|
hemanthkumark005/Insurance_classification_task
|
/trainer/model.py
|
UTF-8
| 1,965
| 2.53125
| 3
|
[] |
no_license
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
def input_fn(features, labels, shuffle, num_epochs, batch_size):
    """Build a tf.data pipeline yielding batches for training or prediction.

    When `labels` is None the dataset yields features only (prediction);
    otherwise it yields (features, labels) pairs.
    """
    if labels is None:
        inputs = features
    else:
        inputs = (features, labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    if shuffle:
        # Buffer covering the whole dataset gives a full shuffle.
        dataset = dataset.shuffle(buffer_size=len(features))
    # We call repeat after shuffling, to prevent separate epochs from blending together.
    dataset = dataset.repeat(num_epochs)
    dataset = dataset.batch(batch_size)
    return dataset
def create_keras_model(input_dim, learning_rate):
    """Create and compile a fully-connected Keras network for binary
    classification.

    :param input_dim: number of input features
    :param learning_rate: learning rate for the RMSprop optimizer
    :return: compiled tf.keras model, ready to be trained
    """
    Dense = tf.keras.layers.Dense
    # Funnel-shaped stack of ReLU layers narrowing towards the output.
    model = tf.keras.Sequential(
        [
            Dense(125, activation=tf.nn.relu, kernel_initializer='uniform',
                  input_shape=(input_dim,)),
            Dense(75, activation=tf.nn.relu),
            Dense(65, activation=tf.nn.relu),
            Dense(40, activation=tf.nn.relu),
            Dense(25, activation=tf.nn.relu),
            Dense(1, activation=tf.nn.sigmoid) #since it is a binary classification, last layer is sigmoid
        ])
    # using custom optimizer:
    optimizer = tf.keras.optimizers.RMSprop(lr=learning_rate)
    # Compile Keras model with the loss matching the sigmoid output.
    model.compile(
        loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
| true
|
a029f8b95bc862ea028df7eaa2d3f3ba9816b458
|
Python
|
alexhkurz/introduction-to-programming
|
/src/insertion_sort.py
|
UTF-8
| 534
| 4.03125
| 4
|
[] |
no_license
|
list = [2,1]
def mysort(list):
new_list = [] # the sorted list
for x in list:
# insert x in new_list
j = 0 # j is used to computer the index where to insert x
# compare x against the elements y of new_list
for y in new_list:
if x <= y:
break # insert x "here", ie at index j
else:
j = j + 1 # continue with the next element of new_list
new_list.insert(j,x)
return new_list
print(mysort(list))
| true
|
85c9b1df58172c0eb2a2f5a4ac6185c8807913bf
|
Python
|
linea-it/tno
|
/backend/skybot/skybot_server.py
|
UTF-8
| 11,011
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
import os
from datetime import datetime
from io import StringIO
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
from requests.exceptions import HTTPError
class SkybotServer:
    """Helper class that simplifies queries against the Skybot web service."""
    def __init__(self, url):
        # Column names of the per-exposure result records assembled by callers.
        self.result_columns = [
            "expnum",
            "band",
            "date_obs",
            "skybot_downloaded",
            "skybot_url",
            "download_start",
            "download_finish",
            "download_time",
            "filename",
            "file_size",
            "file_path",
            "import_start",
            "import_finish",
            "import_time",
            "count_created",
            "count_updated",
            "count_rows",
            "ccd_count",
            "ccd_count_rows",
            "ccd_start",
            "ccd_finish",
            "ccd_time",
            "error",
        ]
        # Base URL of the Skybot service.
        # NOTE(review): the original comment said "including the endpoint",
        # but cone_search() joins the endpoint itself, so this should be the
        # base URL, e.g. http://vo.imcce.fr/webservices/skybot/ — confirm.
        self.server_url = url
    def __check_date(self, date):
        """Ensure `date` is a string; datetime objects are converted.

        Arguments:
            date {datetime} -- Observation date.
        Returns:
            str -- Observation date formatted as "%Y-%m-%d %H:%M:%S"
        """
        # Convert the date to a string if it is a datetime object.
        if isinstance(date, datetime):
            date = date.strftime("%Y-%m-%d %H:%M:%S")
        return date
    def __check_ra(self, ra):
        """Validate RA: must be a float between 0 and 360.

        Arguments:
            ra {float} -- RA value in degrees.
        Raises:
            Exception: raised when ra is not a float or lies outside 0..360.
        Returns:
            float -- RA
        """
        ra = float(ra)
        if 0 <= ra <= 360:
            return ra
        else:
            raise Exception(
                "Right ascension or ecliptic longitude of the FOV center limits 0..360 degrees."
            )
    def __check_dec(self, dec):
        """Validate Dec: must be a float between -90 and 90.

        Arguments:
            dec {float} -- Dec value in degrees.
        Raises:
            Exception: raised when dec is not a float or lies outside -90..90.
        Returns:
            float -- Dec
        """
        dec = float(dec)
        if -90 <= dec <= 90:
            return dec
        else:
            raise Exception(
                "Declination or ecliptic latitude of the FOV center limits -90..+90 degrees."
            )
    def __check_radius(self, radius):
        """Validate the radius: must be a float between 0 and 10.

        Arguments:
            radius {float} -- Cone search radius in degrees.
        Raises:
            Exception: raised when radius is not a float or lies outside 0..10.
        Returns:
            float -- radius
        """
        radius = float(radius)
        if 0 <= radius <= 10:
            return radius
        else:
            raise Exception("Radius of the FOV must be float between 0 and 10 degrees.")
    def __get_ticket_from_response(self, data):
        """Read the output file and retrieve the ticket number on the second line.
        this ticket identifies the request that was made for the Skybot service.

        Arguments:
            data {str} -- response from skybot server
        Returns:
            int -- Ticket number, example: 166515392791779001
        """
        line = data.splitlines()[1]
        ticket = int(line.split(":")[1].strip())
        return ticket
    def cone_search(
        self, date, ra, dec, radius, observer_location, position_error, output
    ):
        """Query the Skybot service and write the raw result to a file.

        Uses Skybot's conesearch function. Example request URL:
        # http://vo.imcce.fr/webservices/skybot/skybotconesearch_query.php?-ep=2012-11-10%2003:27:03&-ra=37.44875&-dec=-7.7992&-rd=1.1&-mime=text&-output=object&-loc=w84&-filter=0

        This method always returns the result dict, even when an exception
        occurs; in that case `success` is False and `error` holds the
        exception. On success, `success` is True, `error` is None and all
        other fields are filled in.

        Arguments:
            date {datetime} -- Epoch requested, expressed in Julian day or formatted as any English textual datetime.
            ra {float} -- Right ascension or ecliptic longitude of the FOV center in degrees. limits 0..360 degrees.
            dec {float} -- Declination or ecliptic latitude of the FOV center in degrees. limits -90..+90.
            radius {float} -- Radius of the FOV in degrees limits 0..10 degrees.
            observer_location {str} -- Code of the observer location based in this list of ObsCodes: https://minorplanetcenter.net//iau/lists/ObsCodes.html.
            position_error {float} -- Filter to retrieve only objects with a position error lesser than the given value. optional parameter 0 implies no filter Default: 120 arcsec .
            output {str} -- path of the file the raw response is written to
        Returns:
            {dict} -- request information, execution time, status...
            dict({
                'success': False,
                'ticket': None, # Identifier returned by Skybot.
                'positions': 0, # Number of positions found.
                'start': None, # Download start.
                'finish': None, # Download end.
                'execution_time': 0, # Download execution time.
                'file_size': 0, # Size of the created file.
                'skybot_url': None, # URL used for the Skybot request.
                'error': None # Filled with the exception if one occurs.
            })
        """
        t0 = datetime.now()
        result = dict(
            {
                "success": False,
                "ticket": None,
                "positions": 0,
                "start": None,
                "finish": None,
                "execution_time": 0,
                "output": None,
                "file_size": 0,
                "skybot_url": None,
                "error": None,
            }
        )
        try:
            # Build the URL for the Skybot cone-search endpoint.
            # example: http://vo.imcce.fr/webservices/skybot/skybotconesearch_query.php
            url = urljoin(self.server_url, "skybotconesearch_query.php")
            # Issue the HTTP request.
            r = requests.get(
                url,
                params={
                    "-ep": self.__check_date(date),
                    "-ra": self.__check_ra(ra),
                    "-dec": self.__check_dec(dec),
                    "-rd": self.__check_radius(radius),
                    "-loc": str(observer_location),
                    "-mime": "text",
                    "-output": "all",
                    "-filter": float(position_error),
                },
            )
            # Check the request status.
            # If the response was successful, no Exception will be raised
            r.raise_for_status()
            # Write the raw response to the output file.
            with open(output, "w+") as csv:
                csv.write(r.text)
            # Update the return dict with the execution data.
            # positions: the response has 3 non-data lines (header/ticket/columns).
            result.update(
                {
                    "success": True,
                    "skybot_url": r.url,
                    "file_size": os.path.getsize(output),
                    "ticket": self.__get_ticket_from_response(r.text),
                    "positions": len(r.text.splitlines()) - 3,
                }
            )
            # Check whether the Skybot result contains any null values.
            # If any field is null the result is treated as a failure.
            if not self.check_missing_values(r.text):
                result.update(
                    {
                        "success": False,
                        "error": "Result of skybot has fields missing values.",
                    }
                )
        except HTTPError as http_err:
            result.update(
                {"success": False, "error": "HTTP error occurred: %s" % http_err}
            )
        except Exception as e:
            # NOTE(review): the exception object itself (not str(e)) is stored
            # here — confirm downstream consumers can handle that.
            result.update({"success": False, "error": e})
        # execution time
        t1 = datetime.now()
        tdelta = t1 - t0
        result.update(
            {
                "start": t0.strftime("%Y-%m-%d %H:%M:%S"),
                "finish": t1.strftime("%Y-%m-%d %H:%M:%S"),
                "execution_time": tdelta.total_seconds(),
            }
        )
        return result
    def check_missing_values(self, data):
        """Validate the Skybot response by searching for null values.

        Returns False when any null value is present (validation failed),
        True when there are no nulls (result is valid).

        Args:
            data (String): Skybot cone search result
        Returns:
            bool: True for a valid result, False for an invalid one.
        """
        df = self.read_output_file(data)
        # Empty strings count as missing values too.
        df = df.replace("", np.nan)
        has_null_value = df.isnull().values.any()
        return not has_null_value
        # # TODO: remove — this was test-only code (unreachable after return)
        # from common.random import randbool
        # return randbool(2)
    def read_output_file(self, data):
        """Parse the Skybot result text into a dataframe.

        Args:
            data (String): Skybot cone search result
        Returns:
            pandas.dataframe: Dataframe with the Skybot result
        """
        # Column headers as they appear in the file, in read order.
        headers = [
            "number",
            "name",
            "ra",
            "dec",
            "dynclass",
            "mv",
            "errpos",
            "d",
            "dracosdec",
            "ddec",
            "dgeo",
            "dhelio",
            "phase",
            "solelong",
            "px",
            "py",
            "pz",
            "vx",
            "vy",
            "vz",
            "jdref",
        ]
        df = pd.read_csv(StringIO(data), skiprows=3, delimiter="|", names=headers)
        # Strip surrounding whitespace from every string cell.
        df = df.applymap(lambda x: x.strip() if type(x) == str else x)
        return df
| true
|
1fb9d230a339be93c1da0bd1679a6a53d2dec39b
|
Python
|
quarkgluant/exercism
|
/python/perfect-numbers/perfect_numbers.py
|
UTF-8
| 376
| 3.5625
| 4
|
[] |
no_license
|
def classify(number):
    """
    Classify a positive integer by Nicomachus' scheme.

    Returns 'perfect' when the aliquot sum equals the number, 'abundant'
    when it exceeds it, and 'deficient' when it falls short.

    Raises:
        ValueError: for zero or negative input.
    """
    if number <= 0:
        # Fixed typos in the original message ("then ... posistive").
        raise ValueError('the number must be strictly positive')
    # Renamed local so the builtin `sum` is no longer shadowed.
    aliquot = sum_aliquots(number)
    if aliquot == number:
        return 'perfect'
    elif aliquot > number:
        return 'abundant'
    else:
        return 'deficient'


def sum_aliquots(number):
    """Sum of the proper divisors of `number` (every divisor except itself)."""
    # Divisors drawn from range(1, number) are already unique, so the
    # original set() wrapper was redundant; a generator avoids the list too.
    return sum(div for div in range(1, number) if number % div == 0)
| true
|
5ca558d6794e3937f8793f63cf2f1d4cab72add2
|
Python
|
shivam90y/practice-py
|
/practice4.py
|
UTF-8
| 156
| 3.640625
| 4
|
[] |
no_license
|
# Write a Python program to convert a tuple to a string.
tup = ('S', 'h', 'i', 'v', 'a', 'm', ' ', 'y', 'a', 'd', 'a', 'v')
# Join the characters in one pass; bound to `joined` instead of shadowing
# the builtin `str` as the original did.
joined = ''.join(tup)
print(joined)
| true
|