blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1d48a81b37ff9e19b250447e282675092e8ecb10 | Python | fparedesg/RecipeProject | /TeamLarry/recipeBuilder.py | UTF-8 | 7,151 | 2.71875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import urllib2
import json
import pymongo
import string
import re
READ_ME = "This script doesn't do anything anymore. This was used to build our initial knowledge base."
#this method is used to retrieve the information about the food name,quantity and the direction.
# Cuisine keywords and flavor keywords used to drive the scraping runs below.
# NOTE(review): "buildFalvor" is a typo for "buildFlavor", but the name is
# referenced throughout this module, so it must not be renamed here alone.
buildCountry = ["mexico","chinese","japanese"]
buildFalvor = ["sweet","sour","spicy","bitter"]
def buildSoup(url):
try:
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
hdr = {'User-Agent' : user_agent}
req = urllib2.Request(url,headers=hdr)
html = urllib2.urlopen(req)
except urllib2.HTTPError,e:
print "deadlink!"
try:
soup = BeautifulSoup(html)
except:
print "invalid data!"
return soup
def retrieveWeb(url):
    """Scrape one allrecipes.com recipe page.

    Returns a dict with two keys:
      "foodname"   -- {ingredient name: amount string}
      "dircection" -- ordered list of instruction steps
    (the misspelled "dircection" key is kept because callers rely on it)
    """
    soup = buildSoup(url)
    ingredients = {}
    for pond in soup.find_all("li",{"id":"liIngredient"}):
        amount = pond.find("span",{"id":"lblIngAmount"})
        if amount != None:
            ingredients[pond.find("span",{"id":"lblIngName"}).text] = amount.text
    steps = []
    dircection = soup.find("div",{"itemprop":"recipeInstructions"})
    for li in dircection.find_all("li"):
        steps.append(li.text)
    return {"foodname": ingredients, "dircection": steps}
def importFoodToMongoDB():
    """Seed the recipe.foodlist Mongo collection from the flat word lists
    "meat" (type "M") and "vegetable_all" (type "V"); region starts empty."""
    conn = pymongo.Connection()
    db = conn.recipe
    # with-blocks close the files even on error (the original leaked them);
    # the unused `foodlist` alias and enumerate() indices were dropped.
    with open("meat", 'r') as meat_file:
        for line in meat_file:
            record = dict(name=line.lower().strip(), type="M", region=[])  # "M" stands for meat
            db.foodlist.insert(record)
    with open("vegetable_all", 'r') as veg_file:
        for line in veg_file:
            record = dict(name=line.lower().strip(), type="V", region=[])
            db.foodlist.insert(record)
    return
def generateFoodFlavor(flavor,NoTcondition=[]):
global buildFalvor
#http://allrecipes.com/search/default.aspx?wt=Mexico&Page=2
#This is the url related to search
FoodFlavor = {}
url = "http://allrecipes.com/search/default.aspx?wt=#&Page=@"
if len(NoTcondition)>0:
for i in range(0,len(NoTcondition)):
url+=("&u"+str(i)+"="+NoTcondition[i])
FoodFlavor[flavor] = {}
for i in range(1,6):
print "retrieve "+flavor+" Page:"+str(i)
realUrl = url.replace("#",flavor).replace("@",str(i)) #injection number here
soup = buildSoup(realUrl)
count = 0
for grid in soup.find_all("div",{"id":"divGridItemWrapper"}):
link = grid.find("a",{"class","img-link"})
if link!=None:
retrieve = retrieveWeb("http://allrecipes.com/"+link["href"])
retrieve["dircection"] = ""
foodname = link.find("img")["title"]
FoodFlavor[flavor][foodname] = retrieve
count+=1
print count
return FoodFlavor[flavor]
def generateFoodOrigin(country,NoTcondition=[]):
global buildCountry
#http://allrecipes.com/search/default.aspx?wt=Mexico&Page=2
#This is the url related to search
countryFood = {}
url = "http://allrecipes.com/search/default.aspx?wt=#&Page=@"
if len(NoTcondition)>0:
for i in range(0,len(NoTcondition)):
url+=("&u"+str(i)+"="+NoTcondition[i])
countryFood[country] = {}
for i in range(1,6):
print "retrieve "+country+" Page:"+str(i)
realUrl = url.replace("#",country).replace("@",str(i)) #injection number here
soup = buildSoup(realUrl)
count = 0
for grid in soup.find_all("div",{"id":"divGridItemWrapper"}):
link = grid.find("a",{"class","img-link"})
if link!=None:
retrieve = retrieveWeb("http://allrecipes.com/"+link["href"])
retrieve["dircection"] = ""
foodname = link.find("img")["title"]
countryFood[country][foodname] = retrieve
count+=1
print count
return countryFood[country]
#URLLIST = ["http://en.wikipedia.org/wiki/List_of_vegetables","http://en.wikipedia.org/wiki/List_of_culinary_herbs_and_spices","http://en.wikipedia.org/wiki/List_of_domesticated_meat_animals"]
#NOTE: this token-normalization approach is general enough to be reused by any normalize routine.
def retrieveRelated(radwdata,resultFile):
#this can not be the word in the receipe, this actually can be analysised out..
global buildCountry
global buildFalvor
verbList = [r'roast',r'polish',r'half',r'halv',r'trim(m)*',r'condense',r'rins',r'reserve',r'toast',r'color',r'crack',r'flake',r'can(n)*',r'pack',r'drain',r'fry',r'sift',r'[0-9]',r'peel',r'grate',r'heat',r'shred(d)*',r'mince',r'shred(d)*',r'deveine',r'dice',r'remove',r'divide',r'chop(p)*',r'(un)*cook',r'extract',r'ground',r'cut(t)*',r'crush',r'slice',r'bak(e|ing)',r'mix',r'wash',r'soft(en)*']
reglist = []
stopSet = set()
for word in verbList:
reglist.append(re.compile(word+"(ed|s|es|ing)*"))
stopwordList = ['and','or','with','such','as','if','into','finely','to','of','for','from','back','inch','fresh']
for word in stopwordList:
stopSet.add(word)
for word in buildCountry:
stopSet.add(word)
for word in buildFalvor:
stopSet.add(word)
foodRelatedTable = radwdata
#this is the possible ingredient name for food..
foodnameTable = {}
conn = pymongo.Connection()
db = conn.recipe
for foodname in foodRelatedTable:
foodIngredient = foodRelatedTable[foodname]["foodname"]
for ingredient in foodIngredient:
ingredient = ingredient.encode('utf-8')
#delete the punctuation here
delset = string.punctuation
ingredient = ingredient.translate(None,delset).lower().split()
ingredientWord = []
#this is for the possible ingredient word
for i,word in enumerate(ingredient):
if any(regex.match(word) for regex in reglist) or word in stopSet or len(word)<3:
continue
buffer = []
for ingredientName in ingredientWord:
if ingredientName[len(ingredientName)-1] == ingredient[i-1]:
temp = []
for T in ingredientName:
temp.append(T)
buffer.append(temp)
buffer[len(buffer)-1].append(word)
for temp in buffer:
ingredientWord.append(temp)
ingredientWord.append([word])
for c in ingredientWord:
name = ' '.join(c)
if name in foodnameTable:
foodnameTable[name]+=1
else:
foodnameTable[name] =1
for k in foodnameTable:
foodnameTable[k]*=len(k.split())
finalResult = []
for k,v in sorted(foodnameTable.iteritems(), key=lambda d:d[1], reverse = True):
print k,v
finalResult.append(k)
with open(resultFile, 'w') as out_file:
out_file.write('\n'.join(finalResult))
return
def retrieveRawData(srcfile, type):
    """Load the JSON file *srcfile* and return the object stored under key *type*.

    (The parameter name `type` shadows the builtin but is kept for callers.)
    """
    with open(srcfile, 'r') as f:
        return json.load(f)[type]
def intersectData(file1,file2list):
src = set()
for i,line in enumerate(open(file1)):
src.add(line.strip())
dst = set()
for dstfile in file2list:
for i,line in enumerate(open(dstfile)):
dst.add(line.strip())
src = src-dst
print src
def retrieveCountryData():
    """Scrape recipes for every cuisine in buildCountry and dump the whole
    mapping to result.json."""
    countryDict = {}
    for country in buildCountry:
        countryDict[country] = generateFoodOrigin(country)
    payload = json.dumps(countryDict)
    with open('result.json', 'w') as out_file:
        out_file.write(payload)
def retriveFlavorData():
    """Scrape recipes for every flavor in buildFalvor and dump the whole
    mapping to Flavor.json.  (Function-name typo preserved for callers.)"""
    flavordict = {}
    for flavor in buildFalvor:
        flavordict[flavor] = generateFoodFlavor(flavor)
    payload = json.dumps(flavordict)
    with open('Flavor.json', 'w') as out_file:
        out_file.write(payload)
def main():
    # Entry point stub: the project is driven from gui.py; the scraping
    # helpers above were only ever used once to build the knowledge base.
    print "Please run gui.py to execute program."
    print READ_ME
if __name__ == '__main__':
main() | true |
8d64d1d87cdb0c5cfcb69bec1ce2d7c8e9969a1a | Python | alaxion/Exercises | /LeetCode/06/lc697.py | UTF-8 | 466 | 3.375 | 3 | [
"MIT"
] | permissive | # lc697.py
# LeetCode 697. Degree of an Array `E`
# acc | 100% | 41'
# A~0g29
class Solution:
    """LeetCode 697 -- Degree of an Array.

    The degree of an array is the maximum frequency of any element; return
    the length of the shortest contiguous subarray with that same degree.
    """

    def findShortestSubArray(self, nums: List[int]) -> int:
        # All elements distinct -> degree 1 -> any single element suffices.
        if len(nums) == len(set(nums)):
            return 1
        freq = Counter(nums)
        first = {}
        last = {}
        for idx, value in enumerate(nums):
            first.setdefault(value, idx)
            last[value] = idx
        degree = max(freq.values())
        # Shortest span between first and last occurrence of a max-frequency value.
        return min(last[v] - first[v] + 1 for v in freq if freq[v] == degree)
| true |
101bda86635e6e2ccaea43eb7f64cdfb75803a19 | Python | antonio0219/rpgmap | /player.py | UTF-8 | 3,129 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 12 17:03:38 2020
@author: anton
"""
import pygame
import pygame.time as GAME_TIME
class player:
    """Animated side-view player sprite with walking and idle frames."""

    def __init__(self):
        # Position will become configurable later; for now the sprite is
        # drawn centred on the surface (see get_info).
        self.height = 75
        self.vel = 5  # pixels moved per frame
        self.stepMoment = GAME_TIME.get_ticks()  # timestamp of the last frame swap
        self.lastDirection = 'right'  # direction the sprite last faced
        self.brake = 2  # braking power, pixels/frame of deceleration
        frame_files = [
            "right.png",          # 0: idle right, frame A
            "right2.png",         # 1: idle right, frame B
            "left.png",           # 2: idle left, frame A
            "left2.png",          # 3: idle left, frame B
            "walkingRight1.png",  # 4: walking right, frame A
            "walkingRight2.png",  # 5: walking right, frame B
            "walkingLeft1.png",   # 6: walking left, frame A
            "walkingLeft2.png",   # 7: walking left, frame B
        ]
        self.images = [
            pygame.image.load("assets/player/oldMan/" + name)
            for name in frame_files
        ]
        self.imageToDraw = self.images[0]  # frame rendered this tick

    def _toggle(self, first, second):
        """Swap imageToDraw between images[first] and images[second] and
        restart the animation timer."""
        if self.imageToDraw == self.images[first]:
            self.imageToDraw = self.images[second]
        else:
            self.imageToDraw = self.images[first]
        self.stepMoment = GAME_TIME.get_ticks()

    def state(self, info):
        """Advance the animation for the given state: 'movingRight',
        'movingLeft' or 'still' (walk frames swap every 60 ms, idle every 200 ms)."""
        if info == 'movingRight':
            self.lastDirection = 'right'
            if GAME_TIME.get_ticks() - self.stepMoment >= 60:
                self._toggle(4, 5)
        if info == 'movingLeft':
            self.lastDirection = 'left'
            if GAME_TIME.get_ticks() - self.stepMoment >= 60:
                self._toggle(6, 7)
        if info == 'still':
            if GAME_TIME.get_ticks() - self.stepMoment >= 200:
                if self.lastDirection == 'right':
                    self._toggle(0, 1)
                if self.lastDirection == 'left':
                    self._toggle(2, 3)

    def get_vel(self):
        """Current speed in pixels per frame."""
        return self.vel

    def get_info(self, surface):
        """Return [image, x, y, height] with the sprite centred on *surface*."""
        return [self.imageToDraw, surface.get_width()/2, surface.get_height()/2, self.height]
899b79055b1b4c35052df2ba03e7629bdf92fdeb | Python | ander-garcia/theegg_ai | /tarea_44/sumatorio_test.py | UTF-8 | 511 | 2.953125 | 3 | [] | no_license | import pytest
from sumatorio import Sumatorio
def test_suma_lineal():
    """suma_lineal must match the closed-form triangular numbers."""
    sumatorio = Sumatorio()
    for n, expected in ((0, 0), (1, 1), (10, 55), (100, 5050)):
        assert sumatorio.suma_lineal(n) == expected
def test_suma_constante():
    """suma_constante must agree with the same triangular-number values."""
    sumatorio = Sumatorio()
    for n, expected in ((0, 0), (1, 1), (10, 55), (100, 5050)):
        assert sumatorio.suma_constante(n) == expected
| true |
028b5a3d52a678e241e30337d841aff39dad6cd5 | Python | RohanRadia/BaguetteBot | /bot/cogs/owner.py | UTF-8 | 1,626 | 2.578125 | 3 | [
"MIT"
] | permissive | from bot import logger
from discord.ext import commands
class Owners(commands.Cog):
    """Owner-only maintenance commands: shutting the bot down and hot
    (un/re)loading extension cogs.  Every command is gated by is_owner()."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(aliases=("sd", "shutdown", "lo"))
    @commands.is_owner()
    async def logout(self, ctx):
        """Logout of bot"""
        # Close the shared HTTP session before dropping the gateway
        # connection so no in-flight requests are left dangling.
        await self.bot.http_session.close()
        await ctx.send("`Bot logging out... closing connections.`")
        logger.info("Logged out - all connections closed.")
        await self.bot.logout()
    @commands.command()
    @commands.is_owner()
    async def load(self, ctx, cog: str):
        """Load a cog"""
        # `cog` is the dotted extension path, e.g. "bot.cogs.owner".
        self.bot.load_extension(cog)
        logger.info(f"Successfully loaded extension: {cog}")
        await ctx.send(f"`{cog}` successfully loaded.")
    @commands.command()
    @commands.is_owner()
    async def unload(self, ctx, cog: str):
        """Unload a cog"""
        self.bot.unload_extension(cog)
        logger.info(f"Successfully unloaded extension: {cog}")
        await ctx.send(f"`{cog}` successfully unloaded.")
    @commands.command()
    @commands.is_owner()
    async def reload(self, ctx, cog: str):
        """Reload a cog"""
        # Unload first; tolerate the cog never having been loaded.
        try:
            self.bot.unload_extension(cog)
        except Exception as e:
            # NOTE(review): this broad except also hides genuine unload
            # failures and `e` is unused -- consider narrowing the catch.
            logger.error(f"Attempted to unload cog: {cog} which was never loaded on start up. Bypassing and attempting "
                         f"to complete function.")
        self.bot.load_extension(cog)
        logger.info(f"Successfully reloaded extension: {cog}")
        await ctx.send(f"`{cog}` successfully reloaded.")
def setup(bot):
    # discord.py extension hook: invoked by bot.load_extension("bot.cogs.owner").
    bot.add_cog(Owners(bot))
| true |
35e8066ee7f0e0139d3f42a0081d8daf71e77f09 | Python | bopopescu/icsw | /initat/cluster/sign_data.py | UTF-8 | 1,698 | 2.6875 | 3 | [] | no_license | #!/usr/bin/python-init
# -*- coding: utf-8 -*-
import argparse
import base64
from M2Crypto import EVP
from lxml import etree
from lxml.builder import E
# SECURITY NOTE(review): an RSA private key is committed to source control
# here.  It should be loaded from protected storage/configuration instead;
# confirm whether anything still trusts signatures made with this key.
PRIVATE_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA4p6JXKFMDbUpe45HhGZs/fBsQQki80YqQzgeRB42f0waKKqT
oia929RcMil1g4JRmpOu6IAWaKTthuueIKjeB1bnXYZhHKwod4Bmp6cooU3A1i+6
2F6AHqLJQvmaxK6UPMWyAvX6BPqJKdRcyH+XtFXGXEDnokfxzI1Cxgm1hYtpRwvh
inERFOl1BVNgKRJD3QoYcztHDpRf58eO7i/Eglk1qrAYb+COX14ighkhQrUZ8Q3W
cOMuUvEjupUCOiBTVIU5XTMnaggmdBZETg7q2V6hjXVdEVg7PBYpwcGFS2TQY8g8
2Di6nERR4LNEAsvUX/8tRzNokOBsjSfcSWShGwIDAQABAoIBAFB1QQemLL5hJ406
gqG7S88M4SJmAAanTrH25qgAohCoEFGH3kqfvqCh0OnuVk6OojJjZKIfd9VHWR2h
4c5upgWtEQ/fefMYHHXxHIFBk+dRF7nz0D6prosx+IrS2+Qgp3i8J+ttMYs6+B/l
ydtVkaLxIS/3y0WOjYa2UJLHN69lmLnANGKv6emUmrCiSGS2kAJJKFwnjvw3tej0
K+nuAd2SrUIi0LM5hVfUlxzBavqRUYk5Isl2JEvl/E+z0isammtCw6DIpRSA1DAL
o76M3qqCX2rf0mmYNV0sUzdo9K8S1KSxf0E7PRZYMh2YTzQtDjN6rK316Rb1idYv
GMaWPKECgYEA9Jjrzu2cJi0086ytzMVCZ5WleIOcRLqPEpApLiJZ1aDvUYW+rFIF
X5iIFC8r5k53WxjLPJrTGm2TCndqN+e+X0LvlIGmvLHVUnLJpiJfBNP5mNPOTlk8
LaxJW0BpiU7kBJP+/7D3z3np9MKBBmYIyVgOyFB5So+EQIteWhbEepcCgYEA7S8P
rYlJ176pPGom0ZXn1l9SMyztLa3yALcjoKyRdRzGlj1SvSidKvohGlLVNE8Shw62
vYr5LHl5/3+iTErCtlj/f2k11K4wAQQ/8hJUyWDKMH7dcDo3Ff8JOgWh9lpV4/pR
tg9UTQliw67e94t2qVNF0GHyxGS/ULanUzjA8h0CgYBN/t1i1L3wJoY2FaAuJdCw
+zUSotUXzW2F+9ZF0cpXpsPpeP5+MIFqJFdwKEKVY/wHXnagUrZyPPKgacfDH/DC
q7N95YHntcVSTywh/9/QyE9U/mVQ8n+QCNozcOy2TiPDmfW8TxAWZsfFtqgyBCNV
IPFFyvOCZRVFB6wEijII7QKBgQCsSGbm8rZElCVx0Nlpm63PNWYL7jJJ3/PNOToT
18XAf6pwLxMOe5XOReoNqOVdHaKjn7h1baEZARPw1tEZAaT1tye/cLi9R9luo5uf
Rll3/WpgV4aZom+o9pvJHZZLz8pb0tPPnsrpOkwXP8qNnSwQSoCHoN4qcdPV2Rcp
iCv+sQKBgQCJ7LH2+xkfSRGDhyByMci1SNFLebDe64nEDIW2YPcnK8qRNGCXDsaP
qqKkd4Z694W+GSryGBf+tUo/mtgoSX8lYRfWTvPEyPiq1aEQveX27G6J/me6OCK5
RJFfUw9Ll0BI4y0xE+RV9MLkyVKbvn4KdeflTrU0b71GnF2DVji9GQ==
-----END RSA PRIVATE KEY-----
"""
class Signature(object):
    """RSA signer backed by M2Crypto's EVP API."""

    def __init__(self, private_key=PRIVATE_KEY):
        # Parse the PEM key once; reused for every sign() call.
        self.key = EVP.load_key_string(private_key)

    def _sign(self, data):
        """Return the raw (binary) signature of *data*."""
        self.key.sign_init()
        self.key.sign_update(data)
        return self.key.sign_final()

    def sign(self, data, no_xml=False, no_base64=False):
        """Sign *data*.

        Default: an lxml <result> element wrapping the data plus the
        base64-encoded signature.  With no_xml=True the bare signature is
        returned, base64-encoded unless no_base64 is also True.
        """
        raw = self._sign(data)
        if no_xml:
            if no_base64 is False:
                return base64.encodestring(raw)
            return raw
        encoded = base64.encodestring(raw)
        return E.result(E.data(data), E.signature("\n" + encoded))
if __name__ == "__main__":
    # CLI front-end: sign one datum with the embedded key and print either
    # an XML <result> document or the bare (optionally base64) signature.
    parser = argparse.ArgumentParser(description="Sign data with the private "
                                                 "key for monit license data")
    parser.add_argument("data", type=str, help="The data you want signed!")
    parser.add_argument("--no-xml", action="store_true", help="Don't output "
                        "the XML structure, just output the signature")
    parser.add_argument("--no-base64", action="store_true", help="Don't "
                        "base64 encode the signature")
    args = parser.parse_args()
    result = Signature().sign(args.data, no_xml=args.no_xml,
                              no_base64=args.no_base64)
    if isinstance(result, etree._Element):
        # XML results need serialising; plain signatures print directly.
        print etree.tostring(result, pretty_print=True)
    else:
        print result
| true |
a53c89f8d9951191dd50f053be791d80815e4c5a | Python | seanlee10/gym | /examples/agents/ivv.py | UTF-8 | 6,847 | 2.78125 | 3 | [
"MIT"
] | permissive | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import gcf
import math
from collections import deque
from random import randint
import os
import imageio
from scipy.misc import imsave
import random
import itertools
from os import path
from sklearn.model_selection import train_test_split
# Load the full IVV price history; each training image is built from a
# 25-day sliding window of adjusted closes.
df = pd.read_csv('IVV.csv')
close_list = df['Adj Close'].values
total_rows = close_list.shape[0]
window_size = 25
IMG_DIR = os.path.dirname(os.path.abspath(__file__)) + '/images/'
i = 0
# plot = np.array(total_rows, 2)
csv = pd.DataFrame()  # accumulates (image file name, label) rows
print(total_rows)
print('----------')
# For every sliding window: normalise prices to [0, 1], rasterise them into a
# 101x25 black/white chart image, and label the image by the next day's move.
while i < total_rows - window_size - 1:
    window = close_list[i:i+window_size]
    min = window.min()  # NOTE(review): shadows the builtins min/max
    max = window.max()
    def normalize(x):
        # print(min, max, x)
        # Scale into [0, 1] relative to this window's range.
        # NOTE(review): divides by zero if the window is flat (max == min).
        return (x - min) / (max - min)
    # Label = next-day percentage change, clamped to {-1, 0, 1}:
    # +1 for a gain above 1%, -1 for a loss below -1%, 0 otherwise.
    label = (close_list[i+window_size+1] / close_list[i+window_size]) - 1
    label = round(label * 100, 2)
    if label > 1:
        label = 1
    elif label < -1:
        label = -1
    else:
        label = 0
    row = pd.Series([str(i) + '.jpg', label])
    csv = csv.append(row, ignore_index=True)
    data = list(map(normalize, window))
    # One white pixel per day: row index = normalised price scaled to 0..100.
    figure = np.zeros((101, 25, 3), dtype=np.uint8)
    # print(np.asarray(data))
    for idx, v in enumerate(data):
        figure[int(round(v * 100)), idx] = 255
    # print(figure)
    # print(figure.shape)
    # imageio.imwrite(IMG_DIR + str(i) + '.jpg', figure)
    # Flip vertically so higher prices are drawn nearer the top of the image.
    plt.imsave(IMG_DIR + str(i) + '.jpg', np.flip(figure, 0))
    # print(type(list(map(normalize, window))))
    # line_plot = pd.DataFrame(list(map(normalize, window)))
    #
    # line_plot.plot(kind='line', legend=False)
    # plt.axis('off')
    # fig = plt.gcf()
    # fig.set_size_inches(.5, .5)
    # fig.set_canvas()
    # fig.set_clip_box()
    # plt.savefig(IMG_DIR + str(i) + '-0.jpg', bbox_inches='tight', pad_inches=0)
    i += 1
# Shuffled 75/25 split of the (file name, label) manifest into train/test CSVs.
train, test = train_test_split(csv, test_size=0.25, shuffle=True)
train.to_csv('train.csv', header=False, index=False)
test.to_csv('test.csv', header=False, index=False)
# plt.axis('off')
# plt.show()
# plt.savefig(IMG_DIR + '0.png', bbox_inches='tight', pad_inches=0)
# First and last rows of the history: overall start/end adjusted close.
df2 = df.iloc[[0, -1]]
# print('2')
# print(df2.tail(5))
# total_rows = df2.index.values[1]
start = pd.to_numeric(df2['Adj Close']).values.item(0)
end = pd.to_numeric(df2['Adj Close']).values.item(1)
def get_velocity(start, end, span) :
    """Average rate of change between *start* and *end* over *span* steps."""
    delta = end - start
    return delta / span
def get_v(data, span) :
    """Rolling velocity series over *data* (a numpy array).

    The first *span* entries are zero-padded; each following entry is the
    velocity across a window of length *span* starting at that offset.
    """
    v = [0.0] * span
    i = 0
    while i < len(data) - span:
        window = data[i:i + span]
        v.append(get_velocity(window.item(0), window.item(span - 1), span))
        i += 1
    return v
# velocity = get_velocity(start, end, total_rows)
# 0.0311424636872 => 3 cents a day
# print velocity
# print '---'
# Velocity (5- and 20-day rate of change) and acceleration (rate of change
# of the velocity) series; get_v pads the head with zeros.
v5 = get_v(pd.to_numeric(df['Adj Close']).values, 5)
a5 = get_v(np.asarray(v5), 5)
dfV5 = pd.DataFrame(v5)
dfA5 = pd.DataFrame(a5)
v20 = get_v(pd.to_numeric(df['Adj Close']).values, 20)
a20 = get_v(np.asarray(v20), 20)
dfV20 = pd.DataFrame(v20)
dfA20 = pd.DataFrame(a20)
# Moving averages and MACD (12/26 EMA difference with a 9-period signal line).
ma5 = df['Adj Close'].rolling(window=5).mean()
ma20 = df['Adj Close'].rolling(window=20).mean()
ema12 = df['Adj Close'].ewm(span=12).mean()
ema26 = df['Adj Close'].ewm(span=26).mean()
macd = ema12 - ema26
signal = macd.ewm(span=9).mean()
oscillator = macd - signal
# Indicator table; note dfV5/dfV20 are computed above but not included here.
data = pd.concat([df.loc[:, ['Date', 'Adj Close']], ma5, ma20, dfA5, dfA20, macd, signal, oscillator], axis=1)
data.columns = ['Date', 'Adj Close', '5MA', '20MA', '5A', '20A', 'MACD', 'SIGNAL', 'OSCILLATOR']
# print pd.concat([df.loc[:, ['Date', 'Adj Close']], dfA5, dfA20], axis=1)
# print(data.tail(360))
# np_data = data.loc[data.shape[0] - 360: data.shape[0] - 356, 'Adj Close':].as_matrix()
# print(np_data[4, 0])
# print Decimal(3 * (-19 * -5) / 100.0).quantize(Decimal('.1'), rounding=ROUND_FLOOR)
# print Decimal(3 * (-19 * -5) / 100.0).quantize(Decimal('.1'), rounding=ROUND_DOWN)
# print '{0}% {1} - {2}'.format((-19 * -5), round(3 * (-19 * -5) / 100.0, 2), round(round(3 * (-19 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-18 * -5), round(3 * (-18 * -5) / 100.0, 2), round(round(3 * (-18 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-17 * -5), round(3 * (-17 * -5) / 100.0, 2), round(round(3 * (-17 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-16 * -5), round(3 * (-16 * -5) / 100.0, 2), round(round(3 * (-16 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-15 * -5), round(3 * (-15 * -5) / 100.0, 2), round(round(3 * (-15 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-14 * -5), round(3 * (-14 * -5) / 100.0, 2), round(round(3 * (-14 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-11 * -5), round(3 * (-11 * -5) / 100.0, 2), round(round(3 * (-11 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-10 * -5), round(3 * (-10 * -5) / 100.0, 2), round(round(3 * (-10 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-9 * -5), round(3 * (-9 * -5) / 100.0, 2), round(round(3 * (-9 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-8 * -5), round(3 * (-8 * -5) / 100.0, 2), round(round(3 * (-8 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-7 * -5), round(3 * (-7 * -5) / 100.0, 2), round(round(3 * (-7 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-6 * -5), round(3 * (-6 * -5) / 100.0, 2), round(round(3 * (-6 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-5 * -5), round(3 * (-5 * -5) / 100.0, 2), round(round(3 * (-5 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-4 * -5), round(3 * (-4 * -5) / 100.0, 2), round(round(3 * (-4 * -5) / 100.0, 0)))
# print '{0}% {1} - {2}'.format((-3 * -5), round(3 * (-3 * -5) / 100.0, 2), round(round(3 * (-3 * -5) / 100.0, 0)))
# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (1 * 5 / 100.0)), 1 * 5)
# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (2 * 5 / 100.0)), 2 * 5)
# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (3 * 5 / 100.0)), 3 * 5)
# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (4 * 5 / 100.0)), 4 * 5)
# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (5 * 5 / 100.0)), 5 * 5)
# print(data.loc[data.shape[0] - 360: data.shape[0] - 356, 'Adj Close':].as_matrix())
# print(data.loc[data.shape[0] - 359: data.shape[0] - 355, 'Adj Close':].as_matrix())
#
# print(np.ravel(data.loc[data.shape[0] - 360: data.shape[0] - 356, 'Adj Close':].as_matrix())[20])
# print(np.ravel(data.loc[data.shape[0] - 359: data.shape[0] - 355, 'Adj Close':].as_matrix())[20])
# plt.figure()
# plt.plot(dfA5.tail(30))
# plt.plot(dfA20.tail(30))
# plt.grid()
# plt.show()
| true |
6a0b0693eb8b00c8c05cc02962325f32f18a6c74 | Python | lxieyang/python-tutorial | /Functional Programming/Sorted.py | UTF-8 | 856 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Demo 1: sorted() returns a new ascending list and leaves the input intact.
print '------------'
print 'Using sorted'
print '------------'
L = [36, 5, 12, 9, 21]
print 'Original:', L
print 'Sorted:', sorted(L)
print '\n------------'
print 'Reverse sort'
print '------------'
def reverse_cmp(x, y):
    """Python-2 cmp function that inverts the natural order (descending sort)."""
    if x < y:
        return 1
    if x > y:
        return -1
    return 0
# Demo 2: a custom cmp function passed to sorted() yields a descending order.
LL = [36, 5, 12, 9, 21]
print 'Original:', LL
print 'Sorted:', sorted(LL, reverse_cmp)
print '\n----------------------------'
print 'Sort characters without case'
print '----------------------------'
def cmp_ignore_case(ch1, ch2):
    """Python-2 cmp function comparing strings case-insensitively (via upper())."""
    u1 = ch1.upper()
    u2 = ch2.upper()
    if u1 == u2:
        return 0
    return -1 if u1 < u2 else 1
# Demo 3: case-insensitive sort using the cmp function defined above.
LLL = ['Jack', 'Zoo', 'ZOo', 'ariB', 'aRiC', 'hias']
print 'Original:', LLL
print 'Sorted:', sorted(LLL, cmp_ignore_case)
| true |
910fbc78f4f5cbe31dd875740b393c6f6e0ff7ef | Python | GabrielAmare/TextEngine | /item_engine/utils.py | UTF-8 | 1,897 | 3.265625 | 3 | [
"MIT"
] | permissive | from collections import deque
from typing import TypeVar, Generic, Deque, Iterator, Dict, Tuple, Iterable
# Element type parameter shared by the generic containers below.
K = TypeVar("K")
__all__ = ["Pile", "PositionRegister", "SetList"]
class SetList(list, Generic[K]):
    """A list that silently drops duplicate insertions, preserving order.

    Membership is checked with a linear scan, so inserts are O(n).
    """

    def append(self, object: K) -> None:
        # Guard clause: only genuinely new elements are stored.
        if object in self:
            return
        super().append(object)

    def extend(self, iterable: Iterable[K]) -> None:
        for object in iterable:
            self.append(object)
class Pile(Generic[K]):
    """FIFO work pile that may grow while it is being consumed.

    Iteration is DESTRUCTIVE: items are popped from the front, and items
    appended during iteration are yielded too.
    """

    def __init__(self, *data: K):
        self.data: Deque[K] = deque(data)
        self.length = len(data)  # count of items not yet yielded

    def __contains__(self, item: K) -> bool:
        return item in self.data

    def __iter__(self) -> Iterator[K]:
        # `length` is re-read each pass, so concurrent append() extends the run.
        while self.length > 0:
            yield self.data.popleft()
            self.length -= 1

    def append(self, item: K) -> None:
        self.data.append(item)
        self.length += 1
# Key/value type parameters for PositionRegister.
INDEX = TypeVar("INDEX")
POSITION = TypeVar("POSITION")
class PositionRegister(Generic[INDEX, POSITION]):
    """Assigns monotonically increasing positions to indices and supports
    merging two positions into one."""

    def __init__(self):
        self.positions: Dict[INDEX, POSITION] = {}

    def new(self) -> POSITION:
        """Next unused position: one past the current maximum, 0 when empty."""
        if not self.positions:
            return 0
        return max(self.positions.values()) + 1

    def get(self, index: INDEX) -> POSITION:
        """Position of *index*, allocating a fresh one on first sight."""
        if index not in self.positions:
            self.positions[index] = self.new()
        return self.positions[index]

    def merge(self, p1: POSITION, p2: POSITION) -> Tuple[POSITION, POSITION]:
        """
        With ``mn``/``mx`` resp. the minimum/maximum of ``p1`` and ``p2``:
        every index currently at ``mn`` is repointed to ``mx``.
        Returns (mn, mx).
        """
        mn, mx = sorted((p1, p2))
        for index, position in self.positions.items():
            if position == mn:
                self.positions[index] = mx
        return mn, mx
| true |
c2168428d799cee8e0542f3c17871b065795f6a0 | Python | MalseMus/Arena-shooter | /src/entities/entity.py | UTF-8 | 1,260 | 3.1875 | 3 | [] | no_license | import pygame
class Entity:
def __init__(self, x, y, w, h, c, faction = 0, hp = 0):
self.x = x
self.y = y
self.w = w
self.h = h
self.c = c
self.faction = faction
self.hp = hp
self.alive = True
def update(self, entities):
raise Exception("not implemented")
def draw(self, screen):
raise Exception("not implemented")
def rect(self):
return pygame.Rect(self.x, self.y, self.w, self.h)
def rect_wanted_pos(self, pos):
return pygame.Rect(pos[0], pos[1], self.w, self.h)
def on_collision(self, target):
pass
def check_collisions(self, entities):
potential_collisions = [e for e in entities if e != self]
for candidate in potential_collisions:
if candidate.rect().colliderect(self.rect()):
self.on_collision(candidate)
def center(self):
return (self.x + self.w / 2, self.y + self.h / 2)
def is_outside_screen(self):
screen_w, screen_h = pygame.display.get_surface().get_size()
return (self.x + self.w < 0
or self.x > screen_w
or self.y + self.h < 0
or self.y > screen_h)
def hurt(self, dmg):
pass
| true |
30227b0696198cfe571c1fd4cd6d6075d736a559 | Python | kbespalov/nir | /coordinator.py | UTF-8 | 1,402 | 2.53125 | 3 | [] | no_license | import argparse
import igraph
import networkx as nx
import os
import worker
from multiprocessing import Process, Queue
def graphs_flow(dirname):
    """Yield igraph.Graph objects decoded from the sparse6 files in *dirname*.

    Files are visited in numeric order (0.s6, 1.s6, ...), one graph per line.
    """
    numbers = sorted(int(name.split('.')[0]) for name in os.listdir(dirname))
    for number in numbers:
        path = os.path.join(dirname, "%d.s6" % number)
        with open(path, 'rb') as s6:
            for encoded_graph in s6:
                g = nx.parse_sparse6(encoded_graph.strip())
                yield igraph.Graph(edges=g.edges())
def create_parser():
    """Build the CLI parser: worker count, input/output dirs, invariant set."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', type=int, default=4, dest='workers')
    parser.add_argument('-i', default='./input', dest='input_path')
    parser.add_argument('-o', default='./out', dest='output_path')
    parser.add_argument('-c', default='adr', dest='inv')
    return parser
def main():
    """Fan the graph stream out to worker processes over a bounded queue."""
    parser = create_parser()
    args = parser.parse_args()
    # Bounded queue applies back-pressure so the reader cannot outrun workers.
    queue = Queue(100)
    workers = []
    process = []
    for x in range(args.workers):
        gp = worker.GraphProcessor(queue=queue,
                                   output=args.output_path,
                                   invariants=args.inv)
        p = Process(target=gp.run)
        workers.append(gp)
        process.append(p)
        p.start()
    for g in graphs_flow(args.input_path):
        queue.put(g)
    # One "done" sentinel per worker tells each GraphProcessor to stop.
    for x in range(0, args.workers):
        queue.put("done")
# NOTE(review): there is no `if __name__ == "__main__"` guard, so importing
# this module runs main(); the guard is also required for multiprocessing on
# Windows -- confirm before adding it, as callers may rely on import-time run.
main()
| true |
af40b4ef70433f0866f19d2697d2ece466a81c31 | Python | Josverl/MicroPython-Bootcamp | /Demos/Demo-2.2 Python 3/boot.py | UTF-8 | 543 | 3.015625 | 3 | [
"MIT"
] | permissive | # boot.py -- run on boot-up
# can run arbitrary Python, but best to keep it minimal
print("-- performing boot --")
# access the (ESP32) hardware functionality using the machine library
import machine
# Ask the port why we started: wake_reason() returns (reset_cause, wake_reason).
# NOTE(review): the numeric codes tested below are port-specific -- confirm
# against the MicroPython build actually flashed on the board.
reset , wake = machine.wake_reason()
#as an example it is possible to determine how/why the microcontroller was started or woken up
if reset == 0:
    print('Power On')
elif reset == 1:
    print('Hard reset')
elif reset == 6:
    print('Soft reset')
if wake == 3:
    print('woken by Touchpad')
print("-- end of boot.py --")
| true |
c8ab0635938e90f86ee28be30425c147b5db0098 | Python | samims/restapi-udemy | /status/api/shell_examples.py | UTF-8 | 1,325 | 2.734375 | 3 | [] | no_license | """
Just Notes
"""
# --- Django shell session notes: not a runnable module ---
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from status.api.serializers import StatusSerializer
from status.models import Status
# Serialize a single instance, render it to JSON bytes, then parse it back.
obj = Status.objects.first()
serializer = StatusSerializer(obj)
print(serializer.data)
json_data = JSONRenderer().render(serializer.data)
print(json_data)
stream = BytesIO(json_data)
data = JSONParser().parse(stream)
print(data)
"""
serialize a queryset
"""
qs = Status.objects.all()
serializer2 = StatusSerializer(qs, many=True)
serializer2.data
json_data2 = JSONRenderer().render(serializer2.data)
print(json_data2)
stream2 = BytesIO(json_data2)
data2 = JSONParser().parse(stream2)
print(data2)
# create obj
data = {"user": 1}
serializer = StatusSerializer(data=data)
serializer.is_valid()
# NOTE(review): save() is called unconditionally; in real code it must only
# run when is_valid() returned True (as the commented lines show).
serializer.save()
# if serializer.is_valid():
#     serializer.save()
'''
Update obj
'''
obj = Status.objects.first()
data = {'content': 'Some new content'}
# NOTE(review): DRF serializers take the instance as `instance=` (or the
# first positional argument); the `object=` keyword looks wrong -- verify.
update_serializer = StatusSerializer(object=obj, data=data) # returns object
update_serializer.is_valid()
update_serializer.save()
'''
Delete obj
'''
obj = Status.objects.first()
data = {'user': 1, 'content': 'Please delete me'}
serializer = StatusSerializer(object=obj, data=data)
serializer.is_valid()
serializer.save()
| true |
2bd9415d4e34bf0041a398defe3e9a5d74395b5e | Python | rumit91/4240CRM | /System/Entities/Person.py | UTF-8 | 3,679 | 2.921875 | 3 | [] | no_license | __author__ = 'Timur'
import xml.etree.ElementTree as ET
class Person:
def __init__(self):
self.person_id = ''
self.first_name = ''
self.last_name = ''
self.other_name = ''
self.birthday = ''
self.gender = ''
self.emails = []
self.phones = []
self.note = ''
#self.messages = [] #list of all the messages associated with this person
self.relationships = {} #list of all the relationships associated with the person including messages
# so there will be a "TO" key with a list of messages as the value and a "CC" key with a list of messages as a value
# this way we can have more relationships that we don't anticipate for at the writing of this method
def add_name_from_service(self, GetNameStrategy):
name = GetNameStrategy.get_name()
self.first_name = name[0]
self.last_name = name[1]
self.other_name = name[2]
def print_out(self):
print "---------------------------------"
print "First Name: %s" %(self.first_name)
print "Last Name: %s" %(self.last_name)
print "Other Name: %s" %(self.other_name)
print "Birthday: %s" %(self.birthday)
print "Gender: %s" %(self.gender)
print "Id: %s" % (self.person_id)
for email in self.emails:
email.print_out()
for phone in self.phones:
phone.print_out()
print self.note
print "---------------------------------"
def get_name(self):
name = self.first_name + " " + self.last_name
return name
class GetNameStrategy:
    """Interface for name-extraction strategies."""

    def get_name(self):
        """Return [first, last, other]; the base strategy knows no names."""
        return [''] * 3
class GetNameFromGoogleContactsStrategy(GetNameStrategy):
    """Extracts [first, last, other] from a Google Contacts entry.

    NOTE(review): assumes *contact* is a gdata contact entry whose name
    sub-fields serialise to XML elements -- confirm against the caller.
    """
    def __init__(self, contact):
        self.contact = contact
        self.first_name = ''
        self.last_name = ''
        self.other_name = ''
    def get_name(self):
        # Prefer given+family names, then either alone, then the full name.
        # ET.XML(str(...)) parses the field's XML serialisation and takes
        # the element's text content.
        if self.contact.name is not None:
            if self.contact.name.given_name is not None and self.contact.name.family_name is not None:
                self.first_name = ET.XML(str(self.contact.name.given_name)).text
                self.last_name = ET.XML(str(self.contact.name.family_name)).text
                #print "First: {0} | Last: {1}".format(self.first_name, self.last_name)
            elif self.contact.name.given_name is not None:
                self.first_name = ET.XML(str(self.contact.name.given_name)).text
                #print "Given Name: {0}".format(self.first_name)
            elif self.contact.name.family_name is not None:
                self.last_name = ET.XML(str(self.contact.name.family_name)).text
                #print "Family Name: {0}".format(self.last_name)
            elif self.contact.name.full_name is not None:
                self.other_name = str(self.contact.name.full_name)
                #print "Full Name: {0}".format(self.other_name)
            #else:
                #print "No name for this contact..."
        #else:
            #no name...
            #print "No name for this contact..."
        return [self.first_name, self.last_name, self.other_name]
class GetNameFromFacebookContactsStrategy(GetNameStrategy):
#@node The json representation of GraphAPI.get(id_entity)
def __init__(self, node):
self.node = node
self.first_name = ''
self.last_name = ''
self.other_name = ''
def get_name(self):
self.first_name = self.node["first_name"]
self.last_name = self.node["last_name"]
if "name" in self.node:
self.other_name = self.node["name"]
return [self.first_name,self.last_name, self.other_name] | true |
d5d0e70f7c0f99a3234915f4c5d62c7cc78b42a1 | Python | P450/MIT-6.0001x | /Notes/Lecture 8 - Exceptions and Assertions/exceptions.py | UTF-8 | 949 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 12:57:51 2018
@author: Jae You
"""
data = []
file_name = input("Provide a name of a file of data: ")
try:
fh = open(file_name, 'r')
except IOError:
print('cannot open', file_name)
else:
for line in fh:
if line != '\n':
addIt = line[:-1].split(',') # remove trailing \n
data.append(addIt)
finally:
fh.close() # close file even if fail
gradesData = []
if data:
for student in data:
try:
gradesData.append([student[0:2], [student[2]]])
except IndexError:
gradesData.append([student[0:2], []])
#data
#Out[56]:
#[['first', 'last', '100'],
# ['jae', 'you', '90'],
# ['eric', 'grimsom', '80'],
# ['no', 'grad']]
#gradesData
#Out[57]:
#[[['first', 'last'], ['100']],
# [['jae', 'you'], ['90']],
# [['eric', 'grimsom'], ['80']],
# [['no', 'grad'], []]]
#gradesData[3][1]
#Out[63]: [] | true |
4684dc1c594a7ff5cc782158e09825d7c1164c65 | Python | hongyeon-kyeong/Algorithm | /Greedy/무지의_먹방_라이브.py | UTF-8 | 1,069 | 3.4375 | 3 | [] | no_license | '''
# 내 풀이
def find_food(food_times,i,n) :
count2 = 0
if i == n :
i = 0
while food_times[i] == 0 :
i += 1
count2 += 1
#print('i', i , 'count2', count2)
if count2 == n :
return -2
if i == n :
i = 0
return i
def solution(food_times, k) :
food = -1
count = 0
n = len(food_times)
while count < k :
food = find_food(food_times, food+1, n)
if food == -2 :
return -1
food_times[food] -= 1 # 먹었다.
count += 1 # 1초가 지났다.
print(count, food_times, food)
res = find_food(food_times, food+1,n)
return res+1
'''
import heapq
def solution(food_times, k) :
if sum(food_times) <= k :
return -1
q = []
for i in range(len(food_times)) :
heapq.heappush(q,(food_times[i],i+1))
sum_value = 0
previous = 0
length = len(food_times)
while sum_value + (q[0][0]-previous) * length <= k :
now = heapq.heappop(q)[0]
sum_value += (now-previous) * length
length -= 1
previous = now
result = sorted(q, key=lambda x : x[1])
return result[(k-sum_value)%length][1]
print(solution([7,8,3,3,2,2,2,2,2,2,2,2],35))
| true |
f48b0f490a4e0368ccc33911575cc6310ce82ae8 | Python | idhant96/diginvoice | /core/utils/utils.py | UTF-8 | 3,798 | 3.09375 | 3 | [
"MIT"
] | permissive | import json
import re
class Utils(object):
@classmethod
def formatter(cls, word):
word = cls.cleaner(word)
word = word.replace(' ', '')
word = word.replace('.', '')
word = word.replace('!', '')
return word
@classmethod
def get_true_gst(cls, company):
data = cls.get_data('pharma', 'pharma_list')
for key in data.keys():
if key == company:
return data[key]['GSTIN'], data[key]['DLNO']
return None
@classmethod
def check_gst_format(cls, word):
if re.findall(r'\d{2}[A-Z]{5}\d{4}[A-Z]{1}\d[Z]{1}[A-Z\d]{1}', word):
return ''.join(re.findall(r'\d{2}[A-Z]{5}\d{4}[A-Z]{1}\d[Z]{1}[A-Z\d]{1}', word))
else:
return None
@classmethod
def change_gst_letters(cls, subtext):
final = ''
try:
subpart = subtext[0:2]
if not subpart.isdigit():
final = final + cls.to_digits(subpart)
else:
final = final + subpart
subpart = subtext[2:7]
if not subpart.isalpha():
final = final + cls.to_alphabets(subpart)
else:
final = final + subpart
subpart = subtext[7:11]
if not subpart.isdigit():
final = final + cls.to_digits(subpart)
else:
final = final + subpart
subpart = subtext[11]
if not subpart.isalpha():
final = final + cls.to_alphabets(subpart)
else:
final = final + subpart
subpart = subtext[12]
if not subpart.isdigit():
final = final + cls.to_digits(subpart)
else:
final = final + subpart
subpart = subtext[13]
if subpart is not 'Z':
final = final + 'Z'
else:
final = final + subpart
final = final + subtext[14].upper()
# print(final)
return final
except IndexError:
# print('Exception Handled')
return final
@classmethod
def to_alphabets(cls, numbers):
final = ''
for number in numbers:
if number.isalpha():
final = final + number
elif number == '1':
final = final + 'I'
elif number == '2':
final = final + 'Z'
elif number == '5':
final = final + 'S'
elif number == '8':
final = final + 'B'
elif number == '0':
final = final + 'O'
else:
final = final + number
return final
@classmethod
def to_digits(cls, characters):
final = ''
for character in characters:
if character.islower():
character = character.upper()
if character.isdigit():
final = final + character
elif character == 'B':
final = final + '8'
elif character == 'S':
final = final + '5'
elif character == 'O':
final = final + '0'
elif character == 'I':
final = final + '1'
elif character == 'Z':
final = final + '2'
else:
final = final + character
return final
@classmethod
def get_data(cls, file_name, obj):
with open('{}.json'.format(file_name)) as fh:
data = json.load(fh)
return data['{}'.format(obj)]
@classmethod
def cleaner(cls, st):
st = st.encode('ascii', 'ignore').decode('utf-8')
return re.sub(r'[(?|$|,+''"”*#:|!)]', r'', st)
| true |
97d3d71f261d6d40915d9c709d71904a15772b3f | Python | wdkang123/PythonOffice | /34-html_read/html_read.py | UTF-8 | 386 | 2.90625 | 3 | [
"MIT"
] | permissive | import requests
url = "https://www.baidu.com"
# 通过get请求
r = requests.get(url)
# 输出服务器状态码
print(r.status_code)
# 输出返回的文本
with open("baidu.html", "w", encoding="utf-8") as f:
f.write(r.text)
print("===================")
# 通过post方法
data = {
"username": "123",
"password": "123"
}
r = requests.post(url, data=data)
print(r.text)
| true |
f15cd474919e6851f54b465b0bb8d299c3114321 | Python | Aliendood/work-training | /week1/day2-intro_python/lcm.py | UTF-8 | 325 | 3.515625 | 4 | [] | no_license | """ find least common multiple """
from divisors import factors
def lcm(n,i):
f_num = factors(n*i)
for k in f_num:
if k >= max(n,i) and k % i == 0 and k % n == 0:
return k
if __name__ == "__main__":
x,y = int(input("enter first number ")),int(input("enter second number "))
print(lcm(x,y))
| true |
2a292e5b7663d7c045e06efd05096649b0cb4974 | Python | ngc92/DeepReinforcementLearning | /tfdeeprl/agent.py | UTF-8 | 2,920 | 2.5625 | 3 | [] | no_license | import copy
import tempfile
from typing import Tuple, Dict
import gym
import gym.spaces
import tensorflow as tf
from .builder import AgentBuilder
from .helpers.rollout_ops import rollout
class Agent:
"""
This is a much simplified version of the tf.estimator.Estimator for reinforcement
learning gym environments.
"""
def __init__(self, builder: AgentBuilder, model_dir=None, params=None):
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
tf.logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if not isinstance(builder, AgentBuilder):
raise TypeError('builder must be an AgentBuilder, got {}.'.format(builder))
self._builder = builder
self._params = copy.deepcopy(params) or {}
@property
def model_dir(self):
return self._model_dir
@property
def params(self):
return copy.deepcopy(self._params)
def build_training_graph(self, env: gym.Env) -> Tuple[Dict, tf.Graph]:
with tf.Graph().as_default() as g:
def explore_fn(observation):
act_spec = self._builder.explore(observation, params=self.params)
return act_spec.actions
def record_fn(state, action, reward, terminal, state_tp1):
data = {"observation": state,
"action": action,
"reward": reward,
"terminal": terminal,
"next_observation": state_tp1
}
spec = self._builder.train(data, params=self.params)
return spec.train_op
rollout_op = rollout(env, explore_fn, record_fn)
result = {"return": rollout_op[0], "duration": rollout_op[1]}
return result, g
def train(self, env: gym.Env, max_episodes: int):
returns = []
durations = []
result_op, g = self.build_training_graph(env)
with g.as_default():
# ensure we have a global step
global_step = tf.train.get_or_create_global_step()
# TODO make this frequency configurable
episode_logger = tf.train.LoggingTensorHook(result_op, every_n_iter=10)
checkpoint_saver = tf.train.CheckpointSaverHook(checkpoint_dir=self.model_dir, save_secs=60)
with tf.train.SingularMonitoredSession(checkpoint_dir=self.model_dir,
hooks=[episode_logger, checkpoint_saver]) as session:
for i in range(max_episodes):
# get action
result_val = session.run(result_op)
returns.append(result_val["return"])
durations.append(result_val["duration"])
return returns, durations
| true |
01244ed4fb575353987340d5fa07ebe5a5f0c2b1 | Python | Profix11pppo/Python_DONSTU | /Python Пройденное/second.py | UTF-8 | 545 | 3.578125 | 4 | [] | no_license | import math
while True:
a=input('Введите выражение ')
if '+' in a:
a=a.split('+')
first=a[0]
ch=int(first)
second=a[1]
nh=int(second)
print(ch+nh)
elif '*' in a:
a=a.split('*')
first=a[0]
ch=int(first)
second=a[1]
nh=int(second)
print(ch*nh)
elif '/' in a:
a=a.split('/')
first=a[0]
ch=int(first)
second=a[1]
nh=int(second)
print(ch/nh)
elif '-' in a:
a=a.split('-')
first=a[0]
ch=int(first)
second=a[1]
nh=int(second)
print(ch-nh)
| true |
a99e3b2e307710ab9fa1cf8a2d9072fe5adbd733 | Python | asgards1990/webmining2013 | /crawling/scripts/script_languages.py | UTF-8 | 16,358 | 2.625 | 3 | [
"MIT"
] | permissive | # encoding: utf-8
# THIS SCRIPT FILLS ALL LANGUAGE ENTRIES IN THE DATABASE
from BeautifulSoup import BeautifulSoup
from cinema.models import *
import re
input = """
<table class="splash">
<tr>
<td><a href="/language/ar">Arabic</a></td>
<td><a href="/language/bg">Bulgarian</a></td>
<td><a href="/language/zh">Chinese</a></td>
<td><a href="/language/hr">Croatian</a></td>
</tr>
<tr>
<td><a href="/language/nl">Dutch</a></td>
<td><a href="/language/en">English</a></td>
<td><a href="/language/fi">Finnish</a></td>
<td><a href="/language/fr">French</a></td>
</tr>
<tr>
<td><a href="/language/de">German</a></td>
<td><a href="/language/el">Greek</a></td>
<td><a href="/language/he">Hebrew</a></td>
<td><a href="/language/hi">Hindi</a></td>
</tr>
<tr>
<td><a href="/language/hu">Hungarian</a></td>
<td><a href="/language/is">Icelandic</a></td>
<td><a href="/language/it">Italian</a></td>
<td><a href="/language/ja">Japanese</a></td>
</tr>
<tr>
<td><a href="/language/ko">Korean</a></td>
<td><a href="/language/no">Norwegian</a></td>
<td><a href="/language/fa">Persian</a></td>
<td><a href="/language/pl">Polish</a></td>
</tr>
<tr>
<td><a href="/language/pt">Portuguese</a></td>
<td><a href="/language/pa">Punjabi</a></td>
<td><a href="/language/ro">Romanian</a></td>
<td><a href="/language/ru">Russian</a></td>
</tr>
<tr>
<td><a href="/language/es">Spanish</a></td>
<td><a href="/language/sv">Swedish</a></td>
<td><a href="/language/tr">Turkish</a></td>
<td><a href="/language/uk">Ukrainian</a></td>
</tr>
</table>
<h2>Less-Common Languages</h2>
<table class="splash">
<tr>
<td><a href="/language/ab">Abkhazian</a></td>
<td><a href="/language/qac">Aboriginal</a></td>
<td><a href="/language/guq">Aché</a></td>
<td><a href="/language/qam">Acholi</a></td>
</tr>
<tr>
<td><a href="/language/af">Afrikaans</a></td>
<td><a href="/language/qas">Aidoukrou</a></td>
<td><a href="/language/ak">Akan</a></td>
<td><a href="/language/sq">Albanian</a></td>
</tr>
<tr>
<td><a href="/language/alg">Algonquin</a></td>
<td><a href="/language/ase">American Sign Language</a></td>
<td><a href="/language/am">Amharic</a></td>
<td><a href="/language/apa">Apache languages</a></td>
</tr>
<tr>
<td><a href="/language/an">Aragonese</a></td>
<td><a href="/language/arc">Aramaic</a></td>
<td><a href="/language/arp">Arapaho</a></td>
<td><a href="/language/hy">Armenian</a></td>
</tr>
<tr>
<td><a href="/language/as">Assamese</a></td>
<td><a href="/language/aii">Assyrian Neo-Aramaic</a></td>
<td><a href="/language/ath">Athapascan languages</a></td>
<td><a href="/language/asf">Australian Sign Language</a></td>
</tr>
<tr>
<td><a href="/language/awa">Awadhi</a></td>
<td><a href="/language/ay">Aymara</a></td>
<td><a href="/language/az">Azerbaijani</a></td>
<td><a href="/language/ast">Bable</a></td>
</tr>
<tr>
<td><a href="/language/qbd">Baka</a></td>
<td><a href="/language/ban">Balinese</a></td>
<td><a href="/language/bm">Bambara</a></td>
<td><a href="/language/eu">Basque</a></td>
</tr>
<tr>
<td><a href="/language/bsc">Bassari</a></td>
<td><a href="/language/be">Belarusian</a></td>
<td><a href="/language/bem">Bemba</a></td>
<td><a href="/language/bn">Bengali</a></td>
</tr>
<tr>
<td><a href="/language/ber">Berber languages</a></td>
<td><a href="/language/bho">Bhojpuri</a></td>
<td><a href="/language/qbi">Bicolano</a></td>
<td><a href="/language/qbh">Bodo</a></td>
</tr>
<tr>
<td><a href="/language/bs">Bosnian</a></td>
<td><a href="/language/bzs">Brazilian Sign Language</a></td>
<td><a href="/language/br">Breton</a></td>
<td><a href="/language/bfi">British Sign Language</a></td>
</tr>
<tr>
<td><a href="/language/my">Burmese</a></td>
<td><a href="/language/yue">Cantonese</a></td>
<td><a href="/language/ca">Catalan</a></td>
<td><a href="/language/km">Central Khmer</a></td>
</tr>
<tr>
<td><a href="/language/qax">Chaozhou</a></td>
<td><a href="/language/ce">Chechen</a></td>
<td><a href="/language/chr">Cherokee</a></td>
<td><a href="/language/chy">Cheyenne</a></td>
</tr>
<tr>
<td><a href="/language/hne">Chhattisgarhi</a></td>
<td><a href="/language/kw">Cornish</a></td>
<td><a href="/language/co">Corsican</a></td>
<td><a href="/language/cr">Cree</a></td>
</tr>
<tr>
<td><a href="/language/mus">Creek</a></td>
<td><a href="/language/qal">Creole</a></td>
<td><a href="/language/crp">Creoles and pidgins</a></td>
<td><a href="/language/cro">Crow</a></td>
</tr>
<tr>
<td><a href="/language/cs">Czech</a></td>
<td><a href="/language/da">Danish</a></td>
<td><a href="/language/prs">Dari</a></td>
<td><a href="/language/dso">Desiya</a></td>
</tr>
<tr>
<td><a href="/language/din">Dinka</a></td>
<td><a href="/language/qaw">Djerma</a></td>
<td><a href="/language/doi">Dogri</a></td>
<td><a href="/language/dyu">Dyula</a></td>
</tr>
<tr>
<td><a href="/language/dz">Dzongkha</a></td>
<td><a href="/language/qbc">East-Greenlandic</a></td>
<td><a href="/language/frs">Eastern Frisian</a></td>
<td><a href="/language/egy">Egyptian (Ancient)</a></td>
</tr>
<tr>
<td><a href="/language/eo">Esperanto</a></td>
<td><a href="/language/et">Estonian</a></td>
<td><a href="/language/ee">Ewe</a></td>
<td><a href="/language/qbg">Faliasch</a></td>
</tr>
<tr>
<td><a href="/language/fo">Faroese</a></td>
<td><a href="/language/fil">Filipino</a></td>
<td><a href="/language/qbn">Flemish</a></td>
<td><a href="/language/fon">Fon</a></td>
</tr>
<tr>
<td><a href="/language/fsl">French Sign Language</a></td>
<td><a href="/language/ff">Fulah</a></td>
<td><a href="/language/fvr">Fur</a></td>
<td><a href="/language/gd">Gaelic</a></td>
</tr>
<tr>
<td><a href="/language/gl">Galician</a></td>
<td><a href="/language/ka">Georgian</a></td>
<td><a href="/language/gsg">German Sign Language</a></td>
<td><a href="/language/grb">Grebo</a></td>
</tr>
<tr>
<td><a href="/language/grc">Greek, Ancient (to 1453)</a></td>
<td><a href="/language/kl">Greenlandic</a></td>
<td><a href="/language/gn">Guarani</a></td>
<td><a href="/language/gu">Gujarati</a></td>
</tr>
<tr>
<td><a href="/language/gnn">Gumatj</a></td>
<td><a href="/language/gup">Gunwinggu</a></td>
<td><a href="/language/ht">Haitian</a></td>
<td><a href="/language/hak">Hakka</a></td>
</tr>
<tr>
<td><a href="/language/bgc">Haryanvi</a></td>
<td><a href="/language/qav">Hassanya</a></td>
<td><a href="/language/ha">Hausa</a></td>
<td><a href="/language/haw">Hawaiian</a></td>
</tr>
<tr>
<td><a href="/language/hmn">Hmong</a></td>
<td><a href="/language/qab">Hokkien</a></td>
<td><a href="/language/hop">Hopi</a></td>
<td><a href="/language/iba">Iban</a></td>
</tr>
<tr>
<td><a href="/language/qag">Ibo</a></td>
<td><a href="/language/icl">Icelandic Sign Language</a></td>
<td><a href="/language/ins">Indian Sign Language</a></td>
<td><a href="/language/id">Indonesian</a></td>
</tr>
<tr>
<td><a href="/language/iu">Inuktitut</a></td>
<td><a href="/language/ik">Inupiaq</a></td>
<td><a href="/language/ga">Irish Gaelic</a></td>
<td><a href="/language/jsl">Japanese Sign Language</a></td>
</tr>
<tr>
<td><a href="/language/dyo">Jola-Fonyi</a></td>
<td><a href="/language/ktz">Ju'hoan</a></td>
<td><a href="/language/qbf">Kaado</a></td>
<td><a href="/language/kea">Kabuverdianu</a></td>
</tr>
<tr>
<td><a href="/language/kab">Kabyle</a></td>
<td><a href="/language/xal">Kalmyk-Oirat</a></td>
<td><a href="/language/kn">Kannada</a></td>
<td><a href="/language/kpj">Karajá</a></td>
</tr>
<tr>
<td><a href="/language/mjw">Karbi</a></td>
<td><a href="/language/kar">Karen</a></td>
<td><a href="/language/kk">Kazakh</a></td>
<td><a href="/language/kca">Khanty</a></td>
</tr>
<tr>
<td><a href="/language/kha">Khasi</a></td>
<td><a href="/language/ki">Kikuyu</a></td>
<td><a href="/language/rw">Kinyarwanda</a></td>
<td><a href="/language/qar">Kirundi</a></td>
</tr>
<tr>
<td><a href="/language/tlh">Klingon</a></td>
<td><a href="/language/kfa">Kodava</a></td>
<td><a href="/language/kok">Konkani</a></td>
<td><a href="/language/kvk">Korean Sign Language</a></td>
</tr>
<tr>
<td><a href="/language/khe">Korowai</a></td>
<td><a href="/language/qaq">Kriolu</a></td>
<td><a href="/language/kro">Kru</a></td>
<td><a href="/language/kyw">Kudmali</a></td>
</tr>
<tr>
<td><a href="/language/qbb">Kuna</a></td>
<td><a href="/language/ku">Kurdish</a></td>
<td><a href="/language/kwk">Kwakiutl</a></td>
<td><a href="/language/ky">Kyrgyz</a></td>
</tr>
<tr>
<td><a href="/language/lbj">Ladakhi</a></td>
<td><a href="/language/lad">Ladino</a></td>
<td><a href="/language/lo">Lao</a></td>
<td><a href="/language/la">Latin</a></td>
</tr>
<tr>
<td><a href="/language/lv">Latvian</a></td>
<td><a href="/language/lif">Limbu</a></td>
<td><a href="/language/ln">Lingala</a></td>
<td><a href="/language/lt">Lithuanian</a></td>
</tr>
<tr>
<td><a href="/language/nds">Low German</a></td>
<td><a href="/language/lb">Luxembourgish</a></td>
<td><a href="/language/mk">Macedonian</a></td>
<td><a href="/language/qbm">Macro-Jê</a></td>
</tr>
<tr>
<td><a href="/language/mag">Magahi</a></td>
<td><a href="/language/mai">Maithili</a></td>
<td><a href="/language/mg">Malagasy</a></td>
<td><a href="/language/ms">Malay</a></td>
</tr>
<tr>
<td><a href="/language/ml">Malayalam</a></td>
<td><a href="/language/pqm">Malecite-Passamaquoddy</a></td>
<td><a href="/language/qap">Malinka</a></td>
<td><a href="/language/mt">Maltese</a></td>
</tr>
<tr>
<td><a href="/language/mnc">Manchu</a></td>
<td><a href="/language/cmn">Mandarin</a></td>
<td><a href="/language/man">Mandingo</a></td>
<td><a href="/language/mni">Manipuri</a></td>
</tr>
<tr>
<td><a href="/language/mi">Maori</a></td>
<td><a href="/language/arn">Mapudungun</a></td>
<td><a href="/language/mr">Marathi</a></td>
<td><a href="/language/mh">Marshallese</a></td>
</tr>
<tr>
<td><a href="/language/mas">Masai</a></td>
<td><a href="/language/mls">Masalit</a></td>
<td><a href="/language/myn">Maya</a></td>
<td><a href="/language/men">Mende</a></td>
</tr>
<tr>
<td><a href="/language/mic">Micmac</a></td>
<td><a href="/language/enm">Middle English</a></td>
<td><a href="/language/nan">Min Nan</a></td>
<td><a href="/language/min">Minangkabau</a></td>
</tr>
<tr>
<td><a href="/language/mwl">Mirandese</a></td>
<td><a href="/language/lus">Mizo</a></td>
<td><a href="/language/moh">Mohawk</a></td>
<td><a href="/language/mn">Mongolian</a></td>
</tr>
<tr>
<td><a href="/language/moe">Montagnais</a></td>
<td><a href="/language/qaf">More</a></td>
<td><a href="/language/mfe">Morisyen</a></td>
<td><a href="/language/qbl">Nagpuri</a></td>
</tr>
<tr>
<td><a href="/language/nah">Nahuatl</a></td>
<td><a href="/language/qba">Nama</a></td>
<td><a href="/language/nv">Navajo</a></td>
<td><a href="/language/nbf">Naxi</a></td>
</tr>
<tr>
<td><a href="/language/nd">Ndebele</a></td>
<td><a href="/language/nap">Neapolitan</a></td>
<td><a href="/language/yrk">Nenets</a></td>
<td><a href="/language/ne">Nepali</a></td>
</tr>
<tr>
<td><a href="/language/ncg">Nisga'a</a></td>
<td><a href="/language/zxx">None</a></td>
<td><a href="/language/non">Norse, Old</a></td>
<td><a href="/language/nai">North American Indian</a></td>
</tr>
<tr>
<td><a href="/language/qbk">Nushi</a></td>
<td><a href="/language/nyk">Nyaneka</a></td>
<td><a href="/language/ny">Nyanja</a></td>
<td><a href="/language/oc">Occitan</a></td>
</tr>
<tr>
<td><a href="/language/oj">Ojibwa</a></td>
<td><a href="/language/qaz">Ojihimba</a></td>
<td><a href="/language/ang">Old English</a></td>
<td><a href="/language/or">Oriya</a></td>
</tr>
<tr>
<td><a href="/language/pap">Papiamento</a></td>
<td><a href="/language/qaj">Parsee</a></td>
<td><a href="/language/ps">Pashtu</a></td>
<td><a href="/language/paw">Pawnee</a></td>
</tr>
<tr>
<td><a href="/language/qai">Peul</a></td>
<td><a href="/language/qah">Polynesian</a></td>
<td><a href="/language/fuf">Pular</a></td>
<td><a href="/language/tsz">Purepecha</a></td>
</tr>
<tr>
<td><a href="/language/qu">Quechua</a></td>
<td><a href="/language/qya">Quenya</a></td>
<td><a href="/language/raj">Rajasthani</a></td>
<td><a href="/language/qbj">Rawan</a></td>
</tr>
<tr>
<td><a href="/language/rm">Romansh</a></td>
<td><a href="/language/rom">Romany</a></td>
<td><a href="/language/rtm">Rotuman</a></td>
<td><a href="/language/rsl">Russian Sign Language</a></td>
</tr>
<tr>
<td><a href="/language/qao">Ryukyuan</a></td>
<td><a href="/language/qae">Saami</a></td>
<td><a href="/language/sm">Samoan</a></td>
<td><a href="/language/sa">Sanskrit</a></td>
</tr>
<tr>
<td><a href="/language/sc">Sardinian</a></td>
<td><a href="/language/qay">Scanian</a></td>
<td><a href="/language/sr">Serbian</a></td>
<td><a href="/language/qbo">Serbo-Croatian</a></td>
</tr>
<tr>
<td><a href="/language/srr">Serer</a></td>
<td><a href="/language/qad">Shanghainese</a></td>
<td><a href="/language/qau">Shanxi</a></td>
<td><a href="/language/sn">Shona</a></td>
</tr>
<tr>
<td><a href="/language/shh">Shoshoni</a></td>
<td><a href="/language/scn">Sicilian</a></td>
<td><a href="/language/sjn">Sindarin</a></td>
<td><a href="/language/sd">Sindhi</a></td>
</tr>
<tr>
<td><a href="/language/si">Sinhala</a></td>
<td><a href="/language/sio">Sioux</a></td>
<td><a href="/language/sk">Slovak</a></td>
<td><a href="/language/sl">Slovenian</a></td>
</tr>
<tr>
<td><a href="/language/so">Somali</a></td>
<td><a href="/language/son">Songhay</a></td>
<td><a href="/language/snk">Soninke</a></td>
<td><a href="/language/wen">Sorbian languages</a></td>
</tr>
<tr>
<td><a href="/language/st">Sotho</a></td>
<td><a href="/language/qbe">Sousson</a></td>
<td><a href="/language/ssp">Spanish Sign Language</a></td>
<td><a href="/language/srn">Sranan</a></td>
</tr>
<tr>
<td><a href="/language/sw">Swahili</a></td>
<td><a href="/language/gsw">Swiss German</a></td>
<td><a href="/language/syl">Sylheti</a></td>
<td><a href="/language/tl">Tagalog</a></td>
</tr>
<tr>
<td><a href="/language/tg">Tajik</a></td>
<td><a href="/language/tmh">Tamashek</a></td>
<td><a href="/language/ta">Tamil</a></td>
<td><a href="/language/tac">Tarahumara</a></td>
</tr>
<tr>
<td><a href="/language/tt">Tatar</a></td>
<td><a href="/language/te">Telugu</a></td>
<td><a href="/language/qak">Teochew</a></td>
<td><a href="/language/th">Thai</a></td>
</tr>
<tr>
<td><a href="/language/bo">Tibetan</a></td>
<td><a href="/language/qan">Tigrigna</a></td>
<td><a href="/language/tli">Tlingit</a></td>
<td><a href="/language/tpi">Tok Pisin</a></td>
</tr>
<tr>
<td><a href="/language/to">Tonga (Tonga Islands)</a></td>
<td><a href="/language/ts">Tsonga</a></td>
<td><a href="/language/tsc">Tswa</a></td>
<td><a href="/language/tn">Tswana</a></td>
</tr>
<tr>
<td><a href="/language/tcy">Tulu</a></td>
<td><a href="/language/tup">Tupi</a></td>
<td><a href="/language/tk">Turkmen</a></td>
<td><a href="/language/tyv">Tuvinian</a></td>
</tr>
<tr>
<td><a href="/language/tzo">Tzotzil</a></td>
<td><a href="/language/qat">Ungwatsi</a></td>
<td><a href="/language/ur">Urdu</a></td>
<td><a href="/language/uz">Uzbek</a></td>
</tr>
<tr>
<td><a href="/language/vi">Vietnamese</a></td>
<td><a href="/language/qaa">Visayan</a></td>
<td><a href="/language/was">Washoe</a></td>
<td><a href="/language/cy">Welsh</a></td>
</tr>
<tr>
<td><a href="/language/wo">Wolof</a></td>
<td><a href="/language/xh">Xhosa</a></td>
<td><a href="/language/sah">Yakut</a></td>
<td><a href="/language/yap">Yapese</a></td>
</tr>
<tr>
<td><a href="/language/yi">Yiddish</a></td>
<td><a href="/language/yo">Yoruba</a></td>
<td><a href="/language/zu">Zulu</a></td>
</table>
"""
soup = BeautifulSoup(input)
countries = []
tables = soup.findAll('table')
for table in tables:
trs = soup.findAll('tr')
for tr in trs:
all_a = tr.findAll('a')
for item in all_a:
name = item.string
url = item['href']
country = [name,url[10:]]
countries.append(country)
for country in countries:
Language.objects.create(name=country[0], identifier=country[1])
| true |
ba34563a1383be246d455d0a35bf150f1397cbce | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2136/60624/275974.py | UTF-8 | 381 | 3.078125 | 3 | [] | no_license | import math
def func23():
n = int(input())
n_max = int(math.floor(math.log(n+1)/math.log(2)))-1
flag = True
for i in range(n_max,1,-1):
j = int(max(math.floor(math.pow(n,1/i)),2))
k = (pow(j,i+1)-1)/(j-1)
if abs(n-k) < 0.001:
flag = False
print(j)
break
if flag:
print(n-1)
return
func23() | true |
1d0ddf091a5eb176753d15066335642f6733adc5 | Python | zjwyx/python | /day24/1.内容回顾.py | UTF-8 | 1,672 | 4.1875 | 4 | [] | no_license |
# 组合:一个类的对象作为另一个类对象的属性
from math import pi
class Circle:
def __init__(self,r):
self.r = r
def area(self):
return pi*self.r*self.r
def perimeter(self):
return 2*pi*self.r
class Ring:
# 一个类的对象成为另一个类对象的属性
# Circle() 实例化对象
def __init__(self,outer_r,inner_r):
outer_r,inner_r = (outer_r,inner_r) if outer_r > inner_r else (inner_r,outer_r)
self.out_c = Circle(outer_r)
self.in_c = Circle(inner_r)
def area(self):
return self.out_c.area() - self.in_c.area()
def perimeter(self):
return self.out_c.perimeter() + self.in_c.perimeter()
r = Ring(10,8)
# 1.传递的半径大小顺序问题
# 2.为什么要用组合
# 程序里面有两个需求:和圆形相关 和环形相关 求环形相关的内部的时候用到了圆形的公式
# 圆柱形类 圆锥形类
# 命名空间
# 在类的命名空间中:静态变量 绑定方法
# 在对象的命名空间里:类指针 对象的属性(实例变量)
# 调用的习惯
# 类名.静态方法
# 对象 静态变量(对象不能调用静态变量,不能对静态变量进行赋值操作 对象.静态变量 += 1174)
# 绑定方法
# 对象.绑定方法() ==> 类名.绑定方法(对象)
# 对象.实例变量
# 组合
# 一个类的对象是另一个类对象的属性
# 两个类之间 有什么有什么的关系:班级有学生 学生有班级 班级有课程 图书有作者 学生有成绩
# 学生 和 课程
# 班级 和 课程
# 圆形 和 圆环 | true |
9b0746328bbcd148b9c40443d8fda91a6b281a5b | Python | rdonalpaul/music-quiz-website | /comments.py | UTF-8 | 2,012 | 2.671875 | 3 | [] | no_license | #!/usr/local/bin/python3
from cgitb import enable
enable()
def show_comments():
from cgi import FieldStorage
import pymysql as db
from os import environ
user_comments = ''
url = environ.get('SCRIPT_NAME')
try:
connection = db.connect('cs1.ucc.ie','dr13','chujohqu','csdipact2017_dr13')
cursor = connection.cursor(db.cursors.DictCursor)
form_data = FieldStorage()
if len(form_data) != 0:
username = form_data.getfirst('username')
new_comment = form_data.getfirst('new_comment')
cursor.execute("""INSERT INTO user_comments (username, url, comment)
VALUES (%s, %s, %s)""", (username, url, new_comment))
connection.commit()
cursor.execute("""SELECT * FROM user_comments
WHERE url = %s
ORDER BY comment_id DESC""", (url))
for row in cursor.fetchall():
user_comments += '<article><h1>%s</h1><p>%s</p></article>' % (row['username'], row['comment'])
cursor.close()
connection.close()
except db.Error:
user_comments = '<p>User Comments cannot be displayed. Please try again later.</p>'
return """
<section>
<h1>Did you like the Quiz? Have you ideas for a quiz?</h1>
<form action="%s" method="post">
<fieldset>
<legend>Tell us below</legend>
<label for="username">Name:</label>
<input type="text" name="username" id="username" />
<label for="new_comment">Comment:</label>
<textarea name="new_comment" id="new_comment" rows="5" cols="50">
</textarea>
<input type="submit" id="x"/>
</fieldset>
</form>
</section>
<section id="comments">
<h1>User Comments</h1>
%s
</section>""" % (url, user_comments)
| true |
59858d9a06ea23292b3c70398f61e892ea088874 | Python | gkeeth/aoc19 | /01/01.py | UTF-8 | 1,298 | 3.625 | 4 | [
"MIT"
] | permissive | #! /usr/bin/env python
from __future__ import print_function
import argparse
def get_module_masses(massfile):
"""Parse input file to get masses of each module.
Input file has the mass of each module on its own line.
Return list of module masses.
"""
module_masses = []
with open(massfile, "r") as infile:
module_masses = [int(line) for line in infile]
return module_masses
def calculate_module_fuel(mass):
"""calculate fuel required for module, based on module's mass.
Also calculates the additional fuel required for the fuel itself."""
def fuel_by_mass(m):
return (m // 3) - 2 # // is floor division
fuel = fuel_by_mass(mass)
if fuel > 0:
return fuel + calculate_module_fuel(fuel)
else:
return 0
def calculate_total_fuel(module_fuels):
"""sum up module fuels."""
total_fuel = sum(module_fuels)
return total_fuel
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input file containing module masses")
args = parser.parse_args()
module_masses = get_module_masses(args.input)
module_fuels = [calculate_module_fuel(mass) for mass in module_masses]
total_fuel = calculate_total_fuel(module_fuels)
print(total_fuel)
| true |
2cdb23ae591cc9582dd004632b4ba492b5c2faa1 | Python | Stektpotet/it3105-mcts | /games/nim.py | UTF-8 | 3,403 | 3.671875 | 4 | [] | no_license | from typing import List
import numpy as np
from game import Game
# https://en.wikipedia.org/wiki/Nim
# Nim parameters
# collections/heaps of stones
# Min & Max stones removed
# Simple version of Nim:
# N: number of stones on the board
# K: max number of stones a player can take of the board
# MIN: 1 :: the minimum number of stones that can be removed in a turn
# Rules:
# The 2 players take turns removing n stones (MIN <= n <= K && n <= N)
# the player who takes the last stone wins.
# THE SYSTEM SHOULD BE ABLE TO PLAY WITH ANY VALUE FOR N & K, WHERE 1 < K < N < 100
class Nim(Game):
    """Simple Nim: N stones on the board; each turn a player removes
    between ``min_move`` and ``max_move`` stones (never more than remain),
    and the player who takes the last stone wins.

    Player bookkeeping (``player_start_mode``, ``_player1_starts``,
    ``_player1s_turn``, ``_swap_player``) comes from the ``Game`` base class.
    """
    def clone(self):
        # NOTE(review): ``self._max_move`` (a property = min(stones, max)) is
        # passed where the constructor expects the raw per-turn maximum.
        # Because the stone count only ever decreases this yields the same
        # legal moves, but the clone's ``_initial_stone_count`` records the
        # *current* stone count, not the original start -- confirm that field
        # is never relied on for clones.
        clone = Nim(self.player_start_mode, self._stones, self._max_move, self._min)
        clone._player1_starts = self._player1_starts
        clone._player1s_turn = self._player1s_turn
        return clone
    def __init__(self, player_start_mode: int, stones: int, max_move: int, min_move: int = 1):
        Game.__init__(self, player_start_mode)
        # Stored before clamping, so it may be < 1 while _stones is clamped.
        self._initial_stone_count = stones
        if stones < 1:
            # The f-string has no placeholders; the message is constant.
            print(f"Too few, or negative amount of stones, clamping to 1!")
            stones = 1
        self._stones = np.int32(stones)
        self._max = np.int32(max_move)  # requested per-turn maximum (K)
        self._min = np.int32(min_move)  # per-turn minimum (MIN)
        # Cached list for the common case -- see allowed_moves().
        self._starting_allowed_moves = list(np.arange(1, self._max_move + 1))
        self._last_state = self._stones  # stones before the latest move (for print_move)
    # ================= PROPERTIES =================
    @property
    def completed(self) -> bool:
        """
        :return: game completion state:
            true = done, false = not done
        """
        return self._stones == 0
    # @property
    # def initial_stone_count(self) -> int:
    #     return self._initial_stone_count
    #
    # @property
    # def stones_left(self) -> int:
    #     return self._stones
    @property
    def _max_move(self) -> np.int32:
        """
        :return: the maximum number of stones that can currently
            be taken given the number of remaining stones
        """
        return min(self._stones, self._max)
    # ================= METHODS =================
    def get_state(self) -> (bool, bytes):
        # One byte suffices: the module contract guarantees N < 100 < 256.
        return self.player1s_turn, self._stones.item().to_bytes(1, byteorder="big")
    def allowed_moves(self) -> List:
        """
        Get all the allowed moves at this point
        :return: (MIN <= n <= K && n <= N)
        """
        # minor optimization to avoid reallocation of new arrays every turn
        if self._stones < self._max:
            return list(np.arange(1, self._max_move + 1))
        return self._starting_allowed_moves
    def apply_move(self, move: int) -> None:
        self._last_state = self._stones
        # Apply the move
        if move < self._min or move > self._max_move:
            # NOTE(review): exit() terminates the whole process on an illegal
            # move; raising an exception would be friendlier to callers.
            exit(f"Illegal move performed! tried to remove {move}, "
                 f"when the constraints were ({self._min} <= {move} <= {self._max_move})!")
        self._stones -= move
        # End turn, next player's turn
        self._swap_player()
    def print_move(self, move) -> None:
        # This is inverted because the player was just swapped
        print(f"{'Player 2' if self._player1s_turn else 'Player 1'}:\n"
              f"taking {move} of {self._last_state} stones, {self._stones} stones remain...\n")
    def __repr__(self):
        return f"Stones left: {self._stones}\nselection range: {self._min} - {self._max_move}"
| true |
9bd5fcac0d54c6da1480d77d4b8cf13a082434c2 | Python | ctiede/eccentricity_study_supp | /plot_edot_ldot.py | UTF-8 | 4,535 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
import numpy as np
import h5py
import matplotlib as mpl
import matplotlib.pyplot as plt
import figures as figs
import scipy.signal as sps
# Global plot styling: serif fonts rendered through LaTeX.
plt.rc('font', family='serif')
mpl.rcParams['text.usetex'] = True
# Named colors as RGB triples normalized to [0, 1].
red = [237/255, 102/255, 93/255]
blue = [114/255, 158/255, 206/255]
purp = [123/255, 102/255, 210/255]
green = [105/255, 183/255, 100/255]
orange = [255/255, 187/255, 120/255]
babyblu = [174/255, 199/255, 232/255]
babyred = [255/255, 152/255, 150/255]
t_nu = 1e3  # only used in the commented-out edot plot; presumably a viscous time scale -- TODO confirm
rbin = 32. / 1000  # divisor applied to the smoothed curves; presumably the radial bin width -- TODO confirm
def E(e, M=1.0, a=1.0, mu=0.25):
    """Orbital energy -mu*M/(2a); independent of *e*, which is kept in the
    signature for symmetry with L()."""
    energy = -mu * M / 2. / a
    return energy
def L(e, M=1.0, a=1.0, mu=0.25):
    """Orbital angular momentum mu*sqrt(M*a*(1 - e^2))."""
    angular_momentum = np.sqrt(M * a * (1 - e * e)) * mu
    return angular_momentum
def midpoint(x):
    """Midpoints of consecutive entries of array *x* (length len(x) - 1)."""
    left, right = x[:-1], x[1:]
    return (left + right) / 2
def smooth(A):
    """Three-point moving average of *A* with the two endpoints kept fixed."""
    arr = np.array(A)
    interior = (arr[2:] + arr[1:-1] + arr[:-2]) / 3
    return np.array([arr[0]] + list(interior) + [arr[-1]])
def smooth_savgol(A, window_size=5, polyorder=3):
    """Savitzky-Golay smoothing of *A* (window must be odd and > polyorder)."""
    smoothed = sps.savgol_filter(A, window_length=window_size, polyorder=polyorder)
    return smoothed
def plot_cumsum(ax, x, y, c=None, **kwargs):
    """Plot the running sum of *y* on *ax*, with a faint purple fill below.

    The fill is drawn first so the line sits on top; returns the Line2D
    artists from ax.plot.
    """
    ax.fill_between(x, np.cumsum(y), facecolor='purple', alpha=0.1)
    return ax.plot(x, np.cumsum(y), c=c, **kwargs)
def config_ax1(ax, e):
    """Style the d(edot)/dr axis: fixed limits, no y ticks, zero line.

    *e* is accepted for symmetry with config_ax2 but is unused here.
    """
    ax.set_xlim([ 0.0 , 5.0])
    ax.set_ylim([-4e-6, 4e-6])
    ax.set_yticks([])
    ax.set_ylabel(r'$d \dot{e} / dr$' + ' [Arb.]', fontsize=12)
    ax.axhline(0.0, color='grey', lw=0.5)
def config_ax2(ax, e):
    """Style the cumulative edot axis and stamp the eccentricity label."""
    ax.set_ylim([-1e-6, 1e-6])
    ax.set_yticks([])
    ax.set_ylabel(r'$\dot e (r)$ [Arb.]', fontsize=12)
    # Rounded, semi-transparent label box in axes coordinates.
    box = dict(boxstyle='round', facecolor='white', alpha=0.2)
    ax.text(0.015, 0.85, r'e = ' + str(e), transform=ax.transAxes, bbox=box)
def plot_edot_ldot_single(axa, axb, e, r, s, Edot, Ldot, edot):
    """Draw one eccentricity panel: smoothed relative power/torque on *axa*,
    and on *axb* the cumulative difference plus the integrated edot level.

    *s* (sigma) is accepted but not plotted here.
    """
    print(Edot.shape)
    # Normalize by the orbit's energy and angular momentum at this e.
    rel_Edot = Edot / E(e) / 2.
    rel_Ldot = Ldot / L(e)
    lab1 = 'Relative power'
    lab2 = 'Relative torque'
    config_ax1(axa, e)
    axa.plot(midpoint(r), smooth_savgol(-rel_Edot, window_size=7) / rbin, lw=2, color=red , alpha=0.8, label=lab1)
    axa.plot(midpoint(r), smooth_savgol(+rel_Ldot, window_size=7) / rbin, lw=2, color=blue, alpha=0.8, label=lab2)
    config_ax2(axb, e)
    # Reference level: the measured edot scaled by e/(1 - e^2).
    axb.axhline(edot * e / (1 - e**2), color='green', ls='--', lw=0.8, alpha=0.7, label=r'Integrated $\dot e$')
    plot_cumsum(axb, midpoint(r), -rel_Edot - rel_Ldot, ls='--', lw=0.75, color='purple', label=r'Integrated difference')
def config_axes(axs, key, ecc):
    """Add the figure-level annotation selected by *key* ('u'/'d'/'f') and
    the shared x-axis label on the bottom panel.

    *ecc* is accepted but unused here.
    """
    if key == 'u':
        axs[0].text(0.02, 1.02, r'Mean anomaly = $\pi$ / 2', transform=axs[0].transAxes)
    if key == 'd':
        axs[0].text(0.02, 1.02, r'Mean anomaly = $3\pi$ / 2', transform=axs[0].transAxes)
    if key == 'f':
        axs[0].text(0.02, 1.02, r'200 Orbit Average (1200-1400)', transform=axs[0].transAxes)
    axs[-1].set_xlabel(r'$r \, / \, a$', fontsize=14)
def load_radial_data(fname):
    """Read one time-averaged radial reduction file.

    :param fname: path to an HDF5 file containing 'radial_bins', 'sigma',
        'work_on' and 'torque_on' datasets.
    :return: tuple (r, sigma, work, torque) of numpy arrays.
    """
    # Use a context manager so the HDF5 handle is always closed; the
    # original opened the file and never closed it (handle leak).
    with h5py.File(fname, 'r') as h5f:
        r = h5f['radial_bins'][...]
        s = h5f['sigma'][...]
        P = h5f['work_on'][...]
        T = h5f['torque_on'][...]
    return r, s, P, T
def get_radial_series(key):
    """Load the time-averaged radial reductions for every eccentricity run.

    *key* is accepted for symmetry with the plotting entry points but is
    not used here.
    :return: (list of eccentricities, list of (r, sigma, work, torque) tuples)
    """
    ecc = [0.025, 0.1, 0.3, 0.4, 0.5, 0.6, 0.75]
    # NOTE(review): the e=0.025 run reads from './Data/fulls/' while every
    # other run reads './Data/full/' -- looks like a typo; confirm the path.
    e025 = load_radial_data('./Data/fulls/time_avg_reductions_e025.h5')
    e100 = load_radial_data('./Data/full/time_avg_reductions_e100.h5')
    e300 = load_radial_data('./Data/full/time_avg_reductions_e300.h5')
    e400 = load_radial_data('./Data/full/time_avg_reductions_e400.h5')
    e500 = load_radial_data('./Data/full/time_avg_reductions_e500.h5')
    e600 = load_radial_data('./Data/full/time_avg_reductions_e600.h5')
    e750 = load_radial_data('./Data/full/time_avg_reductions_e750.h5')
    data = [e025, e100, e300, e400, e500, e600, e750]
    return ecc, data
def plot_edot_ldot(ecc, data, key):
    """Build the stacked per-eccentricity figure and save it as
    'd_dots_dr_test.pdf' before showing it interactively.
    """
    # e_de_dedt.npy columns used here: 0 = eccentricity, 2 = de/dt
    # (column 1 is unused in this function).
    e_de_edot = np.load('./Data/e_de_dedt.npy')
    e_ts = e_de_edot[:, 0]
    edot = e_de_edot[:, 2]
    # plt.plot(e_ts, edot * t_nu)
    # plt.grid()
    # plt.show()
    fig, axs = plt.subplots(len(ecc), 1, figsize=[8, 12], sharex=True)
    for e, ax, dat in zip(ecc, axs, data):
        # Secondary y-axis for the cumulative curves.
        ax2 = ax.twinx()
        plot_edot_ldot_single(ax, ax2, e, dat[0], dat[1], dat[2], dat[3], edot[e_ts==e])
        if e==0.025:
            ax2.legend(loc="upper right", fontsize=11)
    config_axes(axs, key, ecc)
    axs[0].legend(loc="upper center", fontsize=11)
    plt.subplots_adjust(hspace=0.0)
    plt.savefig('d_dots_dr_test.pdf', dpi=800, pad_inches=0.1, bbox_inches='tight')
    plt.show()
if __name__ == '__main__':
    # 'f' selects the 200-orbit time-average annotation (see config_axes).
    key = 'f'
    ecc, data = get_radial_series(key)
    plot_edot_ldot(ecc, data, key)
| true |
45449a02a70ed9a466a2e5b054fe062dccee00e0 | Python | navetal39/Bcloud_Main_Server | /crypto.py | UTF-8 | 1,580 | 3.5625 | 4 | [] | no_license | # Imports: #
from Crypto.Cipher import AES
# Constants for encryption: #
BLOCK_SIZE = 32  # pad unit (bytes) used by padd()/depadd()
PADD_CHAR = '?'  # NOTE: ambiguous scheme -- plaintext ending in '?' cannot round-trip
# Ecryption funcs: #
def encrypt(plaintext):
    ''' Encrypt *plaintext* with AES and return the Base64-encoded result.

    Python 2 only: str.encode("Base64") does not exist in Python 3.
    '''
    from crypto_extended import generate_key as KEY
    padded_plaintext = padd(plaintext)
    # NOTE(review): AES.new(key) with no mode argument defaults to ECB,
    # which leaks plaintext structure -- flagged, not changed, here.
    encryptor = AES.new(KEY())
    ciphertext = encryptor.encrypt(padded_plaintext)
    encoded_ciphertext = ciphertext.encode("Base64") # Encoded in base64 for printing and other comforts sake's
    return encoded_ciphertext
def decrypt(encoded_ciphertext):
    ''' Decrypt the Base64-encoded ciphertext and return the plaintext.

    Python 2 only: str.decode("Base64") does not exist in Python 3.
    '''
    from crypto_extended import generate_key as KEY
    # Same key / default-ECB setup as encrypt() above.
    decryptor = AES.new(KEY())
    ciphertext = encoded_ciphertext.decode("Base64") #Decode from base64, the reason for encoding is written above.^
    padded_plaintext = decryptor.decrypt(ciphertext)
    return depadd(padded_plaintext)
## Utill for encryption funcs: ##
def padd(not_padded):
    '''Right-pad *not_padded* with PADD_CHAR up to a multiple of BLOCK_SIZE.

    When the input length is already a multiple of BLOCK_SIZE a full extra
    block of padding is appended (same as the original behaviour).
    '''
    fill = BLOCK_SIZE - len(not_padded) % BLOCK_SIZE
    return not_padded + PADD_CHAR * fill
def depadd(padded):
    ''' Remove the trailing PADD_CHAR padding added by padd().

    Fixed: the original used str.strip(), which also removed PADD_CHAR
    characters from the *front* of the plaintext -- padd() only ever
    appends, so only the right side must be stripped.  (Plaintext whose
    last real character is PADD_CHAR still cannot round-trip; that is a
    limitation of the padding scheme itself.)
    '''
    return padded.rstrip(PADD_CHAR)
return depadded
''' # For checking:
pt = raw_input('plaintext: ')
print "==========> Encrypted: ", encrypt(pt)
ct = raw_input('ciphertext: ')
print "==========> Decrypted: ", decrypt(ct)
'''
'''
Exciting. Satisfying. Period.
.
'''
| true |
ed562d709f1232f71857ce438b6f8a3a0ab684b2 | Python | shengli881026/python | /test.py | GB18030 | 646 | 3.4375 | 3 | [] | no_license | # -*- coding: cp936 -*-
import sys
# Python 2 demo script (print statements, raw_input); the trailing
# semicolons are redundant in Python.  The original comments were
# GB18030-encoded Chinese that arrived as mojibake; replaced below
# with English summaries.
print '>>> zhangshl';
# Build one string from three literals using explicit line continuations.
abc = '123'+\
'456'+\
'789';
print abc;
#print arr.lenth;
# raw_input demo (original comment was unreadable mojibake)
#raw_input('logding....input....');
#x='foo';sys.stdout.write(x+'\n');
x = 2;
if x==1:
    print 'this --1 if';
elif x==2:
    print 'thiis--2 if';
# Python string slicing demo.  NOTE: ``str`` shadows the builtin below.
str ="abcdefghigklmn";
print str;
print str[0];
print str[1:];
print str[1:4];
print str*2;
# Python list demo.
list_arr =['a','b','c','d','e','f','g','h','i','j','k'];
print list_arr;
print list_arr[1];
print list_arr[3:];
# Python dict demo.
tdict = {'a':1,'b':2,'c':3};
print tdict.values();
#print '-'
| true |
def two_sum(arr, sum):
    """Print (and return) every pair of values in *arr* that adds up to *sum*.

    Classic two-pointer scan over a sorted copy of the input.  The
    parameter name ``sum`` shadows the builtin; it is kept for backward
    compatibility with existing callers.

    Fixes over the original:
    * works on a sorted *copy*, so the caller's list is no longer mutated
      (the original sorted in place and then corrupted the list with a
      stray ``for arr[right] in range(n)`` debugging loop that rebinds
      list elements);
    * the loop runs while ``left < right`` instead of
      ``arr[right] > arr[left]``, so equal-value pairs (e.g. 6 + 6 = 12)
      are no longer missed;
    * returns the list of pairs found (the original returned None), which
      is a backward-compatible addition.
    """
    ordered = sorted(arr)
    pairs = []
    left, right = 0, len(ordered) - 1
    while left < right:
        pair_sum = ordered[left] + ordered[right]
        if pair_sum > sum:
            right -= 1          # total too big: shrink from the high end
        elif pair_sum < sum:
            left += 1           # total too small: grow from the low end
        else:
            print("The values are ", ordered[left], "&", ordered[right])
            pairs.append((ordered[left], ordered[right]))
            left += 1
            right -= 1
    return pairs
# Demo driver for two_sum.  NOTE: ``sum`` shadows the builtin of the same
# name at module level.
arr = [5, 7, 4, 3, 9, 8, 19, 2]
sum = 12
two_sum(arr, sum)
| true |
42ecd77a7bb4d3e65577093bd41155d923d7e84b | Python | panmpan17/Sushruta | /sushruta/manipulate.py | UTF-8 | 1,079 | 2.984375 | 3 | [] | no_license | import os
from PIL import Image
class Manipulator:
    """Image-manipulation helpers: per-channel colour scaling via Pillow."""

    @staticmethod
    def clamp(num, _min, _max):
        """Clamp *num* into the inclusive range [_min, _max].

        Declared as a @staticmethod: the original was a plain function in
        the class body (no ``self``/``cls``), which only worked because it
        was always looked up on the class; called on an instance it would
        have received the instance as ``num``.
        """
        if num < _min:
            return _min
        elif num > _max:
            return _max
        return num

    @classmethod
    def get_img_name(cls, file_path):
        """Return the basename of *file_path* without its final extension."""
        return ".".join(file_path.split("/")[-1].split(".")[:-1])

    @classmethod
    def tune_multiplier(cls, file_path, tune_multiplier, result_folder=""):
        """Scale each colour channel of the image at *file_path*.

        *tune_multiplier* is a sequence of per-channel factors; channels
        beyond its length are left untouched.  The result is written as
        ``<result_folder>/<name>.png``.  Returns the string
        "File not exist" when the input file is missing (kept for
        backward compatibility with existing callers).
        """
        try:
            img = Image.open(file_path)
        except FileNotFoundError:
            return "File not exist"
        file_name = cls.get_img_name(file_path)
        for x in range(img.width):
            for y in range(img.height):
                color = list(img.getpixel((x, y)))
                for i, value in enumerate(color):
                    if i < len(tune_multiplier):
                        # Scale, truncate to int, and keep within 0..255.
                        color[i] = cls.clamp(
                            int(color[i] * tune_multiplier[i]), 0, 255)
                img.putpixel((x, y), tuple(color))
        img.save(os.path.join(result_folder, f"{file_name}.png"), "PNG")
        img.close()
| true |
23f87a9a9dbde16b297568748a7b888723dfb015 | Python | lsst-sqre/lsst-efd-client | /src/lsst_efd_client/auth_helper.py | UTF-8 | 2,460 | 2.984375 | 3 | [
"MIT"
] | permissive | """Authentication helpers
"""
from urllib.parse import urljoin
import requests
class NotebookAuth:
    """Fetch connection credentials from a remote "segwarides" credential
    service so the secrets never need to live in the calling code.

    (The original docstring also claimed credentials could come from a
    file on disk; this class only queries the service endpoint.)

    Parameters
    ----------
    service_endpoint : `str`, optional
        Endpoint of the credential service
        (https://roundtable.lsst.codes/segwarides/ by default).

    Raises
    ------
    RuntimeError
        Raised if the service returns a non-200 status code.
    """
    def __init__(
        self, service_endpoint="https://roundtable.lsst.codes/segwarides/"
    ):
        # Probe the service once up front so a bad endpoint fails fast.
        response = requests.get(service_endpoint)
        if response.status_code == 200:
            self.service_endpoint = service_endpoint
        else:
            raise RuntimeError(
                f"Credential service at {service_endpoint} failed with Error "
                f"{response.status_code}."
            )
    def get_auth(self, alias):
        """Return the credentials stored under *alias* as a tuple.

        Parameters
        ----------
        alias : `str`
            Name of the stored credential set.

        Returns
        -------
        credentials : `tuple`
            (host, schema_registry, port, username, password, path).

        Raises
        ------
        ValueError
            If the service has no credentials under *alias* (HTTP 404).
        RuntimeError
            For any other non-200 response.
        """
        response = requests.get(
            urljoin(self.service_endpoint, f"creds/{alias}")
        )
        if response.status_code == 200:
            data = response.json()
            return (
                data["host"],
                data["schema_registry"],
                data["port"],
                data["username"],
                data["password"],
                data["path"],
            )
        elif response.status_code == 404:
            raise ValueError(
                f"No credentials available for {alias}. "
                "Try list_auth to get a list of available keys."
            )
        else:
            raise RuntimeError(f"Server returned {response.status_code}.")
    def list_auth(self):
        """Return the list of credential aliases the service knows about.

        Returns
        -------
        aliases : `list`
            `str` aliases accepted by get_auth.
        """
        response = requests.get(urljoin(self.service_endpoint, "list"))
        return response.json()
| true |
88ece39e62db6d9f299d548784104149fea94a8b | Python | hobama/sensorNetworks-2.4vs868 | /measurementsconverter/latencyconfidence.py | UTF-8 | 2,257 | 3.3125 | 3 | [] | no_license |
# coding: utf-8
# Jupyter-notebook export: confidence-interval analysis of latency samples
# for 2.4 GHz vs 868 MHz links.  ``get_ipython`` means this file only runs
# inside IPython/Jupyter, not as a plain script.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import math
# In[2]:
#2.4 GHz measurements
excel24 = pd.read_csv('./latency24_clean.csv')
excel868 = pd.read_csv('./latency868_clean.csv')
# In[3]:
# In a notebook this cell displays the first rows; as a script the value
# is computed and discarded.
excel24.head(n=5)
# In[4]:
latency24 = excel24['latency'].values
latency868 = excel868['latency'].values
# Scale raw values down by 1e6 (presumably microseconds -> seconds; the
# axis label below says "hundreth of a second" -- confirm the raw units).
latency24 = latency24/1000000
latency868 = latency868/1000000
# In[5]:
#determine amount of samples, mean and standard deviation
num24 = len(latency24)
mean24 = sum(latency24)/num24
std24 = np.std(latency24)
num868 = len(latency868)
mean868 = sum(latency868)/num868
std868 = np.std(latency868)
print("Number of samples: " + str(num24) + "\nMean of samples: " + str(mean24) + "\nStandard Deviation: " + str(std24))
print("Number of samples: " + str(num868) + "\nMean of samples: " + str(mean868) + "\nStandard Deviation: " + str(std868))
# In[6]:
#determine Z value of confidence interval (95%)
Z = 1.960
#Calculate intervals
# NOTE(review): "left" is mean + Z*sigma/sqrt(n), i.e. the *upper* bound,
# and "right" the lower bound -- the names are swapped vs. convention.
left_border24 = mean24 + Z*(std24/math.sqrt(num24))
right_border24 = mean24 - Z*(std24/math.sqrt(num24))
#Calculate intervals
left_border868 = mean868 + Z*(std868/math.sqrt(num868))
right_border868 = mean868 - Z*(std868/math.sqrt(num868))
print("2.4GHz \nLeft border: " + str(left_border24) + "\nRight border: " + str(right_border24))
print("868MHz \nLeft border: " + str(left_border868) + "\nRight border: " + str(right_border868))
# In[9]:
import matplotlib.pyplot as plt
# Histogram overlaid with the normal pdf built from the sample mean/std.
# NOTE(review): ``normed=True`` was removed in Matplotlib 3.x; modern
# versions require ``density=True``.
count24, bins24, ignored = plt.hist(latency24, 20, normed=True)
plt.plot(bins24, 1/(std24 * np.sqrt(2 * np.pi)) *
         np.exp( - (bins24 - mean24)**2 / (2 * std24**2) ),
         linewidth=2, color='r')
plt.ylabel('occurrence')
plt.xlabel('hundreth of a second')
plt.title('2.4 GHz Latency Confidence Intervals')
plt.savefig('latency24')
plt.show()
# In[8]:
# Same plot for the 868 MHz measurements.
count868, bins868, ignored = plt.hist(latency868, 20, normed=True)
plt.plot(bins868, 1/(std868 * np.sqrt(2 * np.pi)) *
         np.exp( - (bins868 - mean868)**2 / (2 * std868**2) ),
         linewidth=2, color='r')
plt.ylabel('occurrence')
plt.xlabel('hundreth of a second')
plt.title('868 MHz Latency Confidence Intervals')
plt.savefig('latency868')
plt.show()
| true |
aa7b37287ca6585eb19e592507d3e986bc5d106c | Python | misska/shopo | /importers/FlorentinumImporter.py | UTF-8 | 884 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding=utf-8
import urllib2
from bs4 import BeautifulSoup
from Importer import Importer
class FlorentinumImporter(Importer):
    """Scrape shop names from the Florentinum shopping-centre site.

    Python 2 code (urllib2, basestring).  ``self.shops`` is presumably
    provided by the ``Importer`` base class -- confirm there.
    """
    name = 'Florentinum'
    links = [
        'http://www.florentinum.cz/cs/obchody/obchodni-pasaz/',
        'http://www.florentinum.cz/cs/obchody/piazza-a-ulice-na-florenci/'
    ]
    # NOTE(review): mutable *class* attribute -- every instance appends to
    # the same shared list; repeated downloads accumulate soups.
    soups = []
    def download(self):
        # Fetch each listing page and keep its parsed soup for parse().
        for link in self.links:
            sock = urllib2.urlopen(link)
            html = sock.read()
            self.soups.append(BeautifulSoup(html, "html5lib"))
    def parse(self):
        # Shop names are encoded in the image-map areas' ``rel`` attributes.
        for soup in self.soups:
            shopMap = soup.find("map",{"id":"Map"})
            for area in shopMap.findAll("area", {"shape":"poly"}):
                rel = area.get('rel')
                name = ' '.join(rel)
                if isinstance(name, basestring):
                    self.shops.append(name)
009196adeb7311d51d38eda6cb0c0044d3a67dd0 | Python | chunxiao369/chunxiao-code | /python-study/wrapper_test/wrapper_para.py | UTF-8 | 368 | 3 | 3 | [] | no_license | #!/usr/bin/python
import functools
import sys
def trace(log_level):
    """Decorator factory: announce each function as it is decorated.

    The wrapped function is returned unmodified, so decoration costs
    nothing at call time -- only a one-off import-time message.
    """
    def register(func):
        message = 'Implementing function: "{}"'.format(func.__name__)
        print(log_level, message)
        return func
    return register
@trace('[INFO]')
def print_msg(msg):
    """Demo target for the trace decorator: echo *msg* to stdout."""
    print(msg)
@trace('[DEBUG]')
def assert_(expr):
    """Demo target: fail if *expr* is falsy.

    NOTE: ``assert`` is stripped under ``python -O``; demonstration only.
    """
    assert expr
# Module-level demo: both decorated functions print via the calls below.
print_msg('Hello, world!')
assert_('Hello, world!')
| true |
cdc06143316955ea040a4bc93bacddcaefbf8204 | Python | mfkiwl/pumba_signal_processing | /venv/main.py | UTF-8 | 2,812 | 2.71875 | 3 | [] | no_license | from datetime import datetime
from rtlsdr import RtlSdr
from pylab import *
from matplotlib import pyplot as plt
from scipy import signal
def main():
    """Capture one second of RTL-SDR samples at 433 MHz and run the
    receiver-delay simulation on the real (I) component.

    Side effects: prints status information, opens a matplotlib figure and
    shows it; requires an RTL-SDR device.
    """
    # Local import makes the name unambiguous: at module level
    # ``from datetime import datetime`` binds the *class*, but the later
    # ``from pylab import *`` may shadow it, and the original
    # ``datetime.datetime.now()`` raises AttributeError when the name is
    # bound to the class.
    from datetime import datetime
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S.%f")
    print("Current Time =", current_time)
    sdr = RtlSdr()  # rtl-sdr instance
    try:
        # configure device
        timeToSample = 1        # in sec
        sampleRate = 2.4e6      # in Hz (2.4 MHz)
        sdr.sample_rate = sampleRate
        sdr.center_freq = 433e6  # in Hz (433 MHz)
        sdr.gain = 30            # in dB
        print("gain set to:", sdr.get_gain())
        print(now)
        numberOfSamples = sampleRate * timeToSample
        samples = sdr.read_samples(numberOfSamples)  # acquire samples
    finally:
        # Always release the device handle, even if acquisition fails.
        sdr.close()
    fig = figure()
    ax = fig.add_subplot(111, projection='3d')  # 3d IQ/time plot (kept for parity)
    # I/Q separation
    real = samples.real
    imag = samples.imag
    samp = np.arange(0, numberOfSamples, 1)  # sample-index axis
    simulateRecivers(real, sampleRate)  # run the delay simulation
    show()
# this function simulates multiple recivers
# reciver is an ndarray
def simulateRecivers(rawSampled, Fs):
    """Emulate three receivers seeing the same signal at fixed sample
    offsets, then print the pairwise delays recovered by getDelay."""
    samp = np.arange(0, len(rawSampled), 1)  # axis placeholder (kept for parity)
    delayed_by_5 = np.roll(rawSampled, 5)  # second "receiver"
    delayed_by_3 = np.roll(rawSampled, 3)  # third "receiver"
    d12, t12 = getDelay(rawSampled, delayed_by_5, Fs)
    d23, t23 = getDelay(delayed_by_5, delayed_by_3, Fs)
    d31, t31 = getDelay(delayed_by_3, rawSampled, Fs)
    for discrete, seconds in ((d12, t12), (d23, t23), (d31, t31)):
        print(discrete, " ", seconds)
# gets the delay between two signals
def getDelay(signal1, signal2, Fs):
    """Estimate the delay of *signal2* relative to *signal1*.

    Cross-correlates the two signals ('same' mode), locates the lag with
    the highest correlation, and converts it to a sample shift plus a time
    delay in seconds for sampling rate *Fs*.
    :return: (sample shift, delay in seconds) -- the shift is a float
        because of true division by 2.
    """
    xcorr = signal.correlate(signal1, signal2, 'same')
    peak_position = np.argmax(xcorr)  # index of maximum correlation
    sample_shift = (len(xcorr) / 2) - peak_position
    delay_seconds = sample_shift * 1 / Fs
    return sample_shift, delay_seconds
# Runs immediately on import/execution; there is no __main__ guard.
main()
de53440c781058a0aa8b9b45277f83ad52d38d2b | Python | richmont/ListaSequencial | /python/exercicio4.py | UTF-8 | 392 | 4.15625 | 4 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python3
# Faça um Programa que peça as 4 notas bimestrais e mostre a média.
def main():
nota1=float(input("Insira a primeira nota "))
nota2=float(input("Insira a segunda nota "))
nota3=float(input("Inira a terceira nota "))
nota4=float(input("Insira a quarta nota "))
media=(nota1+nota2+nota3+nota4)/4
print("A média final é de: ",media)
main()
| true |
f26b507db320c6affb7cc2c928f82412010143b8 | Python | domingoesteban/robolearn | /robolearn/torch/utils/data_management/simple_replay_buffer.py | UTF-8 | 3,141 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | import torch
from robolearn.torch.utils import pytorch_util as ptu
from robolearn.utils.data_management.replay_buffer import ReplayBuffer
class SimpleReplayBuffer(ReplayBuffer):
def __init__(
self, max_size, obs_dim, action_dim,
):
if not max_size > 1:
raise ValueError("Invalid Maximum Replay Buffer Size: {}".format(
max_size)
)
max_size = int(max_size)
self._obs_buffer = torch.zeros((max_size, obs_dim),
dtype=torch.float32,
device=ptu.device)
self._next_obs_buffer = torch.zeros((max_size, obs_dim),
dtype=torch.float32,
device=ptu.device)
self._acts_buffer = torch.zeros((max_size, action_dim),
dtype=torch.float32,
device=ptu.device)
self._rewards_buffer = torch.zeros((max_size, 1),
dtype=torch.float32,
device=ptu.device)
self._terminals_buffer = torch.zeros((max_size, 1),
dtype=torch.float32,
device=ptu.device)
self._obs_dim = obs_dim
self._action_dim = action_dim
self._max_size = max_size
self._top = 0
self._size = 0
def add_sample(self, observation, action, reward, terminal,
next_observation, **kwargs):
self._obs_buffer[self._top] = torch.as_tensor(observation)
self._acts_buffer[self._top] = torch.as_tensor(action)
self._rewards_buffer[self._top] = torch.as_tensor(reward)
self._terminals_buffer[self._top] = torch.as_tensor(terminal.astype(float))
self._next_obs_buffer[self._top] = torch.as_tensor(next_observation)
self._advance()
def terminate_episode(self):
pass
def _advance(self):
self._top = (self._top + 1) % self._max_size
if self._size < self._max_size:
self._size += 1
def random_batch(self, batch_size):
if batch_size > self._size:
raise AttributeError('Not enough samples to get. %d bigger than '
'current %d!' % (batch_size, self._size))
indices = torch.randint(0, self._size, (batch_size,), dtype=torch.long,
device=ptu.device)
return dict(
observations=self.buffer_index(self._obs_buffer, indices),
actions=self.buffer_index(self._acts_buffer, indices),
rewards=self.buffer_index(self._rewards_buffer, indices),
terminals=self.buffer_index(self._terminals_buffer, indices),
next_observations=self.buffer_index(self._next_obs_buffer, indices),
)
def available_samples(self):
return self._size
@staticmethod
def buffer_index(buffer, indices):
return torch.index_select(buffer, dim=0, index=indices)
| true |
2df84bb3b1f164c14c683d714faae6a0266a8e6e | Python | sandeepshiven/python-practice | /object oriented programming/attributes_and_class_keyword.py | UTF-8 | 692 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:07:04 2019
@author: sandeep
"""
class Student(): # object Student
study = "engineering" # common for every Student instance (Class object attribute)
def __init__(self,name,roll,branch):
#attributes
# we take in arguments and
# assing it using self.attributes_name
self.name = name
self.roll = roll
self.branch = branch
# Student instance
stu1 = Student("Sandeep", 98 ,"CSE")
print(f"My name is {stu1.name}. I am persuing {stu1.study} my branch is {stu1.branch} and my roll no. is {stu1.roll}")
print(f"{type(stu1)}") | true |
30d8ab6ab2dcd5e19d0a0c197ba9d92aec4a30b5 | Python | BradleyTjandra/Deep-Learning | /GAN.py | UTF-8 | 7,527 | 2.515625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import datetime
import os
from IPython import display
from tensorflow.keras import layers
from sklearn.utils import shuffle
#import math
#python "C:\Users\bradley.tjandra\AppData\Local\Continuum\anaconda3\Lib\site-packages\tensorboard\main.py" --logdir="C:\Users\bradley.tjandra\Dropbox\2019\Machine Learning_2019\Code\Other Implementations\GAN Dashboard"
# http://GZ0XVT2:6006
(x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255.
class GAN():
def __init__(self, sess):
self.tensors = {}
self.holders = {}
self.ops = {}
self.sess = sess
self.create_graph()
def create_generator_graph(self, Z):
with tf.variable_scope("gener"):
layer = tf.layers.dense(
Z
, units=128
, activation=tf.nn.relu
, name = "lay1"
)
layer = tf.layers.dense(
layer
, units=784
, activation=tf.sigmoid
, name = "lay3"
)
layer = tf.reshape(layer, shape=[-1, 28, 28])
return(layer)
def create_discriminator_graph(self):
layers = []
with tf.variable_scope("adver"):
layers.append(tf.layers.Dense(
units = 240
, activation=tf.nn.relu
, name = "adver/lay1"
))
layers.append(tf.layers.Dense(
units=1
, kernel_initializer=tf.random_normal_initializer(-0.005,0.005)
, name = "adver/lay3"
))
return(layers)
def apply_discriminator_graph(self, adver_layers, images):
layer = tf.reshape(images, shape=[-1, 784])
for L in adver_layers:
layer = L(layer)
preds = tf.nn.sigmoid(layer)
return(preds, layer)
def create_graph(self):
Z = tf.placeholder(tf.float32, [None, 100], name="Z")
real_images = tf.placeholder(tf.float32, [None, 28, 28])
fake_images = self.create_generator_graph(Z)
adver_layers = self.create_discriminator_graph()
pred_on_real, logits_real = self.apply_discriminator_graph(adver_layers, real_images)
pred_on_fake, logits_fake = self.apply_discriminator_graph(adver_layers, fake_images)
adver_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits_real)
, logits=logits_real
)
+ tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits_fake)
, logits=logits_fake
)
)
gener_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits_fake)
, logits=logits_fake)
)
adver_optimizer = tf.train.AdamOptimizer(.001)
gener_optimizer = tf.train.AdamOptimizer(.001)
gener_grads = gener_optimizer.compute_gradients(gener_loss, fake_images)
adver_train_op = adver_optimizer.minimize(adver_loss
, var_list=tf.trainable_variables(scope="adver")
)
gener_train_op = gener_optimizer.minimize(gener_loss
, var_list=tf.trainable_variables(scope="gener")
)
tf.summary.histogram("real_pred",tf.reduce_mean(pred_on_real))
tf.summary.histogram("fake_pred",tf.reduce_mean(pred_on_fake))
tf.summary.histogram("adversarial_loss",adver_loss)
tf.summary.histogram("generator_loss",gener_loss)
tf.summary.histogram("grad_gener", gener_grads)
tf.summary.histogram("grad_gener_mean", tf.reduce_mean(gener_grads))
tf.summary.histogram("grad_gener_max", tf.reduce_max(gener_grads))
tf.summary.histogram("grad_gener_min", tf.reduce_min(gener_grads))
merge = tf.summary.merge_all()
self.holders['Z'] = Z
self.holders['real_images'] = real_images
self.tensors['merge'] = merge
self.tensors['gener_grads'] = gener_grads
self.tensors['pred_on_real'] = pred_on_real
self.tensors['pred_on_fake'] = pred_on_fake
self.tensors['fake_images'] = fake_images
self.ops['adver'] = adver_train_op
self.ops['gener'] = gener_train_op
def prior(self, batch_size):
return(np.random.normal(size=[batch_size,100]))
def train_network(self, k, real_images):
for i in range(k):
Z = self.prior(real_images.shape[0])
summary, _, pred_on_real, pred_on_fake = self.sess.run((
self.tensors['merge']
, self.ops['adver']
, self.tensors['pred_on_real']
, self.tensors['pred_on_fake']
)
, {
self.holders['Z'] : Z
, self.holders['real_images'] : real_images
}
)
self.sess.run(self.ops['gener'], {
self.holders['Z'] : Z
})
return(summary, np.mean(pred_on_real), 1-np.mean(pred_on_fake))
def generate_images(self, m=None, prior=None):
if prior is None:
prior = self.prior(m)
images = self.sess.run(self.tensors['fake_images'],{
self.holders['Z']: prior})
return(np.reshape(images, (-1,28,28)))
def see_grads(self, Z_test=None):
if Z_test is None:
Z_test = self.prior(1)
grads = self.sess.run(
self.tensors['gener_grads']
, {
self.holders['Z'] : Z_test
})
return(grads)
def sample_image(images, j, minibatch_size):
if j == 0:
images = shuffle(images)
start = j * minibatch_size
stop = (j+1) * minibatch_size
return(images[start:stop,])
def generate_and_save_images(gan, epoch, Z_test):
fakes = gan.generate_images(prior=Z_test)
grads = gan.see_grads(Z_test)
n_cols = 3
n_rows = 4
display.clear_output(wait=True)
fig = plt.figure(figsize=(8,8))
for i in range(n_rows):
plt.subplot(n_rows,n_cols, n_cols*i+1)
plt.imshow(fakes[i, :, :], cmap='gray_r')
plt.axis('off')
plt.subplot(n_rows,n_cols, n_cols*i+2)
g = grads[0][0][i]
if len(g.shape) == 1:
g = np.expand_dims(g,-1)
plt.imshow(g, cmap='gray')
plt.colorbar()
plt.axis('off')
plt.subplot(n_rows,n_cols, n_cols*i+3)
g = grads[0][1][i]
if len(g.shape) == 1:
g = np.expand_dims(g,-1)
plt.imshow(g, cmap='gray_r')
plt.colorbar()
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
print(np.max(grads[0][0]), np.min(grads[0][0]))
tf.reset_default_graph()
sess = tf.Session()
gan = GAN(sess)
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S")))
Z_test = np.random.normal(size=[4,100])
for i in range(100):
for j in range(int(x_train.shape[0]/128)):
real_images = sample_image(x_train, j, 128)
summary, real_acc, fake_acc = gan.train_network(1, real_images)
if j % 50 == 0:
train_writer.add_summary(summary, i)
generate_and_save_images(gan, i, Z_test)
print(real_acc, fake_acc)
| true |
fa87906779b7e87a810915380dc8cf4a03a4a464 | Python | CHESS-mission/05_FS | /simulators/ADCS/testClientTcp.py | UTF-8 | 1,268 | 2.625 | 3 | [] | no_license | import socket
from packages.package.UART import Uart
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 5005)
messages = [bytearray([0x1F,0x7F,0x80,0x1F,0xFF]),bytearray([0x1F,0x7F,0x00,0x1F,0xFF]),bytearray([0x1F,0x7F,0x80,0x1F,0xFF])]
if __name__ == '__main__':
s.connect(server_address)
try:
for message in messages:
if message[2] < 128:
message.append(Uart.checksum(message))
print(f"Sending Telecommand message : command id {message[2]}")
else:
print(f"Sending Telemetry message : Telemetry id {message[2]}")
s.send(message)
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected :
data = s.recv(2048)
amount_received += len(data)
print(f"received {data}")
if data[2] < 128:
print(f"Received Telecommand acknowledge : code {data[3]} \n")
else:
print(f"Received Telemetry reply : data {data[3]} \n")
finally:
print("close socket")
s.close() | true |
f82c111280cf3142b1dcbbab8b6ef1abf261ec1c | Python | mrescati1/TENSORFLOW | /Chord.py | UTF-8 | 361 | 3.34375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 16:34:50 2020
@author: micha
"""
from chord import Chord
matrix = [ [0, 5, 6, 4, 7, 4], [5, 0, 5, 4, 6, 5],
[6, 5, 0, 4, 5, 5], [4, 4, 4, 0, 5, 5],
[7, 6, 5, 5, 0, 4], [4, 5, 5, 5, 4, 0]]
names = ["Action", "Adventure", "Comedy", "Drama", "Fantasy", "Thriller"]
Chord(matrix, names).show() | true |
453b185b7937f6b2d42d1c896eb4c396ac3b29b1 | Python | MDziwny/django-fsm | /django_fsm/tests/test_integer_field.py | UTF-8 | 1,066 | 2.6875 | 3 | [
"MIT"
] | permissive | from django.db import models
from django.test import TestCase
from django_fsm import FSMIntegerField, TransitionNotAllowed, transition
class BlogPostStateEnum(object):
NEW = 10
PUBLISHED = 20
HIDDEN = 30
class BlogPostWithIntegerField(models.Model):
state = FSMIntegerField(default=BlogPostStateEnum.NEW)
@transition(field=state, source=BlogPostStateEnum.NEW, target=BlogPostStateEnum.PUBLISHED)
def publish(self):
pass
@transition(field=state, source=BlogPostStateEnum.PUBLISHED, target=BlogPostStateEnum.HIDDEN)
def hide(self):
pass
class BlogPostWithIntegerFieldTest(TestCase):
def setUp(self):
self.model = BlogPostWithIntegerField()
def test_known_transition_should_succeed(self):
self.model.publish()
self.assertEqual(self.model.state, BlogPostStateEnum.PUBLISHED)
self.model.hide()
self.assertEqual(self.model.state, BlogPostStateEnum.HIDDEN)
def test_unknow_transition_fails(self):
self.assertRaises(TransitionNotAllowed, self.model.hide)
| true |
64c52eb4c30778d94b7153d5a1a577ec7f546a73 | Python | smile1973/crawer-practice | /movie_rank.py | UTF-8 | 554 | 3.140625 | 3 | [] | no_license | from bs4 import BeautifulSoup
from pprint import pprint
import requests
r1 = requests.get("https://www.imdb.com/search/title/?groups=top_100")
soup1 = BeautifulSoup(r1.text, "html.parser")
result1 = soup1.find_all(class_ = "lister-item-header")
movie_list = []
for line in result1:
movie_list.append(line.find("a").text)
pprint(movie_list)
result2 = soup1.find_all(class_ = "genre")
gen_list = []
for line in result2:
gen_list.append(line.text.strip().split(", "))
pprint(gen_list)
pprint(list(zip(movie_list, gen_list))) | true |
a322e4439df31ecb404349dde66732ba04fd6bdf | Python | SarahHoegen/Python_Listas_Exe_Prova_etc | /listas/Programacao1/lista1.py | UTF-8 | 6,138 | 3.71875 | 4 | [] | no_license | #!/bin/env python3
# Marco André Mendes <marco.mendes@ifc.edu.br>
# Lista de exercícios 1
def soma_dois_inteiros(a,b):
    '''Return the sum of the two integers *a* and *b*.'''
    return a + b
def metro_para_milimetros(metros):
    '''Convert a length in metres to millimetres.'''
    return metros * 1000
def dias_para_segundos(dias,horas,minutos,segundos):
    '''Convert a duration given as days/hours/minutes/seconds into seconds.'''
    em_segundos = segundos + minutos * 60 + horas * 60 * 60 + dias * 24 * 60 * 60
    return em_segundos
def aumento_salarial(salario,porcentagem):
    '''Return *salario* increased by *porcentagem* percent, rounded to 2 places.'''
    fator = porcentagem / 100
    return round(salario * fator + salario, 2)
def preco_com_desconto(preco, desconto):
    """Return the price after applying *desconto* percent off, rounded to 2 decimals."""
    fracao = desconto / 100
    return round(preco - preco * fracao, 2)
def tempo_para_percorrer_uma_distancia(distancia, velocidade):
    """Return the time (in hours, 2 decimals) to cover *distancia* at a
    constant *velocidade* in a straight line."""
    return round(distancia / velocidade, 2)
def celsius_para_fahrenheit(c):
    """Convert degrees Celsius to Fahrenheit, rounded to 2 decimals."""
    return round(9 * c / 5 + 32, 2)
def fahrenheit_para_celsius(f):
    """Convert degrees Fahrenheit to Celsius, rounded to 2 decimals."""
    return round((f - 32) / 1.8, 2)
def preco_aluguel_carro(dias, km):
    """Car-rental cost: R$60 per day plus R$0.15 per km, rounded to 2 decimals."""
    return round(dias * 60 + km * 0.15, 2)
def dias_perdidos_por_fumar(cigarros, anos):
    """Days of life lost after smoking *cigarros* per day for *anos* years,
    assuming each cigarette costs 10 minutes of life (2-decimal result)."""
    cigarros_totais = cigarros * anos * 365
    minutos_perdidos = cigarros_totais * 10
    # 1440 minutes per day.
    return round(minutos_perdidos / 1440, 2)
def dois_elevado_a_um_milhao():
    """Return the number of decimal digits of 2 ** 1_000_000.

    BUG FIX: the original computed ``len(str(2 ** 1000000))``, which on
    Python >= 3.11 raises ``ValueError`` because of the default int->str
    conversion length limit (~4300 digits), and is needlessly slow.  The
    digit count of n is ``floor(log10(n)) + 1``; for 2**1_000_000 the
    floating-point error of ``1e6 * log10(2)`` (~1e-10) is far below the
    distance to the nearest integer, so the result is exact.
    """
    import math
    return math.floor(1000000 * math.log10(2)) + 1
# Test area: only change this if you know what you are doing!
acertos = 0  # number of passed checks
total = 0    # number of checks run

def test(obtido, esperado):
    """Compare an obtained value against the expected one, print a
    colored pass/fail line and update the global counters."""
    global acertos, total
    total += 1
    if obtido != esperado:
        # red "Falhou" prefix
        prefixo = '\033[31m%s' %('Falhou')
    else:
        # green "Passou" prefix
        prefixo = '\033[32m%s' %('Passou')
        acertos += 1
    print ('%s Esperado: %s \tObtido: %s\033[1;m' % (prefixo, repr(esperado),
                                                     repr(obtido)))
def main():
    """Run every exercise function against its expected values."""
    print('Soma dois inteiros:')
    test(soma_dois_inteiros(0,0), 0)
    test(soma_dois_inteiros(-1,0), -1)
    test(soma_dois_inteiros(1,1), 2)
    test(soma_dois_inteiros(0,-1), -1)
    test(soma_dois_inteiros(10,10), 20)
    test(soma_dois_inteiros(-10,20), 10)
    print('Metros para milimetros:')
    test(metro_para_milimetros(0), 0)
    test(metro_para_milimetros(1), 1000)
    test(metro_para_milimetros(1.8), 1800)
    test(metro_para_milimetros(1.81), 1810)
    print('Dias,horas,minutos e segundos para segundos:')
    test(dias_para_segundos(0,0,0,0), 0)
    test(dias_para_segundos(0,0,0,30), 30)
    test(dias_para_segundos(0,0,1,0), 60)
    test(dias_para_segundos(1,0,0,0), 86400)
    test(dias_para_segundos(1,1,1,1), 90061)
    test(dias_para_segundos(0,23,59,59), 86399)
    test(dias_para_segundos(10,20,59,1), 939541)
    print('Aumento salarial baseado na porcentagem de aumento:')
    test(aumento_salarial(1330,20), 1596.00)
    test(aumento_salarial(1000,0), 1000.00)
    test(aumento_salarial(1000.10,123), 2230.22)
    test(aumento_salarial(0.0,200), 0.00)
    print('Desconto do preco atual baseado na porcentagem do desconto:')
    test(preco_com_desconto(1330,20), 1064.00)
    test(preco_com_desconto(1000,0), 1000.00)
    test(preco_com_desconto(1000.10,5.5), 945.09)
    test(preco_com_desconto(0.0,200), 0.00)
    print('Tempo gasto para percorrer um distancia a uma velocidade'
          'constante(linha reta):')
    test(tempo_para_percorrer_uma_distancia(1330,20), 66.50)
    test(tempo_para_percorrer_uma_distancia(1000,100), 10.00)
    test(tempo_para_percorrer_uma_distancia(1000,123), 8.13)
    test(tempo_para_percorrer_uma_distancia(100000,201), 497.51)
    print('Celsius para Fahrenheit:')
    test(celsius_para_fahrenheit(30), 86.00)
    test(celsius_para_fahrenheit(300), 572.00)
    test(celsius_para_fahrenheit(-100), -148.00)
    test(celsius_para_fahrenheit(1), 33.80)
    print('Fahrenheit para Celsius:')
    test(fahrenheit_para_celsius(30), -1.11)
    test(fahrenheit_para_celsius(300), 148.89)
    test(fahrenheit_para_celsius(-100), -73.33)
    test(fahrenheit_para_celsius(1), -17.22)
    print('Preco a pagar pelo aluguel do carro:')
    test(preco_aluguel_carro(10,100), 615.00)
    test(preco_aluguel_carro(13,133), 799.95)
    test(preco_aluguel_carro(1,0), 60.00)
    test(preco_aluguel_carro(3,79), 191.85)
    print('Total de dias que perdeu de viver por ter fumados:')
    test(dias_perdidos_por_fumar(10,10), 253.47)
    test(dias_perdidos_por_fumar(13,13), 428.37)
    test(dias_perdidos_por_fumar(0,110), 0.00)
    test(dias_perdidos_por_fumar(3,79), 600.73)
    print('Calcula a quantidade de numeros que ha em dois elevado a um'
          'milhao:')
    test(dois_elevado_a_um_milhao(), 301030)
if __name__ == '__main__':
    # Run all checks, then print the summary and a grade out of 10.
    main()
    print("\n%d Testes, %d Ok, %d Falhas: Nota %.1f" %(total, acertos,
          total-acertos, float(acertos*10)/total))
    if total == acertos:
        print("Parabéns, seu programa rodou sem falhas!")
6e54e17c8fc05e22871cf8eb6d9e768b2355c67a | Python | RyanMolyneux/Learning_Python | /pythonCrashCourseWork/Chapter 3/3-9-DinnerGuests.py | UTF-8 | 251 | 4.03125 | 4 | [] | no_license | #Chapter 3 ex 9 date : 10/06/17.
# Guest names to invite to dinner.
guest_list = ["Jim","Tim","Thomas"]
# Print the guest list header, each guest, and the attendee count.
print("\n\nGuest List\n--------------------------\n",guest_list[0],"\n",guest_list[1],"\n",guest_list[2],"\n\nThe Amount of people attending is : ",len(guest_list))
90234799e19f076f24743b397d3f2574a9e8f9a4 | Python | Margiris/NNNavigator | /gameObjects.py | UTF-8 | 7,023 | 2.734375 | 3 | [
"MIT"
] | permissive | import pygame
import numpy
from brain import Brain
from settings import Settings
class GameObject(pygame.sprite.Sprite):
    """Base sprite: a colored rectangle on the tile grid that may move
    once every *frames_per_move* update ticks."""

    def __init__(self, sprite_groups, tile_size, color, coords, size, is_movable=False, fpm=Settings.FRAMES_PER_MOVE, move_ticks=0):
        self.groups = sprite_groups
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.tile_size = tile_size
        # Surface sized in pixels: (tiles wide * px, tiles high * px).
        self.image = pygame.Surface((size[0] * self.tile_size[0],
                                     size[1] * self.tile_size[1]))
        self.x, self.y = coords
        self.width, self.height = size
        self.color = color
        self.rect = self.image.get_rect()
        self.is_movable = is_movable
        self.frames_per_move = fpm  # cooldown between moves, in ticks
        self.move_ticker = move_ticks

    def update(self):
        """Refresh the sprite image, advance the move cooldown and place
        the rect at the current tile position."""
        self.image.fill(self.color)
        if self.is_movable:
            self.move_ticker += 1
        self.rect.x = self.x * self.tile_size[0]
        self.rect.y = self.y * self.tile_size[1]

    def draw(self):
        # Drawing is handled by the sprite groups; nothing to do here.
        pass

    def move(self, dx=0, dy=0):
        """Move by (dx, dy) tiles if movable, the cooldown has elapsed
        and the destination stays on the board."""
        if self.is_movable and self.move_ticker > self.frames_per_move:
            self.move_ticker = 0
            if 0 <= self.x + dx < Settings.TILE_COUNT[0]:
                self.x += dx
            if 0 <= self.y + dy < Settings.TILE_COUNT[1]:
                self.y += dy

    def __str__(self):
        # Serialised field list used when saving the game state.
        return Settings.PROP_SEP.join([str(self.x), str(self.y), str(self.width), str(self.height),
                                       str(self.is_movable), str(self.frames_per_move), str(self.move_ticker)])
class Goal(GameObject):
    """A static target tile the player tries to reach (1x1 by default)."""

    def __init__(self, sprite_groups, tile_size, color, coords, size=(1, 1)):
        super().__init__(sprite_groups, tile_size, color, coords, size)

    def __str__(self):
        # Serialised as "G" + color + the base GameObject fields.
        return Settings.PROP_SEP.join(["G", tuple__str__(self.color), super().__str__()])
class Player(GameObject):
    """The agent sprite: movement is driven by a Brain, it dies when it
    overlaps a wall and celebrates when it steps onto the goal tile."""

    def __init__(self, sprite_groups, function, tile_size, color, coords, size=(1, 1), goal=None, walls=None, fpm=Settings.FRAMES_PER_MOVE, move_ticks=0, reached_goal=False, vision_surface=None, model_name=None):
        # *function* is a report callback invoked on death / celebration.
        super().__init__(sprite_groups, tile_size, color,
                         coords, size, True, fpm=fpm, move_ticks=move_ticks)
        self.celebration_count = 0
        self.report = function
        self.goal = goal
        self.walls = walls
        self.original_color = color  # restored on resurrect()
        self.brain = Brain(self, vision_surface, reached_goal, model_name)
        self.resurrect()

    def update(self):
        """Let the brain act, then check for a fatal wall overlap."""
        self.brain.update()
        if self.collides_with_wall(self.x, self.y):
            self.die()
        return super().update()

    def move(self, dx=0, dy=0):
        """Attempt a move: dying on a wall, celebrating on the goal, and
        informing the brain of the action taken."""
        if self.is_alive and self.move_ticker > self.frames_per_move:
            if self.collides_with_wall(self.x + dx, self.y + dy):
                self.die()
            else:
                if self.x + dx == self.goal.x and self.y + dy == self.goal.y:
                    self.celebrate()
                self.brain.move(dx, dy)
        return super().move(dx, dy)

    def die(self):
        """Mark the player dead, recolor it and notify the report callback."""
        self.is_alive = False
        self.color = Settings.PLAYER_DEAD_COLOR
        self.brain.die()
        self.report(self)

    def celebrate(self):
        """Record that the goal was reached and notify the report callback."""
        self.brain.reached_goal = True
        self.celebration_count += 1
        self.report(self)

    def get_celebrations(self):
        # Display string: "<episode reward in goal units>/<celebrations>".
        return '{:.0f}/{:d}'.format(self.brain.episode_reward / self.brain.GOAL_REWARD, self.celebration_count)

    def resurrect(self):
        """Reset cooldown, liveness, color and the brain's episode state."""
        self.move_ticker = 0
        self.is_alive = True
        self.color = self.original_color
        self.brain.resurrect()

    def collides_with_wall(self, x, y):
        """Return True when any wall occupies tile (x, y)."""
        for wall in self.walls:
            if wall.x == x and wall.y == y:
                return True
        return False

    # def look_8_ways(self):
    #     vision_cells = []
    #     vision = [None for _ in range(8)]
    #     index = 0
    #     for y in range(-1, 2):
    #         for x in range(-1, 2):
    #             if x == 0 and y == 0:
    #                 continue
    #             vision[index] = 0
    #             for i in range(1, Settings.VISION_DISTANCE + 1):
    #                 if self.collides_with_wall(self.x + x * i, self.y + y * i):
    #                     vision[index] = Settings.VISION_DISTANCE - i + 1
    #                     break
    #                 else:
    #                     vision_cells.append((self.x + x * i,
    #                                          self.y + y * i))
    #             index += 1
    #     return vision, vision_cells

    def look_square(self):
        """Build the square observation grid centered on the player:
        walls are marked -1 and the goal 1."""
        vision = numpy.zeros(
            self.brain.OBSERVATION_SPACE_VALUES, dtype=numpy.uint8)
        x_start = self.x - Settings.VISION_DISTANCE
        x_end = self.x + Settings.VISION_DISTANCE
        y_start = self.y - Settings.VISION_DISTANCE
        y_end = self.y + Settings.VISION_DISTANCE
        for wall in self.walls:
            if x_start <= wall.x <= x_end and y_start <= wall.y <= y_end:
                # NOTE(review): -1 stored into a uint8 array wraps to 255
                # (and raises in NumPy 2) -- confirm the Brain expects that.
                vision[wall.y - self.y + Settings.VISION_DISTANCE][wall.x -
                    self.x + Settings.VISION_DISTANCE] = -1
        # NOTE(review): walls use inclusive bounds (<=) but the goal uses
        # strict (<), so a goal on the window edge is invisible -- confirm.
        if x_start < self.goal.x < x_end and y_start < self.goal.y < y_end:
            vision[self.goal.y - self.y + Settings.VISION_DISTANCE][self.goal.x -
                self.x + Settings.VISION_DISTANCE] = 1
        return vision

    def __str__(self):
        # Serialised as "P" + original color + base fields + liveness + brain.
        return Settings.PROP_SEP.join(["P", tuple__str__(self.original_color), super().__str__(), str(self.is_alive), str(self.brain)])
class Wall(GameObject):
    """An obstacle tile; optionally patrols back and forth over
    *movement_range* tiles (positive = horizontal, negative = vertical)."""

    def __init__(self, sprite_groups, tile_size, color, coords, size=(1, 1), is_movable=False,
                 fpm=Settings.FRAMES_PER_MOVE, movement_range=(0, 0), move_ticks=0, move_dir=None):
        super().__init__(sprite_groups, tile_size, color,
                         coords, size, is_movable, fpm, move_ticks)
        self.curr_pos, self.max_pos = movement_range
        if move_dir:
            self.move_dir = move_dir
        else:
            # Default direction depends on the sign of the patrol range.
            self.move_dir = 1 if self.max_pos < 0 else -1

    def update(self):
        """Advance the patrol when movable and the cooldown has elapsed."""
        if self.is_movable and self.move_ticker > self.frames_per_move:
            self.automover()
        return super().update()

    def automover(self):
        """Step one tile along the patrol axis, reversing at either end."""
        if self.max_pos > 0:
            # Horizontal patrol between offsets 0 and max_pos.
            if self.curr_pos <= 0 or self.curr_pos >= self.max_pos:
                self.move_dir = -self.move_dir
            self.move(self.move_dir, 0)
        elif self.max_pos < 0:
            # Vertical patrol between offsets 0 and max_pos (negative).
            if self.curr_pos >= 0 or self.curr_pos <= self.max_pos:
                self.move_dir = -self.move_dir
            self.move(0, -self.move_dir)
        self.curr_pos += self.move_dir

    def __str__(self):
        # Serialised as "W" + color + base fields + patrol state.
        return Settings.PROP_SEP.join(["W", tuple__str__(self.color), super().__str__(), str(self.curr_pos), str(self.max_pos), str(self.move_dir)])
def tuple__str__(t):
    """Serialise a tuple as its elements joined by Settings.TUPLE_SEP."""
    return Settings.TUPLE_SEP.join(map(str, t))
| true |
cd16a430fbaf62b44403dc0685a102926a73680f | Python | anudeepsamaiya/pyprofile | /src/pyprofile/profiler.py | UTF-8 | 4,045 | 2.53125 | 3 | [
"MIT"
] | permissive | """Simple cProfile based python profiler.
"""
import cProfile
import functools
import inspect
import pstats
from datetime import datetime
from io import StringIO
from gprof2dot import (
TEMPERATURE_COLORMAP,
TIME_RATIO,
TOTAL_TIME_RATIO,
DotWriter,
PstatsParser,
)
# Public API of this module.
__all__ = [
    "profile",
    "Profiler",
]
def profile(*fn, **options):
    """Profile the decorated function with :class:`Profiler`.

    Works both bare (``@profile``) and parameterised
    (``@profile(name="run", dump_dir="out", save_stats=True)``).  All
    remaining keyword options are forwarded to :class:`Profiler`.
    """
    # BUG FIX: use pop() instead of get().  *name* is passed to Profiler
    # positionally, so leaving it inside **options raised
    # "got multiple values for argument 'name'" whenever @profile(name=...)
    # was used.
    name = options.pop("name", None)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with Profiler(name or func.__name__, **options):
                to_return = func(*args, **kwargs)
            return to_return
        return wrapper

    if fn and inspect.isfunction(fn[0]):
        # Called bare: @profile
        return decorator(fn[0])
    else:
        # Called with parameters: @profile(...)
        return decorator
class Profiler(object):
    """Context manager that profiles the enclosed block with cProfile.

    After the block exits the textual pstats report is available as
    ``self.stats_str``.  When *dump_dir* is supplied together with
    ``save_stats=True``, the raw ``.prof`` dump plus optional csv/dot/png
    artifacts are written into *dump_dir*.
    """

    def __init__(self, name: str, dump_dir: str = None, *args, **kwargs):
        self.stats: str = None
        # Unique run identifier: stats_<name>_<unix timestamp>.
        self.name: str = (
            f"stats_{name.strip()}_{int(datetime.now().timestamp())}"
        )
        self.dump_dir = dump_dir
        # Artifacts are written only when save_stats AND dump_dir are set.
        self.save_stats = kwargs.pop("save_stats", False) and dump_dir
        self.write_csv = kwargs.pop("write_csv", True)
        self.write_dot = kwargs.pop("write_dot", True)
        self.write_png = kwargs.pop("write_png", True)
        self._prof_file = f"{dump_dir}/{self.name}.prof"
        self._csv_file = f"{dump_dir}/{self.name}.csv"
        self._dot_file = f"{dump_dir}/{self.name}.dot"
        self._png_file = f"{dump_dir}/{self.name}.png"

    def __enter__(self, *args, **kwargs):
        self.start(*args, **kwargs)
        return self

    def __exit__(self, *args, **kwargs):
        # Stats are collected and published even when the block raised.
        self.stats_str = self.stop(*args, **kwargs)
        self._publish_stats_to_csv(self.stats_str)
        self._publish_stats_to_dot(self.stats_str)
        self._publish_stats_to_graph(self.stats_str)
        # BUG FIX: the original returned True here, which silently
        # swallowed every exception raised inside the profiled block.
        # Returning False lets exceptions propagate normally.
        return False

    def stop(self, *args, **kwargs):
        """Disable the profiler and return the sorted pstats report text."""
        self.profiler.disable()
        out = StringIO()
        stats = pstats.Stats(self.profiler, stream=out)
        self.save_stats and stats.dump_stats(self._prof_file)
        stats.sort_stats("ncalls", "tottime", "cumtime")
        stats.print_stats()
        return out.getvalue()

    def start(self, *args, **kwargs):
        """Create and enable a fresh cProfile.Profile."""
        self.profiler = cProfile.Profile()
        self.profiler.enable()
        return self

    def _publish_stats_to_dot(self, stats: str, *args, **kwargs):
        """Render the saved .prof dump as a gprof2dot call graph (.dot)."""
        if not self.save_stats or not self.write_dot:
            return
        with open(self._dot_file, "wt", encoding="UTF-8") as output:
            theme = TEMPERATURE_COLORMAP
            theme.skew = 1.0
            profile = PstatsParser(self._prof_file).parse()
            # Drop nodes below 0.5% total time / edges below 0.1%.
            profile.prune(0.5 / 100.0, 0.1 / 100.0, None, False)
            dot = DotWriter(output)
            dot.strip = False
            dot.wrap = False
            dot.show_function_events = [TOTAL_TIME_RATIO, TIME_RATIO]
            dot.graph(profile, theme)

    def _publish_stats_to_graph(self, stats: str, *args, **kwargs):
        """Placeholder: png rendering is not implemented yet."""
        if not self.save_stats or not self.write_png:
            return
        return None

    def _publish_stats_to_csv(self, stats: str, *args, **kwargs):
        """Write the tabular part of the pstats report as a csv file."""
        if not self.save_stats or not self.write_csv:
            return
        # Chop the report down to the table that starts at "ncalls".
        res = "ncalls" + stats.split("ncalls")[-1]
        # Save it to disk, one csv row per report line.
        with open(self._csv_file, "w",) as f:
            f.write(
                "\n".join(
                    [
                        self._get_csv_line_item(ln.rstrip())
                        for ln in res.split("\n")
                    ]
                )
            )

    def _get_csv_line_item(self, line):
        """Turn one pstats report line into a comma-separated row,
        keeping the "{...}" builtin names intact."""
        line = line.split("{", 2)
        if len(line) < 2:
            return ",".join(line[0].split(None, 6))
        stats, function_name = line
        stats = stats.split(None, 6)
        stats.append("{" + function_name)
        return ",".join(stats)
| true |
c268055d0ceeb78fdaf451db546635f0e73b68b6 | Python | bmdyy/projecteuler | /034.py | UTF-8 | 231 | 3.578125 | 4 | [] | no_license | import math;
def f(n):
    """Return the sum of the factorials of the decimal digits of *n*."""
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += math.factorial(digit)
    return total
# Sum every number that equals the sum of the factorials of its digits
# (Project Euler problem 34).
t = 0;
# NOTE(review): a rigorous upper bound is 7 * 9! = 2_540_160; the
# 1_000_000 cutoff used here is a heuristic -- confirm it is intended.
for i in range(10, 1000000):
    f_i = f(i);
    if i == f_i:
        t += i;
        print(i);
print('total: %d' % t);
9d5723ee04f4d4737fa60aee49b593e25bce95ad | Python | aryalavishranta/Logistic-Regression | /logistic.py | UTF-8 | 1,255 | 3.09375 | 3 | [] | no_license | #import nrceassary functions
# Import necessary libraries.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# BUG FIX: the original import line was truncated ("... import Lo") and
# `datasets` was never imported, so the script crashed on the first line.
# The code below instantiates LogisticRegression under the alias LR.
from sklearn.linear_model import LogisticRegression as LR

# Load the iris dataset.
iris = datasets.load_iris()
# Use only the first two features: sepal length and sepal width.
X = iris.data[:, :2]
Y = iris.target

# Fit a multinomial logistic-regression model.
logreg = LR(C=1e5, solver='lbfgs', multi_class='multinomial')
logreg.fit(X, Y)

# Build a mesh over the feature plane and classify each mesh point to
# visualise the decision boundaries.  (The original computed this twice;
# the first copy also derived y_max from column 0 by mistake and its
# result was immediately overwritten, so it has been removed.)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = .02  # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot.
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot the training data on top of the decision regions.
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel("Sepal length")
plt.ylabel("Sepal width")
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
3062011389ec15dfcb04fb62ceb1b0d0f6473329 | Python | yeahmanitsmike/Cryptocurrency-Push-Notifications | /btc_sms.py | UTF-8 | 1,522 | 2.828125 | 3 | [] | no_license | import os
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
import time
# CoinMarketCap Pro API key (placeholder) and endpoint URL.
api_key = '################INSERT_API_KEY_HERE##############'
api_URL = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
# Query parameters: the single top-ranked listing, quoted in USD.
parameters = {
    'start':'1',
    'limit':'1',
    'convert':'USD'
}
# Required headers: JSON responses, authenticated with the API key.
headers = {
    'Accepts': 'application/json',
    'X-CMC_PRO_API_KEY': api_key,
}
# Pull the latest BTC quote from the CoinMarketCap API and hand the
# price and 1-hour percent change to the "slack_curl" shell script.
def poster(parameters, headers):
    """Fetch the top listing and push it out; return True on success,
    False on any connection-related failure."""
    session = Session()
    session.headers.update(headers)
    try:
        # NOTE(review): 'global' is unnecessary here -- api_URL is only read.
        global api_URL
        response = session.get(api_URL, params=parameters)
        data = json.loads(response.text)
        print(data["data"][0]["quote"]["USD"]["price"])
        price_0 = data["data"][0]["quote"]["USD"]["price"]
        change_0 = data["data"][0]["quote"]["USD"]["percent_change_1h"]
        # Round to 2 decimals and pass as shell-script arguments.
        price = str(round(price_0, 2))
        change = str(round(change_0, 2))
        #print("BTC price: $" + str(price) + "\nPercent change: " + str(change) + "%")
        os.system('./slack_curl ' + price + ' ' + change)
        return True
    except (ConnectionError, Timeout, TooManyRedirects) as e:
        print(e)
        return False
# Main loop: post an update immediately, then once per hour, forever.
def main():
    global parameters
    global headers
    while(True):
        poster(parameters, headers)
        time.sleep(3600)

main()
| true |
1add70b0cb10353e58e724420a61cf6a9e4febf2 | Python | jakamrak/uvod-v-programiranje | /datoteke-s-predavanj/2017-18/09-objekti/psi-b.py | UTF-8 | 713 | 3.140625 | 3 | [] | no_license | class Pes:
def __init__(self, glas='hov'):
    """Create a dog with the given bark sound and happiness level 1."""
    self.glas = glas
    self.veselje = 1
def __repr__(self):
    """Unambiguous representation, e.g. ``Pes(glas='hov')``."""
    return f"Pes(glas={self.glas!r})"
def __str__(self):
    """Human-readable description (Slovenian, as in the rest of the file)."""
    return f"Pes, ki dela {self.glas}."
def __add__(self, other):
    """Breed two dogs: the pup barks the first half of self's sound plus
    the second half of other's, and inherits the lower happiness."""
    mama_half = self.glas[:len(self.glas) // 2]
    ata_half = other.glas[len(other.glas) // 2:]
    pup = Pes(glas=mama_half + ata_half)
    pup.veselje = min(self.veselje, other.veselje)
    return pup
def daj_glas(self):
    """Bark: print the sound repeated *veselje* times, ending with '!'."""
    print(f"{self.glas * self.veselje}!")
def razveseli(self):
    """Raise the dog's happiness level by one."""
    self.veselje = self.veselje + 1
| true |
491dfbc1799de1dc37a9d415d450ffb31a4b87e6 | Python | Keashyn/CP1404_practical_2020 | /Prac_08/unreliablecartest.py | UTF-8 | 177 | 3.140625 | 3 | [] | no_license | from Prac_08.unreliable_car import UnreliableCar
def main():
    """Construct an UnreliableCar and print its starting fuel level."""
    my_car = UnreliableCar("Mazda 3", 100, 1000)
    print(my_car.fuel)

main()
| true |
b2bede15032e4fa70d7a497cb52cb721963fa482 | Python | jinkim172/tweet-analysis | /regression/test.py | UTF-8 | 1,195 | 2.78125 | 3 | [] | no_license | import pandas
import pickle
from scipy.sparse import hstack
from sklearn.metrics import explained_variance_score as evs

# Locations of the test data and the pickled model/vectorizer objects.
input_files_dir = 'input_files/'
training_objects_dir = 'training_objects/'
testing_file = 'subset_test.csv'
model_file = 'full_subset_model.obj'
vectorizer_file = 'full_subset_vectorizer.obj'

print(f"Reading testing data from {input_files_dir + testing_file}...")
data = pandas.read_csv(input_files_dir + testing_file, header=None, encoding='latin-1')

print(f"Loading model from {training_objects_dir + model_file} and vectorizer from {training_objects_dir + vectorizer_file}...")
# SECURITY NOTE: pickle.load executes arbitrary code from the file;
# only load training objects that come from a trusted source.
with open(training_objects_dir + vectorizer_file, 'rb') as f:
    vectorizer = pickle.load(f)
with open(training_objects_dir + model_file, 'rb') as f:
    clf = pickle.load(f)

# Column 5 holds the tweet text; column 0 holds the target value.
x_test = vectorizer.transform(data.iloc[:, 5].values)
x = hstack([x_test])

# Predict on the test data.
print("Predicting test data...")
results = clf.predict(x)
# for tweet, prediction, correct in zip(data.iloc[:, 5].values, results, data.iloc[:, 0].values):
#     print(tweet, "prediction:", prediction, ", correct:", correct)
print("Explained variance score (1.0 is best):", evs(data.iloc[:, 0].values, results))
| true |
183227b0d124fd984fb611d777d1c2d4a78f0083 | Python | CodeTest-StudyGroup/Code-Test-Study | /qrlagusdn/[4]백준 삼성기출/#14500 테트로미오.py | UTF-8 | 1,791 | 3.234375 | 3 | [] | no_license | #14500 테트로미오
#아,,, N*M입니다. .ㅎㅎ N * N 이 아니라..
# ㅗ 같은 경우는 dfs로 탐색이 안된다.
#따라서 dfs 로 다른 경우들 탐색하고, ㅗ를 따로 함수 만들어서 탐색 해주었다.
import sys
MAX = -999999  # largest tetromino sum found so far

def dfs(y,x,cnt,sum,visited):
    """Depth-first search extending a connected path of cells; every
    path of length 4 covers one tetromino placement (the T-shapes,
    which are not simple paths, are handled in dfs_except)."""
    global MAX
    if cnt == 4:
        MAX = max(MAX,sum)
        return
    for dir in range(4):
        newy = y + dy[dir]
        newx = x + dx[dir]
        if 0<= newy <N and 0<=newx<M and visited[newy][newx] == 0:
            sum += MAP[newy][newx]
            visited[newy][newx] = 1
            dfs(newy, newx, cnt+1,sum,visited)
            # backtrack
            sum -= MAP[newy][newx]
            visited[newy][newx] = 0
def dfs_except(y,x):  # T-tetromino orientations (ㅗ ㅏ ㅜ ㅓ), unreachable by a simple DFS path
    """Check all four T-shaped tetromino placements anchored at (y, x)
    and update the global MAX."""
    global MAX
    if 0<= y < N-1 and 0<= x < M-2:  # ㅜ (stem pointing down)
        sum = MAP[y][x] + MAP[y][x+1] + MAP[y][x+2] + MAP[y+1][x+1]
        MAX = max(MAX,sum)
    if 0<= y < N-2 and 1<= x < M:  # ㅓ (stem pointing left)
        sum = MAP[y][x] + MAP[y+1][x] + MAP[y+2][x] + MAP[y+1][x-1]
        MAX = max(MAX,sum)
    if 1<= y <N and 0<=x <M-2:  # ㅗ (stem pointing up)
        sum = MAP[y][x] + MAP[y][x+1] + MAP[y][x+2] + MAP[y-1][x+1]
        MAX = max(MAX,sum)
    if 0<= y < N-2 and 0<= x <M-1:  # ㅏ (stem pointing right)
        sum = MAP[y][x] + MAP[y+1][x] + MAP[y+2][x] + MAP[y+1][x+1]
        MAX = max(MAX,sum)
if __name__ == "__main__":
    # Board size N x M followed by N rows of cell values on stdin.
    N, M = map(int,sys.stdin.readline().split())
    MAP = [list(map(int,sys.stdin.readline().split())) for _ in range(N)]
    # 4-neighbour row/column offsets.
    dy = [-1,1,0,0]
    dx = [0,0,-1,1]
    visited = [[0 for _ in range(M)]for _ in range(N)]
    # Try every cell as the starting point of a tetromino.
    for i in range(0,N):
        for j in range(0,M):
            visited[i][j] = 1
            dfs(i,j,1,MAP[i][j], visited)
            dfs_except(i,j)
            visited[i][j] = 0
    print(MAX)
| true |
0e389b45d43c85585e443fad6be88c85cd11f91c | Python | SelmaLeathem/Python-Reliable-Data-Transmission-Layer-Simulation | /reliable_layer.py | UTF-8 | 20,072 | 2.875 | 3 | [] | no_license | # Date: 5/1/2020
# Description: Code implementation for the reliable data transport layer. The
# book "Computer Networking a Top-Down Approach" by Kurose was
# used to as reference to determine how to implement each feature.
#
# Features include:
#
# * The number of packets sent are limited by the size of the
# flow control window.
#
# * A cumulative ack is implemented by sending one acknowledgement per every
# FLOW_CTRL_WINDOW_SIZE/STRING_DATA_LENGTH number of segments sent out. The
# value of the ACK number is the largest segment number of the group of
# packets plus the size of the payload.
#
# * Pipelining is implemented by using a cumulative ack. Groups of packets
# are sent out before receiving an acknowledgement back.
#
# * Selective retransmit is implemented by the server firstly sending an
# acknowledgement number reflecting the largest packet received without a
# gap in the data. The client in response resends all remaining packets at
# or above the acknowledgement number. This could mean anywhere from only
# one packet to the entire batch is resent.
#
# * Timeouts are implemented as described in the book "Computer Networking A
# Top-Down Approach" by Kurose. When a timeout occurs the packets are resent
# and the timeout time is doubled. After the acknowledgement is received by
# the client the timeout period goes back to its default value of one
# roundtrip time.
#
# * One roundtrip time is equivalent to 2 iterations.
from unreliable_channel import *
class ReliableLayer(object):
    """Reliable data-transport layer over an unreliable channel:
    cumulative ACKs, pipelining, selective retransmit and doubling
    timeouts (one round trip = 2 iterations)."""

    # The length of the string data that will be sent per packet...
    STRING_DATA_LENGTH = 4 # characters
    # Receive window size for flow-control
    FLOW_CTRL_WINDOW_SIZE = 15 # characters
    # The round trip time for a packet to arrive at the server and the
    # server to send back an acknowledgment
    BASIC_RTT = 2
    # The maximum timeout value (in iterations)
    RTT_MAX = 4
def __init__(self):
    """Set up empty channel references and all protocol state."""
    self.send_channel = None
    self.receive_channel = None
    self.data_to_send = ''
    self.current_iteration = 0  # <--- Use this for segment 'timeouts'
    self.seqnum = 0  # the sequence number of a segment
    self.acknum = 0  # the acknowledgement number sent by the receiver
    # cumulative size of the data sent so far, in characters
    self.size_of_data_sent = 0
    # the message received by the server
    self.message_received = ''
    # seq_num -> payload for segments received in the current batch;
    # cleared between "receive" events
    self.segments_received = {}
    # segments that arrived after a gap (dropped/delayed packet) wait
    # here until the missing data shows up
    self.segments_waiting = {}
    # ACK number the client expects if the batch arrives without issues
    self.expected_ACK = 0
    # True while the client may transmit; a new batch is only sent once
    # the previous one has been acknowledged
    self.turn_to_send = True
    # value of the last acknowledgement number received from the server
    self.last_good_server_acknum = 0
    # iteration number at which the current batch was transmitted
    self.send_time = 0
    # True while the client is retransmitting segments
    self.are_resending = False
    # current timeout value, in iterations
    self.rtt = self.BASIC_RTT
    # number of segment timeouts observed so far
    self.count_segment_timeouts = 0
# Called by main to set the unreliable sending lower-layer channel
def set_send_channel(self, channel):
    """Attach the unreliable channel used for outgoing segments."""
    self.send_channel = channel
# Called by main to set the unreliable receiving lower-layer channel
def set_receive_channel(self, channel):
    """Attach the unreliable channel used for incoming segments."""
    self.receive_channel = channel
# Called by main to set the string data to send
def set_data_to_send(self, data):
    """Set the full string this side should transmit."""
    self.data_to_send = data
# Called by main to get the currently received and buffered string data,
# in order
def get_data_received(self):
    """Return the in-order data received so far."""
    # Note: message_received is built up by
    # add_data_received(self, data, segment_numbers)
    return self.message_received
# "timeslice". Called by main once per iteration
def manage(self):
    """Run one protocol timeslice: advance the clock, then perform the
    send-side and receive-side processing."""
    self.current_iteration += 1
    self.manage_send()
    self.manage_receive()
# Manage segment sending tasks (client side).
def manage_send(self):
    """Send up to one flow-control window's worth of segments, handling
    timeouts and selective retransmission.

    A new batch goes out only when the previous one was acknowledged
    (turn_to_send).  On a timeout the timeout interval doubles and the
    unacknowledged portion is retransmitted; once the expected
    cumulative ACK arrives the interval resets to BASIC_RTT.
    """
    # If there is no data to send then exit the function
    if self.data_to_send == '':
        return
    # Make sure the timeout interval does not go over RTT_MAX
    if (self.rtt > self.RTT_MAX):
        self.rtt = self.RTT_MAX
    # Timeout check: if no ACK arrived within rtt iterations, double the
    # timeout and re-enable sending so the segments are retransmitted.
    if self.current_iteration - self.send_time >= \
            self.rtt and not self.turn_to_send:
        self.count_segment_timeouts += 1  # count this timeout
        self.rtt *= 2  # double the timeout period
        self.turn_to_send = True  # allowed to (re)send now
        self.are_resending = True  # mark the send as a retransmission
    # Still waiting for an acknowledgement: nothing to do this tick.
    if not self.turn_to_send:
        return
    # Resume sending from the last cumulative ACK the server returned.
    self.seqnum = self.last_good_server_acknum
    # If the server acknowledged less than what was sent, roll the
    # sent-data counter back and retransmit the gap (selective retransmit).
    if self.size_of_data_sent > self.last_good_server_acknum:
        self.size_of_data_sent = self.last_good_server_acknum
        self.are_resending = True
    else:  # make sure the are_resending flag is turned off
        self.are_resending = False
    # The expected ACK arrived: reset the timeout to its default value.
    if self.expected_ACK == self.last_good_server_acknum:
        self.rtt = self.BASIC_RTT
    # size (in characters) of the segment most recently sent
    payloadSize = 0
    # Keep sending segments while the data fits in the flow-control window.
    while True:
        # current seqnum = (last seqnum value) + (size of last payload)
        self.seqnum += payloadSize
        # pull the next STRING_DATA_LENGTH-character chunk to send
        data = self.data_to_send[self.seqnum: self.seqnum +
            self.STRING_DATA_LENGTH]
        payloadSize = len(data)
        # nothing left to send
        if (payloadSize == 0):
            break
        # the next chunk does not fit into the flow-control window
        if ((self.size_of_data_sent + payloadSize -
                self.last_good_server_acknum) > self.FLOW_CTRL_WINDOW_SIZE):
            break
        # when resending, only re-send up to what was sent previously
        if self.are_resending:
            if (self.size_of_data_sent + payloadSize) > self.expected_ACK:
                self.are_resending = False
                break
        # all conditions passed, so this chunk counts as sent
        self.size_of_data_sent += payloadSize
        # wrap the chunk in a Segment and hand it to the unreliable channel
        seg = Segment()
        seg.set_data(self.seqnum, data)
        # seg.dump() prints state values to screen
        self.send_channel.send(seg)
    # start the timeout clock for this batch
    self.send_time = self.current_iteration
    # cumulative ACK the server should return for this batch
    self.expected_ACK = self.size_of_data_sent
    # block further sends until the ACK arrives (or a timeout occurs)
    self.turn_to_send = False
# Manage Segment receive tasks...
# Most of error checking is done in the receive function, such as checking
# for packets out of order, missing packets and checksum checks.
#
# All incoming packets are pulled from the unreliable channel into a
# list. If the elements in the list pass the checksum checks then they
# are added to the dictionary segments_received as segments_received[
# seqnum] = payload. This dictionary is then sorted according to seqnum
# and checked to see if any sequence numbers are missing. If packets are
# missing then these are moved to a segments_waiting dictionary. Note
# that segments_received is cleared after every iteration. When in
# future iterations more packets arrive these waiting segments are then
# moved to the segments_received dictionary and if there are no gaps the
# payload contents of segments_received are added to the dataReceived
# string and an acknum is generated and sent to the client
    def manage_receive(self):
        """Pull segments from the unreliable receive channel, validate them,
        and ACK the longest in-order run of data received so far.

        Incoming ACK-type segments flip ``turn_to_send`` so the send side may
        transmit its next batch; data segments are checksum-verified, merged
        with any previously out-of-order segments, and acknowledged.
        Mutates: ``segments_received``, ``segments_waiting``, ``acknum``,
        ``last_good_server_acknum``, ``turn_to_send``, and (indirectly via
        add_data_received) the received-message buffer.
        """
        # get a list of the incoming segments from the unreliable channel
        list_incoming = self.receive_channel.receive()
        # if the list is not empty then process it
        if len(list_incoming) > 0:
            # holds the segments numbers that have arrived in the latest
            # receive
            segment_numbers = []
            # go through each segment that has arrived to determine errors
            # and whether or not the segment is an acknum
            for item in list_incoming:
                # do a checksum check on the received packets
                check_checksum_result = self.perform_checksum_check(item)
                # if the packet passes the checksum test then process it
                if check_checksum_result:
                    # variables used to add packet to dictionary[item_seq_num]
                    # = payload
                    word = item.payload
                    item_seq_num = item.seq_num
                    # if the packet is an ack number then ensure the sender
                    # retrieves it and then exit the function.
                    # NOTE(review): any segment with seq_num >= self.acknum is
                    # treated as an ACK and aborts processing of the rest of
                    # the batch -- confirm this matches the peer's framing.
                    if item_seq_num >= self.acknum or item_seq_num == -1:
                        if item_seq_num == -1:
                            # store the new ack number in the
                            # last_good_server_acknum
                            self.last_good_server_acknum = item.ack_num
                        # set the "can send" flag for the send function
                        # since the ACK has been received
                        self.turn_to_send = True
                        return
                    # store the segment numbers in a list
                    segment_numbers.append(item_seq_num)
                    # add the packet to the segments_received dictionary
                    self.segments_received[item_seq_num] = word
            # if there are packets waiting in the segments_waiting
            # dictionary then move these to the segments_received dictionary
            if len(self.segments_waiting) > 0:
                for key in self.segments_waiting.keys():
                    segment_numbers.append(key)
                    # duplicates are not allowed in dictionaries so there
                    # should be only one of each packet listed
                    self.segments_received[key] = self.segments_waiting[key]
                self.segments_waiting.clear() # clear the dictionary
            # if after some initial processing there are elements in the
            # segments_received dictionary then check for missing segments
            # and if there are none add the payloads to the receiveString
            # otherwise move the segments to the segments_waiting dictionary
            if len(self.segments_received) > 0:
                # remove duplicate segnums from list reference:
                # https://www.w3schools.com/python
                # /python_howto_remove_duplicates.asp
                segment_numbers = list(dict.fromkeys(segment_numbers))
                # check for missing segments. If there are missing segments
                # the flag missingSegments is true. Regardless of missing
                # segments or not the acknum is set to be the value of the
                # largest in order segment+payload before any potential gaps.
                missing_segments, self.acknum = \
                    self.verify_segment_numbers(segment_numbers,
                                                self.segments_received)
                # if there are no missing segments then add the payloads to
                # the receive string
                if not missing_segments:
                    self.acknum = \
                        self.add_data_received(self.segments_received,
                                               segment_numbers)
                else: # if there are missing segments then add them to the
                    # segments_waiting dictionary
                    self.add_data_waiting(self.segments_received,
                                          segment_numbers)
                self.segments_received.clear() # clear the dictionary
            # send the acknum by making a segment object and sending that
            # down the unreliable channel
            ack = Segment()
            ack.set_ack(self.acknum) # set the value of acknum
            # ack.dump() prints state values to screen
            # Use the unreliable send_channel to send the ack packet
            self.send_channel.send(ack)
# Verify that there are no missing segments by looking for gaps in the
# segment numbers. input: a list of segment numbers a dictionary of the
# form dict[segnum] = payload output: a boolean that is true if there
# are segments missing the acknum for the last segment before any
# missing segments
def verify_segment_numbers(self, segment_numbers, data):
segment_missing = False # true if segment missing
length = len(segment_numbers) # size of list
# reference: https://www.w3schools.com/python/ref_list_sort.asp
segment_numbers.sort() # sort the segment numbers
# if the first or only segment number is greater than the ACK number
# then there is at least one segment missing at the beginning
if segment_numbers[0] > self.acknum:
return True, self.acknum
# if there is only one segment in the list and it satisfies the above
# condition then by default nothing is missing
i = 0
if length == 1:
num_index = segment_numbers[i]
# acknum= segnum + payload size
last_valid_ack = segment_numbers[i] + len(data[num_index])
return False, last_valid_ack # exit the function
# for list sizes > 1 check to see if there is a gap between segment
# numbers
for i in range(length - 1):
num_index = segment_numbers[i]
if (segment_numbers[i + 1] - segment_numbers[i]) > \
len(data[num_index]):
# there is a gap between segment number so the acknum is
# that of the last valid segment number before the gap occurs
last_valid_ack = segment_numbers[i] + len(data[num_index])
return True, last_valid_ack # exit the function
# if reach this point without exiting then unless the list was size
# 1 there are no gaps or missing segments
i = length - 1
num_index = segment_numbers[i]
# acknum= segnum + payload size
last_valid_ack = segment_numbers[i] + len(data[num_index])
# resource: https://www.geeksforgeeks.org/g-fact-41-multiple-return
# -values-in-python/
return segment_missing, last_valid_ack
# Adds validated data received to the dataReceived string.
# input: a list of segment numbers
# a dictionary of the form dict[segnum] = payload
# output: the latest acknum value
def add_data_received(self, data, segment_numbers):
segment_numbers.sort() # sort the segnums to ensure adding the
# payloads in the correct order
next_num = segment_numbers[0] # initialize next_num
for i in range(len(data)):
next_num = segment_numbers[i]
self.message_received += data[next_num] # concatenated the next
# payload string
return next_num + len(data[next_num]) # return last segment number +
# segment size = new ack #
# Performs a checksum check of an element in the list returned from the
# unreliable's channel receive() function. input: a list element
# returned from the unreliable.py receive function output: true if the
# checksum is valid
def perform_checksum_check(self, item):
seg = Segment()
seg.checksum = item.checksum
seg.seq_num = item.seq_num
seg.ack_num = item.ack_num
seg.payload = item.payload
return seg.check_checksum()
# Add dictionary data to the segments_waiting dictionary.
# input: a dictionary of the form dict[segnum] = payload
# a list of segment numbers
def add_data_waiting(self, data, segment_numbers):
for i in range(len(segment_numbers)):
nextNum = segment_numbers[i]
self.segments_waiting[nextNum] = data[nextNum]
| true |
9372134b8ef60cf3460b91c57a52de73b41cccff | Python | HenziKou/CIS-210 | /Projects/p2/p22_art_show.py | UTF-8 | 1,060 | 3.859375 | 4 | [] | no_license | '''
CIS 210
Art Show
Author: Henzi Kou
Credits: N/A
Using Python program, Turtle to create a design or drawing.
'''
from turtle import *
def art_show():
    '''
    Draw a rainbow of nine horizontal colored bands with a gold sun
    (filled circle) in front of it, using the turtle graphics module.

    The calls below are order-dependent: each band is drawn left to right,
    then the pen retreats and steps up before the next color.

    Returns None.

    For example:
    >>> art_show()
    'returns an image of a rainbow behind a sun that is drawn by Turtle program'
    '''
    # Band colors, bottom (red) to top (indigo).
    rainbow_colors = ("red", "orange", "yellow", "greenyellow", "aquamarine",
                      "skyblue", "royalblue", "blueviolet", "indigo")
    reset()
    Screen()
    up()
    # Start at the bottom-left corner of the rainbow.
    goto(-320, -260)
    width(68)
    speed(5)
    for rcolor in rainbow_colors:
        color(rcolor)
        down()
        # Draw one 640-pixel band, then return and step up 66 pixels
        # (slightly less than the 68-pixel pen width so bands overlap).
        forward(640)
        up()
        backward(640)
        left(90)
        forward(66)
        right(90)
    # Draw the sun: a filled gold circle centered on the rainbow.
    width(25)
    color("gold")
    goto(0,-170)
    down()
    begin_fill()
    circle(170)
    end_fill()
    penup()
    # Park the turtle above the drawing so it doesn't obscure it.
    goto(0, 300)
    return None
| true |
86656e99f12f23c269e787ae1a008eafbb931bca | Python | mramire8/revisiting | /expert/document_expert.py | UTF-8 | 752 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | from base import BaseExpert
class TrueExpert(BaseExpert):
    """Oracle expert that simply returns the true labels it is handed.

    ``label`` echoes the supplied ground-truth labels back, so this expert
    never makes a labeling mistake; it serves as a perfect-oracle baseline.
    """
    def __init__(self, classifier):
        super(TrueExpert, self).__init__(classifier)

    def label(self, x, y=None):
        """Return the true labels *y* for instances *x*.

        Raises:
            ValueError: if *y* is not provided -- this expert cannot
                produce labels on its own. (ValueError subclasses the
                generic Exception previously raised, so existing handlers
                still catch it.)
        """
        if y is None:
            raise ValueError("True labels are missing")
        return y

    def fit(self, x, y=None):
        """No-op: the oracle needs no training. Returns self for chaining."""
        return self
class PredictingExpert(BaseExpert):
    """Expert that produces labels by delegating to a trained classifier."""
    def __init__(self, classifier):
        # Pass the classifier up to the base class for consistency with
        # TrueExpert (which forwards it); keep the local reference used by
        # label()/fit() below.
        super(PredictingExpert, self).__init__(classifier)
        self.classifier = classifier

    def label(self, x, y=None):
        """Return predicted labels for *x*; *y* is accepted but ignored."""
        return self.classifier.predict(x)

    def fit(self, x, y=None):
        """Fit the underlying classifier on (x, y) and return self."""
        self.classifier.fit(x, y)
        return self
f53d78adf62cb59f83733a97b6c34dbfa949b414 | Python | r0mdau/hellanzb | /Hellanzb/Logging.py | UTF-8 | 23,893 | 2.78125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | """
Logging - hellanzb's logging facility. Ties in with python's logging system, with an added
SCROLL log level.
The NZBLeecherTicker object will constantly print new and kill it's old lines of text on
the screen via the scroll() level. This busys the screen, but the SCROLL level hooks allow
normal logging of non-SCROLL log messages by passing those non-SCROLL messages to
NZBLeecherTicker to be handled specially (printed above the scrolling text). This special
handling is only enabled when SCROLL has been turned on (via scrollBegin())
(c) Copyright 2005 Philip Jenvey
[See end of file]
"""
import heapq, logging, os, sys, thread, types
try:
import termios
except ImportError:
termios = None
from logging import StreamHandler
from logging.handlers import RotatingFileHandler
from threading import Condition, Lock, RLock, Thread
from twisted.internet import reactor
from twisted.python import reflect, util
from twisted.python.log import startLoggingWithObserver, FileLogObserver
from Hellanzb.Util import *
__id__ = '$Id$'
class StreamHandlerNoLF(StreamHandler):
    """ A StreamHandler that doesn't append \n to every message logged to it """

    def emit(self, record):
        """ Cut/Pastse of StreamHandler's emit to not append messages with \n """
        # Identical to logging.StreamHandler.emit (Python 2 era) except the
        # "%s\n" format string is "%s": callers control their own newlines.
        try:
            msg = self.format(record)
            if not hasattr(types, "UnicodeType"): #if no unicode support...
                self.stream.write("%s" % msg)
            else:
                try:
                    self.stream.write("%s" % msg)
                except UnicodeError:
                    # Retry with an explicit UTF-8 encoding of the message
                    self.stream.write("%s" % msg.encode("UTF-8"))
            self.flush()
        except:
            # Mirrors the stdlib handler: delegate any failure to
            # Handler.handleError rather than letting logging itself raise.
            self.handleError(record)
class RotatingFileHandlerNoLF(RotatingFileHandler, StreamHandlerNoLF):
    """ A RotatingFileHandler that doesn't append \n to every message logged to it """

    def emit(self, record):
        """ Cut/Pastse of RotatingFileHandler's emit to not append messages with \n """
        # Same rollover logic as the stdlib RotatingFileHandler, but the
        # actual write is delegated to StreamHandlerNoLF.emit (no newline).
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                self.doRollover()
        StreamHandlerNoLF.emit(self, record)
class ScrollableHandler(StreamHandlerNoLF):
    """ ScrollableHandler is a StreamHandler that specially handles scrolling (log
    messages at the SCROLL level). It allows you to temporarily interrupt the constant
    scroll with other log messages of different levels (printed at the top of the scroll
    area) """

    # Custom log levels (class vars), slotted between DEBUG (10) and INFO (20)
    LOGFILE = 11
    SCROLL = 12
    SHUTDOWN = 13
    NOLOGFILE = 14

    def __init__(self, *args, **kwargs):
        # Serializes access to the scroll area while scroll mode is active
        self.scrollLock = RLock()
        self.scrollFlag = False
        StreamHandlerNoLF.__init__(self, *args, **kwargs)

    def handle(self, record):
        """ The 'scroll' level is a constant scroll that can be interrupted. This interruption is
        done via prepending text to the scroll area """
        rv = self.filter(record)
        if rv:
            if record.levelno == ScrollableHandler.SCROLL:
                self.emitSynchronized(record)
            elif record.levelno == ScrollableHandler.SHUTDOWN:
                # Pad shutdown messages with blank lines so they stand clear
                # of any scroll output still on screen
                record.msg = '\n\n\n%s\n' % record.msg
                self.emitSynchronized(record)
            else:
                self.scrollLock.acquire()
                # If scroll is on, interrupt scroll
                # (NB: checks the class-level scrollFlag set by scrollBegin,
                # not this instance's attribute)
                if ScrollableHandler.scrollFlag:
                    self.scrollHeader(record)
                else:
                    # otherwise if scroll isn't on, just log the message normally
                    self.emitSynchronized(record)
                self.scrollLock.release()

        return rv

    def emitSynchronized(self, record):
        """ Write a log message atomically. Normal python logging Handler behavior """
        self.acquire()
        try:
            self.emit(record)
        finally:
            self.release()

    def scrollHeader(self, record):
        """ Print a log message so that the user can see it during a SCROLL """
        msg = self.format(record).rstrip() # Scroller appends newline for us
        from twisted.internet import reactor
        if inMainThread():
            # FIXME: scrollBegin() should really be creating the scroller instance
            # FIXME: no unicode crap from normal python log emit
            Hellanzb.scroller.scrollHeader(msg)
        else:
            # Not in the reactor thread: marshal the call onto it
            reactor.callFromThread(Hellanzb.scroller.scrollHeader, msg)
class RecentLogEntries:
    """ Bounded FIFO of (level, message) pairs: appending beyond the
    configured size discards the least recently added entry """
    def __init__(self, size):
        self.size = size
        self.logEntries = []

    def append(self, level, logEntry):
        # Drop the oldest entry once the configured capacity is reached
        if len(self.logEntries) >= self.size:
            del self.logEntries[0]
        self.logEntries.append((level, logEntry))

    def __iter__(self):
        # Snapshot the length up front so entries appended while iterating
        # are not yielded (matches the original index-bounded walk)
        for index in range(len(self.logEntries)):
            yield self.logEntries[index]
class LogOutputStream:
    """ A minimal write-only file-like object: write() is delegated straight
    to the supplied logging function, flush/close are no-ops, and every
    reading or seeking operation raises NotImplementedError """
    def __init__(self, logFunction):
        # Bind write() directly to the logging callable
        self.write = logFunction

    def flush(self):
        pass

    def close(self):
        pass

    def isatty(self):
        raise NotImplementedError()

    def next(self):
        raise NotImplementedError()

    def read(self, n = -1):
        raise NotImplementedError()

    def readline(self, length = None):
        raise NotImplementedError()

    def readlines(self, sizehint = 0):
        raise NotImplementedError()

    def seek(self, pos, mode = 0):
        raise NotImplementedError()

    def tell(self):
        raise NotImplementedError()

    def truncate(self, size = None):
        raise NotImplementedError()

    def writelines(self, list):
        raise NotImplementedError()
class ANSICodes(object):
    """ Exposes each named ANSI terminal code as an instance attribute.
    When Hellanzb.DISABLE_ANSI is set every attribute resolves to the
    empty string instead of an escape sequence """
    # f/b_ = fore/background
    # d/l/b = dark/light/bright
    map = {
        'ESCAPE': '\033',
        'RESET': '0',
        'KILL_LINE': 'K',
        'F_DRED': '31',
        'F_LRED': '31;1',
        'F_DGREEN': '32',
        'F_LGREEN': '32;1',
        'F_BROWN': '33',
        'F_YELLOW': '33;1',
        'F_DBLUE': '34',
        'F_LBLUE': '34;1',
        'F_DMAGENTA': '35',
        'F_LMAGENTA': '35;1',
        'F_DCYAN': '36',
        'F_LCYAN': '36;1',
        'F_WHITE': '37',
        'F_BWHITE': '37;1',
        }
    def __init__(self):
        # Publish every symbolic name as an attribute holding either the
        # rendered escape string or '' when ANSI output is disabled
        for name in self.map:
            if Hellanzb.DISABLE_ANSI:
                resolved = ''
            else:
                resolved = self.code(name)
            self.__dict__[name] = resolved

    def code(self, name):
        """ Render the raw table value for name into a full escape sequence """
        sequence = self.map[name]
        if name != 'ESCAPE':
            # Prefix with the CSI introducer (ESC + '[')
            sequence = '%s[%s' % (self.map['ESCAPE'], sequence)
        if name != 'KILL_LINE':
            # Color codes are terminated by 'm'
            sequence = '%sm' % sequence
        return sequence

    def moveUp(self, count):
        """ Return ANSI codes to move the cursor up count lines """
        if Hellanzb.DISABLE_ANSI:
            return ''
        return '\r\033[%iA' % count
class HellaTwistedLogObserver(FileLogObserver):
    """ Custom twisted LogObserver. It emits twisted log entries to the debug log
    function, unless they are failures (Exceptions), which are emited to the error log
    function """
    def __init__(self, logFile=None):
        # Resolve hellanzb's log functions once at construction time
        from Hellanzb.Log import error, debug
        self.error = error
        self.debug = debug

    def emit(self, eventDict):
        # Mirrors twisted's FileLogObserver.emit message extraction, then
        # routes the result to debug() (always) and error() (failures only)
        isFailure = False
        edm = eventDict['message']
        if not edm:
            if eventDict['isError'] and eventDict.has_key('failure'):
                # A twisted Failure: include its traceback in the message
                isFailure = True
                text = ((eventDict.get('why') or 'Unhandled Error')
                        + '\n' + eventDict['failure'].getTraceback())
            elif eventDict.has_key('format'):
                text = self._safeFormat(eventDict['format'], eventDict)
            else:
                # we don't know how to log this
                return
        else:
            text = ' '.join(map(reflect.safe_str, edm))

        fmtDict = {'system': eventDict['system'], 'text': text}
        msgStr = self._safeFormat("[%(system)s] %(text)s\n", fmtDict)

        # untilConcludes retries the write if interrupted by a signal
        util.untilConcludes(self.debug, msgStr, appendLF=False)
        if isFailure:
            util.untilConcludes(self.error, msgStr, appendLF=False)
    __call__ = emit
NEWLINE_RE = re.compile('\n')
class NZBLeecherTicker:
    """ A basic logger for NZBLeecher. It's uh, not what I really want. I'd rather put more
    time into writing a curses interface. Code submissions greatly appreciated. -pjenvey

    Maintains a heap of active download segments and repaints a per-connection
    status display in place using ANSI cursor-movement codes. Non-scroll log
    messages are queued via scrollHeader() and printed above the ticker area.
    """
    def __init__(self):
        self.size = 0
        # heap of (priority, segment, color) for all connections downloading
        self.segments = []
        # color -> number of connections rendered with that color
        self.connectionCounts = {}
        # last string painted; None until the first updateLog() call
        self.currentLog = None

        self.maxCount = 0 # FIXME: var name

        ACODE = Hellanzb.ACODE
        # template for the "[NN]" prefix on each status line
        self.connectionPrefix = ACODE.F_DBLUE + '[' + ACODE.RESET + '%s' + \
            ACODE.F_DBLUE + ']' + ACODE.RESET

        # non-scroll messages waiting to be printed above the ticker
        self.scrollHeaders = []

        self.started = False
        self.killedHistory = False

        from Hellanzb.Log import scroll
        self.logger = scroll

    def addClient(self, segment, color):
        """ Add a client (it's segment) to the ticker, to log with the specified ascii color code """
        heapq.heappush(self.segments, (segment.priority, segment, color))

    def removeClient(self, segment, color):
        """ Remove a client (it's segment) from the ticker """
        self.segments.remove((segment.priority, segment, color))

    def setConnectionCount(self, color, count):
        """ Set the number of connections for the particular color """
        if color not in self.connectionCounts:
            self.connectionCounts[color] = count
        else:
            self.connectionCounts[color] += count

    def scrollHeader(self, message):
        # Even if passed multiple lines, ensure all lines are max 80 chars
        lines = message.split('\n')
        for line in lines:
            line = truncateToMultiLine(line, length = 80).expandtabs()
            self.scrollHeaders.append(line)
        if Hellanzb.SHUTDOWN:
            return
        self.updateLog()

    def killHistory(self):
        """ clear scroll off the screen """
        # Move the cursor up over the ticker area, blank each line with
        # KILL_LINE, then move back up so subsequent output starts clean
        if not self.killedHistory and self.started:
            msg = Hellanzb.ACODE.moveUp(self.maxCount + 1)
            for i in range(self.maxCount + 1):
                msg = '%s\n%s' % (msg, Hellanzb.ACODE.KILL_LINE)
            msg = '%s%s' % (msg, Hellanzb.ACODE.moveUp(self.maxCount + 1))
            if not Hellanzb.DAEMONIZE:
                self.logger(msg)
            self.killedHistory = True

        self.started = False
        # segments should be empty at this point anyway
        self.segments = []

    # FIXME: probably doesn't matter much, but should be using StringIO for concatenation
    # here, anyway
    def updateLog(self):
        """ Log ticker """
        # Repaint the whole status area: queued scroll headers first, then
        # one line per connection (sorted by segment priority), then totals
        if Hellanzb.DAEMONIZE or Hellanzb.DISABLE_SCROLLER:
            return

        ACODE = Hellanzb.ACODE
        if self.currentLog != None:
            # Kill previous lines,
            if Hellanzb.DISABLE_ANSI:
                currentLog = '\n'
            else:
                currentLog = Hellanzb.ACODE.moveUp(self.maxCount)
        else:
            # unless we have just began logging. and in that case, explicitly log the
            # first message
            currentLog = ''

        # Log information we want to prefix the scroll (so it stays on the screen)
        if len(self.scrollHeaders) > 0:
            scrollHeader = ''
            for message in self.scrollHeaders:
                message = NEWLINE_RE.sub(ACODE.KILL_LINE + '\n', message)
                scrollHeader = '%s%s%s\n' % (scrollHeader, message, ACODE.KILL_LINE)
            currentLog = '%s%s' % (currentLog, scrollHeader)

        # listing sorted via heapq
        heap = self.segments[:]
        sortedSegments = []
        colorCount = self.connectionCounts.copy()
        try:
            while True:
                p, segment, color = heapq.heappop(heap)
                colorCount[color] -= 1
                sortedSegments.append((segment, color))
        except IndexError:
            pass

        lastSegment = None
        i = 0
        for segment, color in sortedSegments:
            i += 1
            if self.maxCount > 9:
                prettyId = str(i).zfill(2)
            else:
                prettyId = str(i)

            # Determine when we've just found the real file name, then use that as the
            # show name
            try:
                if segment.nzbFile.showFilenameIsTemp == True and segment.nzbFile.filename != None:
                    segment.nzbFile.showFilename = segment.nzbFile.filename
                    segment.nzbFile.showFilenameIsTemp = False
            except AttributeError, ae:
                from Hellanzb.Log import debug
                debug('ATTRIBUTE ERROR: ' + str(ae) + ' num: ' + str(segment.number) + \
                      ' duh: ' + str(segment.articleData))
                pass

            connectionPrefix = color + '[' + ACODE.RESET + '%s' + \
                color + ']' + ACODE.RESET
            prefix = connectionPrefix % prettyId
            if lastSegment != None and lastSegment.nzbFile == segment.nzbFile:
                # Same file as the previous connection: omit the %/rate suffix
                # 57 line width -- approximately 80 - 5 (prefix) - 18 (max suffix)
                currentLog = '%s%s %s%s' % (currentLog, prefix,
                                            rtruncate(segment.nzbFile.showFilename,
                                                      length = 57), ACODE.KILL_LINE)
            else:
                currentLog = '%s%s %s - %s%2d%%%s%s @ %s%s%.1fKB/s%s' % \
                    (currentLog, prefix, rtruncate(segment.nzbFile.showFilename,
                                                   length = 57), ACODE.F_DGREEN,
                     segment.nzbFile.downloadPercentage, ACODE.RESET, ACODE.F_DBLUE,
                     ACODE.RESET, ACODE.F_DRED, segment.nzbFile.getCurrentRate(),
                     ACODE.KILL_LINE)

            currentLog = '%s\n' % currentLog
            lastSegment = segment

        # Fill in empty lines
        # (idle connections still get a numbered, blank status line)
        for color, fillCount in colorCount.iteritems():
            for count in range(fillCount):
                i += 1
                fill = i
                if self.maxCount > 9:
                    prettyId = str(fill).zfill(2)
                else:
                    prettyId = str(fill)
                connectionPrefix = color + '[' + ACODE.RESET + '%s' + \
                    color + ']' + ACODE.RESET
                prefix = connectionPrefix % prettyId
                currentLog = '%s%s%s\n' % (currentLog, prefix, ACODE.KILL_LINE)

        paused = ''
        if Hellanzb.downloadPaused:
            paused = '%s [Paused]%s' % (ACODE.F_DCYAN, ACODE.RESET)
        totalSpeed = Hellanzb.getCurrentRate()
        if totalSpeed == 0:
            eta = '00:00:00'
        else:
            eta = prettyEta((Hellanzb.queue.totalQueuedBytes / 1024) / totalSpeed)
        prefix = self.connectionPrefix % 'Total'

        # Summary line: total rate, queued MB and ETA
        currentLog = '%s%s%s %.1fKB/s%s, %s%i MB%s queued, ETA: %s%s%s%s%s' % \
            (currentLog, prefix, ACODE.F_DRED, totalSpeed, ACODE.RESET,
             ACODE.F_DGREEN, Hellanzb.queue.totalQueuedBytes / 1024 / 1024, ACODE.RESET,
             ACODE.F_YELLOW, eta, ACODE.RESET, paused, ACODE.KILL_LINE)

        self.logger(currentLog)
        self.currentLog = currentLog

        self.scrollHeaders = []
def stdinEchoOff():
""" ECHO OFF standard input """
if not termios or Hellanzb.DAEMONIZE or Hellanzb.DISABLE_SCROLLER:
return
from Hellanzb.Log import debug
try:
fd = sys.stdin.fileno()
except:
return
try:
new = termios.tcgetattr(fd)
except Exception, e:
debug('stdinEchoOn error', e)
return
new[3] = new[3] & ~termios.ECHO # 3 == 'lflags'
try:
termios.tcsetattr(fd, termios.TCSADRAIN, new)
debug('stdinEchoOff - OFF')
except Exception, e:
debug('stdinEchoOff error', e)
def stdinEchoOn():
""" ECHO ON standard input """
if not termios or getattr(Hellanzb, 'DAEMONIZE', False) \
or getattr(Hellanzb, 'DISABLE_SCROLLER', False):
return
from Hellanzb.Log import debug
try:
fd = sys.stdin.fileno()
except:
return
try:
new = termios.tcgetattr(fd)
except Exception, e:
debug('stdinEchoOn error', e)
return
new[3] = new[3] | termios.ECHO # 3 == 'lflags'
try:
termios.tcsetattr(fd, termios.TCSAFLUSH, new)
debug('stdinEchoOn - ON')
except Exception, e:
debug('stdinEchoOn error', e)
def prettyException(exception):
    """ Return a pretty rendition of the specified exception, or if no valid
    exception an empty string """
    if exception is None or not isinstance(exception, Exception):
        return ''
    message = '%s: %s' % (getLocalClassName(exception.__class__), str(exception))
    if isinstance(exception, FatalError):
        # Expected/fatal hellanzb errors carry their own message;
        # no stack trace needed
        return message
    # Unknown/unexpected exception -- also show the stack trace
    traceBuffer = StringIO()
    print_exc(file=traceBuffer)
    return '%s\n%s' % (message, traceBuffer.getvalue())
def lockScrollableHandlers(func, *args, **kwargs):
    """ Call the function with all ScrollableHandlers locked """
    lockedLoggers = []
    for logger in Hellanzb.logger.handlers:
        if isinstance(logger, ScrollableHandler):
            logger.scrollLock.acquire()
            lockedLoggers.append(logger)

    try:
        func(*args, **kwargs)
    finally:
        # Release with a plain loop (not a side-effect list comprehension),
        # and do it in a finally block so the scroll locks cannot leak if
        # func raises
        for logger in lockedLoggers:
            logger.scrollLock.release()
def initLogging():
    """ Setup logging """
    # Register hellanzb's custom log levels with the logging module
    logging.addLevelName(ScrollableHandler.LOGFILE, 'LOGFILE')
    logging.addLevelName(ScrollableHandler.SCROLL, 'SCROLL')
    logging.addLevelName(ScrollableHandler.SHUTDOWN, 'SHUTDOWN')
    logging.addLevelName(ScrollableHandler.NOLOGFILE, 'NOLOGFILE')

    Hellanzb.logger = logging.getLogger('hellanzb')
    #Hellanzb.logger.setLevel(ScrollableHandler.SCROLL)
    Hellanzb.logger.setLevel(logging.DEBUG)

    # Filter for stdout -- log warning and below
    class OutFilter(logging.Filter):
        def filter(self, record):
            if record.levelno > logging.WARNING:
                # ERROR and above go to the stderr handler instead
                return False
            # DEBUG will only go out to it's log file
            elif record.levelno == logging.DEBUG:
                return False
            return True

    outHdlr = ScrollableHandler(sys.stdout)
    outHdlr.setLevel(ScrollableHandler.SCROLL)
    outHdlr.addFilter(OutFilter())

    Hellanzb.logger.addHandler(outHdlr)

    errHdlr = ScrollableHandler(sys.stderr)
    errHdlr.setLevel(logging.ERROR)

    Hellanzb.logger.addHandler(errHdlr)

    # Whether or not scroll mode is on
    ScrollableHandler.scrollFlag = False

    # Whether or not the scroller functionality is completely disabled
    Hellanzb.DISABLE_SCROLLER = False

    # Ring buffer of the most recent (level, message) pairs
    Hellanzb.recentLogs = RecentLogEntries(20)
def initLogFile(logFile = None, debugLogFile = None):
    """ Initialize the log file. This has to be done after the config is loaded """
    # map of ascii colors. for the kids
    # This is initialized here, instead of initLogging, because it requires the config
    # file to be loaded
    Hellanzb.ACODE = ANSICodes()

    # Rotation settings come from the (already loaded) config, if present
    maxBytes = backupCount = 0
    if hasattr(Hellanzb, 'LOG_FILE_MAX_BYTES'):
        maxBytes = unPrettyBytes(Hellanzb.LOG_FILE_MAX_BYTES)
    if hasattr(Hellanzb, 'LOG_FILE_BACKUP_COUNT'):
        backupCount = Hellanzb.LOG_FILE_BACKUP_COUNT

    class LogFileFilter(logging.Filter):
        def filter(self, record):
            # SCROLL doesn't belong in log files and DEBUG will have it's own log file
            if record.levelno == ScrollableHandler.SCROLL or record.levelno == logging.DEBUG \
                    or record.levelno == ScrollableHandler.NOLOGFILE:
                return False
            return True

    # FIXME: should check if Hellanzb.LOG_FILE is set first
    # Command-line arguments override config file values
    if logFile is not None:
        Hellanzb.LOG_FILE = os.path.abspath(logFile)
    if debugLogFile is not None:
        Hellanzb.DEBUG_MODE = os.path.abspath(debugLogFile)
        # Set this, maybe again, incase the -d option was specified
        Hellanzb.DEBUG_MODE_ENABLED = True

    # Ensure the log file's parent dirs exist and are writable
    dirNames = {}
    if hasattr(Hellanzb, 'LOG_FILE') and Hellanzb.LOG_FILE is not None:
        dirNames['LOG_FILE'] = os.path.dirname(Hellanzb.LOG_FILE)
    if hasattr(Hellanzb, 'DEBUG_MODE') and Hellanzb.DEBUG_MODE is not None:
        dirNames['DEBUG_MODE'] = os.path.dirname(Hellanzb.DEBUG_MODE)
    ensureDirs(dirNames)

    if isPy2App():
        # No usable terminal when packaged as a Mac .app bundle
        Hellanzb.DISABLE_SCROLLER = True

    if Hellanzb.LOG_FILE:
        fileHdlr = RotatingFileHandlerNoLF(Hellanzb.LOG_FILE, maxBytes = maxBytes,
                                           backupCount = backupCount)
        fileHdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        fileHdlr.addFilter(LogFileFilter())

        Hellanzb.logger.addHandler(fileHdlr)

    if Hellanzb.DEBUG_MODE_ENABLED:
        # The debug file gets DEBUG records only (everything else is filtered)
        class DebugFileFilter(logging.Filter):
            def filter(self, record):
                if record.levelno > logging.DEBUG or record.levelno == ScrollableHandler.NOLOGFILE:
                    return False
                return True

        debugFileHdlr = RotatingFileHandlerNoLF(Hellanzb.DEBUG_MODE, maxBytes = maxBytes,
                                                backupCount = backupCount)
        debugFileHdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
        debugFileHdlr.setLevel(logging.DEBUG)
        debugFileHdlr.addFilter(DebugFileFilter())
        Hellanzb.logger.addHandler(debugFileHdlr)

    # Direct twisted log output via the custom LogObserver
    startLoggingWithObserver(HellaTwistedLogObserver())
"""
Copyright (c) 2005 Philip Jenvey <pjenvey@groovie.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author or contributors may not be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
$Id$
"""
| true |
4593012c3cf2f35ef98a48e4f3c403856c22c487 | Python | suchov/python_penn | /errors.py | UTF-8 | 311 | 4.3125 | 4 | [] | no_license | number = input('Please input an integer.')
#Try to cast the input
try:
number = int(number)
#Catch the raised exeption if there is an error
except ValueError as e:
print("Your input is not an ingeter.")
print(e)
#Otherwise, there is no error
else:
print(str(number) + " is indeed an integer!")
| true |
f341c5b68c26470404716e27f87872a65c999eb9 | Python | haruyasu/LeetCode | /1_array_and_strings/max_area_of_island.py | UTF-8 | 1,174 | 3.359375 | 3 | [] | no_license | class Solution:
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
direction = [[-1, 0], [1, 0], [0, 1], [0, -1]]
def dfs(i, j, grid, aera):
if not (0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] > 0):
return False
grid[i][j] *= -1
area[0] += 1
for d in direction:
dfs(i + d[0], j + d[1], grid, area)
return True
result = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
area = [0]
if dfs(i, j, grid, area):
result = max(result, area[0])
return result
# Exercise the solution on the three sample grids.
# Expected output, one per line: 6, 2, 0
sample_grids = [
    [[0,0,1,0,0,0,0,1,0,0,0,0,0],
     [0,0,0,0,0,0,0,1,1,1,0,0,0],
     [0,1,1,0,1,0,0,0,0,0,0,0,0],
     [0,1,0,0,1,1,0,0,1,0,1,0,0],
     [0,1,0,0,1,1,0,0,1,1,1,0,0],
     [0,0,0,0,0,0,0,0,0,0,1,0,0],
     [0,0,0,0,0,0,0,1,1,1,0,0,0],
     [0,0,0,0,0,0,0,1,1,0,0,0,0]],
    [[0,0,0,1,1,0,0,0]],
    [[0,0,0,0,0,0,0,0]],
]
for sample in sample_grids:
    print(Solution().maxAreaOfIsland(sample))
| true |
102fc56abcbedb2706b08161440b0f60c83c289e | Python | lkhphuc/HugoLarochelle_NN_Exercises | /mlpython/mlproblems/__init__.py | UTF-8 | 2,540 | 2.8125 | 3 | [] | no_license | # Copyright 2011 Hugo Larochelle. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY Hugo Larochelle ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Hugo Larochelle OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of Hugo Larochelle.
"""
Before a dataset can be fed to a Learner, it must first be converted
into an MLProblem.
MLProblem objects are simply iterators with some extra properties.
Hence, from an MLProblem, examples can be obtained by iterating over
the MLProblem.
MLProblem objects also contain metadata, i.e. "data about the
data". For instance, the metadata could contain information about the
size of the input or the set of all possible values for the
target. The metadata (field ``metadata`` of an MLProblem) is
represented by a dictionary mapping strings to arbitrary objects.
Finally, an MLProblem has a length, as defined by the output of
``__len__(self)``.
The ``mlproblems`` package is divided into different modules,
based on the nature of the machine learning problem or task
being implemented.
The modules are:
* ``mlproblems.generic``: MLProblems not specific to a particular task.
* ``mlproblems.classification``: classification MLProblems.
* ``mlproblems.ranking``: ranking MLProblems.
"""
| true |
6249a2cc182995e10d187424ed4018d8ee751e08 | Python | aister2020/KDDCUP_2020_MultimodalitiesRecall_3rd_Place | /code/cv_merge_score.py | UTF-8 | 893 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
import sys
import os, pickle
from collections import defaultdict
def fill(res, df):
    """Accumulate per-fold scores into *res* keyed by "<query>,<product>".

    *df* holds three unnamed columns: query id, product id, score.
    The same mapping object is returned so calls can be chained.
    """
    for row in df.values:
        pair_key = str(int(row[0])) + ',' + str(int(row[1]))
        res[pair_key].append(row[2])
    return res
def mergeScore(input_prefix,cv_num, output):
    """Average per-(query, product) scores across cv_num fold files.

    Fold i is read from ``input_prefix + str(i)`` as a headerless CSV of
    (query_id, product_id, score) rows; the mean score of each pair is
    written to *output* as ``query,product,mean``.
    """
    # key "query,product" -> list of scores seen across folds
    res = defaultdict(list)
    for i in range(cv_num):
        print("current cv fold: {}".format(i))
        df = pd.read_csv(input_prefix+str(i), sep=',', header=None)
        print(df.shape)
        res = fill(res, df)
    print('res length: {}'.format(len(res)))
    with open(output, 'w') as fout:
        for key in res:
            fout.write(key + ',' + str(sum(res[key]) / len(res[key])) + '\n')
if __name__ == '__main__':
    # Usage: python cv_merge_score.py <input_prefix> <cv_num>
    # Fold files are expected at <input_prefix>0 .. <input_prefix><cv_num-1>.
    input_prefix, cv_num = sys.argv[1:]
    cv_num = int(cv_num)
    output = input_prefix + 'merged'
mergeScore(input_prefix,cv_num, output) | true |
a347e9570f2a43d8bcc4474c6e264125a5ea97a3 | Python | AnubhavSrivastavaML/DeepLearning-Based-ImageSearch | /search_image_database.py | UTF-8 | 928 | 2.640625 | 3 | [
"MIT"
] | permissive | import cv2
import argparse
import sqlite3
import json
import numpy as np
from get_features import feature
from model import feature_extractor
from sklearn.metrics.pairwise import cosine_similarity

# --- CLI -------------------------------------------------------------------
parser = argparse.ArgumentParser()
# BUGFIX: the threshold is a cosine similarity in [0, 1]; it was declared
# type=int, which rejected any fractional value passed on the command line.
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--image', required=True)
args = parser.parse_args()

# Encode the query image with the same extractor used to build the database.
extractor = feature_extractor()
encoddings = feature(args.image, extractor)

# Fetch every stored (id, path, encoding-json) row.
query = """ SELECT * FROM tblRequest"""
with sqlite3.connect("imagedata") as conn:
    data = conn.execute(query)
    data = data.fetchall()
print("Fetched {} images from database".format(len(data)))

# Compare the query encoding against every stored encoding; copy matches
# into results/ numbered in the order they are found.
result = 0
for d in data:
    en = np.expand_dims(np.array(json.loads(d[2])), axis=0)
    cosine = cosine_similarity(en, encoddings)
    if cosine > args.threshold:
        img = cv2.imread(d[1])
        cv2.imwrite('results/' + str(result) + '.jpg', img)
        result += 1
# BUGFIX: `result` already equals the number of saved matches; the old code
# printed result + 1 (and misspelled "results"), over-counting by one.
print("Found {} results".format(result))
| true |
562d7ef62cbb51b63d04f2be8db30d4d1aa72c5d | Python | ShelMX/gb_algorithm_hw | /task_7.py | UTF-8 | 869 | 3.859375 | 4 | [
"Apache-2.0"
] | permissive | __author__ = 'Шелест Леонид Викторович'
"""
Написать программу, доказывающую или проверяющую,
что для множества натуральных чисел выполняется равенство: 1+2+...+n = n(n+1)/2,
где n – любое натуральное число.
"""
def main(n: int = None):
    """Verify the Gauss identity 1 + 2 + ... + n == n * (n + 1) / 2.

    Sums the integers explicitly, compares with the closed-form value and
    prints the outcome.  Returns the explicit sum so callers can reuse it
    (the original returned None, so this is backward compatible).

    :param n: upper bound of the sum; prompted from stdin when omitted.
    """
    n = n if n else int(input('Введи целое число больше 1 '))
    # sum(range(...)) replaces the manual while-loop counter.
    result = sum(range(1, n + 1))
    control = n * (n + 1) / 2
    if control == result:
        print(f"Сумма чисел от 1 до {n} вычисляется по формуле n*(n + 1)/2 и равна {result:,d}")
    else:
        print(f"Сумма чисел от 1 до {n} равна {result:,d}. Формула n*(n + 1)/2 = {control:,.2f}")
    return result
if __name__ == '__main__':
main()
| true |
47903c77d74986cd5dc2513d6a941e6c508e4e7e | Python | SurendraKumarAratikatla/MyLenovolapCodes1 | /Others/Location/location_tracker.py | UTF-8 | 619 | 2.734375 | 3 | [] | no_license | from kivy.metrics import dp
from kivymd.app import MDApp
from kivymd.uix.datatables import MDDataTable
class Example(MDApp):
    """Minimal KivyMD app that shows a paginated data table on start."""

    def build(self):
        # BUGFIX: the row comprehension unpacked each int yielded by
        # range(50) into two names (``for i, j in range(50)``), which raises
        # TypeError before the table is ever built.
        # NOTE(review): each row carries 6 cells while only 2 columns are
        # declared in column_data — confirm the intended column set.
        # NOTE(review): build() returns None, so the app has no root widget;
        # possibly intended since on_start opens the table as a dialog.
        self.data_tables = MDDataTable(
            size_hint=(0.9, 0.6),
            use_pagination=True,
            column_data=[
                ("Pappulu", dp(30)),
                ("Oil", dp(30)),
            ],
            row_data=[
                (f"{i + 1}", "1", "2", "3", "4", "5") for i in range(50)
            ], check = True, background_color = [.10, 0.88, 0.9, .3]
        )

    def on_start(self):
        # Show the table as soon as the app window is up.
        self.data_tables.open()
Example().run() | true |
0b9ce92aec6f9e5daf6d67a1995a2643cb764c12 | Python | Akshat-Tripathi/Conquer2AICompetition | /game_utils/visualisation/visualisation.py | UTF-8 | 1,987 | 2.6875 | 3 | [] | no_license | import sys
import tempfile
import webbrowser
import numpy as np
from itertools import groupby
visualisation_file = sys.argv[1]
map_file = sys.argv[2]
def read_map(filepath):
    """Parse an adjacency-list map file into a DOT-style edge string.

    Each line of the file is ``country neighbour1 neighbour2 ...``.
    Countries are renumbered by the sorted order of all distinct tokens,
    and every adjacency becomes ``"a -> b"``; edges are joined by "; ".
    """
    with open(filepath, "r") as file:
        text = file.read()
    tokens = sorted(tok for tok in set(text.replace("\n", " ").split(" ")) if tok != "")
    # O(1) lookups instead of repeated O(n) list.index scans per edge.
    index_of = {tok: i for i, tok in enumerate(tokens)}
    world = []
    # BUGFIX: the original stripped the last *character* of every line
    # (line[:-1]); when the file does not end with a newline this mangles
    # the final token and the subsequent index lookup fails.
    for line in text.splitlines():
        countries = line.split(" ")
        for neighbour in countries[1:]:
            world += [f"{index_of[countries[0]]} -> {index_of[neighbour]}"]
    return "; ".join(world)
def read_colours(filename):
    """Return the file's lines with their newline characters removed."""
    with open(filename) as handle:
        return [line.replace("\n", "") for line in handle]
colours = read_colours("colours.txt")
def encode_state(state):
    """Render one (countries x players) troop matrix as DOT node attributes.

    A ones column is appended as a sentinel before argmax, so an all-zero
    row maps to owner index == n_players, i.e. "unowned" (white fill,
    zero troops).  NOTE(review): for rows whose maximum troop count is
    exactly 1, argmax ties go to the lowest player index, not the
    sentinel — confirm that is intended.
    """
    owners = np.argmax(np.hstack((state, np.ones((len(state), 1)))), 1)
    encoded = []
    for j in range(len(owners)):
        owner = owners[j]
        # Defaults used when the sentinel column "won": unowned country.
        colour = "#FFFFFF"
        troops = 0
        if owner != state.shape[1]:
            colour = colours[owner]
            troops = state[j, owner]
        encoded += [f"{j} [fillcolor=\"{colour}\" label={int(troops)}]"]
    return "; ".join(encoded)
# Game states; replayed in reverse of their stored order
# (NOTE(review): presumably the .npy file stores them newest-first — confirm).
states = np.load(visualisation_file)[::-1]
world = read_map(map_file)
graphs = [f"[\'digraph {{node [style=\"filled\"]; {encode_state(state)}; {world}}}\']," for state in states]
#This is the location of the first ; in each graph
insert_index = 33
# Splice a "Turn: i" label node into the graph string right after the
# digraph preamble.
label_turn = lambda s, i: s[:insert_index] + f" turn [label=\"Turn: {i}\"]" + s[insert_index:]
# Keep only states that differ from their predecessor (skip no-op turns).
active_graphs = [label_turn(v, i) for i, v in enumerate(graphs) if i == 0 or v != graphs[i-1]]
formatted_string = "\n\t".join(active_graphs)
with open("template.html") as file:
    html = file.read().replace("[[ .graph ]]", formatted_string)
# Write the rendered page to a persistent temp file for the browser to open.
with tempfile.NamedTemporaryFile('w', delete=False, suffix='.html') as f:
    url = 'file://' + f.name
    f.write(html)
webbrowser.open(url) | true |
9a2f9ca58332a25349d8f723e126675c96866c86 | Python | dvingo/guten_better | /server/models.py | UTF-8 | 1,097 | 2.71875 | 3 | [
"MIT"
] | permissive |
from json import dumps as json_dumps
class TextSchedule:
    """Delivery schedule for emailing a Gutenberg text in daily installments."""

    def __init__(self, email, gutenberg_id, days_to_read=30, days_sent=0):
        self.email = email                  # recipient address
        self.gutenberg_id = gutenberg_id    # Project Gutenberg book id
        self.days_to_read = days_to_read    # total daily installments
        self.days_sent = days_sent          # installments already delivered

    def __str__(self):
        return self.to_json()

    def is_finished(self):
        """True once every installment has been sent."""
        return self.days_sent >= self.days_to_read

    def increment_days_sent(self):
        """Record one more delivered installment (no-op once finished)."""
        if self.is_finished():
            return
        self.days_sent += 1

    def to_json(self):
        """Serialise the schedule to a JSON string."""
        return json_dumps({'email': self.email,
                           'gutenberg_id': self.gutenberg_id,
                           'days_to_read': self.days_to_read,
                           'days_sent': self.days_sent})

    @staticmethod
    def from_json(json):
        """Build a TextSchedule from a parsed JSON dict.

        ``days_sent`` is optional and defaults to 0.
        """
        # BUGFIX: dict.has_key() was removed in Python 3; use ``in`` instead.
        return TextSchedule(email=json[u'email'],
                            gutenberg_id=json[u'gutenberg_id'],
                            days_to_read=json[u'days_to_read'],
                            days_sent=json[u'days_sent'] if u'days_sent' in json else 0)
| true |
664d5b58648f5928c208735fa59bc95f67f828d0 | Python | peva032/rjmcmc_microlensing | /plot_real_figures.py | UTF-8 | 5,176 | 2.703125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import uniform
import matplotlib.mlab as mlab
# actual = [0.01,15,10,10,0.001,0.6]
# Posterior sample chains produced by the RJMCMC run
# (file 1: single-lens model samples, file 2: binary-lens model samples).
singlechain = pd.read_csv("binary_real_rjmcmc_output_1.csv")
binarychain = pd.read_csv("binary_real_rjmcmc_output_2.csv")
class uniprior(object):
    """Uniform prior on [left, right] with draw() and pdf() helpers."""

    def __init__(self, left, right):
        self.left = left
        self.right = right
        self.range = right - left
        # BUGFIX: scipy.stats.uniform is parameterised as (loc, scale),
        # i.e. uniform(left, width).  Passing the right edge as the scale
        # made pdf() cover [left, left + right] instead of [left, right].
        self.dist = uniform(left, right - left)

    def draw(self):
        """Sample one value from the prior."""
        # BUGFIX: the original called the undefined name ``rn``.
        return np.random.uniform(self.left, self.right)

    def pdf(self, x):
        """Prior density at x (1/width inside the support, 0 outside)."""
        return self.dist.pdf(x)
# Prior supports for each microlensing parameter; used below as histogram
# and Gaussian-plot axis limits.
u0 = uniprior(0,1)
t0 = uniprior(400,600)
te = uniprior(10,100)
phi = uniprior(0,360)
d = uniprior(0,1)
q = uniprior(10e-4,0.1)
names = ['u0','t0','te','phi','q','d']
params = {'u0':u0,'t0':t0,'te':te,'phi':phi,'q':q,'d':d}
# Gaussian summary of the binary-lens chain.  NOTE(review): parameter i is
# read as mu[i + 1] / sigma[i + 1] below, apparently because column 0 of
# the CSV is an index column — confirm.
mu = binarychain.mean()
sigma = binarychain.std()
def _normal_pdf(x, mean, sd):
    """Gaussian density; replaces matplotlib.mlab.normpdf (removed in mpl 3.1)."""
    return np.exp(-0.5 * ((x - mean) / sd) ** 2) / (sd * np.sqrt(2.0 * np.pi))

plt.figure(1)
# Binary-lens posterior: one Gaussian approximation per parameter.
for i in range(6):
    prior = params[names[i]]
    k = np.linspace(prior.left, prior.right, 1000)
    peak = _normal_pdf(mu[i + 1], mu[i + 1], sigma[i + 1])
    plt.subplot(321 + i)
    plt.xlim(prior.left, prior.right)
    plt.plot(k, _normal_pdf(k, mu[i + 1], sigma[i + 1]))
    plt.vlines(mu[i + 1], [0], 1.2 * peak, 'r')
    plt.ylim(0, 1.2 * peak)
    plt.xlabel(names[i])

plt.figure(2)
# Single-lens posterior: only u0, t0 and te exist for a point lens.
# NOTE(review): this reuses mu/sigma computed from the *binary* chain,
# exactly as the original did — confirm whether the single-lens chain's
# summary was intended here instead.
for i in range(3):
    prior = params[names[i]]
    k = np.linspace(prior.left, prior.right, 1000)
    peak = _normal_pdf(mu[i + 1], mu[i + 1], sigma[i + 1])
    plt.subplot(311 + i)
    plt.xlim(prior.left, prior.right)
    plt.plot(k, _normal_pdf(k, mu[i + 1], sigma[i + 1]))
    plt.vlines(mu[i + 1], [0], 1.2 * peak, 'r')
    plt.ylim(0, 1.2 * peak)
    plt.xlabel(names[i])
def _hist_panel(position, samples, name):
    """Draw one posterior histogram panel: density bars with axes bounded
    by the parameter's prior support."""
    plt.subplot(position)
    plt.xlabel(name)
    hist, bins = np.histogram(samples, bins=15, density=True)
    widths = np.diff(bins)
    plt.bar(bins[:-1], hist, widths)
    plt.ylim((0, 1.2 * max(hist)))
    plt.xlim(params[name].left, params[name].right)

# Figure 3: single-lens chain histograms (u0, t0, te only).
plt.figure(3)
for i, name in enumerate(names[:3]):
    _hist_panel(311 + i, singlechain[name], name)

# Figure 4: binary-lens chain histograms (all six parameters).
plt.figure(4)
for i, name in enumerate(names):
    _hist_panel(321 + i, binarychain[name], name)

# Figure 5: posterior model probabilities estimated from the chain visit
# counts, with a 2-sigma binomial error bar.
count1 = singlechain.shape[0]
count2 = binarychain.shape[0]
p1 = count1/float(count1+count2)
p2 = count2/float(count1+count2)
e = 2*np.sqrt((1-p1)*p1/float(count1+count2))
plt.figure(5)
plt.scatter([1.,2.],[p1,p2])
plt.xticks([1,2],["Single Lens","Binary Lens"])
plt.xlim([0.5,2.5])
plt.errorbar([1.,2.],[p1,p2],yerr=[e,e],linestyle='None')
plt.ylim([0,1])
plt.title('Probability of Models')
plt.ylabel('Predicted Probability')
plt.xlabel('$Model$')
plt.show()
| true |
fd6736b45ec94957b210b05d3d5efaecbdf2592c | Python | tingleshao/leetcode | /minimum_depth_of_binary_tree/main.py | UTF-8 | 880 | 3.453125 | 3 | [] | no_license | # Definition for a binary tree node
class TreeNode:
    """Binary tree node with a payload and two optional children."""
    def __init__(self, x):
        self.val = x        # node payload
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution:
    # @param root, a tree node
    # @return an integer
    def minDepth(self, root):
        """Number of nodes on the shortest root-to-leaf path (0 if empty)."""
        if root is None:
            return 0
        left, right = root.left, root.right
        if left is None and right is None:
            # A leaf contributes a path of length one.
            return 1
        if right is None:
            return 1 + self.minDepth(left)
        if left is None:
            return 1 + self.minDepth(right)
        return 1 + min(self.minDepth(left), self.minDepth(right))
def main():
    """Smoke-test Solution.minDepth on an empty tree."""
    s = Solution()
    # BUGFIX: the original used the Python-2-only ``print s.minDepth(None)``
    # statement, which is a SyntaxError on Python 3; the call form below
    # behaves identically on both versions for a single argument.
    print(s.minDepth(None))
if __name__ == "__main__":
main() | true |
b2ce276043a8a54ed2aeec4b36bda33b152ff202 | Python | OliviaMoro/Python | /initialConditionsAndEigenvaluesProblems/methodTir.py | UTF-8 | 1,603 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 22:01:31 2020
@author: moroo
"""
import numpy as np
import sys, os
sys.path.append(os.path.abspath(os.path.join('..', 'integrator')))
from rungeKuttaMethod import rungeKutta4, rungeKutta2
from eulerMethod import explicitEuler, regressiveEuler
def tirEuler(n,L,y0,z0,yDer,zDer):
    """Shooting-style double sweep for the system y' = yDer, z' = zDer on [0, L].

    Marches explicit Euler forward from the left boundary to the midpoint
    and regressive (backward) Euler from the right boundary back to the
    midpoint.  y0 and z0 are indexable; their first and last entries give
    the boundary values at x = 0 and x = L.

    Returns the grid x (array of length n) and the component lists y, z.
    """
    h = L/n
    m = int(n/2)
    x = np.zeros((n)); u = np.zeros((n,2))
    x[0] = 0; x[-1] = L
    u[0,:] = [y0[0],z0[0]]
    u[-1,:] = [y0[-1],z0[-1]]
    # Forward sweep: explicit Euler from the left boundary to the midpoint.
    for i in range(1,m+1):
        (xf,uf)=explicitEuler(h,x[i-1],u[i-1][0],u[i-1][1],key1=yDer,key2=zDer)
        x[i] = xf
        u[i,:] = uf
    # Backward sweep: regressive Euler from the right boundary down to the
    # point just above the midpoint.
    for i in range(n-2,m,-1):
        (xf,uf)=regressiveEuler(h,x[i+1],u[i+1][0],u[i+1][1],key1=yDer,key2=zDer)
        x[i] = xf
        u[i,:] = uf
    # Split the (n, 2) state array into separate y and z component lists.
    y = [i for i,j in u]
    z = [j for i,j in u]
    return x,y,z
def tirRK(n,L,y0,z0,yDer,zDer,x0=0,opt=2):
    """
    Integrate the first-order system y' = yDer, z' = zDer starting at x0
    with n fixed steps of size h = L / n.  *opt* selects the stepper:
        opt = 0 : explicitEuler
        opt = 2 : rungeKutta2   (default)
        opt = 4 : rungeKutta4
    Returns the abscissas x and the component lists y, z (length n + 1).
    NOTE(review): an unrecognised opt leaves (xf, uf) unbound and raises
    NameError on the first iteration; with nonzero x0 the final abscissa
    is x0 + L, not L — confirm both are intended.
    """
    h = L/n
    x = []; u = []
    x.append(x0)
    u.append([y0,z0])
    for i in range(1,n+1):
        if(opt == 2):
            (xf,uf)= rungeKutta2(h,x[i-1],u[i-1][0],u[i-1][1],key1=yDer,key2=zDer)
        elif(opt == 4):
            (xf,uf)= rungeKutta4(h,x[i-1],u[i-1][0],u[i-1][1],key1=yDer,key2=zDer)
        elif(opt == 0):
            (xf,uf)= explicitEuler(h,x[i-1],u[i-1][0],u[i-1][1],key1=yDer,key2=zDer)
        x.append(xf)
        u.append(uf)
    # Split the accumulated [y, z] pairs into separate component lists.
    y = [i for i,j in u]
    z = [j for i,j in u]
    return x,y,z
| true |
787348aa43e2f67987b6a94f7516dd8a967c9fb0 | Python | caduceus313/Jet_Brains_Text_Based_Browser | /browser.py | UTF-8 | 3,621 | 3.28125 | 3 | [] | no_license | from os import mkdir, makedirs, path
from collections import deque
from bs4 import BeautifulSoup
from colorama import init, Fore
import argparse
import requests
init()  # colorama: enable ANSI colour codes on Windows terminals
# handles command line argument parsing
parser = argparse.ArgumentParser(description='Text based web browser')
# Directory for cached pages; defaults to 'temp' when omitted.
parser.add_argument('dirname', nargs='?', default='temp')
args = parser.parse_args()
class Browser:
    """Minimal text-mode web browser with an on-disk page cache.

    ``state`` is a tiny state machine: 'begin' (cache directory not yet
    created), 'run' (serving requests) and 'exit'.  ``url_stack`` records
    visited page keys so the 'back' command can be replayed.
    """
    def __init__(self, dir_name):
        self.dir_name = dir_name
        self.state = 'begin'
        self.url_stack = deque()
    # create directory from command line if it doesn't exist
    def make_dir(self):
        if not path.exists(self.dir_name):
            makedirs(self.dir_name)
        self.state = 'run'
    def cache_page(self, page, url):
        """Store the rendered *page* text under a normalised file name and
        push the normalised key onto the history stack."""
        url = url.replace('http://', '').replace('https://', '').rsplit('.', 1)[0] # removes prefix and .com suffix
        with open(self.dir_name + '/' + url + '.txt', 'w') as file_out:
            file_out.write(page)
        self.url_stack.append(url)
    def tag_handler(self, r_object):
        """Extract readable text from an HTTP response, colouring the text
        of hyperlinks blue via ANSI escapes."""
        # tags = ('title', 'p', 'a', 'strong', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'dt', 'ul', 'ol', 'li')
        parents = ('p', 'ul', 'ol', 'li')
        soup = BeautifulSoup(r_object.content, 'html.parser')
        line_break = ' '
        for br in soup.select('br'):
            br.replace_with(line_break)
        # use ``unwrap`` to retrieve text from `span`-tag inside other tags
        for span_tag in soup.select('span'):
            span_tag.unwrap()
        for a_tag in soup.select('a'):
            if a_tag.get('href'):
                a_tag.insert(0, Fore.BLUE)
                a_tag.append(Fore.RESET)
            if a_tag.parent and a_tag.parent.name in parents:
                a_tag.unwrap()
        # use ``smooth`` to clean up the parse tree
        # by consolidating adjacent strings
        soup.smooth()
        text_of_page = []
        # don't need find `ul` and `ol` tags,
        # because they don't have text inside
        for tag in soup.select('p, li'):
            text: str = tag.string
            if text:
                text_of_page.append(
                    str(text).replace('\n', ' ').replace('  ', ' ').strip()
                )
        return '\n'.join(text_of_page)
    # main method to deal with user input
    def address_bar(self, url):
        """Handle one address-bar entry: a command ('exit'/'back'), a cached
        page name, or a live URL to fetch, render and cache.

        Returns the page text, an error message, or None on 'exit'.
        """
        if self.state == 'begin':
            self.make_dir()
        if self.state == 'run':
            if url == "exit":
                self.state = 'exit'
                return
            elif url == "back":
                # Discard the current page, then revisit the previous one
                # (which re-pushes it onto the stack via the cache branch).
                if len(self.url_stack) > 1:
                    self.url_stack.pop()
                    return self.address_bar(self.url_stack.pop())
            elif path.isfile(self.dir_name + '/' + url + '.txt'): # check if url has been cached
                with open(self.dir_name + '/' + url + '.txt', 'r') as file_in:
                    self.url_stack.append(url)
                    return file_in.read()
            elif "." not in url:
                # No dot means it cannot be a domain name.
                return 'error'
            else:
                url = f"https://{url}" if not url.startswith(('https://', 'http://')) else url
                # print(url)
                r = requests.get(url)
                if r:
                    page = self.tag_handler(r)
                    self.cache_page(page, url)
                else:
                    page = f'Request failed with code: {r.status_code}'
                return page
        return 'error'
if __name__ == "__main__":
    myBrowser = Browser(args.dirname)
    # Simple REPL: each entered URL (or 'back'/'exit') is handled until exit.
    while myBrowser.state != 'exit':
        print(myBrowser.address_bar(input('> ')))
| true |
7c9c5fce2f08b93372cbd5d77ba47666471a62e0 | Python | ravi4all/AdvPython_JulyMorning | /01-OOPS/Inheritance/Multiple+Multilevel.py | UTF-8 | 2,812 | 2.859375 | 3 | [] | no_license | class Honda:
    def __init__(self):
        # Convenience-feature flags; every Honda ships with all three enabled.
        self.selfStart = True
        self.selfDriven = True
        self.powerSteering = True
class Maruti:
    """Base Maruti car: common attributes plus a drivetrain summary printer."""

    def __init__(self, carName, tyreBrand, hybrid, automatic, average):
        self.carName, self.tyreBrand = carName, tyreBrand
        self.hybrid, self.automatic = hybrid, automatic
        self.average = average
        # Every base model starts with an open roof.
        self.roofType = 'open'

    def showDetails(self):
        """Print whether the car is hybrid and/or automatic."""
        print("Hybrid : {}".format(self.hybrid))
        print("Automatic : {}".format(self.automatic))
class Swift(Maruti):
    """Maruti Swift: fixes the car name and tyre brand of the base model."""
    def __init__(self, hybrid, automatic, average):
        self.carName = 'Swift'
        self.tyreBrand = 'MRF'
        self.hybrid = hybrid
        self.automatic = automatic
        self.average = average
        # Run the base initialiser too, so base-only attributes such as
        # roofType are also set.
        super(Swift, self).__init__(self.carName, self.tyreBrand, self.hybrid, self.automatic, self.average)
    # function overriding
    def showDetails(self):
        """Override: print name, tyres and mileage instead of drivetrain flags."""
        print("Details of {}".format(self.carName))
        print("Tyre Brand : {}".format(self.tyreBrand))
        print("Average : {}".format(self.average))
# obj = Swift(False, False, 20)
# obj.swiftFeatures()
# obj.showDetails()
class SwiftVersion2(Swift):
    """Swift variant adding a combined feature printout."""
    def __init__(self,automatic,hybrid,average):
        self.automatic = automatic
        self.hybrid= hybrid
        self.average = average
        # Swift.__init__ takes (hybrid, automatic, average) — the argument
        # order below matches that signature.
        super().__init__(self.hybrid,self.automatic,self.average)
    def newFeatures(self):
        """Print every attribute this version exposes."""
        print("New features of {}".format(self.carName))
        print("Swift uses {}".format(self.tyreBrand))
        print("Average : {}".format(self.average))
        print("Automatic : {}".format(self.automatic))
        print("Hybrid : {}".format(self.hybrid))
        print("Roof Type : {}".format(self.roofType))
# obj = SwiftVersion2(True,True,15)
# obj.newFeatures()
class SwiftVersion3(SwiftVersion2, Honda):
    """SwiftVersion2 combined (multiple inheritance) with Honda's features."""

    def __init__(self, automatic, hybrid, average):
        self.automatic = automatic
        self.hybrid = hybrid
        self.average = average
        # BUGFIX: SwiftVersion2.__init__ takes (automatic, hybrid, average);
        # the original passed hybrid first, silently swapping the two flags.
        super(SwiftVersion3, self).__init__(self.automatic, self.hybrid, self.average)
        # BUGFIX: Maruti.__init__ does not chain to super(), so Honda's
        # initialiser never ran in the MRO chain and its feature flags were
        # never set.  Initialise the Honda side explicitly.
        Honda.__init__(self)

    def latestFeatures(self):
        """Print the combined Swift + Honda feature set."""
        print("New features of {}".format(self.carName))
        print("Swift uses {}".format(self.tyreBrand))
        print("Average : {}".format(self.average))
        print("Automatic : {}".format(self.automatic))
        print("Hybrid : {}".format(self.hybrid))
        print("Roof Type : {}".format(self.roofType))
        # BUGFIX: selfStart/selfDriven/powerSteering are *instance*
        # attributes created in Honda.__init__, not class attributes, so
        # reading them off the Honda class raised AttributeError.
        print("Self Start : {}".format(self.selfStart))
        print("Self Drive : {}".format(self.selfDriven))
        print("Power Steering : {}".format(self.powerSteering))
obj = SwiftVersion3(True, True, 20)
obj.latestFeatures() | true |
5d66928b1ecbe4a89a8e17560bef91c2b0d70cb9 | Python | Yarharm/Handwritten-Digit-Recognition | /Neural Network.py | UTF-8 | 5,862 | 3.484375 | 3 | [] | no_license | import numpy as np
import scipy.special as spec
import matplotlib.pyplot as plt
class NeuralNetwork:
    """ Three layers Neural Network with the Stochastic Gradient Descent

    Parameters
    ------------
    inodes : int
        Number of nodes in the first (input) layer of the network.
    hnodes : int
        Number of nodes in the second (hidden) layer of the network.
    onodes : int
        Number of nodes in the third (output) layer of the network.
    eta : float
        Learning rate (between 0.0 and 1.0).

    Attributes
    ------------
    wih : {array-like}, shape = [n_hnodes, n_inodes]
        Weights between layers 1 and 2, where n_hnodes is the number of
        nodes in the 2nd layer and n_inodes the number in the 1st layer.
    who : {array-like}, shape = [n_onodes, n_hnodes]
        Weights between layers 2 and 3, where n_onodes is the number of
        nodes in the 3rd layer and n_hnodes the number in the 2nd layer.
    activation_func : callable
        Sigmoid function applied element-wise (values in (0.0, 1.0)).
    """

    def __init__(self, inodes, hnodes, onodes, eta=0.1):
        self.inodes = inodes
        self.hnodes = hnodes
        self.onodes = onodes
        self.eta = eta
        # Weights drawn from a normal distribution with standard deviation
        # 1/sqrt(n) — a common heuristic to keep initial activations scaled.
        self.wih = np.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
        self.activation_func = lambda x: spec.expit(x)

    def train(self, input_list, target_list):
        """ Train the network on one example with a single SGD step.

        :param input_list : {array-like}, shape = [n_features]
            Input feature values for one sample.
        :param target_list : array-like, shape = [n_onodes]
            Target vector for the sample.
        :return : None
        """
        targets = np.array(target_list, ndmin=2).T
        inputs = np.array(input_list, ndmin=2).T
        # Forward pass through both weight layers.
        hidden_in = np.dot(self.wih, inputs)
        hidden_out = self.activation_func(hidden_in)
        final_in = np.dot(self.who, hidden_out)
        final_out = self.activation_func(final_in)
        # output_errors refines who; back-propagated hidden_errors refines wih.
        output_errors = targets - final_out
        hidden_errors = np.dot(self.who.T, output_errors)
        # BUGFIX: the learning rate is stored in self.eta; the original read
        # the never-defined self.lr, so every train() call raised
        # AttributeError.
        self.who += self.eta * np.dot((output_errors * final_out * (1.0 - final_out)), np.transpose(hidden_out))
        self.wih += self.eta * np.dot((hidden_errors * hidden_out * (1.0 - hidden_out)), np.transpose(inputs))

    def query(self, input_list):
        """ Return the network's output activations for one input.

        :param input_list : {array-like}, shape = [n_features]
            Input feature values for one sample.
        :return : array of shape [n_onodes, 1]
            Output activations; the index of the largest value is the
            predicted class.
        """
        # Convert inputs into a 2-d column vector for the dot product.
        inputs = np.array(input_list, ndmin=2).T
        # Hidden layer: sigmoid of the weighted input sum.
        hidden_in = np.dot(self.wih, inputs)
        hidden_out = self.activation_func(hidden_in)
        # Final layer.
        final_in = np.dot(self.who, hidden_out)
        return self.activation_func(final_in)
# Input, hidden and output nodes
input_nodes = 784 # 28x28 image
hidden_nodes = 100 # any reasonable number between 10 and 784, to avoid overfitting and underfitting
output_nodes = 10 # number of possible numbers between [0..9]
# Experimentally derived, through brute force( GridSearch for the hyperparameter tuning )
eta = 0.2
# Passes over the training dataset
epochs = 2
# Create NN
nn = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, eta)
# TRAINING PHASE (For 2 epochs
# Load training data from the subset of MNIST.csv of 100 elements
training_data = open("MNIST/mnist_train_10k.csv", "r")
training_list = training_data.readlines()
training_data.close()
# Train NN
for e in range(epochs):
    for label in training_list:
        # Rescale inputs from the training_list in range (0.00, 1.00]
        inputs = np.asfarray(label.split(',')[1:]) / 255 * 0.99 + 0.01
        # Create target array full of 0.01 and label of 0.99
        targets = np.zeros(output_nodes) + 0.01
        targets[int(label.split(',')[0])] = 0.99
        nn.train(inputs, targets)
# TESTING PHASE
# Load testing data from the subset of MNIST.csv of 10 elements
testing_data = open("MNIST/mnist_test_1k.csv", "r")
testing_list = testing_data.readlines()
testing_data.close()
# Score for each record in the test set
score = []
for record in testing_list:
    # First CSV field is the true digit label; the rest are pixel values.
    correct_answer = int(record.split(',')[0])
    # Print(correct_answer, "correct answer")
    # Get inputs
    inputs = np.asfarray(record.split(',')[1:]) / 255.0 * 0.99 + 0.01
    outputs = nn.query(inputs)
    # Get label spitted by the NN
    answer = np.argmax(outputs)
    # Print(answer, "network output")
    # Add 1 => correct answer/ 0 => incorrect
    if(answer == correct_answer):
        score.append(1)
    else:
        score.append(0)
# Evaluate performance
# NOTE(review): the final print labels this "%", but it is a fraction in
# [0, 1] (correct / total) — confirm the intended unit.
score_array = np.asarray(score)
print("Performance in %: ", score_array.sum() / score_array.size) | true |
663646ebe5fae27aa45d57a8651a57cbcb51336f | Python | Sal-Ali/CryptoForecaster | /CryptoForecaster/initial_training.py | UTF-8 | 6,486 | 2.921875 | 3 | [] | no_license | ''' Short note:
I am aware that the standard is to use Jupyter Notebook,
my personal preference is in Spyder and for purposes of
convenience this was what I chose, as a result code is frequently
commented in and out. On Github, I will leave all code uncommented
as this portion of code has zero bearing to my actual program besides
serving as record of exactly how I created my initial model'''
import pandas as pd
from pathlib import Path
from coin import coin
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from xgboost import XGBClassifier
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
''' Initial data gathered from gemini (free) '''
# Project feature helper in training mode (produces the price/arima/rsi/
# high/low feature vector used below).
c = coin('train')
btc_file_path = Path("D:/btc.csv")
eth_file_path = Path("D:/eth.csv")
btc_dat = pd.read_csv(btc_file_path)
eth_dat = pd.read_csv(eth_file_path)
# Cap both datasets at the first 100k candles.
btc_dat = btc_dat.head(100000)
eth_dat = eth_dat.head(100000)
btc_prices = btc_dat.Open
eth_prices = eth_dat.Open
# Accumulators filled in place by features() below.
btc_features = []
eth_features = []
btc_labels = []
eth_labels = []
# price, arima, rsi, high, low | label
def features(prices, dataset, labels, features):
    """Slide a 100-tick window over *prices*, emitting features and labels.

    For each window position, the coin helper produces the feature vector
    [price, arima, rsi, high, low]; the label is 1 (buy) when the next
    close is at or below the predicted price, otherwise 2 (sell).
    Feature vectors and labels are appended to *features* / *labels*
    in place.
    """
    window = 100
    # Generalised from the hard-coded 99900: stop when the window would run
    # past the end of the series.  Identical behaviour for the original
    # 100 000-row datasets (100000 - 100 == 99900).
    stop = len(prices) - window
    count = 0
    while count != stop:
        prices_h = [prices[count + i] for i in range(window)]
        count += 1
        stats = c.feature_maker_train(prices_h)
        features.append(stats)
        if dataset.Close[count] <= stats[0]:
            labels.append(1)  # buy
        else:
            labels.append(2)  # sell
features(btc_prices, btc_dat, btc_labels, btc_features)
features(eth_prices, eth_dat, eth_labels, eth_features)
# Snapshot labels and feature frames as pickled bytes so the later
# "cells" of this script can reload them without recomputing.
label_1 = pickle.dumps(btc_labels)
label_2 = pickle.dumps(eth_labels)
btc_f = pd.DataFrame(btc_features, columns=['price','arima','rsi','high','low'])
eth_f = pd.DataFrame(eth_features, columns=['price','arima','rsi','high','low'])
btc_features = pickle.dumps(btc_f)
eth_features = pickle.dumps(eth_f)
''' pickle split here to reduce repetitiveness '''
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Had to do it for cross-validation
btc_f = pickle.loads(btc_features)
eth_f = pickle.loads(eth_features)
btc_labels = pickle.loads(label_1)
eth_labels = pickle.loads(label_2)
btc_l = pd.Series(btc_labels)
# BUGFIX: the ETH label series was built from the BTC labels (copy-paste
# error), so the ETH model was fit and evaluated against the wrong targets.
eth_l = pd.Series(eth_labels)
btc_y = btc_l
btc_X = btc_f
train_X, test_X, train_y, test_y = train_test_split(btc_X.as_matrix(), btc_y.as_matrix(), test_size=0.01)
eth_y = eth_l
eth_X = eth_f
train_X1, test_X1, train_y1, test_y1 = train_test_split(eth_X.as_matrix(), eth_y.as_matrix(), test_size=0.01)
''' first attempt at model training '''
# Baseline: default-parameter XGBoost classifiers, one per coin.
btc_model = XGBClassifier(silent=True)
btc_model.fit(train_X, train_y, verbose=False)
btc_pred = btc_model.predict(test_X)
eth_model = XGBClassifier(silent=True)
eth_model.fit(train_X1, train_y1, verbose=False)
eth_pred = eth_model.predict(test_X1)
print(" btc Error : " + str(zero_one_loss(btc_pred, test_y)))
print(" eth Error : " + str(zero_one_loss(eth_pred, test_y1)))
''' initial accuracy is 68% and 55% ... not the best, my attempt at tuning '''
# Impute missing values ahead of the classifier inside one pipeline so the
# grid search cross-validates both steps together.
btc_pipeline = Pipeline([('imputer', Imputer()), ('xgb_c', XGBClassifier())])
eth_pipeline = Pipeline([('imputer', Imputer()), ('xgb_c', XGBClassifier())])
param_grid = {
    "xgb_c__n_estimators": [1, 10, 50, 100, 500, 1000],
    "xgb_c__learning_rate": [0.01, 0.1, 0.5, 1, 10],
    "xgb_c__early_stopping_rounds": [3, 6, 10, 12],
}
''' Yes, this took very long but I had time and was curious '''
# Early stopping monitors zero-one error on the held-out test split.
fit_params_btc = {"xgb_c__eval_set": [(test_X, test_y)],
                  "xgb_c__eval_metric": 'error',
                  "xgb_c__verbose": False}
fit_params_eth = {"xgb_c__eval_set": [(test_X1, test_y1)],
                  "xgb_c__eval_metric": 'error',
                  "xgb_c__verbose": False}
searchCV_btc = GridSearchCV(btc_pipeline, cv=5,
                            param_grid=param_grid, fit_params=fit_params_btc)
searchCV_eth = GridSearchCV(eth_pipeline, cv=5,
                            param_grid=param_grid, fit_params=fit_params_eth)
searchCV_btc.fit(train_X, train_y)
searchCV_eth.fit(train_X1, train_y1)
print(searchCV_btc.best_params_)
print(searchCV_eth.best_params_)
'''
Results,
{'xgb_c__early_stopping_rounds': 3, 'xgb_c__learning_rate': 0.1, 'xgb_c__n_estimators': 500}
{'xgb_c__early_stopping_rounds': 3, 'xgb_c__learning_rate': 1, 'xgb_c__n_estimators': 500}'''
''' Trying a little more here '''
# Refined grid centred on the first search's best values.
param_grid_2 = {
    "xgb_c__n_estimators": [500, 750],
    "xgb_c__learning_rate": [0.05, 0.1, 0.2, 1, 1.5, 2],
    "xgb_c__early_stopping_rounds": [2, 3 ,4],
}
searchCV_btc = GridSearchCV(btc_pipeline, cv=5,
                            param_grid=param_grid_2, fit_params=fit_params_btc)
searchCV_eth = GridSearchCV(eth_pipeline, cv=5,
                            param_grid=param_grid_2, fit_params=fit_params_eth)
searchCV_btc.fit(train_X, train_y)
searchCV_eth.fit(train_X1, train_y1)
print(searchCV_btc.best_params_)
print(searchCV_eth.best_params_)
''' Little surprised here...
{'xgb_c__early_stopping_rounds': 2, 'xgb_c__learning_rate': 0.05, 'xgb_c__n_estimators': 750}
{'xgb_c__early_stopping_rounds': 2, 'xgb_c__learning_rate': 0.05, 'xgb_c__n_estimators': 500}
Let's try it '''
# Retrain both models with the tuned hyper-parameters.
# BUGFIX: the BTC estimator count was spelled ``n_estiamtors`` and was
# silently swallowed by XGBClassifier's **kwargs, so BTC actually trained
# with the default number of estimators instead of 750.
btc_model = XGBClassifier(silent=True, learning_rate=0.05, early_stopping_rounds=2,
                          n_estimators=750)
btc_model.fit(train_X, train_y, verbose=False)
btc_pred = btc_model.predict(test_X)
eth_model = XGBClassifier(silent=True, learning_rate=0.05, early_stopping_rounds=2,
                          n_estimators=500)
eth_model.fit(train_X1, train_y1, verbose=False)
eth_pred = eth_model.predict(test_X1)
print(" btc Error : " + str(zero_one_loss(btc_pred, test_y)))
print(" eth Error : " + str(zero_one_loss(eth_pred, test_y1)))
''' Surprisingly, more normalized error. Marginal decrease in btc error, but this is
likely due to the fact that I actually cross validated this set.
All in all 70% and 65% accurate - better than 50/50, I'll take it for now. '''
btc_model.save_model('btc_model.model')
eth_model.save_model('eth_model.model')
btc_model.save_model('btc_model.bin')
eth_model.save_model('eth_model.bin')
# Have to be safe, saved two copies
| true |
66b9f35333627a01a89fdc519cc7c7f7786e34ad | Python | 200504225/1803 | /11-day/数字.py | UTF-8 | 152 | 3.609375 | 4 | [] | no_license | a = ['1','2','3','4','5','6']
# Empty the list by popping indices from the tail to the head so that the
# indices of elements still to be removed never shift.
for i in range(len(a)-1,-1,-1):
    a.pop(i)
print(a)
a = ['1','2','3','4','5','6']
# Iterate over a shallow copy (a[:]) so removing from the original list
# does not skip elements.
for i in a[:]:
    a.remove(i)
print(a)
| true |
93af06d2d49e6a0341d4a08006767a2903346869 | Python | bogolla/fhir-on-alchemy | /sil_fhir_server/models/questionnaireresponse.py | UTF-8 | 9,742 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/QuestionnaireResponse)
# Date: 2016-03-22.
from sqlalchemy import Column, ForeignKey
from sil_fhir_server.data_types import primitives
from . import domainresource
class QuestionnaireResponse(domainresource.DomainResource):
    """ A structured set of questions and their answers.

    A structured set of questions and their answers. The questions are ordered
    and grouped into coherent subsets, corresponding to the structure of the
    grouping of the underlying questions.
    """

    __tablename__ = "QuestionnaireResponse"

    author = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ Person who received and recorded the answers.
    Type `FHIRReference` referencing `Device, Practitioner, Patient, RelatedPerson` (represented as `dict` in JSON). """

    authored = Column(primitives.StringField, ForeignKey('FHIRDate.id'))
    """ Date this version was authored.
    Type `FHIRDate` (represented as `str` in JSON). """

    encounter = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ Primary encounter during which the answers were collected.
    Type `FHIRReference` referencing `Encounter` (represented as `dict` in JSON). """

    group = Column(primitives.StringField, ForeignKey('QuestionnaireResponseGroup.id'))
    """ Grouped questions.
    Type `QuestionnaireResponseGroup` (represented as `dict` in JSON). """

    identifier = Column(primitives.StringField, ForeignKey('Identifier.id'))
    """ Unique id for this set of answers.
    Type `Identifier` (represented as `dict` in JSON). """

    questionnaire = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ Form being answered.
    Type `FHIRReference` referencing `Questionnaire` (represented as `dict` in JSON). """

    source = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ The person who answered the questions.
    Type `FHIRReference` referencing `Patient, Practitioner, RelatedPerson` (represented as `dict` in JSON). """

    status = Column(primitives.StringField)
    """ in-progress | completed | amended.
    Type `str`. """

    subject = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ The subject of the questions.
    Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """

    def __init__(self, author, authored, encounter, group, identifier, questionnaire, source, status, subject,):
        """ Initialize all valid properties.
        """
        self.author = author
        self.authored = authored
        self.encounter = encounter
        self.group = group
        self.identifier = identifier
        self.questionnaire = questionnaire
        self.source = source
        self.status = status
        self.subject = subject

    def __repr__(self):
        # Show the business identifier instead of the old placeholder, which
        # rendered the literal string 'self.property'.
        return '<QuestionnaireResponse %r>' % self.identifier
from sqlalchemy import Column, ForeignKey
from sil_fhir_server.data_types import primitives
from . import backboneelement
class QuestionnaireResponseGroup(backboneelement.BackboneElement):
    """ Grouped questions.

    A group of questions to a possibly similarly grouped set of questions in
    the questionnaire response.
    """

    __tablename__ = "QuestionnaireResponseGroup"

    group = Column(primitives.StringField, ForeignKey('QuestionnaireResponseGroup.id'))
    """ Nested questionnaire response group.
    List of `QuestionnaireResponseGroup` items (represented as `dict` in JSON). """

    linkId = Column(primitives.StringField)
    """ Corresponding group within Questionnaire.
    Type `str`. """

    question = Column(primitives.StringField, ForeignKey('QuestionnaireResponseGroupQuestion.id'))
    """ Questions in this group.
    List of `QuestionnaireResponseGroupQuestion` items (represented as `dict` in JSON). """

    subject = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ The subject this group's answers are about.
    Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """

    text = Column(primitives.StringField)
    """ Additional text for the group.
    Type `str`. """

    title = Column(primitives.StringField)
    """ Name for this group.
    Type `str`. """

    def __init__(self, group, linkId, question, subject, text, title,):
        """ Initialize all valid properties.
        """
        self.group = group
        self.linkId = linkId
        self.question = question
        self.subject = subject
        self.text = text
        self.title = title

    def __repr__(self):
        # linkId ties this group back to its Questionnaire definition, making
        # it the most useful field for debugging output (the old placeholder
        # rendered the literal string 'self.property').
        return '<QuestionnaireResponseGroup %r>' % self.linkId
from sqlalchemy import Column, ForeignKey
from sil_fhir_server.data_types import primitives
class QuestionnaireResponseGroupQuestion(backboneelement.BackboneElement):
    """ Questions in this group.

    Set of questions within this group. The order of questions within the group
    is relevant.
    """

    __tablename__ = "QuestionnaireResponseGroupQuestion"

    answer = Column(primitives.StringField, ForeignKey('QuestionnaireResponseGroupQuestionAnswer.id'))
    """ The response(s) to the question.
    List of `QuestionnaireResponseGroupQuestionAnswer` items (represented as `dict` in JSON). """

    linkId = Column(primitives.StringField)
    """ Corresponding question within Questionnaire.
    Type `str`. """

    text = Column(primitives.StringField)
    """ Text of the question as it is shown to the user.
    Type `str`. """

    def __init__(self, answer, linkId, text,):
        """ Initialize all valid properties.
        """
        self.answer = answer
        self.linkId = linkId
        self.text = text

    def __repr__(self):
        # linkId identifies the question in the Questionnaire definition (the
        # old placeholder rendered the literal string 'self.property').
        return '<QuestionnaireResponseGroupQuestion %r>' % self.linkId
from sqlalchemy import Column, ForeignKey
from sil_fhir_server.data_types import primitives
class QuestionnaireResponseGroupQuestionAnswer(backboneelement.BackboneElement):
    """ The response(s) to the question.
    The respondent's answer(s) to the question.

    This follows the FHIR value[x] choice pattern: each answer is expected to
    populate at most one of the value* columns below, chosen by data type.
    """
    __tablename__ = "QuestionnaireResponseGroupQuestionAnswer"
    # Optional nested group rendered beneath this answer.
    group = Column(primitives.StringField, ForeignKey('QuestionnaireResponseGroup.id'))
    """ Nested questionnaire group.
    List of `QuestionnaireResponseGroup` items (represented as `dict` in JSON). """
    # --- value[x] choice columns: one of the following per answer ---
    valueAttachment = Column(primitives.StringField, ForeignKey('Attachment.id'))
    """ Single-valued answer to the question.
    Type `Attachment` (represented as `dict` in JSON). """
    valueBoolean = Column(primitives.BooleanField)
    """ Single-valued answer to the question.
    Type `bool`. """
    valueCoding = Column(primitives.StringField, ForeignKey('Coding.id'))
    """ Single-valued answer to the question.
    Type `Coding` (represented as `dict` in JSON). """
    valueDate = Column(primitives.StringField, ForeignKey('FHIRDate.id'))
    """ Single-valued answer to the question.
    Type `FHIRDate` (represented as `str` in JSON). """
    valueDateTime = Column(primitives.StringField, ForeignKey('FHIRDate.id'))
    """ Single-valued answer to the question.
    Type `FHIRDate` (represented as `str` in JSON). """
    valueDecimal = Column(primitives.DecimalField)
    """ Single-valued answer to the question.
    Type `float`. """
    valueInstant = Column(primitives.StringField, ForeignKey('FHIRDate.id'))
    """ Single-valued answer to the question.
    Type `FHIRDate` (represented as `str` in JSON). """
    valueInteger = Column(primitives.IntegerField)
    """ Single-valued answer to the question.
    Type `int`. """
    valueQuantity = Column(primitives.StringField, ForeignKey('Quantity.id'))
    """ Single-valued answer to the question.
    Type `Quantity` (represented as `dict` in JSON). """
    valueReference = Column(primitives.StringField, ForeignKey('FHIRReference.id'))
    """ Single-valued answer to the question.
    Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
    valueString = Column(primitives.StringField)
    """ Single-valued answer to the question.
    Type `str`. """
    valueTime = Column(primitives.StringField, ForeignKey('FHIRDate.id'))
    """ Single-valued answer to the question.
    Type `FHIRDate` (represented as `str` in JSON). """
    valueUri = Column(primitives.StringField)
    """ Single-valued answer to the question.
    Type `str`. """
    def __init__(self, group, valueAttachment, valueBoolean, valueCoding, valueDate, valueDateTime, valueDecimal, valueInstant, valueInteger, valueQuantity, valueReference, valueString, valueTime, valueUri,):
        """ Initialize all valid properties.
        """
        self.group = group
        self.valueAttachment = valueAttachment
        self.valueBoolean = valueBoolean
        self.valueCoding = valueCoding
        self.valueDate = valueDate
        self.valueDateTime = valueDateTime
        self.valueDecimal = valueDecimal
        self.valueInstant = valueInstant
        self.valueInteger = valueInteger
        self.valueQuantity = valueQuantity
        self.valueReference = valueReference
        self.valueString = valueString
        self.valueTime = valueTime
        self.valueUri = valueUri
def __repr__(self):
return '<QuestionnaireResponseGroupQuestionAnswer %r>' % 'self.property' # replace self.property | true |
f122fae26cb84ed2cd9c1ec6ecdbd3505a18222f | Python | Pra3t0r5/frro-soporte-2019-01 | /practico_06/capa_negocio.py | UTF-8 | 4,990 | 3.171875 | 3 | [
"MIT"
] | permissive | # Implementar los metodos de la capa de negocio de socios.
from frro_soporte_2019_01.practico_05.ejercicio_01 import Socio
from frro_soporte_2019_01.practico_05.ejercicio_02 import DatosSocio
class DniRepetido(Exception):
    """Raised when trying to store a socio whose DNI is already registered."""

    def __init__(self):
        super(DniRepetido, self).__init__('DNI repetido')
class LongitudInvalida(Exception):
    """Raised when a socio's nombre/apellido length is out of range.

    ``campos_rangos`` is a list of ``(campo, (minimo, maximo))`` tuples; one
    message line is produced per offending field.  (The previous version
    shadowed the ``min``/``max`` builtins while unpacking the ranges.)
    """

    def __init__(self, campos_rangos):
        lineas = [
            'Longitud de %s debe estar entre %s y %s\n' % (campo, minimo, maximo)
            for campo, (minimo, maximo) in campos_rangos
        ]
        super(LongitudInvalida, self).__init__(''.join(lineas))
class MaximoAlcanzado(Exception):
    """Raised when the maximum number of socios has been reached."""

    def __init__(self, maximo):
        # 'maximo' is interpolated into the (Spanish) user-facing message.
        super(MaximoAlcanzado, self).__init__('Se alcanzó el máximo de ' +
                                              str(maximo) + ' socios')
class NegocioSocio(object):
    """Business layer for socios: validates the business rules before
    delegating persistence to DatosSocio."""

    # Exclusive length bounds for nombre/apellido (valid lengths: 4..14).
    MIN_CARACTERES_ABIERTO = 3
    MAX_CARACTERES_ABIERTO = 15
    # Maximum number of socios that may be registered.
    MAX_SOCIOS = 200

    def __init__(self):
        self.datos = DatosSocio()

    def buscar(self, id_socio):
        """
        Return the socio instance with the given id, or None if not found.
        :rtype: Socio
        """
        return self.datos.buscar(id_socio)

    def buscar_dni(self, dni_socio):
        """
        Return the socio instance with the given dni, or None if not found.
        :rtype: Socio
        """
        return self.datos.buscar_dni(dni_socio)

    def todos(self):
        """
        Return the list of all socios.
        :rtype: list
        """
        return self.datos.todos()

    def alta(self, socio):
        """
        Register a socio after validating the three business rules.
        Rule violations propagate as their corresponding exception.
        Returns True on success.
        :type socio: Socio
        :rtype: bool
        """
        # The reglas either return True or raise; the previous try/except
        # wrapper that merely re-raised the exception was removed.
        if self.regla_1(socio) and self.regla_2(socio) and self.regla_3():
            self.datos.alta(socio)
            return True

    def baja(self, id_socio):
        """
        Delete the socio with the given id.
        Returns True when the deletion succeeded.
        :rtype: bool
        """
        return self.datos.baja(id_socio)

    def modificacion(self, socio):
        """
        Update a socio after validating rules 1 and 2.
        Rule violations propagate as their corresponding exception.
        Returns True on success.
        :type socio: Socio
        :rtype: bool
        """
        if self.regla_1(socio) and self.regla_2(socio):
            self.datos.modificacion(socio)
            return True

    def regla_1(self, socio):
        """
        Validate that the socio's DNI is unique (not used by another socio).
        :type socio: Socio
        :raise: DniRepetido
        :return: bool
        """
        existente = self.buscar_dni(socio.dni)
        # A socio keeps its own DNI on modification, so a match on the same
        # id is not a duplicate.
        if existente is None or existente.id == socio.id:
            return True
        raise DniRepetido()

    def regla_2(self, socio):
        """
        Validate that nombre and apellido are longer than 3 and shorter
        than 15 characters.
        :type socio: Socio
        :raise: LongitudInvalida
        :return: bool
        """
        minimo = self.MIN_CARACTERES_ABIERTO + 1
        maximo = self.MAX_CARACTERES_ABIERTO - 1
        errores = []
        if not self.MIN_CARACTERES_ABIERTO < len(socio.nombre) < self.MAX_CARACTERES_ABIERTO:
            errores.append(('nombre', (minimo, maximo)))
        if not self.MIN_CARACTERES_ABIERTO < len(socio.apellido) < self.MAX_CARACTERES_ABIERTO:
            errores.append(('apellido', (minimo, maximo)))
        if errores:
            raise LongitudInvalida(errores)
        return True

    def regla_3(self):
        """
        Validate that the maximum number of socios is not exceeded.
        :raise: MaximoAlcanzado
        :return: bool
        """
        if len(self.datos.todos()) < self.MAX_SOCIOS:
            return True
        raise MaximoAlcanzado(self.MAX_SOCIOS)

    def validar_todo(self, socio):
        """
        Run the three business rules, collecting their exceptions.
        Raises a single Exception wrapping every failure; returns True
        when all rules pass.
        :type socio: Socio
        :rtype: bool
        """
        errores = []
        for regla, arg in ((self.regla_1, socio),
                           (self.regla_2, socio),
                           (self.regla_3, None)):
            try:
                if arg is not None:
                    regla(arg)
                else:
                    regla()
            except Exception as ex:
                errores.append(ex)
        if errores:
            raise Exception(*errores)
        return True
| true |
478311c5e52fddee17f23ef7af05d61674d0d010 | Python | Archanasurendran12/python-myworks | /Fundamentals/opencvar/rotate.py | UTF-8 | 998 | 2.984375 | 3 | [] | no_license | #image rotation
# Image rotation demo: rotate a greyscale image by 90/180/270 degrees about
# its centre and display each result in turn.
import cv2

# Read the image as greyscale (flag 0).
img = cv2.imread(r'C:\Users\USER\PycharmProjects\pythonProject\Fundamentals\opencvar\unnamed.png', 0)
# Image height and width.
(h, w) = img.shape[:2]
# Centre of rotation.
center = (w / 2, h / 2)
angle90 = 90
angle180 = 180
angle270 = 270
scale = 1.0
# Perform the counter-clockwise rotations about the centre.
# NOTE(review): cv2.warpAffine expects dsize as (width, height); passing
# (h, w) is only equivalent for square images -- confirm before reusing
# this on non-square inputs.
# 90 degrees
M = cv2.getRotationMatrix2D(center, angle90, scale)
rotated90 = cv2.warpAffine(img, M, (h, w))
# 180 degrees
M = cv2.getRotationMatrix2D(center, angle180, scale)
rotated180 = cv2.warpAffine(img, M, (h, w))
# 270 degrees
M = cv2.getRotationMatrix2D(center, angle270, scale)
rotated270 = cv2.warpAffine(img, M, (h, w))
cv2.imshow('original image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Window titles fixed: "rtated" -> "rotated".
cv2.imshow('image rotated by 90 degrees', rotated90)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('image rotated by 180 degrees', rotated180)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('image rotated by 270 degrees', rotated270)
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
325b73e58ea4457a5dcd65d1bd38a6edfc1a94bb | Python | vobject/deep-nfshp | /train.py | UTF-8 | 6,429 | 2.625 | 3 | [
"MIT"
] | permissive | import os
import argparse
import datetime
import cv2
import numpy as np
import sklearn.model_selection
from tensorflow import keras
from models.nvidia import NvidiaModel
from models.comma import CommaModel
from models.tiny import TinyModel
def training_generator(image_paths, steering_angles, preproc, batch_size):
    """
    Generate training data.

    Endlessly yields (images, steers) batches.  Each batch is built from
    batch_size // 4 frames read from disk plus three augmented variants:
    the mirrored frames, keras-generated distortions of the originals, and
    the mirrored distortions.  Steering angles are negated for every
    mirrored copy.
    """
    fourth_batch_size = batch_size // 4
    # NOTE(review): slices advance by fourth_batch_size but wrap modulo
    # len(image_paths) // batch_size, which only ever visits the first
    # quarter of image_paths -- confirm whether that is intended.
    slices_per_epoch = len(image_paths) // batch_size
    # Random geometric/photometric augmentation applied to the "gen" frames.
    datagen = keras.preprocessing.image.ImageDataGenerator(
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.1,
        zoom_range=0.1,
        channel_shift_range=.1,
        fill_mode='nearest')
    training_data_slice = 0
    while True:
        slice_begin = training_data_slice * fourth_batch_size
        slice_end = slice_begin + fourth_batch_size
        # original frames
        orig_images = [preproc(cv2.imread(path)) for path in image_paths[slice_begin:slice_end]]
        orig_steers = steering_angles[slice_begin:slice_end]
        # original frames mirrored
        orig_flip_images = [cv2.flip(x, 1) for x in orig_images]
        orig_flip_steers = [-x for x in orig_steers]
        # generated frames
        gen = datagen.flow(
            x=np.stack(orig_images, axis=0),
            y=np.stack(orig_steers, axis=0),
            batch_size=fourth_batch_size,
            shuffle=False)
        gen_images, gen_steers = next(gen)
        # generated frames mirrored
        gen_flip_images = [cv2.flip(x, 1) for x in gen_images]
        gen_flip_steers = [-x for x in gen_steers]
        # Stack the four quarters into one batch of ~batch_size samples.
        images = np.concatenate((orig_images, orig_flip_images, gen_images, gen_flip_images), axis=0)
        steers = np.concatenate((orig_steers, orig_flip_steers, gen_steers, gen_flip_steers), axis=0)
        yield images, steers
        training_data_slice = (training_data_slice + 1) % slices_per_epoch
def validation_generator(image_paths, steering_angles, preproc, batch_size):
    """
    Generate cross validation data.

    Endlessly yields (images, steers) lists of length batch_size, sampling
    frames uniformly at random (with replacement) from image_paths.
    """
    while True:
        batch_images = []
        batch_steers = []
        count = len(image_paths)
        for _ in range(batch_size):
            pick = np.random.choice(count)
            batch_images.append(preproc(cv2.imread(image_paths[pick])))
            batch_steers.append(steering_angles[pick])
        yield batch_images, batch_steers
def load_validation_data(image_paths, steering_angles, preproc):
    """
    Load validation frames into memory.

    Reads every frame from disk, runs it through preproc, and returns
    (images, steers) as numpy arrays.
    """
    frames = []
    for path in image_paths:
        frames.append(preproc(cv2.imread(path)))
    return np.array(frames), np.array(steering_angles)
def read_datapoint(path):
    """
    Read an image's metadata from its file name.

    File names encode "<id>_<timestamp>_<steering>.<ext>"; returns the
    tuple (id, timestamp, steering) as (int, int, float).  Raises
    Exception when the name does not have exactly three parts.
    """
    stem = os.path.splitext(os.path.basename(path))[0]
    parts = stem.split("_")
    if len(parts) != 3:
        raise Exception('Invalid data set: ' + path)
    return int(parts[0]), int(parts[1]), float(parts[2])
def load_data(data_dir, test_size):
    """
    Load data from disk and split it into training and validation sets.

    Expects data_dir to contain sub-directories of images whose file names
    encode "<id>_<timestamp>_<steering>" (see read_datapoint).  Returns the
    4-tuple from train_test_split: X_train, X_valid, y_train, y_valid,
    where X holds image paths and y the matching steering angles.
    """
    X = []
    y = []
    sub_dirs = os.listdir(data_dir)
    for sub_dir in sub_dirs:
        sub_dir_path = os.path.join(data_dir, sub_dir)
        img_files = os.listdir(sub_dir_path)
        for img_file in img_files:
            img_path = os.path.join(sub_dir_path, img_file)
            # Only the steering angle is used as the label.
            _, _, steering = read_datapoint(img_path)
            X.append(img_path)
            y.append(steering)
    # random_state=None -> a different split on every run.
    return sklearn.model_selection.train_test_split(X, y, test_size=test_size, random_state=None)
def build_model(name, print_layer_summary=True):
    """Construct the model wrapper selected by name ('nvidia' or 'comma').

    Optionally prints the keras layer summary.  Raises ValueError for an
    unrecognized name (previously the function fell through and crashed
    with UnboundLocalError on `model`).
    """
    if name == NvidiaModel.NAME:
        model = NvidiaModel()
    elif name == CommaModel.NAME:
        model = CommaModel()
    # FIXME: adapt training_generator() for 1-channel images of TinyModel
    # elif name == TinyModel.NAME:
    #     model = TinyModel()
    else:
        raise ValueError('unknown model architecture: {}'.format(name))
    if print_layer_summary:
        model.get().summary()
    return model
def train_model(model, epochs, batch_size, X_train, X_valid, y_train, y_valid):
    """
    Train the model.

    model           -- wrapper exposing .NAME, .get() (the keras model) and
                       .preprocess (per-frame preprocessing callable)
    epochs          -- number of training epochs
    batch_size      -- samples per batch; also drives the data generator
    X_train/X_valid -- lists of image paths
    y_train/y_valid -- matching steering angles
    """
    # TensorBoard logging plus a checkpoint that keeps only the best val_loss.
    logging = keras.callbacks.TensorBoard(log_dir='logs')
    checkpoint = keras.callbacks.ModelCheckpoint('model-' + model.NAME + '-{epoch:03d}-{val_loss:.4f}.h5',
                                                 monitor='val_loss',
                                                 save_best_only=True)
    kmodel = model.get()
    kmodel.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(lr=1.0e-4))
    print('{} model compiled'.format(datetime.datetime.now()))
    # 3/4 of training data per batch is generated; we need 4x steps to get once
    # through the whole (physical) training set.
    steps_per_epoch = len(X_train) * 4 // batch_size
    # Pre-loading all validation data in memory speeds up training 10-15%.
    # Switch back to validation data generator if the data get's too large.
    X_valid_data, y_valid_data = load_validation_data(X_valid, y_valid, model.preprocess)
    kmodel.fit_generator(
        training_generator(X_train, y_train, model.preprocess, batch_size),
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_data=(X_valid_data, y_valid_data),
        #validation_data=training_generator(X_valid, y_valid, model.preprocess, batch_size),
        #validation_steps=len(X_valid) // batch_size,
        #workers=4,
        callbacks=[logging, checkpoint],
        verbose=1)
def main():
    """Entry point: parse CLI options, then load data, build and train."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-d', dest='data_dir', type=str, default='data', help='data directory')
    cli.add_argument('-t', dest='test_size', type=float, default=0.2, help='test size fraction')
    cli.add_argument('-n', dest='epochs', type=int, default=50, help='number of epochs')
    cli.add_argument('-b', dest='batch_size', type=int, default=64, help='batch size')
    cli.add_argument('-m', dest='model_name', type=str, default='nvidia', help='model architecture ({}, {}, or {})'.format(NvidiaModel.NAME, CommaModel.NAME, TinyModel.NAME))
    opts = cli.parse_args()
    print('{} start'.format(datetime.datetime.now()))
    datasets = load_data(opts.data_dir, opts.test_size)
    net = build_model(opts.model_name)
    print('{} model built'.format(datetime.datetime.now()))
    train_model(net, opts.epochs, opts.batch_size, *datasets)
    print('{} done'.format(datetime.datetime.now()))
if __name__ == '__main__':
main()
| true |
3a786bf1ed1ce41494299c2266f08aba839d8e37 | Python | alecbarreto/abarreto | /sum_square_dif.py | UTF-8 | 269 | 3.390625 | 3 | [] | no_license | def square_sum_difference():
    # Numbers 1..100 (inclusive).
    lst = range(1,101)
    sum1 = 0
    sum2 = 0
    # sum1 accumulates the sum of the squares: 1^2 + 2^2 + ... + 100^2.
    for num in lst:
        sum1 = sum1 + num**2
    # sum2 accumulates the plain sum 1 + 2 + ... + 100 and is squared once
    # the final number (100) has been added, giving the square of the sum.
    for num in lst:
        sum2 += num
        if num == 100:
            sum2 = sum2**2
    # Difference between the square of the sum and the sum of the squares
    # (the classic "sum square difference" exercise).
    return sum2-sum1
# Python 2 print statement (this file is Python 2 code).
print square_sum_difference()
| true |
787e98e457a7d7cb91b8f62e928ad7a7c91d375e | Python | leejw51/BumblebeeNet | /Test/DrawSinGraph.py | UTF-8 | 209 | 3 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pylab as plt
from numpy import arange, sin, pi, radians, degrees
from matplotlib.pylab import plot,show
# Sample the domain -360..360 degrees in 0.1-degree steps.
x = arange(-360, 360, 0.1)
# sin() expects radians, so convert the degree values first.
y = sin(radians(x))
# Plot y = sin(x) and open the interactive window.
plot(x,y)
show()
| true |
3a51f1d49e7d1617a880c15fdb2d236cb6d86b1d | Python | ShubhangiDabral13/Data_Structure | /Sorting_Ques/Bubble_Sort.py | UTF-8 | 603 | 4.4375 | 4 | [] | no_license |
#Function for bubble sort
def bubble_sort(li):
    """Sort the list in ascending order in place and return it.

    Classic bubble sort: repeatedly bubbles the largest remaining element
    to the end of the unsorted prefix.  Adds an early exit -- a full pass
    with no swaps means the list is already sorted.
    """
    n = len(li)
    for i in range(n - 1):
        swapped = False
        # After pass i the last i elements are already in place.
        for j in range(n - i - 1):
            if li[j] > li[j + 1]:
                # Swap adjacent elements that are out of order.
                li[j], li[j + 1] = li[j + 1], li[j]
                swapped = True
        if not swapped:
            break
    return li
# Read the element count, then that many integers, one per line.
m = int(input("number of element you want in list"))
li = []
for i in range(m):
    a= int(input())
    li.append(a)
#Calling the bubble_sort function and printing the sorted list
print(bubble_sort(li))
| true |
0df873f04bb1d8b07bf820a3e8db9db128035584 | Python | pyre/pyre | /packages/pyre/weaver/Make.py | UTF-8 | 4,108 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2023 all rights reserved
#
# access to the pyre package
import pyre
# my ancestor
from .LineMill import LineMill
# my declaration
class Make(LineMill):
    """
    Support for makefiles

    Each public method is a generator that yields the makefile source lines
    for one construct; callers splice the streams into the final document.
    """

    # user configurable state
    languageMarker = pyre.properties.str(default="Makefile")
    languageMarker.doc = "the language marker"

    # interface
    def builtin(self, func, args=()):
        """
        Evaluate a builtin function
        """
        # Immutable default: the previous default was a mutable list ([]),
        # a classic Python pitfall even when the list is never mutated.
        # The arguments form a comma separated list, each argument being its
        # words concatenated.
        # NOTE(review): an earlier comment said "space separated words", but
        # the inner join uses no separator -- confirm the intended rendering
        # before changing it.
        rargs = ",".join(
            "".join(word for word in words)
            # made up from the arguments to the built in
            for words in args
        )
        # render
        yield f"${{{func} {rargs}}}"
        # and done
        return

    def call(self, func, args=()):
        """
        Call a user defined function
        """
        # the arguments are a comma separated list of concatenated words
        rargs = ",".join(
            "".join(word for word in words)
            for words in args
        )
        # render
        yield f"${{call {func},{rargs}}}"
        # all done
        return

    def ifeq(self, op1, op2, onTrue, onFalse=None):
        """
        Build a conditional block
        """
        # render the operands
        rop1 = "".join(op1)
        rop2 = "".join(op2)
        # build the condition
        yield f"ifeq ({rop1},{rop2})"
        # render the true part
        yield from onTrue
        # if there is an else part
        if onFalse:
            # render
            yield "else"
            # render the false part
            yield from onFalse
        # close out
        yield "endif"
        # and done
        return

    def literal(self, value):
        """
        Render {value} as a literal
        """
        # just the value
        yield value
        # and done
        return

    def set(self, name, value="", multi=()):
        """
        Set {name} to {value} immediately
        """
        # pick the operator and delegate
        return self._set(name=name, value=value, multi=multi, op=":=")

    def seti(self, name, value="", multi=()):
        """
        Add {value} to {name}
        """
        # pick the operator and delegate
        return self._set(name=name, value=value, multi=multi, op="+=")

    def setq(self, name, value="", multi=()):
        """
        Set {name} to {value}, delaying the evaluation of the right hand side until used
        """
        # pick the operator and delegate
        return self._set(name=name, value=value, multi=multi, op="=")

    def setu(self, name, value="", multi=()):
        """
        Set {name} to {value} iff {name} is uninitialized
        """
        # pick the operator and delegate
        return self._set(name=name, value=value, multi=multi, op="?=")

    def value(self, var):
        """
        Build an expression to evaluate {var}
        """
        # easy enough
        yield f"$({var})"
        # and done
        return

    # implementation details
    def _set(self, name, value, multi, op):
        """
        Support for variable assignments
        """
        # if it's a single line assignment
        if not multi:
            # assemble the value
            rvalue = "".join(value)
            # render
            yield f"{name} {op} {rvalue}"
            # and done
            return
        # pull the continuation mark
        mark = self.continuationMark
        # prime the multiline assignment
        yield f"{name} {op} {mark}"
        # append the multiline content
        for line in multi:
            # assemble the line
            rvalue = "".join(line)
            # check whether there is anything there
            if rvalue:
                # and render it
                yield f" {rvalue} {mark}"
        # all done
        return

    # private data
    comment = "#"
    continuationMark = "\\"
# end of file
| true |
1b9caef492374276ffb71d046949bad43d624c50 | Python | elmar-hinz/Python.Vii | /vii/Range.py | UTF-8 | 6,341 | 3.421875 | 3 | [
"MIT"
] | permissive |
# Type-assertion errors raised by the Range.assert* helpers below.
class NotLinesRangeException(Exception):
    """The Range is not a pure line range (its endpoints carry columns)."""

class NotOneLineRangeException(Exception):
    """The Range does not span exactly one line."""

class NotTwoLinesRangeException(Exception):
    """The Range does not span two distinct lines."""

class NotPositionsRangeException(Exception):
    """The Range endpoints do not carry (line, column) positions."""

class NotOnePositionRangeException(Exception):
    """The Range is not a single (line, column) position."""

class NotTwoPositionsRangeException(Exception):
    """The Range does not span two distinct (line, column) positions."""
class Range:
    """
    Ranges are immutable. No copy function is needed.
    By design no manipulations. Getters return new
    objects where needed.

    A Range is a pair of (line, column) endpoints; pure line ranges store
    None in the column slots.  Accepted constructor forms:

        Range(line)                          one line
        Range(first, last)                   two lines
        Range((first, last))                 two lines
        Range(line, col, isPosition=True)    one position
        Range((line, col), isPosition=True)  one position
        Range((l1, c1), (l2, c2))            two positions

    A one-position Range may be passed wherever a position tuple is
    expected; it is collapsed to its tuple first.
    """

    def __init__(self, *args, isPosition = False):
        # Collapse any one-position Range argument into its (line, column)
        # tuple so the shape checks below only deal with ints and tuples.
        normalized = []
        for arg in args:
            if isinstance(arg, Range) and arg.isOnePosition():
                normalized.append(arg.toPositionTuple())
            else:
                normalized.append(arg)
        args = tuple(normalized)
        if len(args) == 1 and isinstance(args[0], int):
            # one line
            self.position1 = (args[0], None)
            self.position2 = (args[0], None)
        elif len(args) == 1 and len(args[0]) == 2 and not isPosition:
            # two lines packed into a single (first, last) pair
            self.position1 = (args[0][0], None)
            self.position2 = (args[0][1], None)
        elif len(args) == 1 and len(args[0]) == 2 and isPosition:
            # one position
            self.position1 = args[0]
            self.position2 = args[0]
        elif len(args) == 2:
            if isinstance(args[0], int) and isinstance(args[1], int):
                if isPosition:
                    # one position given as two ints
                    self.position1 = args
                    self.position2 = args
                else:
                    # two lines given as two ints
                    self.position1 = (args[0], None)
                    self.position2 = (args[1], None)
            elif len(args[0]) == 2 and len(args[1]) == 2:
                # two positions
                self.position1 = args[0]
                self.position2 = args[1]

    def __eq__(self, other):
        return (self.position1 == other.position1
                and self.position2 == other.position2)

    def __hash__(self):
        # Defining __eq__ alone would set __hash__ to None; since Ranges are
        # immutable, hash the endpoint pair to stay consistent with __eq__
        # and keep Range usable in sets and as dict keys.
        return hash((self.position1, self.position2))

    def __str__(self):
        return ("Range: %s, %s : %s, %s" %
                (*self.position1, *self.position2))

    def isInverse(self):
        """True when the endpoints are in descending document order."""
        if self.position1[0] > self.position2[0]:
            return True
        if (self.isPositions()
                and self.position1[0] == self.position2[0]
                and self.position1[1] > self.position2[1]):
            return True
        return False

    def isLines(self):
        """True when both endpoints are line-only (no columns)."""
        return (self.position1[1] is None
                and self.position2[1] is None)

    def isOneLine(self):
        return self.isLines() and self.position1 == self.position2

    def isTwoLines(self):
        return self.isLines() and self.position1 != self.position2

    def isPositions(self):
        """True when both endpoints carry a column."""
        return (self.position1[1] is not None
                and self.position2[1] is not None)

    def isOnePosition(self):
        return self.isPositions() and self.position1 == self.position2

    def isTwoPositions(self):
        return self.isPositions() and self.position1 != self.position2

    def assertLines(self):
        if not self.isLines():
            raise NotLinesRangeException()

    def assertOneLine(self):
        if not self.isOneLine():
            raise NotOneLineRangeException()

    def assertTwoLines(self):
        if not self.isTwoLines():
            raise NotTwoLinesRangeException()

    def assertPositions(self):
        if not self.isPositions():
            raise NotPositionsRangeException()

    def assertOnePosition(self):
        if not self.isOnePosition():
            raise NotOnePositionRangeException()

    def assertTwoPositions(self):
        if not self.isTwoPositions():
            raise NotTwoPositionsRangeException()

    def contains(self, position):
        """True when the given one-position Range lies inside this range.

        Line bounds are always checked; column bounds only matter on the
        first and last line, and only when this range carries columns.
        """
        position.assertOnePosition()
        y, x = position.toPositionTuple()
        if y < self.upperY():
            return False
        elif y > self.lowerY():
            return False
        elif (y == self.upperY()
                and self.upperX() is not None
                and x < self.upperX()):
            return False
        elif (y == self.lowerY()
                and self.lowerX() is not None
                and x > self.lowerX()):
            return False
        else:
            return True

    # Getters

    def toLineTuples(self):
        """Return the two line numbers (columns dropped)."""
        return self.position1[0], self.position2[0]

    def toPositionTuple(self):
        """Return the single (line, column) tuple; positions only."""
        self.assertPositions()
        return self.position1

    def toPositionTuples(self):
        self.assertPositions()
        return self.position1, self.position2

    def firstPosition(self):
        return Range(self.position1, isPosition = True)

    def firstY(self):
        return self.position1[0]

    def firstX(self):
        return self.position1[1]

    def lastPosition(self):
        return Range(self.position2, isPosition = True)

    def lastX(self):
        return self.position2[1]

    def lastY(self):
        return self.position2[0]

    def linewise(self):
        """Return this range reduced to a pure line range."""
        return Range(self.position1[0], self.position2[0])

    def lowerPosition(self):
        """Return the endpoint that comes later in the document."""
        if self.isInverse():
            return self.firstPosition()
        return self.lastPosition()

    def lowerX(self):
        if self.isInverse():
            return self.position1[1]
        return self.position2[1]

    def lowerY(self):
        if self.isInverse():
            return self.position1[0]
        return self.position2[0]

    def swap(self):
        """Return a new Range with the endpoints exchanged."""
        return Range(self.position2, self.position1)

    def upperPosition(self):
        """Return the endpoint that comes earlier in the document."""
        if self.isInverse():
            return self.lastPosition()
        return self.firstPosition()

    def upperX(self):
        if self.isInverse():
            return self.position2[1]
        return self.position1[1]

    def upperY(self):
        if self.isInverse():
            return self.position2[0]
        return self.position1[0]
class Position(Range):
    """A single (line, column) location expressed as a one-position Range."""

    def __init__(self, x, y):
        # Delegates to Range with isPosition=True so (x, y) is treated as
        # one position rather than a two-line range.
        # NOTE(review): Range position tuples read as (line, column); the
        # (x, y) parameter names here suggest the opposite order -- confirm
        # against the call sites.
        super().__init__(x, y, isPosition = True)
| true |
c0d6de9c2c973a4a9ecb0e759a6f34d34da639b1 | Python | shiki7/Atcoder | /ABC211/C.py | UTF-8 | 322 | 2.734375 | 3 | [] | no_license | S = input()
T = 'chokudai#'
MOD = 10 ** 9 + 7
# dp[i][j] = number of ways to choose a subsequence of S[:i] equal to T[:j],
# taken modulo MOD.  T carries a trailing '#' sentinel, so column 8 holds the
# count for the full word "chokudai".
dp = [[0 for j in range(len(T))] for i in range(len(S) + 1)]
dp[0][0] = 1
for i in range(len(S)):
    for j in range(len(T)):
        dp[i][j] %= MOD
        # Skip S[i]: the count for matching T[:j] carries over unchanged.
        dp[i + 1][j] += dp[i][j]
        if S[i] == T[j]:
            # Use S[i] to extend the matched prefix by one character.
            dp[i + 1][j + 1] += dp[i][j]
print(dp[len(S)][8] % MOD)
| true |
65f2d1fbb84ff60f4e111fe8018c65731ebd8fca | Python | Richardjb/ComputerVision | /Canny Edge Detection/main.py | UTF-8 | 28,736 | 2.671875 | 3 | [] | no_license | from PIL import Image
import numpy as np
from scipy.misc import imread
import scipy.ndimage
import cv2
from copy import copy, deepcopy
import sys
from math import pi, sqrt, exp
import math
import matplotlib.pyplot as plt
##########################################################################
## Function Name: Q1
## Function Desc.: Operations needed for question 1
## Function Return: none
##########################################################################
def Q1():
    """Run question 1: Canny edge detection on three images, three sigmas.

    Displays the edge map of each image for sigma in {0.25, 0.50, 0.75}
    (mask length 3 throughout); the author notes 0.75 worked best.
    """
    # Show every array element when printing.  threshold='nan' is rejected
    # by modern numpy ("threshold must be numeric"); np.inf disables
    # summarization on both old and new versions.
    np.set_printoptions(threshold=np.inf)
    # Read the inputs: I as a PIL image, I2/I3 as greyscale OpenCV arrays.
    I = Image.open("input.jpg")
    I2 = cv2.imread("input2.jpg", 0)
    I3 = cv2.imread("input_image.jpg", 0)
    # image 1 with different sigma
    im_25 = CannyEdgeDetection(I, .25,3)
    im_50 = CannyEdgeDetection(I, .50,3)
    im_75 = CannyEdgeDetection(I, .75,3)
    # image 2 with different sigma
    im2_25 = CannyEdgeDetection(I2, .25,3)
    im2_50 = CannyEdgeDetection(I2, .50,3)
    im2_75 = CannyEdgeDetection(I2, .75,3)
    # image 3 with different sigma
    im3_25 = CannyEdgeDetection(I3, .25,3)
    im3_50 = CannyEdgeDetection(I3, .50,3)
    im3_75 = CannyEdgeDetection(I3, .75,3)
    # Shows all 3 images with their respective sigmas
    # NOTE: the best sigma overall was .75
    cv2.imshow("Image1: 25", im_25)
    cv2.imshow("Image1: 50", im_50)
    cv2.imshow("Image1: 75", im_75)
    cv2.imshow("Image2: 25", im2_25)
    cv2.imshow("Image2: 50", im2_50)
    cv2.imshow("Image2: 75", im2_75)
    cv2.imshow("Image3: 25", im3_25)
    cv2.imshow("Image3: 50", im3_50)
    cv2.imshow("Image3: 75", im3_75)
    cv2.waitKey(0)
    return
##########################################################################
## Function Name: CannyEdgeDetection
## Function Desc.: Performs my implementation of Canny Edge Detection
## Function Arguments: I = cv2 image, sigma = gaus arg, maskLength = len
## Function Return: image as np.array
##########################################################################
def CannyEdgeDetection(I, sigma, maskLength):
    """Run the hand-rolled Canny pipeline on image I; return the edge map.

    I          -- input image (PIL image or greyscale array)
    sigma      -- standard deviation of the Gaussian smoothing kernel
    maskLength -- length of the 1D convolution masks

    Displays the gradient magnitude and the suppressed edge trace along
    the way.  NOTE(review): gausXDeriv/gausYDeriv, ProfConvolveY and
    NonMaximumSuppression are presumably defined later in this file --
    confirm their contracts.  Hysteresis thresholding is not applied
    (the thresholding call at the bottom is commented out).
    """
    # used to get image dimensions
    height, width = np.array(I).shape
    # creates 1d gaussian of the passed in Length
    G = ProfGoss(maskLength,sigma)
    pic = deepcopy(I)  # NOTE(review): 'pic' is never used afterwards
    # creates 1D Gaussian masks for the derivitive of the function in the x and y directions respectively
    # Note* the same derivative function will be used for dx && dy because they are essentially the same
    # but treated different in orientation only
    Gx = gausXDeriv(maskLength, sigma)
    Gy = gausYDeriv(maskLength, sigma)
    #Gx = [-1,0,1]
    #Gy = [-1,0,1]
    # Gets image after being convolved with mask in x-dir
    Ix = ProfConvolveX(I, G, width, height)
    # Gets image after being convolved with mask in y-dir
    Iy = ProfConvolveY(I, G, width, height)
    # convolves derivatives into already convolved image
    # converts to np.array for later calculations
    IPrimeX = np.array(ProfConvolveX(Ix,Gx,width,height))
    IPrimeY = np.array(ProfConvolveY(Iy,Gy,width,height))
    # magnitude of edge response calculation
    # (Euclidean norm of the two directional derivative responses)
    tempArray = IPrimeX * IPrimeX + IPrimeY *IPrimeY
    M = np.sqrt(tempArray)
    cv2.imshow("Magnitude", M)
    # determines the gradient for suppresion and converts to degrees
    direction = np.arctan2(IPrimeY, IPrimeX) * 180 / np.pi
    # stores result of suppression to be used with thresholding
    nonMaxResult = NonMaximumSuppression(M, direction)
    cv2.imshow("Final edge trace", nonMaxResult)
    # pauses cv2 to allow image to be seen
    cv2.waitKey(0)
    #cv2.imshow("binary", cv2.threshold(nonMaxResult,127,255,cv2.THRESH_BINARY))
    return nonMaxResult
##########################################################################
## Function Name: ProfGoss
## Function Desc.: Returns a 1D gaussian mask of len size and varied sigma
## Function Arguments: I = PIL image, sigma = gaus arg, maskLength = len
##########################################################################
def ProfGoss(size, sigma):
    """Build a 1-D Gaussian mask.

    Args:
        size: desired mask length; even values are reduced by one so the
            mask always has a single centre sample.
        sigma: standard deviation of the Gaussian.

    Returns:
        list[float]: unnormalised Gaussian samples at the integer offsets
        -(size//2) .. size//2 (symmetric about the centre element).
    """
    # forces size to be odd so the mask is symmetric about a centre element
    if size % 2 == 0:
        size -= 1
    # sigma ^ 2
    sigmaSquare = sigma * sigma
    # 1/sqrt(2*pi*sigma^2): normalisation constant of the Gaussian pdf
    mult = 1.0 / math.sqrt(2.0 * math.pi * sigmaSquare)
    # NOTE: floor division '//' is required for the range endpoints; the
    # original '/' only yielded integers under Python 2 and breaks range()
    # on Python 3.
    return [mult * math.exp(-i * i / (2 * sigmaSquare))
            for i in range(-size // 2 + 1, size // 2 + 1)]
##########################################################################
## Function Name: ProfConvolveX
## Function Desc.: Convolves image in X direction and returns result
## Function Arguments: image = Pil, mask = 1d mask, w = width, h = height
##########################################################################
def ProfConvolveX(image, mask, w, h):
    """Convolve *image* with the 1-D *mask* along the x (column) direction.

    Args:
        image: 2-D image (anything np.array() accepts).
        mask: 1-D sequence of filter taps.
        w, h: image width and height.

    Returns:
        np.array: convolved copy; dtype matches the input array, so float
        results are truncated when the input is integer — TODO confirm
        that is intended.

    NOTE(review): each output sample is divided by len(mask) (a box-style
    normalisation on top of the Gaussian weights) and is written at
    ``lastCenter`` — the last in-bounds tap — rather than at ``j``; the two
    coincide except near the right border, which looks like a border
    quirk/bug.  The ``len(mask)/2`` index arithmetic relies on Python-2
    integer division.
    """
    # creates deep copy of image
    #newImage = image.copy()
    #newImagePixels = newImage.load()
    #pixels = image.load()
    conversion = deepcopy(np.array(image))
    pixels = deepcopy(np.array(image))
    # loop for width then length
    for i in range (0, h):
        for j in range (0, w):
            sum = 0.0
            count = 0  # unused
            lastCenter = 0
            # used to calculate offset for array
            for k in range (-len(mask)/2 + 1, len(mask)/2 + 1):
                # actual index in image arr
                nj = j + k
                # in the event offset is out of bounds
                if (nj < 0 or nj >= w):
                    continue
                lastCenter = nj
                # performs array index * mask index and adds to sum
                # k + len(mask)/2 = center + offset -k -> k
                sum += mask[k + len(mask)/2] * pixels[i,nj]
            conversion[i,lastCenter] = sum / len (mask)
            sum = 0  # redundant: re-initialised at the top of the loop
    return conversion
##########################################################################
## Function Name: ProfConvolveY
## Function Desc.: Convolves image in Y direction and returns result
## Function Arguments: image = Pil, mask = 1d mask, w = width, h = height
##########################################################################
def ProfConvolveY(image, mask, w, h):
    """Convolve *image* with the 1-D *mask* along the y (row) direction.

    Mirror of ProfConvolveX with rows and columns swapped; see its notes —
    the same ``lastCenter`` write position, len(mask) normalisation and
    Python-2 integer-division caveats apply here.
    """
    # creates deep copy of image
    conversion = deepcopy(np.array(image))
    pixels = deepcopy(np.array(image))
    #formatted pixel[width,height]
    for j in range (0, w):
        for i in range (0, h):
            sum = 0
            count = 0  # unused
            lastCenter = 0
            for k in range (-len(mask)/2 + 1, len(mask)/2 + 1):
                nj = i + k
                if (nj < 0 or nj >= h):
                    continue
                lastCenter = nj
                #count += mask[k + len(mask)/2]
                sum += mask[k + len(mask)/2] * pixels[nj,j]
            conversion[lastCenter,j] = sum / len(mask)
    return conversion
##########################################################################
## Function Name: NonMaximumSuppression
## Function Desc.: Performs nonMaximumSuppression
## image and mask
##########################################################################
def NonMaximumSuppression(magnitude, dir):
    """Quantise gradient directions, suppress along the gradient, then
    apply double thresholding.

    Args:
        magnitude: 2-D np.array of edge magnitudes.
        dir: 2-D np.array of gradient directions in degrees (shadows the
            builtin ``dir`` — left unchanged to preserve the interface).

    Returns:
        np.array: copy of the suppressed magnitude after the low-threshold
        pass.

    NOTE(review) — several things look inverted/ineffective; confirm intent:
      * the suppression condition zeroes pixels that are GREATER than both
        neighbours, i.e. it removes local maxima instead of keeping them
        (textbook NMS keeps the maximum);
      * edgeDetect() copies its input (np.array), so the call below does
        not modify magCpy — the two imshow windows around it show the same
        data;
      * retArray is snapshotted BEFORE the final high-threshold pruning
        loop, so that loop's zeroing never reaches the returned array;
      * negative directions (arctan2 yields [-180, 180]) all fall into the
        ``q > -350`` bucket and are mapped to 0 degrees rather than folded
        onto their positive equivalents.
    """
    dirCpy = deepcopy(dir)
    magCpy = deepcopy(magnitude)
    # obtains img dimensions
    size = magnitude.shape # formatted (Height, Width)
    # sets width to image width
    width = size[1]
    # sets height to image height
    height = size[0]
    edge = deepcopy(magCpy)  # unused except the commented-out imshow below
    # rounds angles to nearest degree based on 22.5(degree) difference
    for a in range (0, height):
        for b in range (0, width):
            #sets edges to blank white canvas
            #edge[a][b] = 255
            q = dirCpy[a][b]
            if (q <= 180 and q > 124):
                dirCpy[a][b] = 135
            if (q <= 124 and q > 101):
                dirCpy[a][b] = 112.5
            if (q <= 101 and q > 79):
                dirCpy[a][b] = 90
            if (q <= 79 and q > 56):
                dirCpy[a][b] = 67.5
            if (q <= 56 and q > 34):
                dirCpy[a][b] = 45
            if (q <= 34 and q > 11):
                dirCpy[a][b] =22.5
            if (q <= 11 and q > -350):
                dirCpy[a][b] = 0
    #print dirCpy
    # since all angles have been rounded it is easy to switch between possibilities
    # check pixel along gradient direction to determine if it is local maximum
    # NOTE: the edge is perpendicular to gradient
    for y in range (0, height):
        for x in range (0, width):
            #TODO: 112.5
            if (dirCpy[y][x] == 135):
                if ((y + 1 < height and x + 1 < width) and (y - 1 >= 0 and x - 1 >= 0)):
                    if (magCpy[y][x] > magCpy[y + 1][x - 1] and magCpy[y][x] > magCpy[y - 1][x + 1]):
                        magCpy[y][x] = 0
            if (dirCpy[y][x] == 112.5):
                if (y + 1 < height and y - 1 >= 0):
                    if (magCpy[y][x] > magCpy[y + 1][x] and magCpy[y][x] > magCpy[y - 1][x]):
                        magCpy[y][x] = 0
            if (dirCpy[y][x] == 90):
                if (y + 1 < height and y - 1 >= 0):
                    if (magCpy[y][x] > magCpy[y + 1][x] and magCpy[y][x] > magCpy[y - 1][x]):
                        magCpy[y][x] = 0
            if (dirCpy[y][x] == 67.5):
                if ((y + 2 < height and x + 1 < width) and (y - 2 >= 0 and x - 1 >= 0)):
                    if (magCpy[y][x] > magCpy[y + 2][x + 1] and magCpy[y][x] > magCpy[y - 2][x + 1]):
                        magCpy[y][x] = 0
            if (dirCpy[y][x] == 45):
                if ((y + 1 < height and x + 1 < width) and (y - 1 >= 0 and x - 1 >= 0)):
                    if (magCpy[y][x] > magCpy[y + 1][x + 1] and magCpy[y][x] > magCpy[y - 1][x - 1]):
                        magCpy[y][x] = 0
            if (dirCpy[y][x] == 22.5):
                if ((y + 1 < height and x + 2 < width) and ( y - 1 >= 0 and x - 2 >= 0)):
                    if (magCpy[y][x] > magCpy[y + 1][x + 2] and magCpy[y][x] > magCpy[y - 1][x - 2]):
                        magCpy[y][x] = 0
            if (dirCpy[y][x] == 0):
                if (x + 1 < width and x - 1 >= 0):
                    if (magCpy[y][x] > magCpy[y][x + 1] and magCpy[y][x] > magCpy[y][x - 1]):
                        magCpy[y][x] = 0
    # double-threshold limits (hard-coded; tuned by hand)
    highThresh = 13
    lowThresh = 1
    cv2.imshow("before edge detection",magCpy)
    edgeDetect(magCpy, highThresh,lowThresh)
    cv2.imshow("after edge detection", magCpy)
    #print magCpy
    #remove points below low threshold
    for y in range (0, height):
        for x in range (0, width):
            if (magCpy[y][x] < lowThresh):
                magCpy[y][x] = 0
    cv2.imshow("After minimum thresholding after edge detection",magCpy)
    retArray = deepcopy(magCpy)
    #remove points not near high threshold
    for y in range (0, height):
        for x in range (0, width):
            # keep the pixel if ANY of the 8 neighbours is a strong edge
            if (y + 1 < height and x + 1 < width):
                if (magCpy[y+1][x+1] >= highThresh):
                    continue
            if (x + 1 < width):
                if (magCpy[y][x+1] >= highThresh):
                    continue
            if (y - 1 >= 0 and x + 1 < width):
                if (magCpy[y-1][x+1] >= highThresh):
                    continue
            if (y - 1 >= 0):
                if (magCpy[y-1][x] >= highThresh):
                    continue
            if (y - 1 >= 0 and x - 1 >= 0):
                if (magCpy[y-1][x-1] >= highThresh):
                    continue
            if (x - 1 >= 0):
                if (magCpy[y][x-1] >= highThresh):
                    continue
            if (x - 1 >= 0 and y + 1 < height):
                if (magCpy[y+1][x-1] >= highThresh):
                    continue
            if (y + 1 < height):
                if (magCpy[y+1][x] >= highThresh):
                    continue
            magCpy[y][x] = 0
    #cv2.imshow("edges",edge)
    return retArray
##########################################################################
## Function Name: edgeDetect
## Function Desc.: detects edges
## Function Arguments: mat = matrix, tUpper = higher threshold, tLower = lower threshold
##########################################################################
def edgeDetect(mat, tUpper, tLower):
    """Seed hysteresis edge following from every strong pixel.

    Works on a private copy of *mat*: each pixel whose magnitude is at
    least ``tUpper`` becomes a starting point for followEdges(), which
    grows the edge through weaker neighbours.  Returns None.
    """
    matrix = np.array(mat)
    row_count, col_count = matrix.shape
    # start from an all-zero (black) edge map of the same shape and dtype
    edges = deepcopy(matrix)
    for col in range(col_count):
        for row in range(row_count):
            edges[row][col] = 0
    # launch edge following at each strong response
    for col in range(col_count):
        for row in range(row_count):
            if matrix[row][col] >= tUpper:
                followEdges(col, row, matrix, tUpper, tLower, edges)
##########################################################################
## Function Name: followEdges
## Function Desc.: follows edges in an attempt to connect pixels
## Function Arguments: x,y = index, matrix = image, tUpper,tLower = thresholds
## edges = marked pixels
##########################################################################
def followEdges(x, y, matrix, tUpper, tLower, edges):
    """Hysteresis step of Canny: mark (x, y) as an edge and grow the edge
    through every 8-connected neighbour whose magnitude exceeds tLower.

    Args:
        x, y: column/row of a strong seed pixel.
        matrix: magnitude image (indexed matrix[row][col]); visited pixels
            are overwritten with 255, as in the original.
        tUpper: high threshold (unused here; kept so the interface matches
            the original recursive version).
        tLower: low threshold a neighbour must exceed to be followed.
        edges: output edge map, same shape as matrix; edge pixels -> 255.

    Bug fixes versus the original:
      * neighbours were tested with ``edges[y + j][j + i]`` (a typo for
        ``x + i``) and against ``edges`` — which starts all zero — instead
        of ``matrix``, so the weak-edge test could never follow anything;
      * the bounds check used ``<=`` width/height, allowing out-of-range
        indices;
      * recursion (capped by an arbitrary depth hack at 33) is replaced
        with an explicit stack, so long edges cannot hit RecursionError.
    """
    height, width = matrix.shape
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if cx < 0 or cy < 0 or cx >= width or cy >= height:
            continue
        # mark the pixel as part of an edge
        matrix[cy][cx] = 255
        edges[cy][cx] = 255
        # queue all 8 neighbours that are weak edges and not yet marked
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                nx, ny = cx + dx, cy + dy
                if 0 <= nx < width and 0 <= ny < height:
                    if matrix[ny][nx] > tLower and edges[ny][nx] != 255:
                        stack.append((nx, ny))
    return
##########################################################################
## Function Name: Zero Padding
## Function Desc.: adds zeros to array sides
## Function Arguments: None
##########################################################################
def ZeroPadding(arr):
    """Placeholder for zero-padding an array's borders.

    Never implemented in the original assignment; kept as a stub so any
    caller keeps working.  The Python-2-only ``print ""`` statement was
    replaced with the function form so the module parses on Python 3.
    """
    print("")
##########################################################################
## Function Name: Zero Padding
## Function Desc.: adds zeros to array sides
## Function Arguments: None
##########################################################################
def ResidualImages():
    """Placeholder for question 3 (residual images); only announces itself.

    The Python-2-only print statement was replaced with the function form
    so the module parses on Python 3; the printed text is unchanged.
    """
    print("Creating Residual Images")
##########################################################################
## Function Name: Entropy
## Function Desc.: Performs Entropy process in its entirety
## Function Arguments: None
##########################################################################
def Entropy():
    """Question 4 driver: compute an entropy-based threshold for three
    images (EntropyOp) and binarise each one with its threshold
    (BinifyImage).  Side effects only; returns None.
    """
    # the names of the individual pictures
    image1 = "input.jpg"
    image2 = "input2.jpg"
    image3 = "input4.jpg"
    # performs entropy operations and gains thresholds for each image
    image1Threshold = EntropyOp(image1)
    image2Threshold = EntropyOp(image2)
    image3Threshold = EntropyOp(image3)
    # finalizes the process by thresholding the image
    BinifyImage(image1, image1Threshold)
    BinifyImage(image2, image2Threshold)
    BinifyImage(image3, image3Threshold)
##########################################################################
## Function Name: BinifyImage
## Function Desc.: converts image to binary 0, 255 on and off
## Function Arguments: imgName = (string) img name, thres = threshold amnt (int)
##########################################################################
def BinifyImage(imgName, thres):
    """Binarise the image file *imgName* at threshold *thres* and display
    both a manual binarisation and cv2's Otsu result.

    Args:
        imgName: path of the image (loaded as grayscale).
        thres: threshold separating off (0) from on (255) pixels.
    """
    # opens image
    img = cv2.imread(imgName, 0)
    # ges image dimensions
    height,width = img.shape
    imcpy = deepcopy(img)
    # offset that gives images best look
    # threshOffset = 60
    threshOffset = 0
    # loop turns pixels on and off based on value
    for i in range (0, height):
        for j in range (0, width):
            if (imcpy[i][j] >= thres + threshOffset):
                imcpy[i][j] = 255
            else:
                imcpy[i][j] = 0
    cv2.imshow("Binary manually", imcpy)
    # uses cv2 along with calculated threshold
    # NOTE(review): with THRESH_OTSU set, cv2 computes its own threshold
    # and ignores the passed value — confirm which behaviour is wanted.
    (thresh, im_bw) = cv2.threshold(img,thres,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cv2.imshow("Binary with cv2", im_bw)
    # NOTE(review): threshold='nan' is rejected by modern numpy.
    np.set_printoptions(threshold='nan')
    # Python-2 print statement (needs parentheses on Python 3)
    print im_bw
    cv2.waitKey(0)
##########################################################################
## Function Name: EntropyOp
## Function Desc.: Quantitative Evaluation of Edge Detector
## Function Arguments: None
##########################################################################
def EntropyOp(imageName):
    """Entropy computation used for threshold selection.

    Builds the grayscale histogram of *imageName*, normalises it into a
    probability distribution, forms the cumulative distribution P_T and
    two derived "class" distributions A and B, and returns the summed
    entropy H(T) = H(A) + H(B) as a float (printed truncated via %d).

    NOTE(review): the entropies are summed over ALL candidate thresholds
    rather than maximised over a single t, which differs from standard
    Kapur-style entropy thresholding — confirm the intent.  Also assumes
    imread() returns a 2-D array (the height,width unpack fails on RGB).
    """
    # imports image and forces it to flatten and become grayscale
    #imageTest = Image.open(imageName).convert('L')
    # imports image and forces it to flatten and become grayscale
    img = cv2.imread(imageName, 0)
    # calculates histograram
    #hist = cv2.calcHist([img],[0],None,[256],[0,256])
    #hist = cv2.calcHist([img],[0],None,[256],[0,256])
    # formatted better
    hist,bins = np.histogram(img.ravel(),256,[0,256])
    # makes and displays histogram (blocks until the window is closed)
    plt.hist(img.ravel(),256,[0,256]); plt.show()
    image = imread(imageName)
    # dimenstions of image
    height,width = image.shape
    # area or number of pixels in image
    numPx = width * height
    # array to hold entropy
    valArray = []  # unused
    # CORRECTION: incorrect way of displaying histogram
    #n, bins, patches = plt.hist( histArray, bins=255, range=(0,255), histtype='step')
    #plt.xlabel("Value")
    #plt.ylabel("Frequency")
    #plt.show()
    # copies histogram as a float
    # probArray = probability of each pixel
    probArray = deepcopy(hist.astype(float))
    # normalizes the array, sum of all elements = 1
    for i in range (0,len(hist)):
        probArray[i] /= numPx
    # number to be added to zero to prevent divide by zeros
    theta = 1e-7
    # copies prob array and sets all values to 0 (float)
    P_T = deepcopy(probArray)
    for w in range (0, len(probArray)):
        P_T[w] = 0.0
    # calculate pt and store in array (cumulative distribution)
    for i in range (0, len(probArray)):
        for j in range (0, i + 1):
            P_T[i] += probArray[j]
    # Store classes A and B of probability
    A = []
    B = []
    # calculate A class, theta prevent divide 0 error
    for i in range(0, len(probArray)):
        A.append(probArray[i]/(P_T[i] + theta))
        #print ("%f/(%f + %f) = %f" % (probArray[i], P_T[i], theta, probArray[i]/(P_T[i] + theta)))
    # calculate B class, theta prevent divide 0 error
    for i in range(0, len(probArray)):
        # prevents out of bounds error
        if (i + 1 >= len(probArray) - 1):
            B.append(theta)
            continue
        B.append(probArray[i+1]/(1.0 - P_T[i] + theta))
        #print ("B = %f/(%f + %f) = %f" % (probArray[i], P_T[i], theta, probArray[i]/(P_T[i] + theta)))
    # used to hold HA and HB
    HA = 0.0
    HB = 0.0
    # H(A)
    for i in A:
        it = -1 * i * np.log2(i + theta)
        HA += it
        #print ("HA: %f" %HA)
    # H(B)
    for i in B:
        it = -1 * i * np.log2(i + theta)
        HB += it
        #print ("HB: %f" %HB)
    # total entropy int not int[]
    HT = HA + HB
    print ("H(T) = %d" %HT)
    return HT
##########################################################################
## Function Name: Q2
## Function Desc.: Quantitative Evaluation of Edge Detector
## Function Arguments: None
##########################################################################
def Q2():
    """Question 2 driver: quantitative evaluation of the edge detector.

    Runs the custom Canny detector on three test images, binarises both
    detector output and reference edge maps, scores each with QEED, then
    repeats image 1 with salt-and-pepper plus Gaussian noise.  Side
    effects only (cv2 windows and console output).
    """
    name1 = "input_image.jpg"
    #name1 = "input.jpg"
    name2 = "input_image2.jpg"
    name3 = "input_image3.jpg"
    # reads in test images and converts to grayscale
    testImageA = Image.open(name1).convert('L')
    testImageB = Image.open(name2).convert('L')
    testImageC = Image.open(name3).convert('L')
    #testImageC = Image.open("input_image.jpg").convert('L')
    # reads in edge map for correlating images as grayscale
    testImageAEdge = np.array(Image.open("output_image.png").convert('L'))
    testImageBEdge = np.array(Image.open("output_image2.png").convert('L'))
    testImageCEdge = np.array(Image.open("output_image3.png").convert('L'))
    # opens same images for easy dimension access in form array[y][x]
    # total is area of rectangle or image px amnt.
    testImageAInfo = imread("input_image.jpg").shape
    heightA = testImageAInfo[0]
    widthA = testImageAInfo[1]
    totalA = widthA * heightA
    testImageBInfo = imread("input_image2.jpg").shape
    heightB = testImageBInfo[0]
    widthB = testImageBInfo[1]
    totalB = widthB * heightB
    testImageCInfo = imread("input_image3.jpg").shape
    heightC = testImageCInfo[0]
    widthC = testImageCInfo[1]
    totalC = widthC * heightC
    '''
    # _ prevents it from being a tuple, converts image to binary
    _ ,x = cv2.threshold(nonMaxResult,7,1,cv2.THRESH_BINARY)
    print x
    '''
    # to be used with gaussian and derivative calculations best sigma is .501
    sigma = .501
    # length of masks
    maskLength = 3
    A = CannyEdgeDetection(testImageA, sigma, maskLength)
    B = CannyEdgeDetection(testImageB, sigma, maskLength)
    C = CannyEdgeDetection(testImageC, sigma, maskLength)
    #cv2.imshow("A", A)
    cv2.waitKey(0)
    if (True):
        # converts both edge map and image to binary (redundant because cannyEdgeDetection does this)
        (_ ,binA) = cv2.threshold(A,0,255,cv2.THRESH_BINARY )
        (_ , testABin) = cv2.threshold(testImageAEdge,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        print ("Image 1 rankings")
        QEED(binA,testABin,widthA,heightA)
        (_ ,binB) = cv2.threshold(B,0,255,cv2.THRESH_BINARY)
        (_ , testBBin) = cv2.threshold(testImageBEdge,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        print ("Image 2 rankings")
        QEED(binB,testBBin,widthB,heightB)
        (_ ,binC) = cv2.threshold(C,0,255,cv2.THRESH_BINARY)
        (_ , testCBin) = cv2.threshold(testImageCEdge,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        print ("Image 3 rankings")
        QEED(binC,testCBin,widthC,heightC)
        cv2.waitKey(0)
    print ("Image 1 rankings with salt and pepper")
    # add noise to picture, salt and pepper then gaussian
    # NOTE(review): Noice("s&p", ...) mutates its input array in place —
    # see the note on Noice(); confirm that is acceptable here.
    noisyTestA = Noice("gauss", Noice("s&p",np.array(testImageA)))
    noisyA = CannyEdgeDetection(noisyTestA, sigma, maskLength)
    # changes the print settings to show all values in array
    # NOTE(review): threshold='nan' is rejected by modern numpy.
    np.set_printoptions(threshold='nan')
    # loop forces image to binary because cv2 complained about noised image
    for i in range (0, heightA):
        for j in range (0, widthA):
            if (noisyA[i][j] != 0):
                noisyA[i][j] = 255
    QEED(noisyA,testImageAEdge, widthA, heightA)
##########################################################################
## Function Name: QEED
## Function Desc.: Quantitative Evaluation of Edge Detector operations
## Function Arguments: binA = binary of original image, testImageAEdge =
## edge map to be compared, (w, h) = width height
##########################################################################
def QEED (binA,testImageAEdge, w, h):
    """Quantitative evaluation of an edge detector.

    Compares a binary edge image against a reference binary edge map,
    prints the confusion-matrix rates and the derived quality measures,
    and returns them as a tuple:
    (sensitivity, specificity, precision, negative predictive value,
     fall-out, FNR, FDR, accuracy, F-score, MCC).
    Pixels are ON at 255 and OFF at 0; anything else is ignored.
    """
    # number of pixels in the image
    area = float(w * h)
    ON_PIXEL = 255
    OFF_PIXEL = 0
    # single pass over the image: tally the four confusion-matrix cells
    tp = tn = fp = fn = 0.0
    for row in range(h):
        for col in range(w):
            detected = binA[row][col]
            reference = testImageAEdge[row][col]
            if detected == ON_PIXEL:
                if detected == reference:
                    tp += 1
                else:
                    fp += 1
            elif detected == OFF_PIXEL:
                if detected == reference:
                    tn += 1
                else:
                    fn += 1
    # express each cell as a fraction of the image area
    tp_rate = tp / area
    tn_rate = tn / area
    fp_rate = fp / area
    fn_rate = fn / area
    print ("TP_A = %f\nTN_A = %f\nFP_A = %f\nFN_A = %f" %(tp_rate, tn_rate, fp_rate, fn_rate))
    # using pre calculated values, the edge comparison is performed
    sensitivity = tp_rate / (tp_rate + fn_rate)
    specificity = tn_rate / (tn_rate + fp_rate)
    precision = tp_rate / (tp_rate + fp_rate)
    npv = tn_rate / (tn_rate + fn_rate)
    fallout = fp_rate / (fp_rate + tn_rate)
    fnr = fn_rate / (fn_rate + tp_rate)
    fdr = fp_rate / (fp_rate + tp_rate)
    accuracy = (tp_rate + tn_rate) / (tp_rate + fn_rate + tn_rate + fp_rate)
    f_score = (2 * tp_rate) / ((2 * tp_rate) + fp_rate + fn_rate)
    # Matthews correlation coefficient
    mcc = ((tp_rate * tn_rate) - (fp_rate * fn_rate)) / \
        math.sqrt((tp_rate + fp_rate) * (tp_rate + fn_rate) * (tn_rate + fp_rate) * (tn_rate + fn_rate))
    print ("\n\nSensitivity = %f\nSpecificity = %f\nPrecision = %f\nNegativePredictiveValue = %f\n"
           "FallOut = %f\nFNR = %f\nFDR = %f\nAccuracy = %f\nF_Score = %f\nMCC = %f\n" %
           (sensitivity, specificity, precision,
            npv, fallout, fnr,
            fdr, accuracy, f_score, mcc))
    return (sensitivity, specificity, precision, npv, fallout, fnr,
            fdr, accuracy, f_score, mcc)
##########################################################################
## Function Name: gauss
## Function Desc.: returns the 1 dimensional gausian mask
##########################################################################
def gauss(n=43,sigma=1):
    """Return a 1-D Gaussian mask.

    Samples the normal pdf with standard deviation *sigma* at the integer
    offsets -n//2 .. n//2 and returns them as a list of floats.
    """
    half = int(n / 2)
    coeff = 1 / (sigma * sqrt(2 * pi))
    samples = []
    for offset in range(-half, half + 1):
        samples.append(coeff * exp(-float(offset) ** 2 / (2 * sigma ** 2)))
    return samples
##########################################################################
## Function Name: gausXDeriv
## Function Desc.: returns the 1 dimensional gausian mask
##########################################################################
def gausXDeriv(n=43,sigma=1):
    """Return the 1-D derivative-of-Gaussian mask along x.

    Each sample is d/dx of the Gaussian pdf:
    -x / (sigma^3 * sqrt(2*pi)) * exp(-x^2 / (2*sigma^2)).
    """
    half = int(n / 2)
    denom = (sigma ** 3) * sqrt(2 * pi)
    two_var = 2 * sigma ** 2
    return [-offset / denom * exp(-float(offset) ** 2 / two_var)
            for offset in range(-half, half + 1)]
##########################################################################
## Function Name: gausYDeriv
## Function Desc.: returns the 1 dimensional gausian mask
##########################################################################
def gausYDeriv(n=43,sigma=1):
    """Return the 1-D derivative-of-Gaussian mask along y.

    Identical in form to gausXDeriv (the derivative mask is the same in
    both axes; only its orientation during convolution differs).
    """
    half = int(n / 2)
    denom = (sigma ** 3) * sqrt(2 * pi)
    two_var = 2 * sigma ** 2
    return [-offset / denom * exp(-float(offset) ** 2 / two_var)
            for offset in range(-half, half + 1)]
##########################################################################
## Function Name: Noice
## Function Desc.: generates noise in numpy images and returns disturbed
## image
##########################################################################
def Noice(noise_typ,image):
    """Add synthetic noise to a numpy image and return the noisy image.

    Args:
        noise_typ: one of "gauss", "s&p", "poisson", "speckle"; any other
            value falls through and returns None implicitly.
        image: 2-D array for gauss/s&p/poisson, 3-D (row, col, ch) for
            speckle.

    NOTE(review):
      * the "s&p" branch sets ``out = image`` without copying, so the
        CALLER'S array is mutated in place — confirm that is intended;
      * indexing with a plain list of index arrays (``out[coords]``) is
        the legacy numpy form; modern numpy requires a tuple;
      * the "gauss" branch returns a float array even for integer input.
    """
    # Switch to determine which type of noise to be added
    if noise_typ == "gauss":
        row,col= image.shape
        mean = 0
        #var = 0.1
        #sigma = var**0.5
        # additive zero-mean Gaussian noise with std-dev 10
        gauss = np.random.normal(mean,10,(row,col))
        gauss = gauss.reshape(row,col)
        noisy = image + gauss
        return noisy
    elif noise_typ == "s&p":
        row,col = image.shape
        # equal split between salt and pepper
        s_vs_p = 0.5
        #amount = 0.004
        amount = 0.25
        out = image
        # Salt mode
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = [np.random.randint(0, i - 1, int(num_salt))
                  for i in image.shape]
        out[coords] = 255
        # Pepper mode
        num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
        coords = [np.random.randint(0, i - 1, int(num_pepper))
                  for i in image.shape]
        out[coords] = 0
        return out
    elif noise_typ == "poisson":
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy = np.random.poisson(image * vals) / float(vals)
        return noisy
    elif noise_typ =="speckle":
        row,col,ch = image.shape
        gauss = np.random.randn(row,col,ch)
        gauss = gauss.reshape(row,col,ch)
        noisy = image + image * gauss
        return noisy
##########################################################################
## Function Name: Main
## Function Desc.: entry point of program
##########################################################################
def main():
    """Program entry point: run each assignment question in turn, then
    print a 5x5 2-D Gaussian built as the outer product of the 1-D mask.
    """
    Q1() # calls operations needed for question 1
    Q2() # calls methods to handle question 2
    ResidualImages() # calls method to perform operations needed for Q3
    # YES WE CAN USE ENTROPY FOR THRESHOLDING
    Entropy() # calls methods to handle question 4
    # outer product of the 1-D Gaussian with itself -> separable 2-D kernel
    x = np.reshape(ProfGoss(5, 1.6),(1,5))
    xtran = np.reshape(x,(5,1))
    res = np.dot(xtran, x)
    # Python-2 print statement (needs parentheses on Python 3)
    print res
    return
# initializes the program
main() | true |
07b7ca9f5e13a210892b144d5d779e80186df198 | Python | quNadi/ExCheckPy | /basic/any_all.py | UTF-8 | 141 | 2.609375 | 3 | [] | no_license | def avoids(word,forbidden):
return not any(letter in forbidden for letter in word)
a=['ala','pies','kot','mysz']
print(avoids('kot',a)) | true |
78f161f64f53261c8a334b6b5d18d41ec162cbfe | Python | arjun010/global-sci-fi-project | /analysis/reddit.py | UTF-8 | 1,452 | 2.578125 | 3 | [] | no_license | import praw
import csv
import unicodedata

# Script: scrape the top submissions of r/shortscifistories via praw and
# dump selected fields to data.csv (written for Python 2: 'wb' csv mode
# and the `unicode` builtin below).
# NOTE(review): API credentials are hard-coded in source — they should be
# moved to environment variables or a config file and rotated.
reddit = praw.Reddit(client_id='BKgABmF31ARw4w', \
                     client_secret='MQfbvgznD05VRiU1O4CLE4QWo1E', \
                     user_agent='scifihw', \
                     username='asrinivasan40', \
                     password='Susheela1058')
subreddit = reddit.subreddit('shortscifistories')
# generator over the subreddit's top submissions (default listing size)
top_subreddit = subreddit.top()
# column name -> list of per-submission values
topics_dict = { "title":[], \
                "score":[], \
                "id":[], "url":[], \
                "comms_num": [], \
                "created": [], \
                "body":[]}
# collect one value per column for every submission
for submission in top_subreddit:
    topics_dict["title"].append(submission.title)
    topics_dict["score"].append(submission.score)
    topics_dict["id"].append(submission.id)
    topics_dict["url"].append(submission.url)
    topics_dict["comms_num"].append(submission.num_comments)
    topics_dict["created"].append(submission.created)
    topics_dict["body"].append(submission.selftext)
# print topics_dict.keys()
# NOTE(review): 'wb' mode for csv is the Python-2 convention; Python 3
# needs text mode with newline=''.
writer = csv.writer(open('data.csv','wb'))
writer.writerow(topics_dict.keys())
# NOTE(review): assumes at least 100 submissions were fetched; an
# IndexError is raised otherwise — confirm the listing size.
for i in range(0,100):
    postData = []
    for key in topics_dict.keys():
        val = topics_dict[key][i]
        # print type(val)
        # strip accents so the Python-2 csv writer receives plain ascii
        if type(val) is unicode:
            val = unicodedata.normalize('NFKD', val).encode('ascii','ignore')
            # val = val.encode('utf-8')
        postData.append(val)
    # print postData
    # print len(postData)
writer.writerow(postData) | true |
3e7c631ecf3e8c6b546e063960dd4937cb83a757 | Python | aniruddhamalkar/contious-single-direction-socket | /socket_part_2_server.py | UTF-8 | 611 | 3.3125 | 3 | [] | no_license | import socket
# One-shot TCP server: accepts a single client and forwards user-typed
# lines to it until "quit" is entered.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket has been successfully created")
# bind to this host's name on port 5555
sock.bind((socket.gethostname(), 5555))
print("Socket has been successfully binded")
# allow up to 5 queued connections
sock.listen(5)
while True:
    client_socket, ip_address = sock.accept()
    print("Connection from {0} has been establish!".format(ip_address))
    # send user-typed messages until "quit" is entered ("quit" itself is
    # sent to the client before the loop exits)
    while True:
        message = input("Please enter the text that you want to send: ")
        client_socket.send(bytes(message, "utf-8"))
        if message == "quit":
            break
    # NOTE(review): this break ends the accept loop after the first
    # client, so only one connection is ever served — confirm intended.
    break
print("Connection has been terminated!")
client_socket.close()
| true |
ce6aac1fbcd25a8b9d35efe5c28ca56076ba5fe4 | Python | InHouse-Banana/DataScientistChallenge | /model_inference.py | UTF-8 | 6,433 | 3.03125 | 3 | [] | no_license | import argparse
import itertools
import numpy as np
import pandas as pd
import pickle
from sklearn.metrics import accuracy_score, f1_score, precision_score\
, recall_score, roc_auc_score
def try_func(func, y_data, predictions):
    """Evaluate a metric callable defensively.

    Calls ``func(y_data, predictions)`` and rounds the result to two
    decimals via its ``.round`` method (numpy scalars provide one; plain
    Python floats do not).  Any exception is printed and converted to
    ``np.nan`` so one failing metric cannot abort the whole report.
    """
    try:
        score = func(y_data, predictions)
        return score.round(2)
    except Exception as err:
        print(err)
        return np.nan
def calc_metrics(model, x_data, y_data):
    """
    Calculates model metrics (accuracy, f1, precision, recall, roc_auc)
    using x_data (features) and y_data (targets), printing each value.
    Any metric that raises is reported as np.nan by try_func.

    Args:
        model: fitted estimator exposing .predict (e.g. an xgboost model)
        x_data: array-like of features with a .shape attribute
        y_data: array-like of targets with a .shape attribute

    Returns:
        list([acc, f1, precision, recall, roc_auc, shape_x, shape_y])
        — note the two trailing shape entries in addition to the five
        metric scores.
    """
    shape_x, shape_y = (x_data.shape, y_data.shape)
    print('Data shapes X:', shape_x, 'Y:', shape_y)
    predictions = model.predict(x_data)
    # each metric is wrapped so a single failure cannot abort the report
    acc = try_func(accuracy_score, y_data, predictions)
    f1 = try_func(f1_score, y_data, predictions)
    prec = try_func(precision_score, y_data, predictions)
    rec = try_func(recall_score, y_data, predictions)
    roc = try_func(roc_auc_score, y_data, predictions)
    print('Metrics model -', 'accuracy:', acc, 'f1:', f1, 'precision:', prec, 'recall:', rec, 'roc_auc:', roc)
    return [acc, f1, prec, rec, roc, shape_x, shape_y]
if __name__ == "__main__":
    """
    Reads a csv file, and generates featueres and calculates metrics: acc, f1, precision, recall, roc_auc
    It prints the values and returns them into a list.
    Optional Arguments:
        --source-file SOURCE_FILE
                        file containing the features and targets to calculate metrics
        --model_file MODEL_FILE
                        model file location that will be tested
        --enc_file ENC_FILE encoder file locations to one-hot features
    Output:
        Prints [acc, f1, prec, rec, roc_auc, shape_x, shape_y]
    """
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Parameters for XGBoost Prediction')
    parser.add_argument('--source-file', dest = 'source_file', action = 'store', type = str
                        , default='dataset/data.csv'
                        , help='file containing the features and targets to calculate metrics')
    parser.add_argument('--model_file', dest = 'model_file', action = 'store', type = str
                        , default='model_artifacts/xgb_tuned.pkl'
                        , help='model file location that will be tested')
    parser.add_argument('--enc_file', dest = 'enc_file', action = 'store', type = str
                        , default='model_artifacts/encoder.pkl'
                        , help='encoder file location, that one-hot encode features')
    args = parser.parse_args()
    source_file = args.source_file
    model_file = args.model_file
    enc_file = args.enc_file
    # Load model
    # NOTE(review): pickle.load executes code from the file — only load
    # artifacts from trusted sources.
    #model_file = "model_artifacts/xgb_tuned.pkl"
    print('Loading model from:', model_file)
    trained_xgb_tuned = pickle.load(open(model_file, "rb"))
    #Load encoder
    #enc_file = "model_artifacts/encoder.pkl"
    print('Loading encoder from:', enc_file)
    enc = pickle.load(open(enc_file, "rb"))
    #source_file = 'data_sources/2016-09-19_79351_training.csv'
    print('Reading file:', source_file)
    # drop the raw PAY_0..PAY_6 status columns and the row id
    cols_to_drop = [ 'PAY_' + str(x) for x in range(0, 7)] + ['ID']
    clean_data = pd.read_csv(source_file).drop(columns = cols_to_drop)
    # numeric code -> category-name mappings for the demographic columns
    SEX_dict = {1 : 'male', 2 : 'female'}
    EDUCATION_dict = {1 : 'graduate_school', 2 : 'university', 3: 'high_school'\
                      , 4 : 'education_others', 5 : 'unknown', 6 : 'unknown'}
    MARRIAGE_dict = {1 : 'married', 2 : 'single', 3 : 'marriage_others'}
    #default_payment_next_month_dict = {0 : 'no', 1 : 'yes'}
    clean_data['SEX'] = clean_data['SEX'].replace(SEX_dict)
    clean_data['EDUCATION'] = clean_data['EDUCATION'].replace(EDUCATION_dict)
    clean_data['MARRIAGE'] = clean_data['MARRIAGE'].replace(MARRIAGE_dict)
    #clean_data['default.payment.next.month'] = clean_data['default.payment.next.month'].replace(default_payment_next_month_dict)
    # Drop categories not-used
    clean_data = clean_data[clean_data['EDUCATION'] != 0]
    clean_data = clean_data[clean_data['MARRIAGE'] != 0]
    clean_data = clean_data[clean_data['EDUCATION'] != 'education_others']
    clean_data = clean_data[clean_data['MARRIAGE'] != 'marriage_others']
    # Create BAL_AMT feature: monthly balance = bill amount - payment
    for month in range(1,7):
        bill_col = 'BILL_AMT' + str(month)
        pay_col = 'PAY_AMT' + str(month)
        bal_col = 'BAL_AMT' + str(month)
        clean_data[bal_col] = clean_data[bill_col] - clean_data[pay_col]
        clean_data = clean_data.drop(columns = [bill_col, pay_col])
    training_data = clean_data.copy()
    cat_features = training_data.select_dtypes(include=["object"]).columns.values
    print('cat_features:', cat_features)
    numerical_features = ['LIMIT_BAL', 'AGE', 'BAL_AMT1'\
                          ,'BAL_AMT2', 'BAL_AMT3', 'BAL_AMT4', 'BAL_AMT5', 'BAL_AMT6']
    print('numerical_features:', numerical_features)
    target = ['default.payment.next.month']
    print('target:', target)
    # Preparing to one-hot-encode
    x = training_data[list(cat_features) + list(numerical_features)]
    y = training_data[target]
    # One-Hot-Encoding
    # NOTE(review): fit_transform RE-FITS the loaded encoder on this data;
    # for pure inference enc.transform(...) is the usual call — confirm.
    x_onehot = enc.fit_transform(x[cat_features]).toarray()
    enc_categories = list(itertools.chain(*np.array(enc.categories_, dtype=object)))
    x_onehot = pd.DataFrame(x_onehot, columns = enc_categories).astype(str)
    # Features and targets
    features_targets = pd.concat([x_onehot.reset_index(drop=True), x[numerical_features].reset_index(drop=True), y.reset_index(drop=True)]
                                 , axis = 1)
    features_targets.columns = features_targets.columns.str.lower()
    # last column is the target; everything before it is a feature
    x = features_targets.drop( columns = features_targets.columns[-1:])
    y = features_targets.drop( columns = features_targets.columns[:-1])
    print("Metrics XGB model")
    print('Shape source features:', x.shape, 'targets:', y.shape)
    metrics = calc_metrics(trained_xgb_tuned, x.values, y.values)
| true |
b6e45ef315a83d7815ab162352dc97b8ad74ee08 | Python | nehruperumalla/ML_from_Scratch | /KNN/KNN.py | UTF-8 | 1,241 | 3.34375 | 3 | [] | no_license | import numpy as np
from collections import Counter
class KNN:
    """k-nearest-neighbours classifier (brute force, Euclidean metric)."""

    def __init__(self, k: int = 3):
        # number of neighbours consulted per query point
        self.k = k

    def euclidean_distance(self, x1: np.ndarray, x2: np.ndarray) -> float:
        """Euclidean distance between two points of any dimensionality."""
        diff = x1 - x2
        return np.sqrt(np.sum(diff * diff))

    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """Memorise the training set (lazy learner: no actual fitting)."""
        self.X_train = X
        self.y_train = y

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict a class label for every row of X; returns a list."""
        return [self._predict(sample) for sample in X]

    def _predict(self, x: np.ndarray) -> int:
        """Majority vote among the k training points closest to x."""
        # distance from the query to every stored training point
        distances = [self.euclidean_distance(x, neighbour)
                     for neighbour in self.X_train]
        # indices of the k nearest training points
        nearest = np.argsort(distances)[: self.k]
        # most common class label among those neighbours
        votes = [self.y_train[idx] for idx in nearest]
        return Counter(votes).most_common(1)[0][0]
| true |
dc52609aeeb64c19cde19d771f1ba49cbcb15e23 | Python | wenhaoliang/leetcode | /leetcode/offerIsComing/分治算法/剑指 Offer 51. 数组中的逆序对.py | UTF-8 | 2,623 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
在数组中的两个数字,如果前面一个数字大于后面的数字,则这两个数字组成一个逆序对。输入一个数组,求出这个数组中的逆序对的总数。
示例 1:
输入: [7,5,6,4]
输出: 5
链接:https://leetcode-cn.com/problems/shu-zu-zhong-de-ni-xu-dui-lcof
"""
from typing import List
class Solution:
    """Count inversion pairs via merge sort (剑指 Offer 51)."""

    def reversePairs(self, nums: List[int]) -> int:
        """Return the number of pairs (i, j) with i < j and nums[i] > nums[j].

        Sorts *nums* in place as a side effect; the inversion count is
        accumulated in ``self.count`` during the merges.
        """
        self.count = 0
        self._sort(nums, 0, len(nums) - 1)
        return self.count

    def _sort(self, nums, lo, hi):
        # recursively sort each half, then merge while counting inversions
        if lo >= hi:
            return
        mid = (lo + hi) // 2
        self._sort(nums, lo, mid)
        self._sort(nums, mid + 1, hi)
        self._merge(nums, lo, mid, hi)

    def _merge(self, nums, lo, mid, hi):
        merged = []
        i, j = lo, mid + 1
        while i <= mid and j <= hi:
            if nums[i] <= nums[j]:
                merged.append(nums[i])
                i += 1
            else:
                # nums[i..mid] are all greater than nums[j]
                self.count += mid - i + 1
                merged.append(nums[j])
                j += 1
        # drain whichever half still has elements
        merged.extend(nums[i : mid + 1])
        merged.extend(nums[j : hi + 1])
        nums[lo : lo + len(merged)] = merged
class Solution1:
    """Merge-sort inversion count, kept as a second variant.

    Fix: the trailing copy of the right half previously looped with
    ``while j <= mid`` (always false at that point), so leftover
    right-half elements were never appended to ``temp``.  The answer
    happened to stay correct only because those elements already sat
    in their final positions and the copy-back only overwrites
    ``len(temp)`` slots; the loop now uses the intended bound.
    """

    def reversePairs(self, nums: List[int]) -> int:
        def merge(nums, left, mid, right):
            # Merge sorted halves nums[left..mid] and nums[mid+1..right].
            i, j, temp = left, mid + 1, []
            while i <= mid and j <= right:
                if nums[i] <= nums[j]:
                    temp.append(nums[i])
                    i += 1
                else:
                    # Every unconsumed left element exceeds nums[j].
                    self.count += mid - i + 1
                    temp.append(nums[j])
                    j += 1
            while i <= mid:
                temp.append(nums[i])
                i += 1
            while j <= right:  # fixed: was `j <= mid`
                temp.append(nums[j])
                j += 1
            for i in range(len(temp)):
                nums[left + i] = temp[i]

        def mergeSort(nums, left, right):
            if left >= right:
                return
            mid = (left + right) // 2
            mergeSort(nums, left, mid)
            mergeSort(nums, mid + 1, right)
            merge(nums, left, mid, right)

        self.count = 0
        n = len(nums)
        mergeSort(nums, 0, n - 1)
        return self.count
if __name__ == "__main__":
    # Quick manual check: [7, 5, 3, 4] contains 5 inversion pairs,
    # and both implementations should agree.
    A = Solution()
    print(A.reversePairs([7, 5, 3, 4]))
    A = Solution1()
    print(A.reversePairs([7, 5, 3, 4]))
| true |
b08080ff57bd3d1ffc29e51c2c1dac84d7d73d3e | Python | ThompsonNJ/CSC231-Introduction-to-Data-Structures | /Lab 02/GeneratingPolygons.py | UTF-8 | 619 | 3.15625 | 3 | [] | no_license | class RegularPolygon:
def __init__(self, num, length):
self.num_sides = num
self.length = length
def compute_perimeter(self):
return (self.num_sides*self.length)
class EqualiaterialTriangle(RegularPolygon):
    def compute_area(self):
        """Area of an equilateral triangle: (sqrt(3) / 4) * side**2.

        Fix: previously returned sqrt(3) * side, which is a length
        scaled by a constant, not an area.
        """
        return (3 ** 0.5 / 4.0) * self.length ** 2
class Square(RegularPolygon):
    def compute_area(self):
        """Area of a square: side length squared."""
        return self.length * self.length
poly_list = []
with open('polygons.csv', 'r') as rfile:
line = rfile.readline().strip()
for s in line:
line = s.split(',')
if line[0] == '3':
| true |
d9626e172b54bcafff4998cc1b43f520a5fd83da | Python | adsantos97/cs462-asn2 | /rough_asn2.py | UTF-8 | 5,319 | 3.640625 | 4 | [] | no_license | _author_ = 'arizza santos'
# Course: CS 462
# April 18, 2018
# Assignment 2: Dynamic Programming and Greedy Algorithms
import random
import timeit
import itertools
import sys
# purpose: generate random integers
# input: n - number of objects(integers)
# return: rand_ints - list of random integers
def generate_rand_ints(n):
    """Return n random integers, each drawn uniformly from [1, n]."""
    return [random.randint(1, n) for _ in range(n)]
# purpose: generate list of integers [start,n]
# input: start - starting integer
# n - number of objects(integers)
# step - increment between each integer
# return: ints - list of integers
def generate_ints(start, n, step):
    """Return the integers start, start+step, ... up to and including n."""
    return list(range(start, n + 1, step))
# purpose: generate one random problem instance
# input: start - starting integer
# n - number of objects
# step - increment between each integer
# return: list of objects with their weights and values
def generate_one_instance(start, n, step):
    # Build one random knapsack instance: a list of [object_id, weight, value]
    # triples, printing each as it is created (Python 2 print syntax).
    # NOTE(review): the loop runs n/step times but `objects` holds
    # len(range(start, n+1, step)) entries -- these only match when
    # start == 1; other starts can raise IndexError or drop objects.
    prob_instance = []
    obj = []
    objects = generate_ints(start, n, step)
    print "Objects: ", objects
    # Weights and values are random integers in [1, n] (one per object).
    weights = generate_rand_ints(n)
    values = generate_rand_ints(n)
    #print("Object\tWeight\tValue")
    for i in range(n/step):
        obj.append(objects[i])
        obj.append(weights[i])
        obj.append(values[i])
        prob_instance.append(obj)
        print obj
        #print obj[0],"\t",obj[1],"\t",obj[2]
        obj = []
    # double check problem instance
    #for i, w, v in itertools.izip(objects, weights, values):
        #print("Object {}: {} {}".format(i, w, v))
    return prob_instance
# purpose: find the maximum weight that is possible to carry
# input: prob_instance - list of objects
# return: maximum weight (75% of the sum)
def max_weight(prob_instance):
    """Knapsack capacity: 75% of the summed object weights, rounded to int."""
    total_weight = sum(obj[1] for obj in prob_instance)
    return int(round(total_weight * 0.75))
# purpose: make a list from a given problem instance
# input: prob_instance - list of objects with their weights and values
# choice: 0 - objects, 1 - weights, 2 - values
# return: a list made from the given choice
def make_list(prob_instance, choice):
    """Project one column from the instance: 0=object id, 1=weight, 2=value."""
    return [obj[choice] for obj in prob_instance]
# purpose: brute force implementation of 0-1 Knapsack Problem
# input: max_w - maximum weight of the prob_instance
# n - number of objects
# w - list of weights
# v - list of values
# return: solution
def brute_force(max_w, n, w, v):
    """Exhaustive 0-1 knapsack: best total value using the first n objects.

    max_w -- remaining capacity; w, v -- parallel weight/value lists.
    Each object is either skipped or, if it fits, taken.
    """
    if n == 0 or max_w == 0:
        return 0
    without_last = brute_force(max_w, n - 1, w, v)
    if w[n - 1] > max_w:
        # Object n cannot fit in the remaining capacity.
        return without_last
    with_last = v[n - 1] + brute_force(max_w - w[n - 1], n - 1, w, v)
    return max(with_last, without_last)
# purpose: dynamic programming implementation of 0-1 Knapsack Problem
# input: max_w = maximum weight of the prob_instance
# n - number of objects
# wt - list of weights
# v - list of values
# return: solution
def dynamic_programming(max_w, n, wt, v):
    """Bottom-up 0-1 knapsack.

    Fix: this was an unimplemented stub that printed "hi" and returned
    None, with the real algorithm commented out; the implementation is
    restored.  K[i][w] is the best value achievable with the first i
    objects under capacity w; the answer is K[n][max_w].
    O(n * max_w) time and space.
    """
    K = [[0] * (max_w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, max_w + 1):
            if wt[i - 1] <= w:
                # Best of skipping object i or taking it.
                K[i][w] = max(v[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w])
            else:
                K[i][w] = K[i - 1][w]
    return K[n][max_w]
def main():
    # CLI driver: python asn2.py <start> <n> <step> <algorithm>
    # For each problem size i in [start, n] (step apart) it builds one
    # random instance and times the chosen knapsack algorithm.
    # (Python 2 print-statement syntax throughout.)
    if len(sys.argv) != 5:
        print "Please type: python asn2.py <start> <n> <step> <algorithm>"
        print "Choices of algorithm: {} - brute force, {} - greedy, " \
              "{} - dynamic programming".format('b','g','d')
    else:
        start = int(sys.argv[1])
        n = int(sys.argv[2])
        step = int(sys.argv[3])
        choice = sys.argv[4]
        print "Start = {} | n = {} | Step = {}\n".format(start, n, step)
        print "n\tMax Weight\tSolution\tTime(ms)"
        for i in range(start, n+1, step):
            if choice == 'b':
                # Brute-force: build instance, solve, report wall time in ms.
                one = generate_one_instance(start, i, step)
                w = make_list(one, 1)
                v = make_list(one, 2)
                max_w = max_weight(one)
                start_time = timeit.default_timer()
                solution = brute_force(max_w, i, w, v)
                elapsed = int((timeit.default_timer() - start_time) * 1000)
                print "{}\t{}\t\t{}\t\t{}".format(i, max_w, solution, elapsed)
            elif choice == 'g':
                # Greedy algorithm not implemented yet.
                print "greedy"
            elif choice == 'd':
                # Dynamic programming variant of the same pipeline.
                print "n: ", i
                one = generate_one_instance(start, i, step)
                w = make_list(one, 1)
                v = make_list(one, 2)
                max_w = max_weight(one)
                start_time = timeit.default_timer()
                solution = dynamic_programming(max_w, i, w, v)
                elapsed = int((timeit.default_timer() - start_time) * 1000)
                print "{}\t{}\t\t{}\t\t{}".format(i, max_w, solution, elapsed)
            else:
                print "Invalid algorithm choice!"
                print "I: ", i
                print generate_one_instance(start, i, step)
| true |
5803284239ca515aff903f817ac17a7e9fd8dd89 | Python | letscodedjango/py-se-bootcamp | /py_se_day07/use_student_class.py | UTF-8 | 757 | 2.625 | 3 | [
"MIT"
] | permissive | from pysebootcamp.py_se_day07 import student_module
from pysebootcamp.py_se_day07.student_module import Student
# john = student_module.Student("John", 26, "B.E", 13244, "john@gmale.com", 1, "Python")
# mike = student_module.Student("Mike", 27, "B.E", 34353, 2, "Java")
# jenny = student_module.Student("Jenny", 23, "B.E", 758597, 3, "JavaScript", email="jenny@gmale.com")
# ramu = student_module.Student("Ramu", 26, "Ph.D", 343536, "rambo@gmale.com", 4, "Python")
# Instantiate sample students.
# NOTE(review): the four calls pass arguments in different orders --
# `mike` omits the email entirely while `john`/`ramu` pass it
# positionally and `jenny` by keyword.  Student's signature lives in
# student_module; confirm these positional calls bind as intended.
john = Student("John", 26, "B.E", 13244, "john@gmale.com", 1, "Python")
mike = Student("Mike", 27, "B.E", 34353, 2, "Java")
jenny = Student("Jenny", 23, "B.E", 758597, 3, "JavaScript", email="jenny@gmale.com")
ramu = Student("Ramu", 26, "Ph.D", 343536, "rambo@gmale.com", 4, "Python")
print(john.name)
print(mike.name)
# NOTE(review): duplicate of the previous line -- possibly meant jenny or ramu.
print(mike.name)
0a2396582730a9b3c430fa8c85f72e0afb23dbbf | Python | renantarouco/ff-neuralnet | /ff_neuralnet.py | UTF-8 | 3,467 | 3.03125 | 3 | [] | no_license | import numpy as np
from tqdm import tqdm
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
class FirstFFNetwork:
    """Hand-rolled 2-2-1 feed-forward network with sigmoid units.

    Weight naming: wLij = layer L, neuron i, input j; bLi = bias of
    neuron i in layer L.  Trained by batch gradient descent on a
    squared-error loss.
    """

    def __init__(self):
        # First layer and biases
        self.w111 = np.random.randn()
        self.w112 = np.random.randn()
        self.w121 = np.random.randn()
        self.w122 = np.random.randn()
        self.b11 = 0
        self.b12 = 0
        # Second layer weights and bises
        self.w211 = np.random.randn()
        self.w212 = np.random.randn()
        self.b21 = 0

    def sigmoid(self, x):
        # Logistic activation: maps any real input into (0, 1).
        return 1.0/(1.0 + np.exp(-x))

    def feed_forward(self, x):
        """Forward pass for a single 2-feature sample x.

        Caches every pre-activation (aLi) and activation (hLi) on self
        so back_propagation can reuse them; returns the scalar output h21.
        """
        self.x1, self.x2 = x
        # First layer pre (axx) and post-activation (hxx)
        self.a11 = self.w111 * self.x1 + self.w112 * self.x2 + self.b11
        self.h11 = self.sigmoid(self.a11)
        self.a12 = self.w121 * self.x1 + self.w122 * self.x2 + self.b12
        self.h12 = self.sigmoid(self.a12)
        # Second layer pre (axx) and post-activation (hxx)
        self.a21 = self.w211 * self.h11 + self.w212 * self.h12 + self.b21
        self.h21 = self.sigmoid(self.a21)
        return self.h21

    def back_propagation(self, x, y):
        """Compute per-sample gradients d<param> via the chain rule.

        Each d-term is (h21 - y) * sigmoid'(a21) propagated back through
        the relevant weight path; sigmoid'(a) = h * (1 - h) throughout.
        """
        self.feed_forward(x)
        # Second layer errors (mse)
        self.dw211 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.h11
        self.dw212 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.h12
        self.db21 = (self.h21 - y) * self.h21 * (1 - self.h21)
        # First layer errors (mse)
        self.dw111 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.w211 * self.h11 * (1 - self.h11) * self.x1
        self.dw112 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.w211 * self.h11 * (1 - self.h11) * self.x2
        self.db11 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.w211 * self.h11 * (1 - self.h11)
        self.dw121 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.w212 * self.h12 * (1 - self.h12) * self.x1
        self.dw122 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.w212 * self.h12 * (1 - self.h12) * self.x2
        self.db12 = (self.h21 - y) * self.h21 * (1 - self.h21) * self.w212 * self.h12 * (1 - self.h12)

    def fit(self, X, Y, epochs=1, learning_rate=1, init=True, display_loss=False):
        """Batch gradient descent over (X, Y).

        init=True re-randomizes the weights first; display_loss=True
        records MSE per epoch and plots it with matplotlib at the end.
        """
        if init:
            self.__init__()
        if display_loss:
            losses = {}
        for i in tqdm(range(epochs), total=epochs, unit='epoch'):
            # Accumulate gradients across the whole batch before updating.
            dw111, dw112, dw121, dw122, dw211, dw212, db11, db12, db21 = [0]*9
            for x, y in zip(X, Y):
                self.back_propagation(x, y)
                dw111 += self.dw111
                dw112 += self.dw112
                dw121 += self.dw121
                dw122 += self.dw122
                dw211 += self.dw211
                dw212 += self.dw212
                db11 += self.db11
                db12 += self.db12
                db21 += self.db21
            # NOTE(review): m is X.shape[1] (the feature count, here 2),
            # not the sample count X.shape[0]; a mean gradient usually
            # divides by the number of samples -- confirm this scaling.
            m = X.shape[1]
            self.w111 -= learning_rate * dw111 / m
            self.w112 -= learning_rate * dw112 / m
            self.w121 -= learning_rate * dw121 / m
            self.w122 -= learning_rate * dw122 / m
            self.w211 -= learning_rate * dw211 / m
            self.w212 -= learning_rate * dw212 / m
            self.b11 -= learning_rate * db11 / m
            self.b12 -= learning_rate * db12 / m
            self.b21 -= learning_rate * db21 / m
            if display_loss:
                Y_pred = self.predict(X)
                losses[i] = mean_squared_error(Y_pred, Y)
        if display_loss:
            plt.plot(np.array(list(losses.values())).astype(float))
            plt.xlabel('Epochs')
            plt.ylabel('Mean Squared Errors')
            plt.show()

    def predict(self, X):
        """Return an array with one forward-pass output per row of X."""
        Y_pred = []
        for x in X:
            y_pred = self.feed_forward(x)
            Y_pred.append(y_pred)
        return np.array(Y_pred)
| true |
68c2a866918447c2dc7c080d0e34531f86ad2b58 | Python | phanrahan/magma | /magma/backend/mlir/printer_base.py | UTF-8 | 852 | 3.328125 | 3 | [
"MIT"
] | permissive | import io
import sys
class PrinterBase:
    """Indentation-aware text printer writing to a stream.

    Tracks an indent depth (push/pop) and whether the current output
    line has been terminated (_flushed), so that `print` only emits
    leading indentation at the start of a fresh line.

    Fix: `print_line` assigned the misspelled attribute ``_flusehd``,
    leaving ``_flushed`` stale so a `print` immediately following a
    `print_line` skipped its indentation.
    """

    def __init__(self, tab: int = 4, sout: io.TextIOBase = sys.stdout):
        self._tab = tab          # spaces per indent level
        self._indent = 0         # current indent depth
        self._sout = sout        # destination stream
        self._flushed = True     # True when positioned at the start of a line

    def push(self):
        """Increase the indent level by one."""
        self._indent += 1

    def pop(self):
        """Decrease the indent level; raise on underflow."""
        if self._indent == 0:
            raise RuntimeError("Can not match deindent")
        self._indent -= 1

    def _make_indent(self) -> str:
        # Leading whitespace for the current indent depth.
        return f"{' ' * (self._indent * self._tab)}"

    def flush(self):
        """Terminate the current line."""
        self._sout.write("\n")
        self._flushed = True

    def print(self, s: str):
        """Write *s*, indenting first if at the start of a line."""
        tab = self._make_indent() if self._flushed else ""
        self._sout.write(f"{tab}{s}")
        self._flushed = False

    def print_line(self, line: str):
        """Write a full indented line and terminate it."""
        self._sout.write(f"{self._make_indent()}{line}\n")
        self._flushed = True  # fix: was `self._flusehd` (typo)
| true |
6dcfc3b51fd6c132c278d1a7c8511408941725a2 | Python | zhangyu345293721/leetcode | /src/leetcodepython/string/day_year_1154.py | UTF-8 | 2,690 | 3.96875 | 4 | [
"MIT"
] | permissive | # encoding='utf-8'
import datetime
'''
/**
* This is the solution of No. 1154 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/day-of-the-year
* <p>
* The description of problem is as follow:
* ==========================================================================================================
* 给你一个按 YYYY-MM-DD 格式表示日期的字符串 date,请你计算并返回该日期是当年的第几天。
*
* 通常情况下,我们认为 1 月 1 日是每年的第 1 天,1 月 2 日是每年的第 2 天,依此类推。每个月的天数与现行公元纪年法(格里高利历)一致。
*
* 示例 1:
*
* 输入:date = "2019-01-09"
* 输出:9
* 示例 2:
*
* 输入:date = "2019-02-10"
* 输出:41
* 示例 3:
*
* 输入:date = "2003-03-01"
* 输出:60
* 示例 4:
*
* 输入:date = "2004-03-01"
* 输出:61
*
* 提示:
*
* date.length == 10
* date[4] == date[7] == '-',其他的 date[i] 都是数字。
* date 表示的范围从 1900 年 1 月 1 日至 2019 年 12 月 31 日。
*
* 来源:力扣(LeetCode)
* 链接:https://leetcode-cn.com/problems/day-of-the-year
* 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
class Solution:
    """LeetCode 1154: ordinal day of the year for a YYYY-MM-DD date."""

    def day_of_year(self, date: str) -> int:
        """Return the day-of-year using the standard library's parser."""
        parsed = datetime.datetime.strptime(date, "%Y-%m-%d")
        return parsed.timetuple().tm_yday

    def day_of_year3(self, date: str) -> int:
        """Manual variant: sum the lengths of all preceding months."""
        year, month, day = date.split('-')
        # Index 0 is a sentinel so month numbers index directly.
        month_lengths = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        if self.is_leap(int(year)):
            month_lengths[2] += 1  # February gains a day in leap years
        preceding_days = sum(month_lengths[:int(month)])
        return preceding_days + int(day)

    def is_leap(self, year: int) -> bool:
        """Gregorian leap-year rule: divisible by 4 but not 100, or by 400."""
        return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
if __name__ == '__main__':
    # Sanity check: 2020 is a leap year, so Feb 11 is day 31 + 11 = 42.
    date = '2020-2-11'
    solution = Solution()
    num = solution.day_of_year(date)
    assert num == 42
| true |