blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a48eeed53d7025c694f075e7a9fec206de58c792
|
Python
|
jdf/processing.py
|
/mode/examples/Basics/Transform/Rotate/Rotate.pyde
|
UTF-8
| 797
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
"""
Rotate.
Rotating a square around the Z axis. To get the results
you expect, send the rotate function angle parameters that are
values between 0 and PI*2 (TWO_PI which is roughly 6.28). If you prefer to
think about angles as degrees (0-360), you can use the radians()
method to convert your values. For example: rotate(radians(90))
is identical to the statement rotate(PI/2).
"""
angle = 0
jitter = 0
def setup():
size(640, 360)
noStroke()
fill(255)
rectMode(CENTER)
def draw():
background(51)
global jitter
# during even-numbered seconds (0, 2, 4, 6...)
if second() % 2 == 0:
jitter = random(-0.1, 0.1)
global angle
angle = angle + jitter
c = cos(angle)
translate(width / 2, height / 2)
rotate(c)
rect(0, 0, 180, 180)
| true
|
e10d6cde11dbf05ba699566e7372f9b0ea6d144f
|
Python
|
berinhard/sketches
|
/s_028/s_028.pyde
|
UTF-8
| 1,076
| 3.515625
| 4
|
[] |
no_license
|
# Author: Berin
# Sketches repo: https://github.com/berinhard/sketches
import time
from datetime import datetime
from random import choice, shuffle
def equation_1(teta, x_offset=0, y_offset=0):
r = 1 - (cos(teta) * sin(teta))
x = r * cos(teta) * 150
y = r * sin(teta) * 90
return x + x_offset, y + y_offset
def equation_2(teta, x_offset=0, y_offset=0):
r = cos(teta/2)
x = r * cos(teta) * 50
y = sin(teta) * 160
return x + x_offset, y + y_offset
def setup():
size(800, 800)
strokeWeight(5)
background(30)
frameRate(70)
YELLOW = (218, 165, 32, 200)
GREEN = (57, 255, 77, 200)
RED = (184, 15, 10, 200)
COLORS = [YELLOW] * 16 + [GREEN] * 12 + [RED] * 8
def draw():
background(30)
for i in range(5):
stroke(*COLORS[(frameCount + i) % len(COLORS)])
teta = radians(frameCount + i * 20)
x1, y1 = equation_1(teta, width / 4, height/4)
#point(x1, y1)
x2, y2 = equation_2(teta, 3 * width/4, 3 * height/4)
#point(x2, y2)
line(x1, y1, x2, y2)
| true
|
b44448709e843c1c5b82aa19f4472b22a0d40ae0
|
Python
|
SabaDD/Model-Interpretation
|
/Prediction.py
|
UTF-8
| 1,178
| 2.578125
| 3
|
[] |
no_license
|
import numpy as np
from sklearn.metrics import roc_curve, auc
from image_preprocessing import change_data
def model_prediction(custom_resnet_model, X_test, y_test, left,right,upper,lower):
roc_auc_cv = []
new_list = change_data(X_test,LEFT = left,RIGHT = right,UPPER = upper,LOWER = lower)
new_list = np.array(new_list)
# plt.figure
# plt.imshow((new_list[0] * 255).astype(np.uint8))
# print('this is the new list: ============> '+ str(new_list[0]))
# Plot ROC_AUC curve
y_score = custom_resnet_model.predict(new_list)
pred = y_score[:, 1] # only positive cases
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test[:,1], pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
# plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f' % roc_auc)
# plt.legend(loc='lower right')
# plt.xlabel('False positive rate')
# plt.ylabel('True positive rate')
#plt.show()
roc_auc_cv.append(roc_auc)
# plt.savefig('Plots/Auc'+ str(ii)+'-'+str(k)+'.jpg')
# plt.close()
# print("Average AUC= %.2f "%np.mean(roc_auc_cv))
return np.mean(roc_auc_cv)
| true
|
d09a2b002d574074d72ba3254d5515df4b5ccbaa
|
Python
|
loovien/jokercrawl
|
/jokers/spiders/test.py
|
UTF-8
| 2,174
| 3.171875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import MySQLdb
import datetime
import time
import random
from MySQLdb.cursors import DictCursor
def str_test():
string = list("ๆๆฏไธไธชไธญๅฝไบบ")
print(string[0:4])
print("".join(string))
def time_parse():
now = datetime.datetime.now()
str_time = now.strftime("%m-%d %H:%M")
print(">>>>> parse time:")
with_year = "{year}-{suffix}".format(year="2017", suffix=str_time)
print(with_year)
new_time = datetime.datetime.strptime(with_year, "%Y-%m-%d %H:%M")
print(">>>>> string to time:")
print(new_time.timestamp())
def connect_mysql(**kwargs):
options = {
"host": kwargs.pop("host", "localhost"),
"user": kwargs.pop("user", "root"),
"password": kwargs.pop("password", "111111"),
"database": kwargs.pop("database", "hahajok"),
"port": kwargs.pop("port", 3306),
"charset": kwargs.pop("charset", "utf8")
}
print(options)
conn = MySQLdb.Connect(options)
cursor = conn.cursor()
cursor.execute("select * from user")
result = cursor.fetchall()
print(result)
def rand_test():
a = random.randint(1, 1000)
print(a)
def fetch_mysql():
conn = MySQLdb.Connect(host="localhost", user="root", password="111111", db="hahajok", port=3306,
charset="utf8", cursorclass=DictCursor, autocommit=True)
cursor = conn.cursor()
sql = "insert into joker (id, title, content) values (1111, 'xixix', 'hahahhahah')"
print(sql)
cursor.execute(sql)
result_set = cursor.fetchall()
print(list(result_set))
cursor.close()
def time_compute():
now = datetime.datetime.now()
now_timestamp = now.timestamp()
print(">>>> now timestamp")
print(now)
print(now_timestamp)
lastweek = now - datetime.timedelta(weeks=1)
lastweek_timestamp = lastweek.timestamp()
print(">>>>> last week timestamp")
print(lastweek)
print(lastweek_timestamp)
if __name__ == '__main__':
# time_compute()
# str_test()
# time_parse()
# fetch_mysql()
# connect_mysql()
# rand_test()
l1 = [i for i in range(1, 100, 20)]
print(l1)
| true
|
e5db5b8ba51628cb4eae9e8013c5e25cd669c010
|
Python
|
namonai/UebungSS19
|
/Session5/session5_uebung_loesung.py
|
UTF-8
| 3,946
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/python3
import matplotlib.pyplot as plt
from numpy.random import rand
import pandas
#------------------scipy-------------------
# https://www.scipy.org/
# SciPy (pronounced โSigh Pieโ) is a Python-based ecosystem of open-source software for mathematics, science, and engineering.
#------------------matplotlib-------------------
# https://matplotlib.org/
# Matplotlib is a Python 2D plotting library which produces publication quality figures in
# a variety of hardcopy formats and interactive environments across platforms.
# Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook,
# web application servers, and four graphical user interface toolkits.
#
# You'll need following matplotlib functions:
# --> scatter()
#--------------------numpy----------------------
# https://www.numpy.org/
# NumPy is the fundamental package for scientific computing with Python. It contains among other things:
# a powerful N-dimensional array object
# sophisticated (broadcasting) functions
# tools for integrating C/C++ and Fortran code
# useful linear algebra, Fourier transform, and random number capabilities
# Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data.
# Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases.
#
# You'll need following numpy functions:
# --> rand()
#--------------------pandas----------------------
# https://pandas.pydata.org/
# pandas is an open-source Python library that provides high performance data analysis tools and easy to use data structures.
# pandas is available for all Python installations, but it is a key part of the Anaconda distribution and works extremely well in Jupyter notebooks to share data,
# code, analysis results, visualizations, and narrative text.
#
# You'll need following pandas functions:
#--> read_csv
#--> to_csv
#--> dataFrames
#--> informations about dataframes https://www.tutorialspoint.com/python_pandas/python_pandas_dataframe.htm
# *************Exercise***************
#
# Look at the example Plot from Matplotlib
# https://matplotlib.org/gallery/lines_bars_and_markers/scatter_with_legend.html#sphx-glr-gallery-lines-bars-and-markers-scatter-with-legend-py
# Group A:
# in the current file create a function "createData(data_points,filename)" that takes two arguments
# int data_points = "number of datapoints e.g. 250"
# string filename = "a filename e.g. scatterPlot.csv"
# this function should create the specified number of datapoints suitable to be consumed for the scatterplot from the example above
# and write this date to a csv file named like the second argument in the current folder
# Group B:
# in the current file create a function "createPlot(filename)" that takes one argument
# string filename = "a filename e.g. scatterPlot.csv"
# this function should read a csv file named 'filename' from the current folder and create a scatterplot like in the example above
def createData(data_points,file):
data = []
for color in ['red', 'green', 'blue']:
#Create an array of the given shape and populate it with random samples from a uniform distribution
x_array, y_array = rand(2, data_points)
scale_array = 200.0 * rand(data_points)
for index in range(0, data_points):
data.append({'x':x_array[index],'y':y_array[index], 'color':color, 'scale':scale_array[index]})
df=pandas.DataFrame(data,columns=['x','y','color','scale'])
df.to_csv(file)
def createPlot(file_name):
df = pandas.read_csv(file_name)
print(df)
plt.scatter(df['x'], df['y'], s=df['scale'], c=df['color'], alpha=0.5)
plt.show()
createData(130,'scatterPLot.csv')
createPlot('scatterPLot.csv')
| true
|
ad5fc7b18f1795a71546e4909c238cfc55a8e7a8
|
Python
|
uwubackup/Cloud-Nine
|
/cogs/mod.py
|
UTF-8
| 5,270
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
import json
import dotenv
import discord
import io
import asyncio
from discord.ext import commands
from dotenv import load_dotenv
import aiohttp
from io import BytesIO
class Mod(commands.Cog):
def __init__(self, commands):
self.commands = commands
@commands.command()
@commands.has_permissions(kick_members=True)
async def warn(self, ctx, member: discord.Member, *, reason='No reason Provided'):
"""Warns a member with the provided reason"""
with open('databases/warnings.json', 'r') as f:
warns = json.load(f)
if str(ctx.guild.id) not in warns:
warns[str(ctx.guild.id)] = {}
if str(member.id) not in warns[str(ctx.guild.id)]:
warns[str(ctx.guild.id)][str(member.id)] = {}
warns[str(ctx.guild.id)][str(member.id)]["warns"] = 1
warns[str(ctx.guild.id)][str(member.id)]["warnings"] = [reason]
else:
warns[str(ctx.guild.id)][str(member.id)]["warnings"].append(reason)
with open('warnings.json', 'w') as f:
json.dump(warns, f)
await ctx.send(f"{member.mention} was warned for: {reason}")
print(reason)
embed = discord.Embed(
description=str(member + " is warned | Reason = " + reason),
colour=discord.Colour.blue()
)
await ctx.send(embed = embed)
@commands.command()
async def warns(self, ctx, member: discord.Member):
with open('warnings.json', 'r') as f:
warns = json.load(f)
num = 1
warnings = discord.Embed(title=f'{member}\'s warns ', color = ctx.author.color)
for warn in warns[str(ctx.guild.id)][str(member.id)]["warnings"]:
warnings.add_field(name=f"Warn {num}", value=warn)
num += 1
await ctx.send(embed=warnings)
@commands.command()
@commands.has_permissions(kick_members=True)
async def removewarn(self, ctx, member: discord.Member, num: int, *, reason='No reason provided.'):
"""Removes specified warn from warnings.json"""
with open('warnings.json', 'r') as f:
warns = json.load(f)
num -= 1
warns[str(ctx.guild.id)][str(member.id)]["warns"] -= 1
warns[str(ctx.guild.id)][str(member.id)]["warnings"].pop(num)
with open('warnings.json', 'w') as f:
json.dump(warns, f)
await ctx.send('Warn has been removed!')
embed = discord.Embed(title='Your warn has been removed',
description=f'Your warning was removed by {ctx.author}')
await member.send(embed=embed)
@commands.command(alises = ['k'])
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason):
"""Kicks the mentioned member"""
print(reason)
embed = discord.Embed(
description=str(
str(member) + " is Kicked | reason = " + reason),
colour=discord.Colour.green()
)
await member.kick(reason=reason)
await member.send(embed=embed)
await ctx.channel.send(embed = embed)
@commands.command(alises = ['b'])
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason):
"""Bans the mentioned member"""
print(reason)
embed = discord.Embed(
description=str(
str(member) + " is banned | reason = " + reason),
colour=discord.Colour.green()
)
await member.ban(reason=reason)
await member.send(embed=embed)
@commands.command()
@commands.has_permissions(kick_members=True)
async def mute(self, ctx, member: discord.Member, *, reason):
"""Gives muted role to the mentioned user"""
print(reason)
Muted = discord.utils.get(ctx.guild.roles, name="Muted")
await member.add_roles(Muted)
embed = discord.Embed(
description=str(
str(member) + " is Muted | reason = " + reason),
colour=discord.Colour.red()
)
await member.send(embed=embed)
@commands.command()
@commands.has_permissions(kick_members=True)
async def unmute(self, ctx, member: discord.Member, *, reason="No reason specified"):
"""Unmutes the mentioned member"""
print(reason)
Muted = discord.utils.get(ctx.guild.roles, name="Muted")
await member.remove_roles(Muted)
embed = discord.Embed(
description=str(
str(member) + " is Unmuted | reason = " + reason),
colour=discord.Colour.green()
)
await member.send(embed=embed)
@commands.command(aliases=["purge"])
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, args):
await ctx.channel.purge(limit=int(args) + 1)
@commands.command(aliases =["ub"])
@commands.has_permissions(ban_members = True)
async def unban(self, ctx, id: int):
user = await commands.fetch_user(id)
await ctx.guild.unban(user)
def setup(bot):
bot.add_cog(Mod(bot))
| true
|
510bd6038e2c8f27e92994e801b09dc89ac9913d
|
Python
|
yiyang186/autostatest
|
/utils.py
|
UTF-8
| 2,174
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from scipy.stats import t
from scipy.stats import norm
def compare0(_p, alpha):
print("p = {0}, alpha = {1}".format(_p, alpha))
if _p >= alpha:
print("p >= {0}".format(alpha))
print("ๅทฎๅผๆ ็ป่ฎกๅญฆๆไน")
else:
if _p > alpha * 0.4:
print("{0} < p < {1}".format(0.4*alpha, alpha))
else:
print("p < {0}".format(alpha))
print("ๅทฎๅผๆ็ป่ฎกๅญฆๆไน")
def compare1(_p, alpha):
print("p = {0}, alpha = {1}".format(_p, alpha))
if _p >= alpha:
print("p >= alpha, ๅ้้ดๅทฎๅผๆ ็ป่ฎกๅญฆๆไน")
elif np.abs(_p - alpha) < alpha / 10.0:
print("p=alpha, ่ฏทไฝฟ็จFisher็กฎๅๆฆ็ๆณ")
else:
if _p > alpha * 0.4:
print("{0} < p < {1}".format(0.4*alpha, alpha))
else:
print("p < {0}".format(alpha))
print("ๅ้้ดๅทฎๅผๆ็ป่ฎกๅญฆๆไน")
def method_check(method, methods):
if method not in methods:
print("ๆๆถๅชๆฏๆไธๅๆนๆณ:")
for m in methods:
print(m)
raise NotImplementedError("ไธๆฏๆ่ฟ็งๆนๆณ๏ผ{0}".format(method))
return None
def type_check(a):
if type(a) == np.ndarray:
return a
try:
a = np.array(a)
except:
raise IOError("ERROR: ่ฏท่พๅ
ฅnumpyๆฐ็ปๆๅ่กจ!!!")
return a
def sample_std(x):
return np.sqrt(((x - x.mean())**2).sum() / (x.size-1))
def CI_population_mean(x, ci=0.95, sigma=None):
n = x.size
m = x.mean()
s = x.std()
return CI_population_mean_base(n, m, s, ci=ci, sigma=sigma_SE)
# def CI_population_mean_diff(x1, x2, )
def CI_population_mean_base(n, m, s, ci=0.95, sigma=None):
alpha = 1 - ci
s_SE = s / np.sqrt(n)
if sigma or n > 60:
_u = norm.isf(alpha/2)
if sigma:
sigma_SE = sigma / np.sqrt(n)
return (m - _u * sigma_SE, m + _u * sigma_SE)
else:
return (m - _u * s_SE, m + _u * s_SE)
else:
_t = t.isf(alpha/2, n-1)
return (m - _t * s_SE, m + _t * s_SE)
| true
|
4d95d920337062c170398fed2c89fbe177e8377c
|
Python
|
MaherBhasvar/Hackout3
|
/backend Server/fetch.py
|
UTF-8
| 4,728
| 2.609375
| 3
|
[] |
no_license
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from time import sleep
import numpy as np
import re
def lat_lon(city):
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://www.google.com/maps')
driver.find_element_by_xpath(
"//input[@id='searchboxinput']").send_keys(city)
driver.find_element_by_xpath(
"//button[@id='searchbox-searchbutton']").click()
sleep(5)
url = driver.current_url
url = (re.findall(r'@.+/', url)[0])[1:-1].split(',')
lat = url[0][:-2]
lon = url[1][:-2]
driver.close()
return lat, lon
except Exception as e:
print(e)
driver.close()
return -1
def search_buses(a, b, date, seats):
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://bus.makemytrip.com/bus/search/'+a+'/'+b+'/'+date)
sleep(1)
try:
Gbuses = driver.find_element_by_xpath(
"//a[@class='bluePrimarybtn font12']")
if Gbuses != None:
Gbuses.click()
sleep(1)
except:
pass
d = dict()
timing_start = driver.find_elements_by_xpath(
"//span[@class='sc-gqjmRU izTLmu']")
timing_duration = driver.find_elements_by_xpath(
"//span[@class='sc-jzJRlG btSGRi']")
timing_end = driver.find_elements_by_xpath(
"//span[@class='sc-fjdhpX laKnSQ']")
names = driver.find_elements_by_xpath(
"//span[@class='sc-chPdSV glJpds']")
types = driver.find_elements_by_xpath(
"//span[@class='sc-kgoBCf cQRYis']")
#d['rating'] = driver.find_elements_by_xpath("//span[@class='sc-hSdWYo eXbDWv']")
cost = driver.find_elements_by_xpath(
"//span[@class='sc-brqgnP bKUXLW']")
seats = driver.find_elements_by_xpath("//ul[@class='sc-dxgOiQ BhRgU']")
#n_state_bus = len(driver.find_elements_by_xpath("//div[@class='sc-kTUwUJ dmMRtR']")[0].find_elements_by_xpath(".//*"))
#n_pri_bus = len(driver.find_elements_by_xpath("//div[@class='sc-kTUwUJ dmMRtR']")[1].find_elements_by_xpath(".//*"))
c, dd = [], []
for i in cost:
c.append(i.text)
least = np.argmin(c)
for k in timing_duration:
z = k.text.split(' ')
print(z)
if len(z) == 1:
if z[0][-1] == 'h':
dd.append(int(z[0][:-1])*60)
else:
dd.append(int(z[1][:-1]))
else:
dd.append(int(z[0][:-1])*60+int(z[1][:-1]))
l_d = np.argmin(dd)
print(dd)
for i in range(len(timing_duration)):
d[str(i)] = [timing_start[i].text, timing_duration[i].text,
timing_end[i].text, names[i].text+types[i].text, cost[i].text, seats[i].text]
driver.close()
if d == {}:
return -1
else:
return d[str(least)], d[str(l_d)]
except Exception as e:
print(e)
return -1
def search_flights(a, b, date, seats):
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://www.goibibo.com/flights/air-' +
a[0][3]+'-'+b[0][3]+'-'+date+'--'+str(seats)+'-0-0-E-D/')
print('https://www.goibibo.com/flights/air-' +
a[0][3]+'-'+b[0][3]+'-'+date+'--'+str(seats)+'-0-0-E-D/')
d = dict()
timing_start = driver.find_elements_by_xpath(
"//span[@class='fb ico18 padT5 quicks']")
timing_duration = driver.find_elements_by_xpath(
"//div[@class='ico15 fb txtCenter quicks padT5']")
timing_end = driver.find_elements_by_xpath(
"//span[@data-cy='arrTime']")
names = driver.find_elements_by_xpath(
"//span[@class='greyLt ico13 padR10 padL5']")
cost = driver.find_elements_by_xpath(
"//span[@class='ico20 fb quicks']")
c, dd = [], []
for i in cost:
c.append(i.text)
least = np.argmin(c)
for k in timing_duration:
z = k.text.split(' ')
dd.append(int(z[0][:-1])*60+int(z[1][:-1]))
l_d = np.argmin(dd)
for i in range(len(timing_duration)):
d[str(i)] = [timing_start[i].text, timing_duration[i].text,
timing_end[i].text, names[i].text, cost[i].text]
driver.close()
if d == {}:
return -1
else:
return d[str(least)], d[str(l_d)]
except Exception as e:
print(e)
return -1
| true
|
2fe3f770fb091e92723dac74a9a83aafc83c7a1f
|
Python
|
venky5522/venky
|
/oops_concept/polymorphism/method_overload.py
|
UTF-8
| 146
| 3.296875
| 3
|
[] |
no_license
|
class a:
def hello(self,*args):
print("hello")
obj = a()
obj.hello()
obj.hello(10)
obj.hello(10,20,30)
obj.hello(10,20,30,40)
| true
|
96c603af0051ec6b2e1341824037dbb9dce2190d
|
Python
|
andy-keene/AI
|
/alpha-beta/minimax.py
|
UTF-8
| 862
| 3.25
| 3
|
[] |
no_license
|
from node import Node
pos_inf = float('inf')
neg_inf = float('-inf')
def alpha_beta_search(node):
v = max_value(node, neg_inf, pos_inf)
node._value = v
return v, node
def max_value(node, alpha, beta):
#print('in max player: ', node._player, ' state ', node._state)
if node.is_terminal():
return node.utility_value()
v = neg_inf
for successor in node.generate_successors():
v = max(v, min_value(successor, alpha, beta))
node._value = v
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(node, alpha, beta):
#print('in min player: ', node._player, ' state ', node._state)
if node.is_terminal():
return node.utility_value()
v = pos_inf
for successor in node.generate_successors():
v = min(v, max_value(successor, alpha, beta))
node._value = v
if v <= alpha:
return v
beta = min(beta, v)
return v
| true
|
789e028464443997e9881fcba5e74d66193d88ce
|
Python
|
d-chambers/spype
|
/spype/utils.py
|
UTF-8
| 10,013
| 2.71875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
A number of utilities for sflow
"""
import copy
import inspect
import os
import time
import types
from collections import Sequence, OrderedDict
from inspect import signature, Signature
from typing import Optional, Callable, Tuple, Set, Mapping
from spype.constants import adapt_type, args_kwargs_type
from spype.exceptions import NoReturnAnnotation
# ----------------------- context stuff
_SFLOW_CONTEXT = dict(
check_type=True,
check_compatibility=True,
on_failure=None,
on_success=None,
on_finish=None,
on_start=None,
print_flow=False,
predicate=None,
)
class Context:
""" A class for controlling modifications made to a dictionary,
used to sensibly store global state """
def __init__(self, input_dict: dict):
"""
Parameters
----------
input_dict
A dictionary for holding modifiable state.
"""
self._dict = input_dict
self._previous_state = {}
def __call__(self, _save_state=True, **kwargs):
"""
Set global options for how spype behaves.
If an unsupported value is set a KeyError will be raised.
"""
if not set(kwargs).issubset(self._dict):
diff = set(kwargs) - set(self._dict)
msg = (
f"unsupported option(s): {diff} passed to set_options. "
f"supported uptions are {set(self._dict)}"
)
raise KeyError(msg)
if _save_state:
self._previous_state = copy.deepcopy(self._dict)
self._dict.update(kwargs)
return self
def __getitem__(self, item):
return self._dict[item]
def __setitem__(self, key, value):
self(**{key: value})
def items(self):
return _SFLOW_CONTEXT.items()
def __enter__(self):
pass
def __repr__(self):
return str(self._dict)
def __exit__(self, exc_type, exc_val, exc_tb):
self(_save_state=False, **self._previous_state)
context = Context(_SFLOW_CONTEXT)
# ------------------- File lock for thread-safe log
class FileLockException(Exception):
pass
class FileLock(object):
"""
File lock based on https://github.com/dmfrey/FileLock
"""
def __init__(self, file_path, timeout=10, delay=0.1):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.file_path = file_path
self.lockfile = f"{file_path}.lock~"
self.timeout = timeout
self.delay = delay
@property
def is_locked(self):
return os.path.exists(self.lockfile)
def lock(self):
""" create the lock file """
with open(self.lockfile, "w") as fi:
fi.write("occupied")
def release(self):
""" release the lock """
if self.is_locked:
os.remove(self.lockfile)
def acquire(self):
"""
Try to acquire the lock.
"""
start_time = time.time()
while (time.time() - start_time) < self.timeout:
if self.is_locked: # try sleeping a bit
time.sleep(self.delay)
else: # create lock file and go about your business
self.lock()
break
else:
msg = f"{self.lockfile} still exists after {self.timeout} seconds"
raise IOError(msg)
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
It automatically releases the lock if it isn't locked.
"""
self.release()
def __del__(self):
""" Make sure that the FileLock instance doesn't leave a lockfile
lying around.
"""
self.release()
# ------------------------- function jiggering
def partial_to_kwargs(
func: Callable,
*args,
partial_dict: Optional[dict] = None,
signature: Optional[inspect.Signature] = None,
**kwargs,
) -> dict:
"""
Return a kwargs dict compatible with function or siganture.
Parameters
----------
func
A callable
partial_dict
A dict that may have keys named the same as arguments expected by
func
signature
A signature object, if None then get if from function.
"""
out = dict(kwargs)
sig = signature or inspect.signature(func)
argd = OrderedDict(
(
(item, value)
for item, value in sig.parameters.items()
if item not in partial_dict
)
)
# first bind new args taking out any that are also found in partial_dict
out.update({name: value for name, value in zip(argd, args)})
# get kwargs to bind
shared_keys = set(partial_dict) & set(sig.parameters)
out.update({item: partial_dict[item] for item in shared_keys})
return out
def apply_partial(
func: Callable,
*args,
partial_dict: Optional[Mapping] = None,
signature: Optional[inspect.Signature] = None,
**kwargs,
) -> Tuple[tuple, dict]:
"""
Call func with args and kwargs, supersede with partial_dict.
Inspects a callable and if any argument names match keys in partial
those will be applied.
Parameters
----------
func
A callable
partial_dict
A dict that may have keys named the same as arguments expected by
func
signature
A signature object, if None then get if from function.
Returns
-------
Tuple of args and kwargs which can be input into func
"""
if not partial_dict: # bail out if no special binding to perform
return func(*args, **kwargs)
out = partial_to_kwargs(
func, *args, partial_dict=partial_dict, signature=signature, **kwargs
)
return func(**out)
# --------------------- Args and Kwargs Wrangling
def args_kwargs(output, adapter: Optional[adapt_type] = None) -> args_kwargs_type:
"""
Take the output of a function and turn it into args and kwargs.
Parameters
----------
output
Any output from a function
adapter
A sequence of ints/strings for mapping output into args and kwargs
Returns
-------
tuple
A tuple of args and kwargs
"""
if output is None:
return (), {}
if not isinstance(output, tuple): #
output = (output,)
if adapter is None:
return tuple(output), {}
assert len(adapter) == len(
output
), f"adapter {adapter} and output {output} have different lengths"
# wrangle output into a tuple and a dict based on adapter
return _apply_adapter(output, adapter)
def _apply_adapter(output, adapter):
""" apply an adapter tuple to an output tuple """
out_list = {}
out_dict = {}
for val, item in zip(output, adapter):
if isinstance(item, int):
out_list[item] = val
elif isinstance(item, str):
out_dict[item] = val
# change out_list dict into a tuple
assert set(out_list) == set(range(len(out_list)))
out_tuple = tuple(out_list[x] for x in range(len(out_list)))
return out_tuple, out_dict
def de_args_kwargs(args, kwargs):
"""
Take args and kwargs and turn it into a simple tuple.
"""
out = tuple([x for x in args] + [val for _, val in kwargs.items()])
if len(out) == 1: # unpack if len is 1
out = out[0]
return None if out is () else out
def get_default_names(sig: inspect.Signature) -> Set[str]:
"""
Return a set of parameter names that have default values.
"""
return {
key
for key, value in sig.parameters.items()
if value.default is not inspect._empty
}
def sig_to_args_kwargs(
sig: inspect.Signature, adapter: Optional[tuple] = None
) -> (tuple, dict):
"""
Return an tuple of args and kwargs of types for signature return type.
If no return annotation is given raise a NoReturnAnnotation Exception.
Parameters
----------
sig
The signature that may have return annotations attached.
adapter
A tuple of ints, None, or str to re-arrange the outputs.
Returns
-------
args and kwargs
"""
sig = sig if isinstance(sig, Signature) else signature(sig)
# get output args
if isinstance(sig.return_annotation, tuple):
args = sig.return_annotation
elif sig.return_annotation is inspect._empty:
raise NoReturnAnnotation
# if this is a Tuple typehint strip out args
elif sig.return_annotation.__class__ == Tuple.__class__:
args = sig.return_annotation.__args__
else:
args = (sig.return_annotation,)
# wrangle into args and kwargs
kwargs = {}
if adapter:
args, kwargs = _apply_adapter(args, adapter)
return args, kwargs
# ------------------------ misc functions
def iterate(obj):
""" return an iterable object from any sequence or non-sequence. Return
empty tuple if None """
if obj is None:
return ()
if isinstance(obj, str):
return (obj,)
return obj if isinstance(obj, Sequence) else (obj,)
def function_or_class_name(obj):
"""
Given a callable, try to determine its name. Return 'None' if None.
"""
try: # for decorator tasks
return obj.__name__
except AttributeError: # for class Tasks
return obj.__class__.__name__
def copy_func(f, name=None):
"""
return a function with same code, globals, defaults, closure, and
name (or provide a new name)
"""
fn = types.FunctionType(
f.__code__, f.__globals__, name or f.__name__, f.__defaults__, f.__closure__
)
# in case f was given attrs (note this dict is a shallow copy):
fn.__dict__.update(f.__dict__)
return fn
| true
|
67ab18d2844c9bf312c82362758b7e7f85b210cc
|
Python
|
brianwachira/Tesserect
|
/run.py
|
UTF-8
| 3,784
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3.6
from password_locker import Password_Locker
def initialize_password_locker(username,password):
'''
Function to initialize password locker
'''
new_password_locker = Password_Locker(username,password)
return new_password_locker
def create_account(password_locker):
'''
Function to create user account
'''
return Password_Locker.create_account(password_locker)
def login(password_locker):
'''
Function to log in user
'''
return Password_Locker.login(password_locker)
def add_credentials(password_locker,account_name,account_username,account_password):
'''
Function that adds user's credentials
'''
return Password_Locker.add_credentials(password_locker,account_name,account_username,account_password)
def generate_credentials(password_locker):
'''
Function that generates user's credentials
'''
return Password_Locker.generate_credentials(password_locker)
def generate_password(password_locker):
'''
Function that generates passwords
'''
return Password_Locker.generate_password(password_locker)
def set_password_length(password_locker,length):
'''
Function that generates a password based on the user's length of choice
'''
return Password_Locker.set_password_length(password_locker,length)
def main():
print("Hi there\n")
print("What would you like to do")
print('\n')
instance = ""
while True:
print("Use these short codes\n li - Log in, ca - Create an account, ac - Add credentials, gc - Generate credentials, gp - Generate passwords, gsp - Generate password with a set length, ex - exit")
choice = input()
if(choice == 'li'):
print("Enter your username")
username = input().lower()
print("Enter password")
password = input()
instance = initialize_password_locker(username,password)
if(login(instance)):
print("Login succesful")
else:
print("It seems your account does not exist")
elif(choice == 'ca'):
print("Enter your username")
username = input().lower()
print("Enter password")
password = input()
instance = initialize_password_locker(username,password)
if(create_account(instance)):
print("Account creation succesful\n")
print(f"Welcome {username}")
else:
print("Account already exists")
elif(choice == 'ac'):
print("Enter the account you wish to save")
account_name = input()
print("Enter the username")
username = input()
print("Enter the password")
password = input()
if(add_credentials(instance,account_name,username,password)):
print("Succesful!")
else:
print("Your account does not exist")
elif(choice == 'gc'):
credentials = generate_credentials(instance)
print("The credentials are :\n")
print(f"{credentials}")
elif(choice == 'gp'):
password = generate_password(instance)
print("Kindly copy the password below :")
print(f"{password}")
elif(choice == 'gsp'):
print("Enter the length of a password you'd want")
length_of_password = input()
password = set_password_length(instance,length_of_password)
print("Kindly copy password to clipboard")
print(f"{password}")
else:
print("Bye")
break
if __name__ == '__main__':
main()
| true
|
f5cba8070b9d9055ddcc15f293d90b98a68341d8
|
Python
|
Miralan/Meta-TTS
|
/lightning/callbacks/utils.py
|
UTF-8
| 6,216
| 2.5625
| 3
|
[] |
no_license
|
import os
import json
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
from scipy.io import wavfile
from utils.tools import expand, plot_mel
def synth_one_sample_with_target(targets, predictions, vocoder, preprocess_config):
    """Synthesize the first sample of the batch given target pitch/duration/energy.

    Args:
        targets: batch tuple from the dataloader. By the indexing below,
            targets[0] holds basenames, targets[6] mel targets, and
            targets[9]/[10]/[11] pitch/energy/duration -- assumed from
            usage, TODO confirm against the dataset's collate ordering.
        predictions: model output tuple; predictions[1] is the predicted
            mel, predictions[8]/[9] the source/mel lengths -- TODO confirm.
        vocoder: wrapper exposing ``mel2wav`` and ``infer`` for rendering audio.
        preprocess_config: preprocessing section of the config dict.

    Returns:
        (figure, reconstructed waveform, predicted waveform, basename);
        both waveforms are None when no vocoder model is attached.
    """
    basename = targets[0][0]
    src_len = predictions[8][0].item()
    mel_len = predictions[9][0].item()
    mel_target = targets[6][0, :mel_len].detach().transpose(0, 1)
    duration = targets[11][0, :src_len].detach().cpu().numpy()
    pitch = targets[9][0, :src_len].detach().cpu().numpy()
    energy = targets[10][0, :src_len].detach().cpu().numpy()
    mel_prediction = predictions[1][0, :mel_len].detach().transpose(0, 1)
    # Phoneme-level curves are expanded to frame level via durations so they
    # can be drawn on top of the spectrogram; frame-level ones are resliced.
    if preprocess_config["preprocessing"]["pitch"]["feature"] == "phoneme_level":
        pitch = expand(pitch, duration)
    else:
        pitch = targets[9][0, :mel_len].detach().cpu().numpy()
    if preprocess_config["preprocessing"]["energy"]["feature"] == "phoneme_level":
        energy = expand(energy, duration)
    else:
        energy = targets[10][0, :mel_len].detach().cpu().numpy()
    # stats.json supplies pitch stats plus the first two energy stats
    # (presumably min/max) used by plot_mel for axis scaling -- confirm.
    with open(
        os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
    ) as f:
        stats = json.load(f)
        stats = stats["pitch"] + stats["energy"][:2]
    fig = plot_mel(
        [
            (mel_prediction.cpu().numpy(), pitch, energy),
            (mel_target.cpu().numpy(), pitch, energy),
        ],
        stats,
        ["Synthetized Spectrogram", "Ground-Truth Spectrogram"],
    )
    if vocoder.mel2wav is not None:
        max_wav_value = preprocess_config["preprocessing"]["audio"]["max_wav_value"]
        wav_reconstruction = vocoder.infer(mel_target.unsqueeze(0), max_wav_value)[0]
        wav_prediction = vocoder.infer(mel_prediction.unsqueeze(0), max_wav_value)[0]
    else:
        wav_reconstruction = wav_prediction = None
    return fig, wav_reconstruction, wav_prediction, basename
def recon_samples(targets, predictions, vocoder, preprocess_config, figure_dir, audio_dir):
    """Reconstruct all samples of the batch.

    Saves one ground-truth spectrogram plot per sample to ``figure_dir`` and
    vocodes the target mels to ``audio_dir`` as ``<basename>.recon.wav``.
    Index layout of targets/predictions mirrors
    synth_one_sample_with_target -- TODO confirm collate ordering.
    """
    for i in range(len(predictions[0])):
        basename = targets[0][i]
        src_len = predictions[8][i].item()
        mel_len = predictions[9][i].item()
        mel_target = targets[6][i, :mel_len].detach().transpose(0, 1)
        duration = targets[11][i, :src_len].detach().cpu().numpy()
        pitch = targets[9][i, :src_len].detach().cpu().numpy()
        energy = targets[10][i, :src_len].detach().cpu().numpy()
        # Expand phoneme-level curves to frame level for plotting.
        if preprocess_config["preprocessing"]["pitch"]["feature"] == "phoneme_level":
            pitch = expand(pitch, duration)
        else:
            pitch = targets[9][i, :mel_len].detach().cpu().numpy()
        if preprocess_config["preprocessing"]["energy"]["feature"] == "phoneme_level":
            energy = expand(energy, duration)
        else:
            energy = targets[10][i, :mel_len].detach().cpu().numpy()
        # NOTE(review): stats.json is re-read on every loop iteration; kept
        # as-is, but it could be hoisted out of the loop.
        with open(
            os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
        ) as f:
            stats = json.load(f)
            stats = stats["pitch"] + stats["energy"][:2]
        fig = plot_mel(
            [
                (mel_target.cpu().numpy(), pitch, energy),
            ],
            stats,
            ["Ground-Truth Spectrogram"],
        )
        # `fig` itself is unused; plt.savefig writes the current figure --
        # presumably the one plot_mel just created.
        plt.savefig(os.path.join(figure_dir, f"{basename}.target.png"))
        plt.close()

    # Vocode the whole batch of target mels in a single call.
    mel_targets = targets[6].transpose(1, 2)
    lengths = predictions[9] * preprocess_config["preprocessing"]["stft"]["hop_length"]
    max_wav_value = preprocess_config["preprocessing"]["audio"]["max_wav_value"]
    wav_targets = vocoder.infer(mel_targets, max_wav_value, lengths=lengths)

    sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
    for wav, basename in zip(wav_targets, targets[0]):
        wavfile.write(os.path.join(audio_dir, f"{basename}.recon.wav"), sampling_rate, wav)
def synth_samples(targets, predictions, vocoder, preprocess_config, figure_dir, audio_dir, name):
    """Synthesize the first sample of the batch.

    Despite the (original) summary line, this iterates every sample in the
    batch: it plots each predicted spectrogram to ``figure_dir`` and vocodes
    the predicted mels to ``audio_dir`` as ``<basename>.<name>.synth.wav``.
    Unlike recon_samples, pitch/energy/duration come from ``predictions``
    (indices 2/3/5) -- assumed model output ordering, TODO confirm.
    """
    for i in range(len(predictions[0])):
        basename = targets[0][i]
        src_len = predictions[8][i].item()
        mel_len = predictions[9][i].item()
        mel_prediction = predictions[1][i, :mel_len].detach().transpose(0, 1)
        duration = predictions[5][i, :src_len].detach().cpu().numpy()
        pitch = predictions[2][i, :src_len].detach().cpu().numpy()
        energy = predictions[3][i, :src_len].detach().cpu().numpy()
        # Expand phoneme-level curves to frame level for plotting; the
        # frame-level fallback reads from targets, not predictions.
        if preprocess_config["preprocessing"]["pitch"]["feature"] == "phoneme_level":
            pitch = expand(pitch, duration)
        else:
            pitch = targets[9][i, :mel_len].detach().cpu().numpy()
        if preprocess_config["preprocessing"]["energy"]["feature"] == "phoneme_level":
            energy = expand(energy, duration)
        else:
            energy = targets[10][i, :mel_len].detach().cpu().numpy()
        # NOTE(review): stats.json is re-read per sample; could be hoisted.
        with open(
            os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
        ) as f:
            stats = json.load(f)
            stats = stats["pitch"] + stats["energy"][:2]
        fig = plot_mel(
            [
                (mel_prediction.cpu().numpy(), pitch, energy),
            ],
            stats,
            ["Synthetized Spectrogram"],
        )
        # `fig` unused; plt.savefig writes the current figure.
        plt.savefig(os.path.join(figure_dir, f"{basename}.{name}.synth.png"))
        plt.close()

    # Vocode the whole batch of predicted mels in a single call.
    mel_predictions = predictions[1].transpose(1, 2)
    lengths = predictions[9] * preprocess_config["preprocessing"]["stft"]["hop_length"]
    max_wav_value = preprocess_config["preprocessing"]["audio"]["max_wav_value"]
    wav_predictions = vocoder.infer(mel_predictions, max_wav_value, lengths=lengths)

    sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
    for wav, basename in zip(wav_predictions, targets[0]):
        wavfile.write(os.path.join(audio_dir, f"{basename}.{name}.synth.wav"), sampling_rate, wav)
| true
|
57cc39ab8e409511fac6308fc85ecdc9b83b761e
|
Python
|
uchicago-bio/final-project-bdallen-uchicago
|
/allvall_rmsd.py
|
UTF-8
| 3,300
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
"""
Compute pairwise RMSD of all pdbs listed in the input file, restricting
the calculation to the atoms provided in the second argument, which must
be present in all pdbs. Designed to be used in conjunction with
the "ALL" atom list from calc_presenting_surface.py.
Usage:
find /path/to/project-data -name '*_complex_0001.pdb' > pdb_list.txt
./allvall_rmsd.py pdb_list.txt atom_list.txt output/
"""
import sys
import multiprocessing
import os.path
import traceback
from pdbremix import pdbatoms, rmsd, v3
def rmsd_all(i, pdb_path, pdb_list, atom_list, outdir):
    """Compute RMSD between pdb_path and every later entry of pdb_list.

    Results are written to outdir/rmsd_<name>.txt, one line per pair, and
    the comparison is restricted to the atoms in atom_list (which must be
    present in every pdb).
    """
    ref_name = get_pdb_name(pdb_path)
    ref_soup = pdbatoms.Soup(pdb_path)
    _ref_center, ref_coords = center_vlist(get_atom_coords(ref_soup, atom_list))
    out_path = os.path.join(outdir, "rmsd_%s.txt" % ref_name)
    with open(out_path, "w") as out:
        # Only compare against later entries so each pair is computed once
        # across the whole job set.
        for other_path in pdb_list[i + 1:]:
            assert other_path != pdb_path
            other_name = get_pdb_name(other_path)
            other_soup = pdbatoms.Soup(other_path)
            _other_center, other_coords = center_vlist(get_atom_coords(other_soup, atom_list))
            score, _rot_matrix = rmsd.calc_rmsd_rot(ref_coords, other_coords)
            out.write("%0.6f %s %s\n" % (score, ref_name, other_name))
def get_atom_coords(soup, atom_list):
    """Return the positions of the atoms in atom_list looked up in soup.

    Each atom record is (chain, residue number, residue name, atom name);
    a residue name of "X" skips the residue-type sanity check.
    """
    positions = []
    for chain, resi, resn, name in atom_list:
        residue = soup.residue_by_tag("%s:%d" % (chain, resi))
        assert residue.num == resi, "%s != %s" % (residue.num, resi)
        if resn != "X":
            assert residue.type == resn, "%s != %s" % (residue.type, resn)
        positions.append(residue.atom(name).pos)
    return positions
def center_vlist(vlist):
    """Center a list of v3 vectors.

    Returns (center, centered_copy) where centered_copy is a new list with
    the centroid translated to the origin.
    """
    centroid = v3.get_center(vlist)
    shift = v3.translation(-centroid)
    centered = [v3.transform(shift, vec) for vec in vlist]
    return (centroid, centered)
def _job_rmsd_all(packed_args):
    """Pool-worker shim: unpack the job tuple into rmsd_all.

    Any exception is printed before being re-raised, since tracebacks from
    multiprocessing workers are otherwise easy to lose.
    """
    try:
        return rmsd_all(*packed_args)
    except Exception:
        traceback.print_exc()
        raise
def get_pdb_name(pdb_path):
    """Return the file name of *pdb_path* without directory or extension."""
    base = os.path.basename(pdb_path)
    name, _ext = os.path.splitext(base)
    return name
def _parse_atom_list(infile):
atom_list = []
for line in infile:
line = line.strip()
chain, resi, resn, atom_name = line.split()
atom_list.append((chain, int(resi), resn, atom_name))
return atom_list
def _main():
    """Parse CLI arguments and fan the all-vs-all RMSD jobs over a pool."""
    # NOTE: Python 2 print statement below -- this script is py2-only as written.
    if len(sys.argv) != 4:
        print "Usage: %s pdb_list.txt atom_list.txt outdir" % sys.argv[0]
        sys.exit(1)
    # First argument: file with one PDB path per line.
    with open(sys.argv[1]) as f:
        pdb_list = [line.strip() for line in f.readlines()]
    # Second argument: atom list shared by all pdbs.
    with open(sys.argv[2]) as f:
        atom_list = _parse_atom_list(f)
    outdir = sys.argv[3]
    # Job i compares pdb_list[i] against every later entry, so each pair is
    # computed exactly once across all jobs.
    jobs = [(i, pdb_path, pdb_list, atom_list, outdir)
            for i, pdb_path in enumerate(pdb_list)]
    ncpus = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(ncpus)
    pool.map(_job_rmsd_all, jobs)


if __name__ == '__main__':
    _main()
| true
|
804509a8e733c1185004800f9cd049da21aa05e3
|
Python
|
bfkg/jupyterhub-options-spawner
|
/tests/tests_checkbox_input_field.py
|
UTF-8
| 3,696
| 2.65625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Copyright (c) 2018, Zebula Sampedro, CU Research Computing
import unittest
from traitlets import (
Unicode,
Bool,
)
from optionsspawner.forms import CheckboxInputField
class CheckboxInputFieldTestCase(unittest.TestCase):
    """Tests for optionsspawner.forms.checkboxfield.CheckboxInputField.

    Covers three behaviors: HTML rendering (with/without a value and the
    checked flag), the traitlet type returned by get_trait (Bool when the
    checkbox has no value, Unicode otherwise), and normalization of the
    submitted form value in all four checked/value combinations.
    """

    def test_render_unicode_value_checked_by_default(self):
        # Expected markup is compared literally, including the trailing newline.
        expected = ("""<label for="test_attr">Test Attribute</label>\n"""
            """<input id="id_test_attr" class="form-control" checked name="test_attr" type="checkbox" value="test">\n""")
        field = CheckboxInputField('test_attr',
            label='Test Attribute',
            attr_value='test',
            attr_checked=True
        )
        rendered = field.render()
        self.assertEqual(rendered, expected)

    def test_render_unicode_value(self):
        expected = ("""<label for="test_attr">Test Attribute</label>\n"""
            """<input id="id_test_attr" class="form-control" name="test_attr" type="checkbox" value="test">\n""")
        field = CheckboxInputField('test_attr',
            label='Test Attribute',
            attr_value='test'
        )
        rendered = field.render()
        self.assertEqual(rendered, expected)

    def test_render_no_value(self):
        expected = ("""<label for="test_attr">Test Attribute</label>\n"""
            """<input id="id_test_attr" class="form-control" name="test_attr" type="checkbox">\n""")
        field = CheckboxInputField('test_attr',
            label='Test Attribute'
        )
        rendered = field.render()
        self.assertEqual(rendered, expected)

    def test_returns_bool_trait_with_no_value(self):
        # Without an attr_value the field behaves as an on/off flag.
        expected = Bool().tag(config=True)
        field = CheckboxInputField('test_attr',
            label='Test Attribute'
        )
        traitlet = field.get_trait()
        self.assertIsInstance(traitlet, Bool)
        self.assertEqual(traitlet.metadata, expected.metadata)
        self.assertEqual(traitlet.default_value, expected.default_value)

    def test_returns_unicode_trait_with_unciode_value(self):
        expected = Unicode().tag(config=True)
        field = CheckboxInputField('test_attr',
            label='Test Attribute',
            attr_value='test'
        )
        traitlet = field.get_trait()
        self.assertIsInstance(traitlet, Unicode)
        self.assertEqual(traitlet.metadata, expected.metadata)
        self.assertEqual(traitlet.default_value, expected.default_value)

    def test_normalize_checkbox_no_value_unchecked(self):
        # Browsers omit unchecked checkboxes entirely, hence input None.
        expected = False
        field = CheckboxInputField('test_attr',
            label='Test Attribute'
        )
        normalized = field.normalize_user_option(None)
        self.assertEqual(normalized, expected)

    def test_normalize_checkbox_no_value_checked(self):
        expected = True
        field = CheckboxInputField('test_attr',
            label='Test Attribute'
        )
        normalized = field.normalize_user_option(['on'])
        self.assertEqual(normalized, expected)

    def test_normalize_checkbox_unicode_value_unchecked(self):
        expected = ''
        field = CheckboxInputField('test_attr',
            label='Test Attribute',
            attr_value='test'
        )
        normalized = field.normalize_user_option(None)
        self.assertEqual(normalized, expected)

    def test_normalize_checkbox_unicode_value_checked(self):
        expected = 'test'
        field = CheckboxInputField('test_attr',
            label='Test Attribute',
            attr_value='test'
        )
        normalized = field.normalize_user_option(['test'])
        self.assertEqual(normalized, expected)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| true
|
d0580f90e72ea073b797da05e90da4b5728ea35f
|
Python
|
2100030721/Hackerrank-Artificial-Intelligence
|
/Statistics-and-Machine-Learning/multiple-linear-regression-predicting-house-prices.py
|
UTF-8
| 1,995
| 3.75
| 4
|
[] |
no_license
|
# Charlie wants to buy a house. He does a detailed survey of the area where
# he wants to live, in which he quantifies, normalizes, and maps the desirable
# features of houses to values on a scale of 0 to 1 so the data can be assembled
# into a table. If Charlie noted F features, each row contains F space-separated
# values followed by the house price in dollars per square foot (making for a
# total of F + 1 columns). If Charlie makes observations about H houses, his
# observation table has H rows. This means that the table has a total
# of (F + 1) * H entries.
# Unfortunately, he was only able to get the price per square foot for certain
# houses and thus needs your help estimating the prices of the rest! Given the
# feature and pricing data for a set of houses, help Charlie estimate the price
# per square foot of the houses for which he has compiled feature data but
# no pricing.
# Link: https://www.hackerrank.com/challenges/predicting-house-prices
# Reference: http://onlinestatbook.com/2/regression/intro.html
# Developer: Murillo Grubler
# Import library
from sklearn import linear_model

# First input line: number of features F and number of observed houses H.
n_features, n_rows = map(int, input().split())

# Each observation row: F normalized feature values followed by the price
# per square foot.
train_X, train_y = [], []
for _ in range(n_rows):
    values = [float(token) for token in input().split()]
    # The leading 0 reproduces the original placeholder column; the model's
    # own intercept handles the bias term.
    train_X.append([0] + values[:n_features])
    train_y.extend(values[n_features:])

# Fit ordinary least squares on the observed houses.
model = linear_model.LinearRegression()
model.fit(train_X, train_y)
a = model.intercept_
b = model.coef_

# Read the unpriced houses (features only) and predict their prices.
n_queries = int(input())
query_X = []
for _ in range(n_queries):
    row = [float(token) for token in input().split()]
    query_X.append([0] + row)

# Print each prediction rounded to two decimal places.
predictions = model.predict(query_X)
for value in predictions:
    print(round(value, 2))
| true
|
871fefcfd603a7abbeb77656c89f6046ef390d0c
|
Python
|
vicchu/Unsupervised-Learning-PCA-K-Means
|
/K-Means-PartII.py
|
UTF-8
| 846
| 3.40625
| 3
|
[] |
no_license
|
### Demonstrates K-Means on classes drawn from normal distributions.
# Import modules
import aux_funcs
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Fix the RNG seed so the sampled classes are reproducible.
np.random.seed(42)

# Experiment parameters.
num_points = 800   # total number of points
num_classes = 10   # number of classes
variance = 0.1     # per-class variance

# Draw normally distributed points for each class.
points, labels = aux_funcs.init_board_gauss(num_points, num_classes, variance)

# Scatter the data, fit K-Means, and overlay the fitted centroids in black.
plt.figure(3, figsize=(7, 5))
plt.scatter(points[:, 0], points[:, 1], c=labels, s=[100] * num_points)
clusterer = KMeans(n_clusters=10).fit(points)
plt.scatter(clusterer.cluster_centers_[:, 0], clusterer.cluster_centers_[:, 1],
            c="k", s=[50] * num_classes)
| true
|
e2079334f78864ec2c5375c5402968976b75dfe3
|
Python
|
masenov/code-exercises
|
/cracking-the-coding-interview/data_structures/arrays/4.py
|
UTF-8
| 616
| 3.78125
| 4
|
[] |
no_license
|
def replaceCharacter(s, pos, char):
    """Return a copy of *s* with the character at index *pos* replaced by *char*."""
    prefix = s[:pos]
    suffix = s[pos + 1:]
    return prefix + char + suffix
def replaceSpaces(s, true_length):
    """URL-encode the spaces of s[:true_length] in place, filling rightward.

    *s* must carry exactly two extra trailing characters of buffer per space
    (the classic in-place '%20' problem). Characters are copied from index
    true_length-1 down to 0, writing from the end of the buffer, so the
    encoded string exactly fills *s* when the buffer is correctly sized.

    Performance fix: the original rebuilt the whole string with slicing for
    every written character (O(n^2)); a mutable list buffer makes each write
    O(1) while producing identical output for well-formed input.
    """
    chars = list(s)
    write = len(chars) - 1
    for read in range(true_length - 1, -1, -1):
        if chars[read] == ' ':
            # Write '%20' backwards: '0', '2', '%'.
            chars[write] = '0'
            chars[write - 1] = '2'
            chars[write - 2] = '%'
            write -= 3
        else:
            chars[write] = chars[read]
            write -= 1
    return ''.join(chars)
print (replaceCharacter("asdf",3,'x'))
print (replaceSpaces("Mr John Smith ", 13))
| true
|
13d3ed722112a315e7321eb808712df9e599de29
|
Python
|
xiaoyuhen/An-Introduction-to-Interactive-Programming-in-Python
|
/week4/exercises/prime_list.py
|
UTF-8
| 279
| 3.046875
| 3
|
[] |
no_license
|
# Prime number lists
###################################################
# Student should enter code below
# NOTE: Python 2 print statement; under Python 3 this would need print(...).
print_lists = [2, 3, 5, 7, 11, 13]
print print_lists[1], print_lists[3], print_lists[5]
###################################################
# Expected output
#3 7 13
|
8cb55843f17bc26c8e894b377d64c24682021d02
|
Python
|
alexandrabrt/calculator_grupa2
|
/to_do.py
|
UTF-8
| 847
| 3.109375
| 3
|
[] |
no_license
|
import datetime
class Todolist:
    """Interactive to-do list that gathers a task, a due date and a
    responsible person from standard input (prompts are in Romanian,
    matching the original UI).

    Fixes relative to the original:
    * date validation used '%D.%M.%Y', which never matches the advertised
      zz.ll.aaaa (dd.mm.yyyy) format -- corrected to '%d.%m.%Y';
    * the bare expression ``self.categorie`` raised AttributeError at
      construction time -- it is now initialised to None;
    * ``metoda_aduagare_taskuri`` appended the initial task instead of the
      one just typed -- it now appends the new input;
    * a dangling ``def`` at the end of the class was a syntax error.
    """

    def __init__(self):
        self.task = input('Introduceti task-ul: ')
        self.list_task = []
        self.choice = 'D'
        self.data = input('Introduceti data dupa urmatorul format zz.ll.aaaa ')
        try:
            # Best-effort dd.mm.yyyy validation; invalid dates are ignored,
            # preserving the original lenient behavior.
            datetime.datetime.strptime(self.data, '%d.%m.%Y')
        except Exception:
            pass
        self.persoana_responsabila = input('Introduceti persoana responsabila: ')
        # while self.persoana_responsabila not in self.list_task
        self.categorie = None

    def metoda_aduagare_taskuri(self):
        """Keep asking for tasks while the user answers 'D'; return the list."""
        while self.choice == 'D':
            self.tasks = input("Introduceti in lista task urile")
            # Append the task just typed (the original appended self.task,
            # duplicating the initial task on every iteration).
            self.list_task.append(self.tasks)
            self.choice = input("Doriti sa introduceti un alt task de la tastatura D/N: ")
        return self.list_task
| true
|
3719bae1117fbf69bc94ca0464c9c8a05bcac6e1
|
Python
|
Litterman/EZClimate
|
/ezclimate/damage.py
|
UTF-8
| 15,898
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from abc import ABCMeta, abstractmethod
from ezclimate.damage_simulation import DamageSimulation
from ezclimate.forcing import Forcing
class Damage(object, metaclass=ABCMeta):
    """Abstract interface for EZ-Climate damage providers.

    Concrete subclasses map mitigation decisions on the decision tree to
    per-node damages.

    Parameters
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions

    Attributes
    ----------
    tree : `TreeModel` object
        the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions
    """

    def __init__(self, tree, bau):
        # Stored as plain attributes; subclasses read them directly.
        self.bau = bau
        self.tree = tree

    @abstractmethod
    def average_mitigation(self):
        """Return a 1D array of average mitigation for every node in the period."""
        pass

    @abstractmethod
    def damage_function(self):
        """Return a 1D array of damages for every node in the period."""
        pass
class DLWDamage(Damage):
    """Damage class for the EZ-Climate model. Provides the damages from
    emissions and mitigation outcomes.

    Parameters
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions
    cons_growth : float
        constant consumption growth rate
    ghg_levels : ndarray or list
        end GHG levels for each end scenario
    subinterval_len : float
        length of the forcing sub-intervals passed through to `Forcing`

    Attributes
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions
    cons_growth : float
        constant consumption growth rate
    ghg_levels : ndarray
        end GHG levels for each end scenario
    dnum : int
        number of simulated damage paths
    d : ndarray
        simulated damages
    d_rcomb : ndarray
        adjusted simulated damages for recombining tree
    cum_forcings : ndarray
        cumulative forcing interpolation coefficients, used to calculate
        forcing based mitigation
    damage_coefs : ndarray
        interpolation coefficients used to calculate damages
    """

    def __init__(self, tree, bau, cons_growth, ghg_levels, subinterval_len):
        super(DLWDamage, self).__init__(tree, bau)
        self.ghg_levels = ghg_levels
        if isinstance(self.ghg_levels, list):
            self.ghg_levels = np.array(self.ghg_levels)
        self.cons_growth = cons_growth
        self.dnum = len(ghg_levels)
        self.subinterval_len = subinterval_len
        # Lazily filled caches; computed by _forcing_init /
        # _damage_interpolation on first use.
        self.cum_forcings = None
        self.d = None
        self.d_rcomb = None
        self.emit_pct = None
        self.damage_coefs = None

    def _recombine_nodes(self):
        """Creating damage coefficients for recombining tree. The state reached
        by an up-down move is separate from a down-up move because in general
        the two paths will lead to different degrees of mitigation and
        therefore of GHG level. A 'recombining' tree is one in which the
        movement from one state to the next through time is nonetheless such
        that an up move followed by a down move leads to the same fragility.

        NOTE(review): mutates ``self.tree.final_states_prob`` and
        ``self.tree.node_prob`` in addition to building ``self.d_rcomb``.
        """
        nperiods = self.tree.num_periods
        sum_class = np.zeros(nperiods, dtype=int)
        new_state = np.zeros([nperiods, self.tree.num_final_states], dtype=int)
        temp_prob = self.tree.final_states_prob.copy()
        self.d_rcomb = self.d.copy()

        # Group final states by the number of 'down' moves on their path
        # (popcount of the state index -- assumed binary-tree encoding).
        for old_state in range(self.tree.num_final_states):
            temp = old_state
            n = nperiods - 2
            d_class = 0
            while n >= 0:
                if temp >= 2**n:
                    temp -= 2**n
                    d_class += 1
                n -= 1
            sum_class[d_class] += 1
            new_state[d_class, sum_class[d_class]-1] = old_state

        sum_nodes = np.append(0, sum_class.cumsum())
        prob_sum = np.array([self.tree.final_states_prob[sum_nodes[i]:sum_nodes[i+1]].sum() for i in range(len(sum_nodes)-1)])
        # Replace each state's damage by the probability-weighted average of
        # its recombination class, per period and per simulated path k.
        for period in range(nperiods):
            for k in range(self.dnum):
                d_sum = np.zeros(nperiods)
                old_state = 0
                for d_class in range(nperiods):
                    d_sum[d_class] = (self.tree.final_states_prob[old_state:old_state+sum_class[d_class]] * self.d_rcomb[k, old_state:old_state+sum_class[d_class], period]).sum()
                    old_state += sum_class[d_class]
                    self.tree.final_states_prob[new_state[d_class, 0:sum_class[d_class]]] = temp_prob[0]
                for d_class in range(nperiods):
                    self.d_rcomb[k, new_state[d_class, 0:sum_class[d_class]], period] = d_sum[d_class] / prob_sum[d_class]

        # Refresh node probabilities from the (re-assigned) final-state probs.
        self.tree.node_prob[-len(self.tree.final_states_prob):] = self.tree.final_states_prob
        for p in range(1, nperiods-1):
            nodes = self.tree.get_nodes_in_period(p)
            for node in range(nodes[0], nodes[1]+1):
                worst_end_state, best_end_state = self.tree.reachable_end_states(node, period=p)
                self.tree.node_prob[node] = self.tree.final_states_prob[worst_end_state:best_end_state+1].sum()

    def _damage_interpolation(self):
        """Create the interpolation coefficients used in `damage_function`.

        Fits, per final state and period, a linear piece (index -1 of axis 2)
        and a quadratic piece (index 0) through the simulated damage points
        at the scenario mitigation levels ``emit_pct``.
        """
        if self.d is None:
            print("Importing stored damage simulation")
            self.import_damages()

        self._recombine_nodes()
        if self.emit_pct is None:
            bau_emission = self.bau.ghg_end - self.bau.ghg_start
            self.emit_pct = 1.0 - (self.ghg_levels-self.bau.ghg_start) / bau_emission

        self.damage_coefs = np.zeros((self.tree.num_final_states, self.tree.num_periods, self.dnum-1, self.dnum))
        amat = np.ones((self.tree.num_periods, self.dnum, self.dnum))
        bmat = np.ones((self.tree.num_periods, self.dnum))

        # Linear segment between the two lowest-mitigation scenarios.
        self.damage_coefs[:, :, -1, -1] = self.d_rcomb[-1, :, :]
        self.damage_coefs[:, :, -1, -2] = (self.d_rcomb[-2, :, :] - self.d_rcomb[-1, :, :]) / self.emit_pct[-2]

        # Quadratic segment: first row of amat matches the linear slope at
        # emit_pct[-2] (derivative continuity), remaining rows interpolate
        # the simulated damages.
        amat[:, 0, 0] = 2.0 * self.emit_pct[-2]
        amat[:, 1:, 0] = self.emit_pct[:-1]**2
        amat[:, 1:, 1] = self.emit_pct[:-1]
        amat[:, 0, -1] = 0.0

        for state in range(0, self.tree.num_final_states):
            bmat[:, 0] = self.damage_coefs[state, :, -1, -2] * self.emit_pct[-2]
            bmat[:, 1:] = self.d_rcomb[:-1, state, :].T
            self.damage_coefs[state, :, 0] = np.linalg.solve(amat, bmat)

    def import_damages(self, file_name="simulated_damages"):
        """Import saved simulated damages. File must be saved in 'data'
        directory inside current working directory. Save imported values
        in `d`.

        Parameters
        ----------
        file_name : str, optional
            name of file of saved simulated damages

        Raises
        ------
        IOError
            If file does not exist (the process exits via sys.exit(0)).
        """
        from ezclimate.tools import import_csv
        try:
            d = import_csv(file_name, ignore="#", header=False)
        except IOError as e:
            import sys
            print(("Could not import simulated damages:\n\t{}".format(e)))
            sys.exit(0)

        # Rows are grouped per scenario: dnum consecutive blocks of
        # num_final_states rows each.
        n = self.tree.num_final_states
        self.d = np.array([d[n*i:n*(i+1)] for i in range(0, self.dnum)])
        self._damage_interpolation()

    def damage_simulation(self, draws, peak_temp=9.0, disaster_tail=12.0, tip_on=True,
            multi_tips=False, temp_map=1, temp_dist_params=None, maxh=100.0, save_simulation=True):
        """Initialization and simulation of damages, given by
        :mod:`ez_climate.DamageSimulation`.

        Parameters
        ----------
        draws : int
            number of Monte Carlo draws
        peak_temp : float, optional
            tipping point parameter
        disaster_tail : float, optional
            curvature of tipping point
        tip_on : bool, optional
            flag that turns tipping points on or off
        multi_tips : bool, optional
            if to allow multiple tipping points in simulation
        temp_map : int, optional
            mapping from GHG to temperature
            * 0: implies Pindyck displace gamma
            * 1: implies Wagner-Weitzman normal
            * 2: implies Roe-Baker
            * 3: implies user-defined normal
            * 4: implies user-defined gamma
        temp_dist_params : ndarray or list, optional
            if temp_map is either 3 or 4, user needs to define the
            distribution parameters
        maxh : float, optional
            time parameter from Pindyck which indicates the time it takes
            for temp to get half way to its max value for a given level of ghg
        save_simulation : bool, optional
            True if simulated values should be saved, False otherwise

        Returns
        -------
        ndarray
            simulated damages
        """
        ds = DamageSimulation(tree=self.tree, ghg_levels=self.ghg_levels, peak_temp=peak_temp,
                    disaster_tail=disaster_tail, tip_on=tip_on, temp_map=temp_map,
                    temp_dist_params=temp_dist_params, maxh=maxh, cons_growth=self.cons_growth)
        self.ds = ds
        print("Starting damage simulation..")
        self.d = ds.simulate(draws, write_to_file=save_simulation, multiple_tipping_points=multi_tips)
        print("Done!")
        self._damage_interpolation()
        return self.d

    def _forcing_based_mitigation(self, forcing, period):
        """Calculation of mitigation based on forcing up to period.
        Interpolating between the forcing associated with the constant
        degree of mitigation consistent with the damage simulation scenarios.

        NOTE(review): indices 0/1/2 of cum_forcings correspond to the three
        damage scenarios in increasing forcing order -- assumed, confirm.
        """
        p = period - 1
        if forcing > self.cum_forcings[p][1]:
            weight_on_sim2 = (self.cum_forcings[p][2] - forcing) / (self.cum_forcings[p][2] - self.cum_forcings[p][1])
            weight_on_sim3 = 0
        elif forcing > self.cum_forcings[p][0]:
            weight_on_sim2 = (forcing - self.cum_forcings[p][0]) / (self.cum_forcings[p][1] - self.cum_forcings[p][0])
            weight_on_sim3 = (self.cum_forcings[p][1] - forcing) / (self.cum_forcings[p][1] - self.cum_forcings[p][0])
        else:
            weight_on_sim2 = 0
            weight_on_sim3 = 1.0 + (self.cum_forcings[p][0] - forcing) / self.cum_forcings[p][0]
        return weight_on_sim2 * self.emit_pct[1] + weight_on_sim3*self.emit_pct[0]

    def _forcing_init(self):
        """Initialize `Forcing` object and cum_forcings used in calculating
        the force mitigation up to a node."""
        if self.emit_pct is None:
            bau_emission = self.bau.ghg_end - self.bau.ghg_start
            self.emit_pct = 1.0 - (self.ghg_levels-self.bau.ghg_start) / bau_emission

        self.cum_forcings = np.zeros((self.tree.num_periods, self.dnum))
        # Constant mitigation per scenario across all decision nodes.
        mitigation = np.ones((self.dnum, self.tree.num_decision_nodes)) * self.emit_pct[:, np.newaxis]

        for i in range(0, self.dnum):
            for n in range(1, self.tree.num_periods+1):
                # Node 0 of each period is used as the representative path.
                node = self.tree.get_node(n, 0)
                self.cum_forcings[n-1, i] = Forcing.forcing_at_node(mitigation[i], node, self.tree,
                    self.bau, self.subinterval_len)

    def average_mitigation_node(self, m, node, period=None):
        """Calculate the average mitigation until node.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        node : int
            node for which average mitigation is to be calculated for
        period : int, optional
            the period the node is in

        Returns
        -------
        float
            average mitigation (emission-weighted over the node's path)
        """
        if period == 0:
            return 0
        if period is None:
            period = self.tree.get_period(node)

        state = self.tree.get_state(node, period)
        path = self.tree.get_path(node, period)
        # Mitigation decisions taken strictly before this node.
        new_m = m[path[:-1]]
        period_len = self.tree.decision_times[1:period+1] - self.tree.decision_times[:period]
        bau_emissions = self.bau.emission_by_decisions[:period]
        total_emission = np.dot(bau_emissions, period_len)
        ave_mitigation = np.dot(new_m, bau_emissions*period_len)
        return ave_mitigation / total_emission

    def average_mitigation(self, m, period):
        """Calculate the average mitigation for all nodes in a period.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        period : int
            period to calculate average mitigation for

        Returns
        -------
        ndarray
            average mitigations
        """
        nodes = self.tree.get_num_nodes_period(period)
        ave_mitigation = np.zeros(nodes)
        for i in range(nodes):
            node = self.tree.get_node(period, i)
            ave_mitigation[i] = self.average_mitigation_node(m, node, period)
        return ave_mitigation

    def _ghg_level_node(self, m, node):
        # Thin delegation to the Forcing helper.
        return Forcing.ghg_level_at_node(m, node, self.tree, self.bau, self.subinterval_len)

    def ghg_level_period(self, m, period=None, nodes=None):
        """Calculate the GHG levels corresponding to the given mitigation.
        Need to provide either `period` or `nodes`.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        period : int, optional
            what period to calculate GHG levels for
        nodes : ndarray or list, optional
            the nodes to calculate GHG levels for

        Returns
        -------
        ndarray
            GHG levels

        Raises
        ------
        ValueError
            if neither period nor nodes is given
        """
        if nodes is None and period is not None:
            start_node, end_node = self.tree.get_nodes_in_period(period)
            # Past the last decision period, shift into the final-state range.
            if period >= self.tree.num_periods:
                add = end_node-start_node+1
                start_node += add
                end_node += add
            nodes = np.array(list(range(start_node, end_node+1)))
        if period is None and nodes is None:
            raise ValueError("Need to give function either nodes or the period")
        ghg_level = np.zeros(len(nodes))
        for i in range(len(nodes)):
            ghg_level[i] = self._ghg_level_node(m, nodes[i])
        return ghg_level

    def ghg_level(self, m, periods=None):
        """Calculate the GHG levels for more than one period.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        periods : int, optional
            number of periods to calculate GHG levels for

        Returns
        -------
        ndarray
            GHG levels indexed by node (extended with final states when
            `periods` reaches past the decision horizon)
        """
        if periods is None:
            periods = self.tree.num_periods-1
        if periods >= self.tree.num_periods:
            ghg_level = np.zeros(self.tree.num_decision_nodes+self.tree.num_final_states)
        else:
            ghg_level = np.zeros(self.tree.num_decision_nodes)
        for period in range(periods+1):
            start_node, end_node = self.tree.get_nodes_in_period(period)
            if period >= self.tree.num_periods:
                add = end_node-start_node+1
                start_node += add
                end_node += add
            nodes = np.array(list(range(start_node, end_node+1)))
            ghg_level[nodes] = self.ghg_level_period(m, nodes=nodes)
        return ghg_level

    def _damage_function_node(self, m, node):
        """Calculate the damage at any given node, based on mitigation
        actions in `m`.

        Uses the piecewise (linear / quadratic / exponential-decay)
        interpolation built by `_damage_interpolation`, plus a logistic
        GHG term. Magic constants (0.05, 200, 60.0) are model calibration
        values carried over from the original -- see the DLW paper.
        """
        # Lazy initialization of the interpolation machinery.
        if self.damage_coefs is None:
            self._damage_interpolation()
        if self.cum_forcings is None:
            self._forcing_init()
        if node == 0:
            return 0.0

        period = self.tree.get_period(node)
        forcing, ghg_level = Forcing.forcing_and_ghg_at_node(m, node, self.tree, self.bau, self.subinterval_len, "both")
        force_mitigation = self._forcing_based_mitigation(forcing, period)
        # Logistic add-on term in the GHG level.
        ghg_extension = 1.0 / (1 + np.exp(0.05*(ghg_level-200)))

        worst_end_state, best_end_state = self.tree.reachable_end_states(node, period=period)
        probs = self.tree.final_states_prob[worst_end_state:best_end_state+1]

        if force_mitigation < self.emit_pct[1]:
            # Linear segment below the middle scenario's mitigation.
            damage = (probs *(self.damage_coefs[worst_end_state:best_end_state+1, period-1, 1, 1] * force_mitigation \
                + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 1, 2])).sum()
        elif force_mitigation < self.emit_pct[0]:
            # Quadratic segment between middle and highest mitigation.
            damage = (probs * (self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 0] * force_mitigation**2 \
                + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 1] * force_mitigation \
                + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 2])).sum()
        else:
            # Beyond the highest simulated mitigation: exponential decay
            # matched to the quadratic's slope at emit_pct[0].
            damage = 0.0
            i = 0
            for state in range(worst_end_state, best_end_state+1):
                if self.d_rcomb[0, state, period-1] > 1e-5:
                    deriv = 2.0 * self.damage_coefs[state, period-1, 0, 0]*self.emit_pct[0] \
                        + self.damage_coefs[state, period-1, 0, 1]
                    decay_scale = deriv / (self.d_rcomb[0, state, period-1]*np.log(0.5))
                    dist = force_mitigation - self.emit_pct[0] + np.log(self.d_rcomb[0, state, period-1]) \
                        / (np.log(0.5) * decay_scale)
                    damage += probs[i] * (0.5**(decay_scale*dist) * np.exp(-np.square(force_mitigation-self.emit_pct[0])/60.0))
                i += 1

        return (damage / probs.sum()) + ghg_extension

    def damage_function(self, m, period):
        """Calculate the damage for every node in a period, based on
        mitigation actions `m`.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        period : int
            period to calculate damages for

        Returns
        -------
        ndarray
            damages
        """
        nodes = self.tree.get_num_nodes_period(period)
        damages = np.zeros(nodes)
        for i in range(nodes):
            node = self.tree.get_node(period, i)
            damages[i] = self._damage_function_node(m, node)
        return damages
| true
|
3fc148923dadc232841ce60b47405535a73dcbd2
|
Python
|
bgoonz/UsefulResourceRepo2.0
|
/MY_REPOS/Lambda-Resource-Static-Assets/1-projects/lambda/LambdaSQL/LambdaSQL-master/module1/buddymove_holidayiq.py
|
UTF-8
| 955
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
"""
Unit 3 Sprint 2 SQL Module 1
Part 2 Creating a Database
"""
import sqlite3 as sql
import pandas as pd
# Create (or open) the local SQLite database and a cursor for queries.
connection = sql.connect("buddymove_holidayiq.sqlite3")
curs = connection.cursor()

# Load the source data straight from GitHub.
buddy = pd.read_csv(
    "https://github.com/BrokenShell/DS-Unit-3-Sprint-2-SQL-and-Databases/raw/master/module1-introduction-to-sql/buddymove_holidayiq.csv"
)

# if_exists="replace" keeps the script re-runnable: without it, a second run
# raises ValueError because the 'review' table already exists.
buddy.to_sql("review", con=connection, index=False, if_exists="replace")

print("\nNumber of rows:", *curs.execute("SELECT COUNT(*) FROM review;").fetchone())

# How many reviewers rated both Nature and Shopping above 100?
nature_and_shopping = """
SELECT COUNT(*) FROM review
WHERE Nature > 100
AND Shopping > 100;
"""
print("\nBoth Nature & Shopping > 100:", *curs.execute(nature_and_shopping).fetchone())

# Stretch goal: average rating per category.
print("\nAverages (Stretch Goal):")
cols = ["Sports", "Religious", "Nature", "Theatre", "Shopping", "Picnic"]
print(
    "\n".join(
        [
            f"{col}: {curs.execute(f'SELECT AVG({col}) FROM review;').fetchone()[0]:.2f}"
            for col in cols
        ]
    )
)

# Release the SQLite file handle now that all queries have run.
connection.close()
| true
|
0e146ff1087daf9712c60d45b4d823cd7fb5311d
|
Python
|
drazenzen/pybreak
|
/pybreak.py
|
UTF-8
| 15,420
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import json
import random
import argparse
try:
from tkinter import * # noqa py3
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
except ImportError:
try:
from Tkinter import * # noqa py2
import ttk
import tkFileDialog as filedialog
import tkMessageBox as messagebox
except ImportError as e:
sys.exit(e)
# TODO pybreak:
# + Nicer handling MainFrame entry_interval value
# + Save configuration file in HOME config directory
# + Add tests
# Program version string, reported by version() and the --version flag.
__version__ = '0.1'
__doc__ = """
Relax yourself away from computer.
"""
INTERVAL = 1200  # default work interval before a break (in seconds)
def version():
    """Return program, Python and Tkinter version information as one string."""
    py_version = '.'.join(str(part) for part in sys.version_info[:3])
    return 'pybreak: {}\nPython: {}\nTkinter: {}'.format(
        __version__, py_version, TclVersion)
def debug_info(*args):
    """Print debug output on the console when the global DEBUG flag is set.

    With arguments, print them as a tuple; without, print version info.
    """
    if not DEBUG:
        return
    if args:
        print(args)
    else:
        print(version())
def load_image(img_path):
    """Try to load a PhotoImage from img_path.

    Returns the PhotoImage, or None if the file is missing or the
    format is not supported by this Tk build.
    """
    if not os.path.exists(img_path):
        return None
    try:
        return PhotoImage(file=img_path)
    except TclError as e:
        msg = "Image format not supported."
        debug_info(msg, e)
        return None
def subsample_image(image, max_width, max_height):
    """
    Subsample `image` until its width fits within max_width and its height
    fits within max_height; return the (possibly reduced) image.

    PhotoImage.subsample(x, y) keeps every x-th column and y-th row, i.e. it
    divides each dimension by the given factor.  The original code counted
    halving *steps* but then passed the step count to subsample() — and
    called subsample() twice (once per axis), shrinking both axes each time —
    so large images could come back bigger than the requested maximum.
    Here one power-of-two factor is computed per axis and applied in a
    single subsample(x, y) call.
    """
    w, h = image.width(), image.height()
    debug_info(w, h, max_width, max_height)
    x_factor = 1
    while w / x_factor > max_width:
        x_factor *= 2
    y_factor = 1
    while h / y_factor > max_height:
        y_factor *= 2
    if x_factor > 1 or y_factor > 1:
        image = image.subsample(x_factor, y_factor)
    debug_info(image.width(), image.height(), x_factor, y_factor)
    return image
class Config:
    """Load and save program configuration.

    The configuration is a small JSON file saved in the same directory
    as this module.
    """

    # Derive '<module>.json' from the module path.  os.path.splitext replaces
    # the original __file__.split('.')[0], which truncated at the *first* dot
    # and so broke for any path containing a dot (e.g. '/home/user.name/').
    filename = '{}.{}'.format(os.path.splitext(__file__)[0], 'json')

    def __init__(self, *args, **kwargs):
        self.data = {}  # parsed configuration values
        self.load()

    def load(self):
        """Load configuration, create it if necessary."""
        if os.path.exists(self.filename):
            with open(self.filename, "rb") as f:
                try:
                    data = f.read().decode()
                    self.data = json.loads(data)
                except ValueError:
                    # Corrupt or unparsable JSON: rebuild the defaults.
                    self.create()
        else:
            self.create()

    def save(self):
        """Save configuration to disk as indented JSON."""
        with open(self.filename, "w") as f:
            json.dump(self.data, f, indent=4)

    def create(self):
        """Create and persist the default configuration."""
        self.data = {}
        self.interval = INTERVAL
        self.data['interval'] = self.interval
        self.data['img_path'] = ""
        self.save()
class MainFrame(ttk.Frame):
    """Main application window: interval entry, relax-image picker, controls.

    Counts the seconds of the current work period and pops up a RelaxFrame
    when the configured interval has elapsed.
    """

    def __init__(self, parent):
        ttk.Frame.__init__(self, parent, padding=(10, 5, 10, 5))
        self.parent = parent
        # NOTE(review): this shadows the Tk widget's own .config() method on
        # this frame instance — confirm nothing relies on MainFrame.config().
        self.config = Config()
        self.init_str_vars()
        self.init_ui()
        self.running = False  # True while the work countdown is active
        self.passed = 0       # seconds elapsed in the current work period
        debug_info(self.config.data)

    def is_int(self, value):
        """Validate interval entry input: empty string or an integer is OK."""
        debug_info('interval input={}'.format(value))
        try:
            if value:
                int(value)
        except ValueError:
            return False
        return True

    def init_str_vars(self):
        """Set initial Tk state variables from the loaded configuration."""
        self.interval = StringVar(value=self.config.data['interval'])
        self.img_path = StringVar(value=self.config.data['img_path'])
        self.text_running = StringVar(value="Run")
        self.text_passed = StringVar(value="00:00")
        self.text_status = StringVar(value="Ready.")

    def init_ui(self):
        """Build and lay out all widgets of the main frame."""
        self.parent.title("pybreak")
        self.grid(column=0, row=0, sticky=(N, W, E, S))
        self.parent.grid_columnconfigure(0, weight=1)
        self.parent.grid_rowconfigure(0, weight=1)
        # Interval entry with live integer validation ('%P' = proposed value).
        ttk.Label(self, text="Interval:", anchor=E).grid(
            column=0, row=0, sticky=(W, E), padx=5, pady=5)
        entry_interval_vcmd = self.register(self.is_int)
        self.entry_interval = ttk.Entry(
            self, textvariable=self.interval, validate='all',
            validatecommand=(entry_interval_vcmd, '%P'))
        self.entry_interval.bind('<FocusOut>', self.on_entry_interval)
        self.entry_interval.grid(column=1, row=0, sticky=(W, E))
        ttk.Label(self, text="in seconds", anchor=W).grid(
            column=2, row=0, sticky=(W, E), padx=5)
        # Image thumbnail preview plus Choose/Clear buttons.
        ttk.Label(self, text="Image:").grid(
            column=0, row=1, sticky=(N, E), padx=5, pady=5)
        self.thumbnail = ttk.Label(self, compound=CENTER)
        self.set_thumbnail()
        self.thumbnail.grid(column=1, row=1, sticky=N, padx=5, pady=5)
        frm_img_btns = ttk.Frame(self)
        ttk.Button(frm_img_btns, text='Choose...',
                   command=self.on_image_select).pack()
        ttk.Button(frm_img_btns, text='Clear',
                   command=self.on_image_clear).pack()
        frm_img_btns.grid(column=2, row=1, sticky=(N, W), padx=5, pady=5)
        # Elapsed-time counter label (mm:ss).
        self.label_time = ttk.Label(
            self, textvariable=self.text_passed, font="-weight bold")
        self.label_time.grid(column=0, row=2, sticky=E)
        # Control buttons: Run/Stop, Preview, Info, Quit.
        frm_btns = ttk.Frame(self)
        self.btn_run = ttk.Button(frm_btns, textvariable=self.text_running)
        self.btn_run.focus_set()
        self.btn_run.pack(side=LEFT, expand=True, fill=X)
        self.btn_run.bind("<1>", self.on_run)
        ttk.Button(frm_btns, text="Preview", command=self.on_preview).pack(
            side=LEFT, expand=True, fill=X)
        ttk.Button(frm_btns, text="Info", command=self.on_info).pack(
            side=LEFT, expand=True, fill=X)
        ttk.Button(frm_btns, text="Quit", command=self.on_quit).pack(
            side=LEFT, expand=True, fill=X)
        frm_btns.grid(column=0, row=8, columnspan=3, sticky=(E, W), ipady=5)
        # Status bar at the bottom of the window.
        self.status = ttk.Label(self, textvariable=self.text_status, anchor=W)
        self.status.grid(column=0, row=9, columnspan=3, sticky=W)
        self.grid_columnconfigure(1, weight=1)
        self.grid_rowconfigure(1, weight=1)

    def on_entry_interval(self, event):
        """Check and store the interval value when the entry loses focus."""
        try:
            value = int(self.interval.get())
        except ValueError:
            # Fall back to the default on an empty/invalid entry.
            value = INTERVAL
        self.interval.set(value)
        self.config.data['interval'] = value

    def on_save(self):
        """Save the current program configuration to disk."""
        try:
            interval = int(self.interval.get())
        except ValueError as e:
            debug_info(e)
            interval = INTERVAL
        self.config.data['interval'] = interval
        self.config.data['img_path'] = self.img_path.get()
        self.config.save()
        self.interval.set(interval)

    def on_run(self, event):
        """Start or stop the work loop (toggled by the Run/Stop button)."""
        self.running = not self.running
        if self.running:
            self.text_running.set("Stop")
            self.text_status.set("Running...")
            # Minimize the window while the work countdown runs.
            self.winfo_toplevel().iconify()
            self.run()
        else:
            self.stop()

    def run(self):
        """Work loop tick.

        Re-scheduled every second via Tk's after(); shows the RelaxFrame
        once the configured interval has elapsed.
        """
        if self.running:
            if self.passed >= self.config.data['interval']:
                self.stop()
                RelaxFrame(self, self.img_path.get())
                self.hide()
            else:
                self.passed += 1
                self.parent.after(1000, self.run)
                # Display elapsed time as zero-padded mm:ss.
                self.text_passed.set("{}:{}".format(
                    str(int(self.passed / 60)).zfill(2),
                    str(self.passed % 60).zfill(2)))

    def stop(self):
        """Reset the work loop counters and UI labels."""
        self.passed = 0
        self.running = False
        self.text_running.set("Run")
        self.text_status.set("Ready.")

    def on_image_select(self):
        """Open a file dialog to select the relaxing image."""
        options = {}
        # Start browsing from the current image's folder, or from HOME.
        if self.img_path.get():
            options['initialdir'] = os.path.dirname(self.img_path.get())
        else:
            options['initialdir'] = os.path.expanduser("~")
        options['defaultextension'] = ".png"
        options['filetypes'] = [
            ('PNG', '*.png'), ('GIF', '*.gif'), ('All files', '.*')]
        options['parent'] = self.parent
        options['title'] = 'Choose a relax image...'
        filename = filedialog.askopenfilename(**options)
        if filename:
            self.img_path.set(filename)
            if not self.set_thumbnail():
                self.text_status.set("Image format not supported.")
            else:
                self.text_status.set("Image loaded.")

    def set_thumbnail(self):
        """Try to load the configured image and show it as a thumbnail.

        Returns True on success, False when no usable image is set.
        """
        img = load_image(self.img_path.get())
        if img:
            img = subsample_image(img, 160, 160)
            self.thumbnail.config(text='')
            self.thumbnail.config(image=img)
            # Keep a reference so Tk does not garbage-collect the image.
            self.thumbnail.img = img
            return True
        else:
            self.thumbnail.config(image='')
            self.thumbnail.config(text="Default relax frame")
            self.thumbnail.img = None
            return False

    def on_image_clear(self):
        """Clear the selected image and fall back to the default RelaxFrame."""
        self.img_path.set("")
        self.thumbnail.config(image='')
        self.thumbnail.config(text="Default relax frame")
        self.thumbnail.img = None
        self.text_status.set("Image cleared. Using default Relax frame.")

    def on_preview(self):
        """Preview (test) the RelaxFrame without waiting for the interval."""
        RelaxFrame(self, self.img_path.get())

    def hide(self):
        """Hide this window (used while the RelaxFrame is showing)."""
        self.parent.withdraw()

    def show(self):
        """Restore this window after the RelaxFrame closes."""
        self.parent.update()
        self.parent.deiconify()

    def on_info(self):
        """Show the About dialog with program and environment details."""
        prg_detail = "Relax yourself away from computer.\n"
        img_detail = "Supported image formats: {}\n"
        # NOTE(review): PNG PhotoImage support arrived in Tk 8.6; this 8.5
        # check may overstate what older Tk builds can display — confirm.
        if TkVersion >= 8.5:
            formats = "PNG, GIF"
        else:
            formats = "GIF"
        img_detail = img_detail.format(formats)
        tech_detail = version()
        detail = '\n'.join([prg_detail, img_detail, tech_detail])
        messagebox.showinfo(
            'About', 'pybreak',
            detail=detail)

    def on_quit(self):
        """Save the configuration and exit the program."""
        self.on_save()
        self.quit()
class RelaxFrame(Toplevel):
    """Full break window: shows the configured image or random ellipses."""

    def __init__(self, caller, img_path):
        """Give the frame focus and put it on top of all other windows.

        Hides the caller frame while the break is showing.
        """
        self.caller = caller
        self.img_path = img_path
        self.img = None
        Toplevel.__init__(self)
        self.init_ui()
        self.focus()
        self.focus_set()
        self.lift()
        # Briefly toggle 'topmost' to raise the window above everything
        # without leaving it permanently always-on-top.
        self.attributes('-topmost', True)
        self.attributes('-topmost', False)
        self.caller.hide()

    def init_ui(self):
        """Try to load the relax image; otherwise draw randomized ellipses."""
        self.config(bg="black")
        self.w, self.h = 640, 480  # default canvas size when no image is set
        self.c = Canvas(self, bg="black", height=self.h, width=self.w)
        self.font = '-*-fixed-medium-*-normal-*-9-*-*-*-*-*-*-*'
        # Palette of greenish Tk color names used for the random ellipses.
        self.colors = [
            'dark sea green', 'sea green', 'medium sea green',
            'light sea green', 'pale green', 'spring green', 'lawn green',
            'medium spring green', 'green yellow', 'lime green',
            'yellow green', 'forest green', 'olive drab', 'dark khaki',
            'khaki', 'pale goldenrod', 'light goldenrod yellow'
        ]
        self.img = load_image(self.img_path)
        if self.img:
            # Shrink the image to fit and resize the canvas to match it.
            self.img = subsample_image(self.img, self.w, self.h)
            self.c.create_image(0, 0, anchor=NW, image=self.img)
            self.w, self.h = self.img.width(), self.img.height()
            self.c.config(width=self.w, height=self.h)
        else:
            self.ellipses()
        self.c.create_text(
            self.w - 80, self.h - 20, anchor=SW,
            font=self.font, text="ESC to exit...", fill="green")
        self.c.pack()
        # Close on Escape or on the window-manager close button.
        self.bind('<Escape>', self.on_close)
        self.protocol('WM_DELETE_WINDOW', self.on_close)

    def on_close(self, *args):
        """Restore the caller window and destroy this frame."""
        self.caller.show()
        self.destroy()

    def ellipses(self):
        """Draw a coordinate grid, random ellipses and a message on the canvas."""
        # Grid with axis labels every 40 pixels.
        for i in range(0, 640, 40):
            # x lines
            self.c.create_line(0, i, 640, i, fill="gray10")
            self.c.create_text(
                2, i, text=str(i), fill="gray30", anchor=SW, font=self.font)
            # y lines
            self.c.create_line(i, 0, i, 480, fill="gray10")
            self.c.create_text(
                i + 2, 12, text=str(i), fill="gray30", anchor=SW, font=self.font)
        # Random ellipses kept `offset` pixels away from the edges.
        offset = 60
        for i in range(1, 10):
            x1 = random.randint(offset, self.w - offset)
            y1 = random.randint(offset, self.h - offset)
            x2 = random.randint(offset, self.w - offset)
            y2 = random.randint(offset, self.h - offset)
            color = "{}".format(
                self.colors[random.randint(0, len(self.colors) - 1)])
            self.c.create_oval(x1, y1, x2, y2, fill=color, outline="")
        # Friendly reminder text.
        self.c.create_text(
            60, 80, anchor=W, text="Relax for a bit or two...", fill="white")
def gui():
    """Create the main root Tk window, set its icon, and start the mainloop."""
    root = Tk()
    root.resizable(False, False)
    if os.path.exists('images'):  # taskbar/window list images
        if os.name == 'nt':
            root.wm_iconbitmap(
                default=os.path.join(os.getcwd(), 'images/pybreak.ico'))
            # Windows hack: set an explicit AppUserModelID so the icon
            # shows up in the task bar instead of the generic Python one.
            import ctypes
            appid = 'k2.util.pybreak.0.1'
            ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
        elif TkVersion >= 8.5:
            # PNG icons need Tk 8.6+; older Tk falls back to GIF.
            file = 'pybreak.png' if TkVersion >= 8.6 else 'pybreak.gif'
            icons = [PhotoImage(file=os.path.join(os.getcwd(), 'images', file))]
            root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
    MainFrame(root)
    root.mainloop()
if __name__ == '__main__':
    # Command-line entry point: parse flags, then either print version
    # information or start the GUI.
    parser = argparse.ArgumentParser(
        description="Relax yourself away from computer.")
    parser.add_argument(
        "-v", "--version", action="store_true",
        help="prints program, Python and Tkinter versions")
    parser.add_argument(
        "-d", "--debug", action="store_true",
        help="prints debug information on console")
    args = parser.parse_args()
    DEBUG = args.debug  # module-wide debug flag read by debug_info()
    if args.version:
        print(version())
    else:
        debug_info()
        gui()
| true
|
68c9fff6349e988beb4486e5303227d9bdda537e
|
Python
|
JonhFiv5/aulas_python
|
/aula_32_1_teste.py
|
UTF-8
| 86
| 3.234375
| 3
|
[] |
no_license
|
def myfunc(n):
    """Return a function that multiplies its argument by ``n``."""
    def multiplier(a):
        return a * n
    return multiplier
# Demonstration: build a doubler and apply it (prints 10).
mydoubler = myfunc(2)
print(mydoubler(5))
| true
|
43baae96e493cdea8119717468fb39c4e300dd8a
|
Python
|
xvanov/brilliant
|
/daily/stand_slide_fall.py
|
UTF-8
| 1,210
| 3.09375
| 3
|
[] |
no_license
|
class problemMetaInfo():
    """Metadata about the Brilliant daily problem being solved."""

    def __init__(self):
        # All metadata collected in one mapping, then set as attributes.
        meta = {
            'url': 'https://brilliant.org/daily-problems/table-cloth-pull/',
            'area': 'Science and Engineering',
            'featured_course': 'Classic Mechanics',
            'title': 'Stand, Slide, or Fall',
            'difficulty': 3,      # on a 1-to-5 scale
            'start': '7:41',      # clock time started
            'end': '8:14',        # clock time finished
            'time': '0:33',       # elapsed time
            'correct': True,      # whether the submitted answer was right
        }
        for key, value in meta.items():
            setattr(self, key, value)
class Problem():
    """Object on a table: find the accelerations at which it slides or topples."""

    def __init__(self):
        self.mu = 0.5   # coefficient of friction
        self.r = 0.02   # radius [m]
        self.h = 0.05   # height of center of mass [m]
        self.g = -9.8   # gravitational acceleration [m/s^2]

    def solve(self):
        """Print the minimum accelerations for sliding and toppling.

        Derivation:
            Fg = m*g, Fn = -Fg, Ff = mu*Fn = -mu*m*g
            Fp = m*a, Tp = m*a*h, Tn = -m*g*w
            slide condition:  a >= -mu*g
            topple condition: a >= -g*w/h

        Returns None; the printed thresholds are the useful output.
        """
        slide_threshold = -self.mu * self.g
        topple_threshold = -self.g * self.r / self.h
        print('Slide min acc = ', slide_threshold)
        print('Topple min acc = ', topple_threshold)
        return None
if __name__ == '__main__':
    # Run the solver as a script; solve() prints the thresholds and
    # returns None, so the final print always shows "None".
    p = Problem()
    solution = p.solve()
    print(f'problem solution: \n{solution}')
| true
|
6703371d0d21ac3dbed9b9decf0f38eeb8e0b64c
|
Python
|
wmytch/IntroToAlgorithms
|
/tools/tools.py
|
UTF-8
| 1,502
| 3.140625
| 3
|
[] |
no_license
|
import time
import random
class AlgorithmTools:
    """Helpers for generating lists and timing sort/search functions on them."""

    def __init__(self):
        # Kept for backward compatibility; the timing methods below use a
        # local accumulator (the original referenced an uninitialized local
        # `totalTime`, raising UnboundLocalError on first use).
        self.totalTime = 0

    def getSortedList(self, rangeStart, rangeEnd, step=1):
        """Return the ascending list [rangeStart, rangeEnd) with the given step."""
        return [x for x in range(rangeStart, rangeEnd, step)]

    def getUnSortedList(self, rangeStart, rangeEnd, length, step=1):
        """Return `length` unique values sampled from the range, in random order."""
        return random.sample(range(rangeStart, rangeEnd, step), length)

    # Backward-compatible alias: the original method name had a typo
    # ("UnSored") while the timing methods called the correctly spelled
    # name, which raised AttributeError at runtime.
    getUnSoredList = getUnSortedList

    def getRandomNum(self, startNum, endNum):
        """Return a random integer in [startNum, endNum)."""
        return random.randrange(startNum, endNum)

    def listSortTime(self, sortFunc, iterNum, length, *argList):
        """Total wall-clock seconds of `iterNum` runs of sortFunc on fresh unsorted lists."""
        totalTime = 0
        for i in range(iterNum):
            # Fix: the original omitted the required `length` argument.
            unsortedList = self.getUnSortedList(0, length, length)
            startTime = time.time()
            sortFunc(unsortedList, *argList)
            endTime = time.time()
            totalTime += endTime - startTime
        return totalTime

    def listSearchTime(self, searchFunc, iterNum, length, *argList):
        """Total wall-clock seconds of `iterNum` searches for random keys in a sorted list."""
        totalTime = 0
        sortedList = self.getSortedList(0, length)
        for i in range(iterNum):
            seekNum = self.getRandomNum(0, length - 1)
            startTime = time.time()
            searchFunc(sortedList, seekNum, *argList)
            endTime = time.time()
            totalTime += endTime - startTime
        return totalTime

    def FindTime(self, func, iterNum, length, *argList):
        """Total wall-clock seconds of `iterNum` runs of func on mixed-sign unsorted lists."""
        totalTime = 0
        for i in range(iterNum):
            unsortedList = self.getUnSortedList(-length, length, length)
            startTime = time.time()
            func(unsortedList, *argList)
            endTime = time.time()
            totalTime += endTime - startTime
        return totalTime
| true
|
0494c78b9869a400c158b266641993505b1c4c35
|
Python
|
DianaTs1/SC106A-Assignments
|
/Python-SC106A-Assignment3-main/forestfire.py
|
UTF-8
| 681
| 3.109375
| 3
|
[] |
no_license
|
from simpleimage import SimpleImage
INTENSITY_THRESHOLD = 1.05
def highlight_fires(filename):
    """Return the image with fire-hot pixels painted pure red.

    A pixel counts as fire when its red channel exceeds the pixel's
    brightness (channel average) by the INTENSITY_THRESHOLD factor;
    every other pixel is converted to grayscale.
    """
    image = SimpleImage(filename)
    for pixel in image:
        brightness = (pixel.red + pixel.blue + pixel.green) // 3
        if pixel.red >= brightness * INTENSITY_THRESHOLD:
            pixel.red, pixel.green, pixel.blue = 255, 0, 0
        else:
            pixel.red = pixel.green = pixel.blue = brightness
    return image
def main():
    """Show the original fire image, then the fire-highlighted version."""
    original_fire = SimpleImage('images/greenland-fire.png')
    original_fire.show()
    highlighted_fire = highlight_fires('images/greenland-fire.png')
    highlighted_fire.show()


if __name__ == '__main__':
    main()
| true
|
60729eda7c7ef67d0b4ae21c60c093a929f225b7
|
Python
|
PGScatalog/PGS_Catalog
|
/release/scripts/run_copy_scoring_files.py
|
UTF-8
| 3,187
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import os
import shutil
import argparse
from release.scripts.CopyScoringFiles import CopyScoringFiles
from release.scripts.CopyHarmonizedScoringFilesPOS import CopyHarmonizedScoringFilesPOS
def copy_scoring_files(new_ftp_dir, staged_scores_dir, scores_dir, md5_sql_filepath):
    """Copy the newly formatted scoring files to the production and metadata dirs."""
    print("\n#### Copy the new formatted scoring files ####")
    copier = CopyScoringFiles(new_ftp_dir, staged_scores_dir, scores_dir, md5_sql_filepath)
    copier.get_previous_release()
    copier.get_list_of_scores()
    copier.copy_scoring_files_to_production()
    copier.copy_scoring_files_to_metadata()
def copy_hmpos_scoring_files(new_ftp_dir, staged_scores_dir, scores_dir, md5_sql_filepath):
    """Copy the harmonized-position scoring files to the production and metadata dirs."""
    print("\n#### Copy the new harmonized position scoring files ####")
    copier = CopyHarmonizedScoringFilesPOS(new_ftp_dir, staged_scores_dir, scores_dir, md5_sql_filepath)
    copier.copy_harmonized_files_to_production()
    copier.copy_harmonized_files_to_metadata()
def get_new_release_date(release_date_file):
    """Return the new release date stored in a temporary file.

    Parameters
    ----------
    release_date_file : str
        Path to the file whose first line holds the release date.

    Returns
    -------
    str
        The release date with surrounding whitespace stripped — the caller
        embeds it in a file name ('scores_md5_<date>.sql'), so a trailing
        newline would produce a broken path.
    """
    try:
        with open(release_date_file) as f:
            new_release_date = f.readline().strip()
    except OSError:
        # Fix: the original referenced the undefined name `release_file`
        # here, raising NameError instead of reporting the missing file.
        print(f"Can't open the file '{release_date_file}'")
        exit()
    return new_release_date
def main():
    """Parse CLI arguments, then copy scoring and harmonized files to the new FTP."""
    argparser = argparse.ArgumentParser(description='Script to copy the scoring files and harmonized scoring files to the new FTP.')
    argparser.add_argument("--new_ftp_dir", type=str, help='The path to the data directory (only containing the metadata)', required=True)
    argparser.add_argument("--staged_scores_dir", type=str, help='The path to the staged scoring files directory', required=True)
    argparser.add_argument("--scores_dir", type=str, help='The path to the scoring files directory (Production)', required=False)
    argparser.add_argument("--hm_staged_scores_dir", type=str, help='The path to the harmonized Position staged files directory', required=True)
    argparser.add_argument("--hm_scores_dir", type=str, help='The path to the harmonized scoring files directory (Production)', required=False)

    args = argparser.parse_args()

    new_ftp_dir = args.new_ftp_dir
    staged_scores_dir = args.staged_scores_dir
    scores_dir = args.scores_dir
    hm_staged_scores_dir = args.hm_staged_scores_dir
    hm_scores_dir = args.hm_scores_dir

    # The release date is written by an earlier pipeline step into a
    # temporary file inside the new FTP directory.
    release_date_file = f'{new_ftp_dir}/release_date.txt'
    new_release_date = get_new_release_date(release_date_file)

    md5_sql_filename = f'scores_md5_{new_release_date}.sql'
    md5_sql_filepath = f'{new_ftp_dir}/{md5_sql_filename}'

    copy_scoring_files(new_ftp_dir,staged_scores_dir,scores_dir,md5_sql_filepath)

    copy_hmpos_scoring_files(new_ftp_dir,hm_staged_scores_dir,hm_scores_dir,md5_sql_filepath)

    # Move/remove temporary files: the release-date file is discarded and the
    # MD5 SQL file is kept one level above the release directory.
    if os.path.isfile(release_date_file):
        os.remove(release_date_file)
    if os.path.isfile(md5_sql_filepath):
        shutil.move(md5_sql_filepath, f'{new_ftp_dir}/../{md5_sql_filename}')


if __name__== "__main__":
    main()
| true
|
836e0e11372f05720f3f980e083b481fea8e2dc0
|
Python
|
kylebejel/blood-analysis
|
/blood-analysis.py
|
UTF-8
| 1,519
| 3.3125
| 3
|
[] |
no_license
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.filterwarnings('ignore')
# Import the data sets as pandas DataFrames.
train = pd.read_csv('blood-train.csv')
test = pd.read_csv('blood-test.csv')

# Rename the unnamed index column to something meaningful.
train.rename(columns={"Unnamed: 0" : "Donor_id"}, inplace=True)
test.rename(columns={"Unnamed: 0" : "Donor_id"}, inplace=True)

# View DataFrame info to ensure there are no missing values.
train.info()
test.info()

# Calculate feature correlations for train and test.
train_corr = train.corr()
test_corr = test.corr()

# Set up heatmaps to view the correlations between the features.
train_map = plt.figure(1)
sns.heatmap(train_corr)
test_map = plt.figure(2)
sns.heatmap(test_corr)

# Select feature columns 1-4 as X_train and the last column as the label.
X_train = train.iloc[:, [1,2,3,4]].values
y_train = train.iloc[:, -1].values
print(X_train)
print(y_train)

# Assign X_test from the same feature columns of the test set.
X_test = test.iloc[:,[1,2,3,4]].values
print(X_test)

# Standardize the features.
Scaler = StandardScaler()

# NOTE(review): fit_transform is called separately on X_test; the usual
# practice is to fit on X_train only and reuse the fitted scaler
# (Scaler.transform(X_test)) so train and test share the same scaling —
# confirm this is intended.
X_train = Scaler.fit_transform(X_train)
X_test = Scaler.fit_transform(X_test)

# Fit a random forest classifier and print its accuracy on the training set.
rand_forest = RandomForestClassifier(random_state=0)
rand_forest.fit(X_train, y_train)
acc = rand_forest.score(X_train, y_train)
print(acc)

# Make predictions on the test set.
y_pred = rand_forest.predict(X_test)
print(y_pred)

# Show the correlation heatmap figures.
plt.show()
| true
|
bc5111aaeb90e09e833563e93a2c4d90a9c4bfce
|
Python
|
prakritidev/bitcoincharts
|
/chart.py
|
UTF-8
| 797
| 2.71875
| 3
|
[] |
no_license
|
import socket
import sqlite3
import json
# Local SQLite database where each parsed trade is stored.
db = sqlite3.connect("chart.sqlite")
cur = db.cursor()

# Connect to the bitcoincharts streaming trade feed.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("bitcoincharts.com", 27007))

try:
    started = False
    data = []
    # Read the stream one byte at a time, accumulating everything between
    # '{' and '}' as one JSON trade record, then insert it into the table.
    # NOTE(review): this is Python 2 code (print statement, str-typed socket
    # reads); the scanner assumes the payload never contains nested braces —
    # confirm against the feed format.
    while True:
        d = s.recv(1)
        if d == "{":
            started = True
        if started:
            data.append(d)
        if d == "}":
            started = False
            jdata = json.loads("".join(data))
            cur.execute("INSERT INTO chart (timestamp, price, volume, currency, tid, symbol) VALUES (?, ?, ?, ?, ?, ?)", (jdata['timestamp'], jdata['price'], jdata['volume'], jdata['currency'], jdata['tid'], jdata['symbol']))
            db.commit()
            print "".join(data)
            data = []
except KeyboardInterrupt:
    # Ctrl-C stops the capture loop cleanly.
    pass
| true
|
5956674e6c0958c4a45141b225868fdd8661624c
|
Python
|
vinnihoke/ca-sprint
|
/cpu.py
|
UTF-8
| 9,557
| 3.609375
| 4
|
[] |
no_license
|
"""CPU functionality."""
import sys
class CPU:
    """Main CPU class: an LS-8 emulator.

    TERMINOLOGY
    * `SP`: Stack pointer. Address 244 (0xF4) if the stack is empty
    * `PC`: Program Counter, address of the currently executing instruction
    * `IR`: Instruction Register, contains a copy of the currently executing instruction
    * `MAR`: Memory Address Register, holds the memory address we're reading or writing
    * `MDR`: Memory Data Register, holds the value to write or the value just read
    * `FL`: Flags (equals / lessThan / greaterThan, set by CMP)
    """

    def __init__(self):
        """Construct a new CPU with empty RAM, registers and flags."""
        self.ram = [0] * 256    # bytes of memory
        self.reg = [0] * 8      # general-purpose registers R0-R7
        self.pc = 0             # program counter
        # Fix: initialize R7 *before* copying it into the stack pointer.
        # The original read reg[7] while it was still 0, so the stack
        # pointer started at 0 instead of 0xF4 (244 = empty stack).
        self.reg[7] = 0xF4
        self.sp = self.reg[7]   # stack pointer
        self.greaterThan = 0    # comparison flags, set by CMP
        self.lessThan = 0
        self.equals = 0
        self.running = False
        self.operand_a = 0      # operands of the current instruction
        self.operand_b = 0
        # Opcode -> handler dispatch table.
        self.branch_table = {
            0b00000001: self.HLT,
            0b10000010: self.LDI,
            0b01000111: self.PRN,
            0b10100010: self.MUL,
            0b01000101: self.PUSH,
            0b01000110: self.POP,
            0b01010000: self.CALL,
            0b00010001: self.RET,
            0b10100000: self.ADD,
            0b10000100: self.ST,
            0b01010100: self.JMP,
            0b10100111: self.CMP,
            0b01010110: self.JNE,
            0b01010101: self.JEQ,
            0b01001000: self.PRA,
            0b01100101: self.INC,
            # Fix: DEC has its own opcode. The original reused INC's key
            # (0b01100101), so the INC entry was silently overwritten.
            0b01100110: self.DEC,
        }

    def INC(self):
        """`INC register` (ALU): add 1 to the value in the given register."""
        self.alu("INC", self.operand_a, self.operand_b)
        self.pc += 2

    def DEC(self):
        """`DEC register` (ALU): subtract 1 from the value in the given register."""
        # Fix: the original dispatched the ALU "INC" operation here,
        # incrementing instead of decrementing.
        self.alu("DEC", self.operand_a, self.operand_b)
        self.pc += 2

    def PRA(self):
        """`PRA register`: print the ASCII character for the register's value."""
        # Fix: the original called ord() on the register *index* (an int,
        # raising TypeError); the spec wants chr() of the stored value.
        print(chr(self.reg[self.operand_a]))
        self.pc += 2  # fix: advance past the operand (was missing)

    def JMP(self):
        """`JMP register`: set the PC to the address stored in the register."""
        address = self.reg[self.operand_a]
        self.pc = address

    def JNE(self):
        """`JNE register`: jump to the register's address if the E flag is clear."""
        if not self.equals:
            address = self.reg[self.operand_a]
            self.pc = address
        else:
            self.pc += 2

    def JEQ(self):
        """`JEQ register`: jump to the register's address if the E flag is set."""
        if self.equals:
            address = self.reg[self.operand_a]
            self.pc = address
        else:
            self.pc += 2

    def ST(self):
        """`ST registerA registerB`: store regB's value at the address in regA."""
        value = self.reg[self.operand_b]
        address = self.reg[self.operand_a]
        self.ram[address] = value
        # Fix: advance past both operands (was missing, so run() re-executed
        # this instruction forever).
        self.pc += 3

    def ADD(self):
        """`ADD registerA registerB` (ALU): regA += regB."""
        self.alu("ADD", self.operand_a, self.operand_b)
        self.pc += 3

    def RET(self):
        """Pop the return address from the top of the stack into the PC."""
        next_instruction = self.ram[self.sp]
        self.sp += 1
        self.pc = next_instruction

    def CALL(self):
        """`CALL register`: push the return address, then jump to the subroutine.

        1. The address of the instruction directly after CALL is pushed onto
           the stack so RET can resume there.
        2. The PC is set to the address stored in the given register.
        """
        next_instruction = self.pc + 2
        self.sp -= 1
        self.ram[self.sp] = next_instruction
        self.pc = self.reg[self.operand_a]

    def POP(self):
        """`POP register`: pop the value at the top of the stack into the register."""
        value = self.ram[self.sp]
        self.reg[self.operand_a] = value
        self.sp += 1
        self.pc += 2

    def PUSH(self):
        """`PUSH register`: push the register's value onto the stack."""
        self.sp -= 1
        self.ram[self.sp] = self.reg[self.operand_a]
        self.pc += 2

    def HLT(self):
        """Halt the CPU: stop the run() loop."""
        self.running = False

    def LDI(self):
        """`LDI register immediate`: set the register to an integer constant."""
        self.reg[self.operand_a] = self.operand_b
        self.pc += 3

    def PRN(self):
        """`PRN register`: print the register's value as a decimal integer."""
        print(self.reg[self.operand_a])
        self.pc += 2

    def MUL(self):
        """`MUL registerA registerB` (ALU): regA *= regB."""
        self.alu("MUL", self.operand_a, self.operand_b)
        self.pc += 3

    def CMP(self):
        """`CMP registerA registerB` (ALU): set the E/L/G flags from the comparison."""
        self.alu("CMP", self.operand_a, self.operand_b)
        self.pc += 3

    def load(self, file):
        """Load a program (binary strings, one instruction per token) into memory."""
        address = 0
        try:
            with open(file, 'r') as reader:
                for line in reader:
                    line_arr = line.split()
                    for word in line_arr:
                        try:
                            instruction = int(word, 2)
                            self.ram[address] = instruction
                            address += 1
                        except ValueError:
                            # Skip comments / non-binary tokens.
                            continue
        except IOError:
            print('Please specify a valid file name')

    def alu(self, op, reg_a, reg_b):
        """ALU operations: ADD, MUL, CMP, INC, DEC."""
        if op == "ADD":
            self.reg[reg_a] += self.reg[reg_b]
        elif op == "MUL":
            self.reg[reg_a] *= self.reg[reg_b]
        elif op == "CMP":
            # Reset the comparison flags, then set the one that matches.
            self.equals = 0
            self.lessThan = 0
            self.greaterThan = 0
            if self.reg[reg_a] == self.reg[reg_b]:
                self.equals += 1
            elif self.reg[reg_a] < self.reg[reg_b]:
                self.lessThan += 1
            elif self.reg[reg_a] > self.reg[reg_b]:
                self.greaterThan += 1
        elif op == "INC":
            self.reg[reg_a] += 1
        elif op == "DEC":
            self.reg[reg_a] -= 1
        else:
            raise Exception("Unsupported ALU operation")

    def trace(self):
        """
        Handy function to print out the CPU state. You might want to call this
        from run() if you need help debugging.
        """
        print(f"TRACE: %02X | %02X %02X %02X |" % (
            self.pc,
            #self.fl,
            #self.ie,
            self.ram_read(self.pc),
            self.ram_read(self.pc + 1),
            self.ram_read(self.pc + 2)
        ), end='')
        for i in range(8):
            print(" %02X" % self.reg[i], end='')
        print()

    def run(self):
        """Run the CPU: fetch, dispatch via branch_table, repeat until HLT."""
        self.running = True
        while self.running:
            ir = self.ram[self.pc]  # instruction register
            self.operand_a = self.ram_read(self.pc + 1)
            self.operand_b = self.ram_read(self.pc + 2)
            self.branch_table[ir]()

    def ram_read(self, mar):
        """Return the byte at address `mar` (Memory Address Register)."""
        return self.ram[mar]

    def ram_write(self, mdr, address):
        """Write byte `mdr` (Memory Data Register) to `address`."""
        self.ram[address] = mdr
| true
|
9ed4f8b81b8d744d34e3ae5536e6946c95b5f876
|
Python
|
t12343/Quoridor
|
/quoridor/data.py
|
UTF-8
| 1,772
| 3.203125
| 3
|
[] |
no_license
|
WIDTH, HEIGHT = 800, 850  # The dimensions of the window
ROWS, COLS = 9, 9  # The amount of rows and columns on the board
GAP = (WIDTH / 9) / ROWS - 1  # The distance between two adjacent squares
SQUARE_SIZE = (WIDTH - GAP * 8) / ROWS  # The size of a square
SQUARE_DISTANCE = SQUARE_SIZE + GAP  # The distance from one corner of a square to the same corner of an adjacent square
WALLS_AT_THE_START = 10  # The amount of walls each player begins with

# NOTE(review): the second coordinate of the start positions is doubled
# ((ROWS - 1) * 2) — presumably positions live on a doubled grid that
# interleaves squares and wall gaps; confirm against the board code.
RED_START_POS = COLS - 1, 0
BLUE_START_POS = COLS - 1, (ROWS - 1) * 2

QUORIDOR = 'quoridor'

# UI fonts and text sizes.
FONT = 'arial'
FONT_SIZE = 20
DEPTH = 1  # NOTE(review): presumably the AI search depth — confirm usage
TITLE_SIZE = 100

# Home-screen menu entries.
HOME_TEXT1 = '1 player'
HOME_TEXT2 = '2 players'
HOME_TEXT3 = 'instructions'
TEXT_SIZE = 30

# Winner banner is assembled as WINNER_TEXT1 + <color name> + WINNER_TEXT2.
WINNER_TEXT1 = 'The '
WINNER_TEXT2 = ' player is the winner!'
WINNER_SIZE = 50
RETURN_TEXT = 'return to homepage'
INSTRUCTIONS = """In quoridor, your goal is to get your pawn to the other side of\n
the board before your opponent. There are 2 possible actions you can\n
make: move your pawn one block to any side, or place a wall. a pawn\n
can't move through a wall. To move a pawn, tou need to click on a\n
square adjacent to the pawn's square. When your mouse is above a gap\n
in the board, you will see the wall you can place there, if it is\n
possible according to these rules: all of the wall is within the board,\n
walls can not overlap and walls can not cross each other. to permanently\n
place a wall, left click when you see the wall. to reset the board and\n
the game, click 'r'."""

# RGB color constants
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BROWN = (175, 100, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
| true
|
77f1a4c00306cc597072919a254dd9cb32107f5d
|
Python
|
JackW987/opencv_practice
|
/L18.py
|
UTF-8
| 1,537
| 2.8125
| 3
|
[] |
no_license
|
# OpenCV histogram exercise: compute and plot the grayscale histogram of an
# image with and without a rectangular mask.  The commented-out sections are
# earlier experiments from the same lesson, kept for reference.
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# img = cv.imread('img-1.jpg')
# hist = cv.calcHist([img],[0],None,[256],[0,256])
# hist_numpy,bins = np.histogram(img.ravel(),256,[0,256])
# plt.subplot(121),plt.imshow(img),plt.title('original')
# plt.subplot(122),plt.hist(img.ravel(),256,[0,256]),plt.title('x_y_img')
# plt.show()
# img = cv.imread('white.png')
# cols,rows = img.shape[:2]
# img[0:rows,0:150] = [255,0,0]
# img[0:rows,150:300] = [0,255,0]
# img[0:rows,300:450] = [0,0,255]
# color = ('b','g','r')
# for i,col in enumerate(color):
# histr = cv.calcHist([img],[i],None,[256],[0,256])
# plt.plot(histr,color = col)
# plt.xlim([0,256])
# plt.show()
# img = cv.imread('img-2.jpg')
# color = ('b','g','r')
# for i,col in enumerate(color):
# histr = cv.calcHist([img],[i],None,[256],[0,256])
# print(i)
# print(col)
# plt.plot(histr,color = col)
# plt.xlim([0,256])
# plt.show()
img = cv.imread('img-2.jpg',0)
# create a mask
mask = np.zeros(img.shape[:2], np.uint8)
mask[100:1200, 100:1200] = 255
masked_img = cv.bitwise_and(img,img,mask = mask)
# Compute histograms of the full image and of the masked region.
# The mask is supplied as calcHist's third parameter.
hist_full = cv.calcHist([img],[0],None,[256],[0,256])
hist_mask = cv.calcHist([img],[0],mask,[256],[0,256])
plt.subplot(221), plt.imshow(img, 'gray')
plt.subplot(222), plt.imshow(mask,'gray')
plt.subplot(223), plt.imshow(masked_img, 'gray')
plt.subplot(224), plt.plot(hist_full), plt.plot(hist_mask)
plt.xlim([0,256])
plt.show()
| true
|
3b68ce024b8e91ed0cfa5eec448074c1b0bb5730
|
Python
|
Zahirgeek/learning_python
|
/OOP/6.2.5-4.py
|
UTF-8
| 807
| 3.96875
| 4
|
[] |
no_license
|
#็ปงๆฟไธญ็ๆ้ ๅฝๆฐ4
class Animal():
    """Root of the demo inheritance chain; defines no behavior of its own."""
class PaxingAni(Animal):
    """A crawling animal; its initializer announces the given name."""

    def __init__(self, name):
        # The printed text is the demo's observable behavior -- keep it verbatim.
        print("Paxing Dongwu {0}".format(name))
class Dog(PaxingAni):
    """Shows that a subclass's own __init__ replaces the parent's.

    __init__ is the constructor-like initializer: it runs automatically on
    every instantiation.  Note the parent initializer is NOT called here.
    """

    def __init__(self):
        print("I am init in dog")
#Catๆฒกๆๅๆ้ ๅฝๆฐ
class Cat(PaxingAni):
    """Defines no __init__, so instantiation falls back to PaxingAni.__init__."""
# When instantiating, the arguments in the parentheses must match the
# constructor's parameters.  (The original source had this comment split
# across two lines, leaving the second half uncommented -- a SyntaxError;
# merged back into a comment here.)
# Instantiation automatically calls Dog's own constructor; the parent
# class constructor is not invoked.
wang = Dog()
# Cat has no constructor of its own, so instantiation looks up the parent
# class constructor (PaxingAni.__init__).  That initializer requires a
# `name` argument and none is supplied, so this line raises TypeError --
# deliberately, to demonstrate constructor inheritance.
miao = Cat()
| true
|
36ab27079745c0446a43ae02740f7145bdfd206e
|
Python
|
Dylandelon/mytest
|
/HeapSort.py
|
UTF-8
| 600
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/python3
def sift(list, left, right):
    """Sift list[left] down through the max-heap slice list[left..right]."""
    root_value = list[left]
    parent = left
    child = 2 * parent + 1
    while child <= right:
        # Pick the larger of the two children (when a right child exists).
        if child < right and list[child] < list[child + 1]:
            child += 1
        if root_value >= list[child]:
            # Heap property already holds below this point.
            break
        # Promote the larger child and keep descending.
        list[parent] = list[child]
        parent = child
        child = 2 * parent + 1
    list[parent] = root_value
def heapSort(list):
    """Sort `list` in place (ascending) via heapsort and return it."""
    size = len(list)
    # Heapify: sift every internal node, deepest first.
    for root in range(size // 2 - 1, -1, -1):
        sift(list, root, size - 1)
    # Repeatedly move the current maximum to the end, then re-sift the
    # shrunken heap prefix.
    for end in range(size - 1, -1, -1):
        list[end], list[0] = list[0], list[end]
        sift(list, 0, end - 1)
    return list
# Demo: sorts the sample in place; prints [1, 2, 2, 5, 6, 8, 45].
nums = [5,2,45,6,8,2,1]
print(heapSort(nums))
| true
|
e9f8cdb6f1b77e9a5d6a1f240a2971c2c2cde999
|
Python
|
rfindra/fundamental-python
|
/fundamental1-konstruksi-dasar.py
|
UTF-8
| 649
| 4.03125
| 4
|
[] |
no_license
|
# Basic Python constructs
# Sequential: statements execute in order, top to bottom
print("Hello World")
print("by Indra Rizky Firmansyah")
print("12 July 2021")
print("-" * 25)
# Branching: selective execution
ingin_cepat = True # ingin_cepat ("want it fast") is a variable holding True
if ingin_cepat:
    print("Jalan Lurus") # printed when the variable is True ("go straight")
else:
    print("ambil jalan lain") # printed when the variable is False ("take another road")
# Looping
jumlah_anak = 4
for urutan_anak in range (1, jumlah_anak+1): # range(1, 5) yields 1..4 -- four iterations
    print(f"Halo anak #{urutan_anak}")
#
| true
|
dbb43d72998fc7d2bca9826f5cd2d82f4d5700e2
|
Python
|
Toufique-Sk/IOT-geolocaton-HomeAutomation
|
/publishmqttpython.py
|
UTF-8
| 2,177
| 2.71875
| 3
|
[] |
no_license
|
import paho.mqtt.client as mqttClient
import time
import requests
import json
from geopy.distance import vincenty
'''def geolocation():
g=geocoder.ip('me')
lat,lon=g.latlng
lat2,lon2= radians(20.985),radians( 80.3697)
R= 6373.0
lat1=radians(lat)
lon1=radians(lon)
dlon=lon2-lon1
dlat=lat2-lat1
a=sin(dlat/2)**2+cos(lat1)*cos(lat2)*sin(dlon/2)**2
c=2*atan2(sqrt(a),sqrt(1-a))
geocoderdistance=R*c
return distance,lat,lon
#print (distance)
#print (lat,lon)
'''
def geoloc():
    """Look up this machine's location via the ipstack API.

    Returns (distance_in_km_to_the_fixed_reference_point, latitude, longitude)
    and prints the resolved latitude, longitude, and city.
    """
    # NOTE(review): the API access key is hard-coded; consider moving it to
    # configuration.
    send_url = "http://api.ipstack.com/check?access_key=7b531d5c14f239eda4bd5168cadc2beb"
    geo_json = json.loads(requests.get(send_url).text)
    latitude = geo_json['latitude']
    longitude = geo_json['longitude']
    here = (latitude, longitude)
    reference = (22.572646, 88.363895)  # fixed reference coordinates
    city = geo_json['city']
    # NOTE(review): geopy removed vincenty() in 2.x; geodesic() is the successor.
    distance = vincenty(here, reference).km
    print (latitude, longitude, city)
    return distance, latitude, longitude
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: record the outcome in module flag `Connected`."""
    global Connected  # module-level flag polled by the main loop
    if rc == 0:
        print("Connected to broker")
        Connected = True
    else:
        print("Connection failed")
        Connected = False
Connected = False #global variable for the state of the connection
# Broker endpoint; authentication lines are kept commented out as in the
# original.
broker_address= "iot.iandwe.in"
port = 1883
##user = "hello"
##password = "shantam1234"
client = mqttClient.Client("Python") #create new instance
##client.username_pw_set(user, password=password) #set username and password
client.on_connect= on_connect #attach function to callback
client.connect(broker_address, port=port) #connect to broker
client.loop_start() #start the loop
while Connected != True: #Wait for connection
    time.sleep(1)
# Publish the computed distance every 5 seconds until interrupted.
try:
    while True:
        #value = input('Enter the message:')
        distance,lat,lon=geoloc()
        client.publish("Anything/SIT",str(distance))
        time.sleep(5)
except KeyboardInterrupt:
    # Clean shutdown on Ctrl-C.
    client.disconnect()
    client.loop_stop()
| true
|
c76eb0f1bd95c88b518a116374e24ee64badc6ce
|
Python
|
sbairishal/CodePath-Alumni-Professional-Interview-Prep-Course
|
/interviewbit-trees-min-depth-of-binary-tree.py
|
UTF-8
| 694
| 3.25
| 3
|
[] |
no_license
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution:
    # @param A : root node of tree
    # @return an integer
    def minDepth(self, A):
        """Return the minimum root-to-leaf depth via BFS.

        Level-order traversal guarantees the first leaf dequeued is the
        shallowest one, so we can return immediately.
        """
        if A is None:
            return 0
        frontier = deque()
        frontier.append([A, 1])
        while frontier:
            node, depth = frontier.popleft()
            if node.left is None and node.right is None:
                return depth  # first leaf reached -> minimum depth
            if node.left is not None:
                frontier.append([node.left, depth + 1])
            if node.right is not None:
                frontier.append([node.right, depth + 1])
| true
|
a2db6bd5e35d7891e9bfd0599dd9b56f4fa4d1fc
|
Python
|
se7ven012/TrafficFlowPrediction
|
/train.py
|
UTF-8
| 3,621
| 2.71875
| 3
|
[] |
no_license
|
#%%
import numpy as np
import pandas as pd
from data.data import process_data
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from keras.models import Sequential
from keras.utils.vis_utils import plot_model
from keras import losses
import matplotlib.pyplot as plt
def LSTM_model(structure):
    """Build a 2-layer LSTM regressor.

    `structure` = [timesteps, units_layer1, units_layer2, output_dim].
    """
    steps, units_a, units_b, out_units = structure[0], structure[1], structure[2], structure[3]
    net = Sequential()
    net.add(LSTM(units_a, input_shape=(steps, 1), return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(units_b, return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(out_units, activation='sigmoid'))
    net.compile(loss=losses.mean_squared_error, optimizer="nadam")
    return net
def GRU_model(structure):
    """Build a 2-layer GRU regressor.

    `structure` = [timesteps, units_layer1, units_layer2, output_dim].
    """
    steps, units_a, units_b, out_units = structure[0], structure[1], structure[2], structure[3]
    net = Sequential()
    net.add(GRU(units_a, input_shape=(steps, 1), return_sequences=True))
    net.add(Dropout(0.2))
    net.add(GRU(units_b, return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(out_units, activation='sigmoid'))
    net.compile(loss=losses.mean_squared_error, optimizer="nadam")
    return net
def RNN_model(structure):
    """Build a 2-layer SimpleRNN regressor.

    `structure` = [timesteps, units_layer1, units_layer2, output_dim].
    """
    steps, units_a, units_b, out_units = structure[0], structure[1], structure[2], structure[3]
    net = Sequential()
    net.add(SimpleRNN(units_a, input_shape=(steps, 1), return_sequences=True))
    net.add(Dropout(0.2))
    net.add(SimpleRNN(units_b, return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(out_units, activation='sigmoid'))
    net.compile(loss=losses.mean_squared_error, optimizer="nadam")
    return net
def train(model,x_train,y_train,config,name,lag,dataset):
    """Fit `model` and persist the weights (.h5) and loss history (.csv).

    File names encode the dataset, architecture name, lag, and batch size.
    """
    hist = model.fit(
        x_train, y_train,
        batch_size=config["batch"],
        epochs=config["epochs"],
        validation_split=0.05)
    #save model
    model.save('model/' + dataset + '_' + name + '_' + str(lag) + '_' + str(config["batch"])+'.h5')
    # Persist per-epoch training/validation losses for later comparison.
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + dataset + '_' + name + '_' + str(lag) + '_' + str(config["batch"])+ ' loss.csv', encoding='utf-8', index=False)
def main():
    """Train LSTM, GRU, and SimpleRNN models for every lag/dataset combination."""
    lags = [6,9,12,18]
    config = {"batch": 256, "epochs": 300}
    datasets=['workdays']
    names=['LSTM','GRU','RNN']
    for lag in lags:
        for dataset in datasets:
            file1 = 'data/'+ dataset + '_' +'train.csv'
            file2 = 'data/'+ dataset + '_' +'test.csv'
            x_train, y_train, x_test, y_test, scaler = process_data(file1, file2, lag)
            x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
            # Add a trailing third dimension for training: (samples, timesteps, 1).
            netstructure=[lag,64,64,1]
            for name in names:
                if name=='LSTM':
                    m=LSTM_model(netstructure)
                    train(m,x_train,y_train,config,name,lag,dataset)
                    plot_model(m,to_file='result/LSTM'+'_'+ str(lag) + '.png',show_shapes=True,show_layer_names=False,rankdir='TB')
                if name=='GRU':
                    m=GRU_model(netstructure)
                    train(m,x_train,y_train,config,name,lag,dataset)
                    plot_model(m,to_file='result/GRU'+'_'+ str(lag) + '.png',show_shapes=True,show_layer_names=False,rankdir='TB')
                if name=='RNN':
                    m=RNN_model(netstructure)
                    train(m,x_train,y_train,config,name,lag,dataset)
                    plot_model(m,to_file='result/RNN'+'_'+ str(lag) + '.png',show_shapes=True,show_layer_names=False,rankdir='TB')
            print(str(dataset)+':Finished!')
        print(str(lag)+':Done!!')
# NOTE(review): module-level invocation -- main() also runs when this module
# is imported; an `if __name__ == "__main__":` guard would avoid that.
main()
#%%
| true
|
9a5a2686f43faca9ff76fca7c95c15e855225c46
|
Python
|
ymirthor/T-201-GSKI
|
/WeeklyGlossary/Vika 5/Recursive programming.py
|
UTF-8
| 859
| 3.5625
| 4
|
[] |
no_license
|
def length_of_string(string):
    """Recursively count the characters in `string`.

    Fixes the original implementation, which returned 1 for the empty
    string, fell back to the built-in len() for everything else, and left
    its recursive call on an unreachable line after `return`.
    """
    if len(string) == 0:
        return 0  # base case: the empty string has length 0
    # One for the head character, plus the length of the tail.
    return 1 + length_of_string(string[1:])
print(length_of_string("Hello world"))
def linear_search(lis, value):
    """Recursively report whether `value` occurs anywhere in list `lis`."""
    if not lis:
        return False  # exhausted the list without finding a match
    if lis[0] == value:
        return True
    # Not at the head -- recurse on the tail.
    return linear_search(lis[1:], value)
#print(linear_search([1,2,3], 2))
# def count_instances(lis, value):
# if lis == []:
# return 0
# if lis[0] == value:
# return 1 + count_instances(lis[1:], value)
# return 0 + count_instances(lis[1:], value)
# print(count_instances([1,2,3,3,3,5], 6))
def check_dub(lis):
    """Recursively report whether `lis` contains any duplicate value.

    A duplicate exists when the head value reappears in the tail, or when
    the tail itself contains one.  (Also replaces the original's
    non-idiomatic `== True` comparison with a direct boolean expression.)
    """
    if not lis:
        return False
    return linear_search(lis[1:], lis[0]) or check_dub(lis[1:])
print(check_dub([1, 3, 3]))
| true
|
ed7003d6cd4fc1a17c1807c74f72b3cd2512001a
|
Python
|
clovery410/mycode
|
/leetcode/337house_robber3.py
|
UTF-8
| 945
| 2.875
| 3
|
[] |
no_license
|
class TreeNode(object):
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x       # payload stored at this node
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution(object):
    def rob(self, root):
        """Return the maximum sum obtainable from non-adjacent tree nodes.

        `may_take` tracks whether the current node is allowed to be robbed
        (it is not when its parent was robbed); results are memoized per
        (node, may_take) pair.
        """
        if root is None:
            return 0
        memo = {}

        def best(node, may_take):
            if node is None:
                return 0
            key = (node, may_take)
            if key in memo:
                return memo[key]
            # Option 1: skip this node; both children become available.
            skip_here = best(node.left, True) + best(node.right, True)
            if may_take:
                # Option 2: rob this node; children must then be skipped.
                take_here = node.val + best(node.left, False) + best(node.right, False)
                memo[key] = max(take_here, skip_here)
            else:
                memo[key] = skip_here
            return memo[key]

        return best(root, True)
| true
|
915df0fed33a28d668ae40d4ec8d76fab398589d
|
Python
|
AtTheMatinee/Roguelike-Core
|
/gameLoop.py
|
UTF-8
| 8,340
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
'''
game.py
'''
import libtcodpy as libtcod
import worldMap
from config import *
import objects
import objectClass
from actors import Actor
#import actorStats
import textwrap
import factions
import actorSpawner
import itemSpawner
import spellSpawner
import commands
import statusEffects
import shelve
import random
'''
====================
Game Engine
====================
'''
class GameLoop:
    """Core engine for the roguelike: owns the map, objects, actors, and the
    energy-based turn scheduler, plus save/load via `shelve`.

    NOTE(review): indentation reconstructed from a flattened source dump --
    verify nesting (especially in process/loadGame) against the original.
    """

    def __init__(self,ui,mapWidth,mapHeight,seed):
        # ====================
        # Initialization
        # ====================
        self.ui = ui
        self.mapWidth = mapWidth
        self.mapHeight = mapHeight
        self.globalSeed = seed
        self.experiencePerLevel = 0
        # Index into the current level's actor list (mod len) for turn order.
        self._currentActor = 0
        self._currentLevel = None
        self._objects = []
        self._messages = []
        # Energy an actor must accumulate before it may act.
        self.turnCost = 12
        self.spendEnergyOnFailure = False
        self.stopAfterEveryProcess = False
        self.actorSpawner = actorSpawner.ActorSpawner(self)
        self.itemSpawner = itemSpawner.ItemSpawner(self)
        self.spellSpawner = spellSpawner.SpellSpawner(self)
        self.factions = factions.FactionTracker()
        # !!! When I continue an old game, load saved game.factions._factions

    def process(self):
        '''
        The process() method asks the current actor for a command
        object, then allows that command object to execute itself.
        If the command fails to execute, the old command may provide
        a new command to try instead. If there is no alternative,
        the method gives up and tries again on the next frame.
        '''
        #import pdb; pdb.set_trace()
        actor = self._currentLevel._actors[self._currentActor % len(self._currentLevel._actors)]
        # Prevent the loop from skipping an actor if they havn't taken their turn E.G. the player
        if (actor.energy >= self.turnCost) and (actor.needsInput()): return
        command = None
        while (command == None): # cycle through alternatives until one resolves
            actor = self._currentLevel._actors[self._currentActor % len(self._currentLevel._actors)]
            if (actor.energy >= self.turnCost) or (actor.gainEnergy()):
                if actor.needsInput(): return
                command = actor.getCommand()
            else:
                # Not enough energy: advance to the next actor in rotation.
                self._currentActor = (self._currentActor + 1) % len(self._currentLevel._actors)
                if self.stopAfterEveryProcess == True: return
        success,alternative = command.perform()
        # Follow the chain of alternative commands until one succeeds or
        # no alternative remains.
        while alternative != None:
            command = alternative
            success,alternative = command.perform()
        if (self.spendEnergyOnFailure == True) or (success == True):
            if actor == self.hero:
                # after the hero has successfully taken a turn,
                # update the tick method on every object in the level
                for obj in self._currentLevel._objects:
                    obj.tick()
            self._currentActor = (self._currentActor + 1) % len(self._currentLevel._actors)

    def message(self,newMsg,color = UI_PRIMARY_COLOR):
        """Append `newMsg` to the scrolling message buffer, wrapped to fit."""
        # split the message if the line is too long
        newMsgLines = textwrap.wrap(newMsg,MSG_WIDTH)
        for line in newMsgLines:
            # if the buffer is full, remove the first line to make room for the new one
            if len(self._messages) == MSG_HEIGHT:
                del self._messages[0]
            # add the new line as a tuple with the text and the color
            self._messages.append((line,color))

    def newGame(self,heroClass,heroName):
        """Create a fresh world of 20 levels and spawn the hero on level 0."""
        self.map = worldMap.Map(self, self.mapWidth, self.mapHeight)
        for i in xrange(20):
            self.map.createNewLevel() #the location of this will probably change
        self.map.loadLevel(0)
        heroX = self._currentLevel.stairsUp.x
        heroY = self._currentLevel.stairsUp.y
        self.hero = self.actorSpawner.spawn(heroX,heroY,heroClass)
        if heroName == None:
            heroName = "Hero"
        else:
            self.hero.properNoun = True
        self.hero.name = heroName

    def saveGame(self):
        """Serialize engine state, all objects, and all levels to 'savegame'."""
        #print self._objects
        file = shelve.open('savegame','n')
        # ==== Game Engine Data ====
        file['mapWidth'] = self.mapWidth
        file['mapHeight'] = self.mapHeight
        file['globalSeed'] = self.globalSeed
        file['currentLevelIndex'] = self._currentLevel.levelDepth
        file['currentActor'] = self._currentActor
        if self.hero in self._objects:
            file['heroIndex'] = self._objects.index(self.hero)
        else:
            file['heroIndex'] = 0
        # ==== Game Objects Data ====
        savedObjects = []
        for obj in self._objects:
            data = obj.saveData()
            savedObjects.append(data)
        try:
            file['_objects'] = savedObjects
        except Exception as e:
            print "Error while saving objects"
            print e
        # ==== Game Level Data ====
        savedLevels = []
        for level in self.map._levels:
            data = level.saveData()
            savedLevels.append(data)
        file['mapLevels'] = savedLevels
        file.close()

    def loadGame(self):
        """Rebuild engine state, levels, and objects from the 'savegame' shelf.

        Objects are recreated in their original list order so that the saved
        integer indices used for inventories, equipment, and level rosters
        still resolve correctly.
        """
        file = shelve.open('savegame','r')
        # ==== Engine Data ====
        self.mapWidth = file['mapWidth']
        self.mapHeight = file['mapHeight']
        self.seed = file['globalSeed']
        # ==== Initialize levels ====
        self.map = worldMap.Map(self, self.mapWidth, self.mapHeight)
        savedLevels = file['mapLevels']
        for levelData in savedLevels:
            level = self.map.createNewLevel()
            level.loadData(levelData)
        # set curent level
        currentLevelIndex = file['currentLevelIndex']
        self.map.loadLevel(currentLevelIndex)
        # ==== Objects ====
        savedObjects = file['_objects']
        for objectData in savedObjects:
            try:
                # create an object of the same type
                if objectData['dataType'] == 'Object':
                    x = objectData['x']
                    y = objectData['y']
                    char = objectData['char']
                    name = objectData['name']
                    color = objectData['color']
                    blocks = objectData['blocks']
                    properNoun = objectData['properNoun']
                    alwaysVisible = objectData['alwaysVisible']
                    if objectData['class'] == objects.Stairs:
                        destination = objectData['destination']
                        obj = objects.Stairs(self,x,y,char,name,color,destination,blocks,properNoun,alwaysVisible)
                    elif objectData['class'] == objects.Trace:
                        obj = objects.Trace(self,x,y,char,name,color,None,blocks,properNoun,alwaysVisible)
                    else:
                        try:
                            obj = objectClass.Object(self,x,y,char,name,color,blocks,properNoun,alwaysVisible)
                        except:
                            # Keep list indices aligned even when creation fails.
                            self._objects.append(None)
                            continue
                elif objectData['dataType'] == 'Actor':
                    x = objectData['x']
                    y = objectData['y']
                    key = objectData['_spawnKey']
                    obj = self.actorSpawner.spawn(x,y,key,new = False)
                elif objectData['dataType'] == 'Item':
                    x = objectData['x']
                    y = objectData['y']
                    key = objectData['_spawnKey']
                    level = objectData['level']
                    obj = self.itemSpawner.spawn(x,y,key,level,False)
                else:
                    print 'Cannot load ',objectData['class']
                    self._objects.append(None)
                    continue
                obj.loadData(objectData)
            except Exception as e:
                print "Error loading ",objectData['class']
                print e
                continue
        # ==== Equip Actors ====
        for i in xrange(len(self._objects)):
            if isinstance(self._objects[i], Actor):
                actor = self._objects[i]
                actorData = savedObjects[i]
                # Inventory
                for itemIndex in actorData['inventory']:
                    item = self._objects[itemIndex]
                    item.moveToInventory(actor)
                # Equipment
                for itemIndex in actorData['equipment']:
                    if itemIndex != None:
                        item = self._objects[itemIndex]
                        actor.equipItem(item)
                    else:
                        continue
        # ==== Fill Levels ====
        for i in xrange(len(self.map._levels)):
            level = self.map._levels[i]
            levelData = savedLevels[i]
            level._objects = []
            for index in xrange(len(levelData['_objects'])):
                level._objects.append(self._objects[ levelData['_objects'][index] ])
            level._items = []
            for index in xrange(len(levelData['_items'])):
                level._items.append(self._objects[ levelData['_items'][index] ])
            level._actors = []
            for index in xrange(len(levelData['_actors'])):
                level._actors.append(self._objects[ levelData['_actors'][index] ])
            stairsUpIndex = levelData['StairsUp']
            if stairsUpIndex != None:
                level.StairsUp = self._objects[stairsUpIndex]
            stairsDownIndex = levelData['StairsDown']
            if stairsDownIndex != None:
                level.StairsDown = self._objects[stairsDownIndex]
        # hero
        heroIndex = file['heroIndex']
        self.hero = self._objects[heroIndex]
        # current actor
        self._currentActor = file['currentActor']
        file.close()

    def getSeeds(self):
        """Derive 21 deterministic per-level seeds from the global seed."""
        random.seed(self.globalSeed)
        seeds = []
        for i in xrange(21):
            seeds.append(random.random())
        return seeds

    def addObject(self, object):
        """Register `object` with the engine's master object list."""
        self._objects.append(object)

    def removeObject(self, object):
        """Remove `object` from the engine's master object list."""
        self._objects.remove(object)
| true
|
e61e89969c943d754a9c71bd47db2bcb2cd3f1dc
|
Python
|
pseudobabble/python-tdd
|
/test_fizzbuzz.py
|
UTF-8
| 611
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
import pytest
def multipleOf(multiple, factor):
    """Return True when `multiple` divides evenly by `factor`."""
    return not multiple % factor
def fizzbuzz(multiple):
    """Map `multiple` to its word; returns None when not a multiple of 3 or 5.

    NOTE(review): the 'Fizz'/'Buzz' labels are swapped relative to the usual
    convention (here 3 -> 'Buzz', 5 -> 'Fizz').  The in-file tests encode the
    same swapped mapping, so it is deliberately preserved.
    """
    by_three = multipleOf(multiple, 3)
    by_five = multipleOf(multiple, 5)
    if by_three and by_five:
        return 'FizzBuzz'
    if by_three:
        return 'Buzz'
    if by_five:
        return 'Fizz'
def test__assertTrue():
    # Smoke test: verifies the test wiring itself runs.
    assert True
def test__returnFizzOnMultipleOfThree():
    # NOTE(review): the name says "Fizz" but this suite's mapping yields
    # 'Buzz' for multiples of three (labels swapped vs. convention).
    assert fizzbuzz(6) == 'Buzz'
def test_returnBuzzOnMultipleOfFive():
    # NOTE(review): the name says "Buzz" but the swapped mapping yields 'Fizz'.
    assert fizzbuzz(10) == 'Fizz'
def test__returnFizzBuzzOnMultipleOfThreeOrFive():
    # 15 is divisible by both 3 and 5 -> the combined label.
    assert fizzbuzz(15) == 'FizzBuzz'
| true
|
8837a921df1171ff3f00cbc1b2723747f57a6eab
|
Python
|
nvseenu/tech-practices
|
/bookapi/books/book.py
|
UTF-8
| 6,366
| 2.734375
| 3
|
[] |
no_license
|
from abc import ABC, abstractmethod
import psycopg2
import json
import logging
from datetime import datetime
logger = logging.getLogger(__name__)
class BookRepo:
    """Repository for `books` rows backed by a psycopg2 connection pool."""

    def __init__(self, cpool):
        self._cpool = cpool  # connection pool; connections are borrowed per call

    def get_books(self, **filters):
        """Return all books matching the equality `filters` as DbBook objects."""
        conn = None
        try:
            query = self._get_all_books_query(filters)
            logger.debug('Executing query: %s', query)
            conn = self._cpool.getconn()
            cur = conn.cursor()
            # Parameters must be an ordered sequence matching the sorted
            # placeholder order built in _get_all_books_query.  (Bug fix:
            # the original passed a generator, which psycopg2 rejects.)
            cur.execute(query, tuple(filters[key] for key in sorted(filters.keys())))
            books = [DbBook._from_db_row(self._cpool, row) for row in cur.fetchall()]
            cur.close()
            return books
        except psycopg2.Error as err:
            # Bug fix: the original called .format() with a single argument
            # for two placeholders, raising IndexError instead of the
            # intended ValueError.
            raise ValueError('Unable to fetch books with filters: {} due to error: {}'.format(filters, err.pgerror))
        finally:
            if conn:
                self._cpool.putconn(conn)

    def _get_all_books_query(self, filters):
        """Build the SELECT, appending a WHERE clause of ANDed equality tests."""
        query = 'SELECT id, name, isbn, authors, country, number_of_pages, publisher, release_date FROM books'
        if not filters:
            return query
        query += ' WHERE '
        conditions = ['{}=%s'.format(key) for key in sorted(filters.keys())]
        # Bug fix: WHERE conditions must be joined with AND; the original
        # joined them with ', ', which is invalid SQL.
        return query + ' AND '.join(conditions)

    def get_book(self, id):
        """Fetch one book by primary key; raise ValueError when not found."""
        conn = None
        try:
            conn = self._cpool.getconn()
            cur = conn.cursor()
            query = 'SELECT id, name, isbn, authors, country, number_of_pages, publisher, release_date FROM books WHERE id = %s'
            logger.debug('Executing query: %s', query)
            cur.execute(query, (id,))
            row = cur.fetchone()
            if not row:
                raise ValueError('Could not find a book with id: {}'.format(id))
            book = DbBook._from_db_row(self._cpool, row)
            return book
        except psycopg2.Error as e:
            raise ValueError('Unable to fetch a book due to error: ', e.pgerror, e.pgcode)
        finally:
            if conn:
                self._cpool.putconn(conn)

    def get_empty_book(self):
        """Return a fresh, unsaved DbBook bound to this repo's pool."""
        return DbBook(self._cpool)
class DbBook:
    """Active-record style wrapper around one row of the `books` table."""

    def __init__(self, cpool, id=None):
        self._id = id        # primary key; None until the first save()
        self._cpool = cpool  # psycopg2 connection pool
        self._name = ''
        self._isbn = ''
        self._authors = []
        self._country = ''
        self._number_of_pages = 0
        self._publisher = ''
        self._release_date = None

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def isbn(self):
        return self._isbn

    @isbn.setter
    def isbn(self, isbn):
        self._isbn = isbn

    @property
    def authors(self):
        return self._authors

    @authors.setter
    def authors(self, authors):
        self._authors = authors

    @property
    def country(self):
        return self._country

    @country.setter
    def country(self, country):
        self._country = country

    @property
    def number_of_pages(self):
        return self._number_of_pages

    @number_of_pages.setter
    def number_of_pages(self, number_of_pages):
        self._number_of_pages = number_of_pages

    @property
    def publisher(self):
        return self._publisher

    @publisher.setter
    def publisher(self, publisher):
        self._publisher = publisher

    @property
    def release_date(self):
        return self._release_date

    @release_date.setter
    def release_date(self, release_date):
        self._release_date = release_date

    def values(self, **values):
        """Bulk-assign any subset of the known fields; unknown keys are ignored."""
        for key, value in values.items():
            if hasattr(self, key):
                setattr(self, key, value)

    def validate(self):
        """Validate field values before persisting (not implemented yet)."""
        pass

    def save(self):
        """INSERT the row when it has no id yet, otherwise UPDATE it."""
        conn = None
        try:
            conn = self._cpool.getconn()
            cur = conn.cursor()
            if self._id:
                self._update(cur)
            else:
                self._create(cur)
            conn.commit()
        except psycopg2.Error as e:
            raise ValueError('Unable to save a book due to error: ', e.pgerror, e.pgcode)
        finally:
            if conn:
                self._cpool.putconn(conn)

    def delete(self):
        """DELETE this row by primary key."""
        conn = None
        try:
            conn = self._cpool.getconn()
            cur = conn.cursor()
            cur.execute('DELETE FROM books WHERE id = %s', (self._id,))
            conn.commit()
            cur.close()
        except psycopg2.Error as e:
            raise ValueError('Unable to save a book due to error: ', e.pgerror, e.pgcode)
        finally:
            if conn:
                self._cpool.putconn(conn)

    def _create(self, cur):
        """INSERT this record and capture the generated primary key."""
        # authors is stored as a JSON text column; NULL when empty.
        authors = json.dumps(self._authors) if self._authors else None
        cur.execute(
            'INSERT INTO books (name, isbn, authors, country,number_of_pages, publisher, release_date) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING id',
            (self._name,
             self._isbn,
             authors,
             self._country,
             self._number_of_pages,
             self._publisher,
             self._release_date))
        self._id = cur.fetchone()[0]
        logger.debug('New book record has been created with id: %d', self._id)

    def _update(self, cur):
        # NOTE(review): only name and isbn are persisted on update; all
        # other changed fields are silently ignored -- confirm intended.
        cur.execute('UPDATE books SET name = %s, isbn = %s WHERE id = %s',
                    (self._name, self._isbn, self._id))
        logger.debug('book record has been updated with id:%d', self._id)

    @staticmethod
    def _from_db_row(cpool, row):
        """Hydrate a DbBook from a SELECT row (column order set by BookRepo)."""
        book = DbBook(cpool, row[0])
        book.name = row[1]
        book.isbn = row[2]
        # NOTE(review): assumes the authors column holds non-NULL JSON; a
        # NULL value would make json.loads raise -- confirm the schema.
        book.authors = json.loads(row[3])
        book.country = row[4]
        book.number_of_pages = row[5]
        book.publisher = row[6]
        book.release_date = datetime.strftime(row[7], '%Y-%m-%d')
        return book

    def dict(self):
        """Return the row as a plain dict (suitable for JSON responses)."""
        return {
            'id': self._id,
            'name': self._name,
            'isbn': self._isbn,
            'authors': self._authors,
            'country': self._country,
            'number_of_pages': self._number_of_pages,
            'publisher': self._publisher,
            'release_date': self._release_date
        }

    def __repr__(self):
        # Bug fix: __repr__ must return a string; the original returned the
        # dict itself, which raises TypeError whenever repr() is invoked.
        return repr(self.dict())
| true
|
b7f64c02ac0db70cb94809a49a453d90627e905a
|
Python
|
jacquelineawatts/d3_tester
|
/states.py
|
UTF-8
| 529
| 3.109375
| 3
|
[] |
no_license
|
import pickle
import requests
def pickle_state_names(data):
    """Make call to API to find state names from ID, pickles object.

    `data` is an iterable of dicts with a 'geo' key; the resulting
    {geo_id: state_name} mapping is pickled to the file 'state_names'.
    """
    state_names = {}
    for state_details in data:
        state_id = state_details['geo']
        geo_url = 'http://api.datausa.io/attrs/geo/' + state_id
        json = requests.get(geo_url).json()
        state_name = json["data"][0][1]
        state_names[state_id] = state_name
    # Bug fix: pickle requires a binary-mode file in Python 3 (the original
    # opened in text mode 'w', making pickle.dump raise TypeError); the
    # context manager also guarantees the file is closed on error.
    with open('state_names', 'wb') as file_object:
        pickle.dump(state_names, file_object)
| true
|
4fcd081cd3d0f4b7d243be311c02e842a33e848c
|
Python
|
jixinfeng/leetcode-soln
|
/python/147_insersion_sort_list.py
|
UTF-8
| 888
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
"""
Sort a linked list using insertion sort.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def insertionSortList(self, head):
        """
        Insertion-sort a singly linked list and return its new head.

        :type head: ListNode
        :rtype: ListNode
        """
        if head is None or head.next is None:
            return head
        # Dummy head whose value is strictly less than the first node's, so
        # the very first `prev.val <= curr.val` comparison succeeds and the
        # insertion scan below always stops before running off the front.
        p = ListNode(head.val - 1)
        p.next = head
        prev = p
        curr = p.next
        while curr:
            if prev.val <= curr.val:
                # Already in order relative to the sorted prefix; advance.
                curr = curr.next
                prev = prev.next
                continue
            # Unlink curr, then scan from the dummy for its insertion spot.
            prev.next = curr.next
            ins = p
            while curr.val >= ins.next.val:
                ins = ins.next
            curr.next = ins.next
            ins.next = curr
            # Continue from the node that followed the one we moved.
            curr = prev.next
        return p.next
| true
|
8321166512950c316ada5f17dee2bc9a7002b518
|
Python
|
Abdullah-khan399/Docker
|
/p_dkr_images.py
|
UTF-8
| 3,006
| 2.609375
| 3
|
[] |
no_license
|
def images():
    """Interactive terminal menu for managing Docker images via the CLI.

    Loops until the user picks 9 (exit); 8 returns to the main menu.
    NOTE(review): user input is interpolated straight into os.system()
    shell commands -- a command-injection risk; consider subprocess.run
    with an argument list.
    """
    import os
    import p_docker_menu
    x = 0
    while x != 9 :
        os.system("clear")
        os.system("tput setaf 1")
        os.system("tput bold")
        print('''
\t###### ######## ######## ## ## ######## ########
\t## ### ## ## ## ## ## ## ## ##
\t## ### ## ## ## ## ## ## ## ##
\t## ### ## ## ## #### ##### ########
\t## ### ## ## ## ## ## ## ## ##
\t## ### ## ## ## ## ## ## ## ##
\t###### ######## ######## ## ## ######## ## ##
\t___________________________________________________________
\t___________________________________________________________
\t\*\ /*/
\t \*\ _____________________________________ /*/
\t \*\ /*/ \*\ /*/
\t \*\ \*\ WELCOME TO DOCKER IMAGES TUI /*/ /*/
\t \*\ ------------------------------------- /*/
\t \*\###########################################/*/
\n\n''')
        os.system("tput setaf 1")
        os.system("tput rmul")
        print('''
Press 1 : To install a new Image
Press 2 : To upload a image
Press 3 : To show the list of all Images
Press 4 : To delete a Image
Press 5 : To rename a Image
Press 6 : To commit a Image from Container
Press 7 : To inspect a Image
Press 8 : To GO BACK to Main Menu
Press 9 : To EXIT\n''')
        os.system("tput setaf 1")
        x = int(input("Enter your choices : "))
        os.system("tput setaf 15")
        if ( x == 1 ):
            name=input("enter the name of image : ")
            os.system("docker pull {}".format(name))
            print("image installed successfully")
        elif ( x == 3 ):
            print("\nDocker images..... \n")
            os.system("docker images")
        elif ( x == 2 ):
            name=input("enter the name of image : ")
            os.system("docker push {}".format(name))
        elif ( x == 4 ):
            name=input("enter the name of image : ")
            os.system("docker rmi {}".format(name))
            print("image removed successfully")
        elif ( x == 5 ):
            name=input("enter the name of image : ")
            namei=input("enter the new name of image : ")
            os.system("docker tag {} {}".format(name,namei))
            print("image renamed successfully")
        elif ( x == 6 ):
            name=input("enter the name of container : ")
            namei=input("enter the name of image : ")
            os.system("docker commit {} {}".format(name,namei))
            print("image created successfully")
        elif ( x == 7 ):
            name=input("enter the name of image : ")
            os.system("docker image inspect {}".format(name))
        elif ( x == 8 ):
            p_docker_menu.docker()
        elif ( x == 9 ):
            exit()
        os.system("tput setaf 1")
        y=input("\n\nEnter to continue.......")
| true
|
5f2bb93905fcde76a2dfa7abcdf8e8bef451057b
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03102/s487218100.py
|
UTF-8
| 309
| 2.578125
| 3
|
[] |
no_license
|
# Competitive-programming solution: count the rows A_i whose dot product
# with vector b, plus constant c, is positive.
import sys
sys.setrecursionlimit(10**6)  # NOTE(review): unused here -- no recursion below
n, m, c = map(int, input().split())
b = list(map(int, input().split()))
readline = sys.stdin.readline
A = [[int(i) for i in readline().split()] for _ in range(n)]
ans = 0
for a in A:
    # Condition: a . b + c > 0
    if sum([i*j for i,j in zip(a,b)]) + c > 0:
        ans += 1
print(ans)
| true
|
4b1806d8fb1e1bf6255929fd8570da53832092e5
|
Python
|
nortikin/nikitron_tools
|
/pythonism/ะดะตะฝัะฝะตะดะตะปะธ.py
|
UTF-8
| 940
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 ะะพัะพะดะตัะบะธะน
# Licensed GPL 3.0
# http://nikitron.cc.ua/
# Python 3
import sys
import calendar
if __name__ == "__main__":
    # Expect exactly four arguments: start year, end year, month, day.
    if len(sys.argv) != 5:
        print ('''ะดะฐ, ะฝัะถะฝะพ ะฒะฒะตััะธ ะณะพะด ะฝะฐัะฐะปะฐ, ะณะพะด ะบะพะฝัะฐ, ะผะตััั ะธ ะดะตะฝั
ะฝะฐะฟัะธะผะตั: $python3.5 ะดะตะฝัะฝะตะดะตะปะธ.py 1984 2158 2 7''')
        sys.exit(1)
    ะณะพะดะฝะฐัะฐะปะฐ = int(sys.argv[1])  # start year
    ะผะตั = int(sys.argv[3])  # month
    ะดะฝะธ = int(sys.argv[4])  # day of month
    ะณะพะด = int(sys.argv[2])  # end year (exclusive)
    # Weekday names, Monday first, matching calendar.weekday()'s 0..6 result.
    ะฝะตะด = ['ะฟะพะฝะตะดะตะปัะฝะธะบ','ะฒัะพัะฝะธะบ','ััะตะดะฐ','ัะตัะฒะตัะณ','ะฟััะฝะธัะฐ','ััะฑะฑะพัะฐ','ะฒะพัะบัะตัะตะฝัะต']
    ะฒัะฒะพะด = ''
    for ะณะพะดะธะบ in range(ะณะพะดะฝะฐัะฐะปะฐ,ะณะพะด):
        # Append one line per year with the weekday of the given date.
        ะฒัะฒะพะด += 'ะ {0} ะณะพะดั ััะพ {1} \n'.format(ะณะพะดะธะบ,ะฝะตะด[calendar.weekday(ะณะพะดะธะบ, ะผะตั, ะดะฝะธ)])
    print(ะฒัะฒะพะด)
| true
|
3306a9ff08d0dce49977b1c4da8e6f467b8bb3f1
|
Python
|
SaNDy4ortyFivE/python_mini_project_2k19
|
/main.py
|
UTF-8
| 2,367
| 2.625
| 3
|
[] |
no_license
|
#importing all written scripts
import nmap,scapy
import nmapScanner, scapy_main_file, banner, os
def main():
    """Interactive entry point: hash cracking (1), nmap scan (2), or deauth (3).

    NOTE(review): user input is interpolated into an os.system() command in
    option 1 -- a shell-injection risk for untrusted input.
    """
    os.system('clear')
    banner.banner_print()
    #defining ANSI colors
    G = '\033[92m'
    Y = '\033[93m'
    B = '\033[94m'
    R = '\033[91m'
    W = '\033[0m'
    choice = int(input())
    if choice == 1:
        word_input = input('Enter Hash:')
        os.system('./a.out words %s'%(word_input))
    elif choice ==2:
        # creating object for nmapScanner class
        scanner = nmapScanner.Scanner()
        lan_id = input('Enter lan:%s' % (W))
        # take the lan_id for getting wifi mac address,it will be needed for scapy later
        print('%sGetting MAC address for entered lan.............' % (B))
        mac = scanner.get_mac(lan_id)
        print('LAN MAC captured:',mac)
        temp = lan_id
        if not lan_id.endswith('/24'):
            # Widen to the /24 subnet and exclude the gateway itself.
            lan_id = lan_id + '/24 --exclude ' + str(temp)
        scanner.basic_scanner(lan_id)
        print('*' * 100)
        # choose target
        target_index = int(input('%sEnter target(index)%s:' % (R, W)))
        # ask if deep scan is to be performed or skip it
        deep_scan_choice = input('%sperform (Y)deep scan or enter to skip to attack section:%s' % (R, W)).lower()
        print('*' * 100)
        if deep_scan_choice == 'y':
            scanner.deep_scanner(target_index)
            scanner.deep_scan_advanced(target_index)
            print('*' * 100)
    elif choice == 3:
        iface = input('Enter interface name:')
        get_scapy = scapy_main_file.scapy_scapy()
        scanner = nmapScanner.Scanner()
        lan_id = input('Enter lan:%s' % (W))
        # take the lan_id for getting wifi mac address,it will be needed for scapy later
        print('%sGetting MAC address for entered lan.............' % (B))
        mac = scanner.get_mac(lan_id)
        print('LAN MAC captured:', mac)
        temp = lan_id
        if not lan_id.endswith('/24'):
            lan_id = lan_id + '/24 --exclude ' + str(temp)
        scanner.basic_scanner(lan_id)
        print('*' * 100)
        # choose target
        target_index = int(input('%sEnter target(index)%s:' % (R, W)))
        target_mac = scanner.get_mac(scanner.available_host[target_index-1])
        get_scapy.deauth(mac, target_mac, iface)
# Standard entry guard: run the interactive menu only when executed directly.
if __name__=="__main__":
    main()
| true
|
435fbef678cba55f226ebc3960273153ece38a78
|
Python
|
jren10/Simple-Spacetime-Crawler
|
/QuerySearch.py
|
UTF-8
| 1,970
| 2.984375
| 3
|
[] |
no_license
|
import json
import operator
from collections import Counter
def search():
    """Prompt for a query and print the top-10 documents by summed tf-idf.

    Python 2 code. Loads the inverted index from dict.json (token ->
    postings) and the doc-id -> URL map from bookkeeping.json, then ranks
    documents by accumulating each query token's tf-idf per document.
    NOTE(review): a query token absent from the index raises KeyError --
    confirm whether unknown terms should be skipped instead.
    """
    count_docs = 0
    count_tokens = 0
    results = []
    doc_list = []
    ranker = {}
    index = {}
    with open("dict.json", "rb") as file_in:
        index = json.load(file_in)
    # print index
    with open("bookkeeping.json") as json_file:
        json_dict = json.load(json_file)
    query = str(raw_input("Enter search term(s): "))
    query = query.lower()
    query_tokens = query.split()
    # Accumulate tf-idf per document across all query tokens.
    for token in query_tokens:
        for value in index[token]:
            # Postings lists mix float entries with [doc_id, ?, tf_idf] rows; skip the floats.
            if isinstance(value, float):
                pass
            else:
                temp_doc_id = value[0]
                temp_tf_idf = value[2]
                if temp_doc_id in ranker:
                    ranker[temp_doc_id] += temp_tf_idf
                else:
                    ranker[temp_doc_id] = temp_tf_idf
    # Top 10 documents by accumulated score, highest first.
    sorted_ranker = sorted(ranker.items(), key=operator.itemgetter(1), reverse=True)[:10]
    # print (sorted_ranker)
    for key in sorted_ranker:
        print json_dict[key[0]]
    """
    for value in index[query]:
        if isinstance(value, float):
            pass
        else:
            doc_list.append(value[0])
            if value[0] in ranker:
                ranker[value[0]].append(value[2])
            else:
                ranker[value[0]] = value[2]
    """
    # for key in doc_list:
    # print doc_list[key]
    # doc_list = set(doc_list)
    index = Counter(index)
    """for i in index.keys():
    #print i
    count_tokens += 1
    print count_tokens"""
    # for elem in doc_list:
    # results.append(json_dict[elem])
    # docs = set(docs)
    # print "docs is " + len(docs)
    # print "test with duplicate values is " + len(test)
    # print "test with lists is" + len(test)
    # print len(index.keys())
    # print "Search Results:\n"
    # for i in results:
    # print i
    # print "\n"
    # print results
def main():
    """Entry point: run one interactive search."""
    search()
if __name__ == '__main__':
    main()
| true
|
a3edf6694bae20a0102321e7ea8cb4814a50dd09
|
Python
|
ofirofir85/CoPilot
|
/modules/module_utils.py
|
UTF-8
| 647
| 2.578125
| 3
|
[] |
no_license
|
import win32api
import pyperclip
import keyboard
import time
import winsound
def show_popup(title, message):
    """Show a blocking Win32 message box with *title* and *message*."""
    win32api.MessageBox(0, message, title)
def put_in_paste(data):
    """Copy *data* to the system clipboard."""
    pyperclip.copy(data)
def get_copied_data():
    """Return the current clipboard contents."""
    return pyperclip.paste()
def get_highlighted():
    """Return the currently highlighted text by simulating Ctrl+C.

    The previous clipboard contents are saved and restored; the short
    sleep gives the copy operation time to land in the clipboard.
    """
    orig_clip = get_copied_data()
    # Release Alt first so a hotkey that triggered us doesn't corrupt the Ctrl+C.
    keyboard.release('alt')
    keyboard.send('ctrl+c')
    time.sleep(0.1)
    highlighted = get_copied_data()
    put_in_paste(orig_clip)
    return highlighted
def show_popup_and_put_paste(title, message):
    """Show *message* in a popup, then leave it on the clipboard."""
    show_popup(title, message)
    put_in_paste(message)
def error_sound():
    """Play the standard Windows error beep."""
    winsound.MessageBeep(winsound.MB_ICONHAND)
| true
|
cce2298b3e3a387f7884998e769eb45e29295fa8
|
Python
|
MarcAntoineSchmidtQC/hdfe
|
/tests/test_groupby.py
|
UTF-8
| 1,031
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import pandas as pd
from hdfe import Groupby
import pytest
@pytest.fixture
def df() -> pd.DataFrame:
    """Fixture: 100 rows with a random category column and a N(0,1) 'y' column.

    Seeded for reproducibility across test runs.
    """
    np.random.seed(0)
    n_obs = 100
    n_categories = 10
    return pd.DataFrame(
        {
            "first category": np.random.choice(n_categories, n_obs),
            "y": np.random.normal(0, 1, n_obs),
        }
    )
def test_groupby_apply_mean(df: pd.DataFrame) -> None:
    """Groupby.apply without broadcasting must match pandas' per-group mean."""
    pandas_results = df.groupby("first category")[["y"]].mean()
    groupby_results = Groupby(df["first category"]).apply(
        np.mean, df["y"], broadcast=False, as_dataframe=True
    )
    pd.testing.assert_frame_equal(pandas_results, groupby_results)
def test_groupby_transform_mean(df: pd.DataFrame) -> None:
    """Groupby.apply with broadcasting must match pandas' transform('mean')."""
    pandas_results = df.groupby("first category")["y"].transform("mean")
    groupby_results = Groupby(df["first category"]).apply(
        np.mean, df["y"], broadcast=True, as_dataframe=True
    )
    # Compare values only (shapes differ: Series vs single-column frame).
    np.testing.assert_almost_equal(
        pandas_results.values, np.squeeze(groupby_results.values)
    )
| true
|
bc2091deefc5d033ee873373918ff6f1e61b5522
|
Python
|
PiotrDabkowski/Js2Py
|
/js2py/internals/speed.py
|
UTF-8
| 974
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
from __future__ import print_function
from timeit import timeit
from collections import namedtuple
from array import array
try:
#python 2 code
from itertools import izip as zip
except ImportError:
pass
from collections import deque
class Y(object):
    """Toy base class used as a benchmark subject below."""
    UUU = 88
    def __init__(self, x):
        self.x = x
    def s(self, x):
        # NOTE: the argument is ignored; always returns self.x + 1.
        return self.x + 1
class X(Y):
    """Toy subclass for timing attribute access and super() dispatch styles."""
    A = 10
    B = 2
    C = 4
    D = 9
    def __init__(self, x):
        self.x = x
        self.stack = []
        # Pre-bound super proxy so yet_another() avoids rebuilding it per call.
        self.par = super(X, self)
    def s(self, x):
        pass
    def __add__(self, other):
        return self.x + other.x
    def another(self):
        # Explicit unbound call to the base-class implementation.
        return Y.s(self, 1)
    def yet_another(self):
        # Dispatch through the cached super proxy.
        return self.par.s(1)
def add(a, b):
    """Return the sum of the ``x`` attributes of *a* and *b*."""
    return sum(obj.x for obj in (a, b))
t = []
Type = None
# Time one million append/pop pairs on a list; the bare `except: raise`
# only exists so any failure in the setup import still surfaces.
try:
    print(timeit(
        """
t.append(4)
t.pop()
""",
        "from __main__ import X,Y,namedtuple,array,t,add,Type, zip",
        number=1000000))
except:
    raise
| true
|
e3ab1cadf68b65548834efee00a65b3cebd01ff5
|
Python
|
ArkiWang/math
|
/divide_difference.py
|
UTF-8
| 634
| 3.375
| 3
|
[] |
no_license
|
import numpy as np
# Dead data: this sample grid is overwritten by the reassignments below.
x = [0, 2, 3, 4, 5, 7, 8]
def f(x):
    """Cubic test polynomial x**3 + 70*x**2 + 10*x + 5, in Horner form."""
    return ((x + 70) * x + 10) * x + 5
# First sample pair (also dead -- immediately overwritten below).
x = [0, 1, 2, 3, 4]
y = [0, 0, 6, 24, 60]
# Active sample actually used by the call at the bottom of the file.
x = [-2, -1, 0, 1, 2, 3]
y = [-5, -2, 3, 10, 19, 30]
def divide_difference(x, y):
    """Return the full triangle of Newton divided differences.

    The first level is y itself (truncated to len(x)); each following
    level holds f[x_i, ..., x_{i+k}] = (prev[i+1] - prev[i]) / (x[i+k] - x[i]).
    """
    levels = [list(y[:len(x)])]
    for order in range(1, len(x)):
        prev = levels[-1]
        nxt = [(prev[i + 1] - prev[i]) / (x[i + order] - x[i])
               for i in range(len(prev) - 1)]
        levels.append(nxt)
    return levels
# Compute and display the divided-difference triangle for the active sample.
ans = divide_difference(x, y)
print(ans)
| true
|
da67ef58b84e77641a6e7b2ce454c1538611a672
|
Python
|
bombthanthap/SKILL_63
|
/week3/truck.py
|
UTF-8
| 425
| 3.109375
| 3
|
[] |
no_license
|
def count_trucks(weights, capacity):
    """Return how many trucks of *capacity* are needed to carry *weights* in order.

    A new truck is started whenever the remaining capacity cannot hold the
    next load; a load larger than *capacity* still travels on one truck
    alone (matching the original greedy behaviour). Empty input needs 0 trucks.
    """
    remaining = 0
    trucks = 0
    for w in weights:
        if remaining < w:
            remaining = capacity
            trucks += 1
        remaining -= w
    return trucks


def _main():
    """Read '(N, W)' cases from stdin until '0 0'; print trucks needed per case.

    Refactor: the original duplicated the header parsing and ran at import
    time, which made the logic untestable.
    """
    n, cap = (int(tok) for tok in input().split())
    while n != 0 and cap != 0:
        weights = [int(tok) for tok in input().split()]
        # Only the first N loads count, as in the original range(N) loop.
        print(count_trucks(weights[:n], cap))
        n, cap = (int(tok) for tok in input().split())


if __name__ == '__main__':
    _main()
| true
|
f3d5764507f4e695d3af7fdd3f64da98c4bfe843
|
Python
|
pgj-ctrl/pgj-Repository
|
/py01/day3/mtalbe.py
|
UTF-8
| 174
| 3.5625
| 4
|
[] |
no_license
|
def times_column(n):
    """Return the 9 display lines 'i*n=i*n<TAB> ' for i = 1..9.

    Refactor: the original computed k = n + 1 and p = k - 1 (i.e. p == n)
    and printed directly at import time, which made it untestable.
    """
    lines = []
    for i in range(1, 10):
        # The inner range covers the single column j == n, as in the original.
        cells = ''.join('%s*%s=%s\t' % (i, j, i * j) for j in range(n, n + 1))
        lines.append(cells + ' ')
    return lines


def _main():
    n = int(input('you want to number: '))
    for line in times_column(n):
        print(line)


if __name__ == '__main__':
    _main()
| true
|
efa87a7d01c424fc2bf6061ea9b9376f09f9ced9
|
Python
|
MasterOfBrainstorming/Python2.7
|
/Simple_TCP_Chat/chat client.py
|
UTF-8
| 956
| 2.78125
| 3
|
[] |
no_license
|
import socket, time, threading
tLock = threading.Lock()
shutdown = False
def receiving(name, sock):
    """Background thread body: drain and print datagrams from *sock* (Python 2).

    NOTE(review): the inner ``while True`` only exits via an exception --
    the socket is non-blocking, so recvfrom raises once no data is queued --
    and the bare ``except`` swallows every error. The loop therefore
    busy-polls while holding ``tLock`` for each pass; confirm this is the
    intended pacing mechanism with the sender's acquire/release.
    """
    while not shutdown:
        try:
            tLock.acquire()
            while True:
                data, addr = sock.recvfrom(1024)
                print str(data)
        except:
            pass
        finally:
            tLock.release()
host = "localhost"
# Pick any available port for our own socket.
port = 0
# The chat server's fixed UDP port.
sport = 6421
server = (host, sport)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
# Non-blocking so the receiver thread's recvfrom raises instead of stalling.
s.setblocking(0)
rT = threading.Thread(target=receiving, args=("RecvThread",s))
rT.start()
alias = raw_input("Name: ")
# Main loop: send typed lines as "alias: message" until the user enters "q".
message = raw_input(alias+"-> ")
while message != "q":
    if message != "":
        s.sendto(alias + ": " + message, server)
    # Hold the lock while prompting so the receiver doesn't print over the input line.
    tLock.acquire()
    message = raw_input(alias + "-> ")
    tLock.release()
    time.sleep(0.2)
shutdown = True
rT.join()
s.close()
| true
|
a888625f3fe8bcdfd62a047bec5c8de8f54b8587
|
Python
|
vidartf/globmatch
|
/globmatch/translation.py
|
UTF-8
| 4,139
| 2.96875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Simula Research Laboratory.
# Distributed under the terms of the Modified BSD License.
#
# Copyright ยฉ 2001-2019 Python Software Foundation; All Rights Reserved
"""Utilities for matching a path against globs."""
import os
import re
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
from .pathutils import iexplode_path, SEPARATORS
# TODO: In the future use f-strings for formatting
# Regex character class matching any recognized path separator.
os_sep_class = '[%s]' % re.escape(SEPARATORS)
# Fragment for a '**' part: matches anything ending at a separator or at
# the string start, so '**' can also match ZERO directories.
double_start_re = r'.*((?<=(%s))|(?<=(\A)))' % (os_sep_class,)
@lru_cache(maxsize=256, typed=True)
def compile_pattern(pat, subentries_match=None):
    """Translate and compile a glob pattern to a regular expression matcher.

    Parameters:
    pat: string or bytes
        The pattern to compile. Bytes patterns are decoded as ISO-8859-1,
        translated, then re-encoded so the returned matcher operates on bytes.
    subentries_match: boolean
        Forwarded to ``translate_glob``. When true, a pattern that matches
        a directory also matches everything below it (git-style).
        NOTE(review): the original docstring was cut off here ("acts as
        if"); this description is inferred from ``join_translated`` --
        confirm intent.
    """
    if isinstance(pat, bytes):
        pat_str = pat.decode('ISO-8859-1')
        res_str = translate_glob(os.path.normcase(pat_str), subentries_match=subentries_match)
        res = res_str.encode('ISO-8859-1')
    else:
        res = translate_glob(os.path.normcase(pat), subentries_match=subentries_match)
    # .match anchors at the start; translate_glob appends \Z for the end.
    return re.compile(res).match
def translate_glob(pat, subentries_match=None):
    """Translate a glob PATTERN to a regular expression string.

    Each explosion of the path is translated part by part, joined with
    separator-aware glue, and wrapped in a dotall group anchored at the end.
    """
    parts = [translate_glob_part(piece) for piece in iexplode_path(pat)]
    joined = join_translated(parts, os_sep_class, subentries_match=subentries_match)
    return r'(?s:{res})\Z'.format(res=joined)
def join_translated(translated_parts, os_sep_class, subentries_match):
    """Join translated glob pattern parts.

    This is different from a simple join, as care need to be taken
    to allow ** to match ZERO or more directories.
    """
    res = ''
    for part in translated_parts[:-1]:
        if part == double_start_re:
            # drop separator, since it is optional
            # (** matches ZERO or more dirs)
            res += part
        else:
            res += part + os_sep_class
    if translated_parts[-1] == double_start_re:
        # Final part is **
        # Should not match directory:
        res += '.+'
        # Follow stdlib/git convention of matching all sub files/directories:
        res += '({os_sep_class}?.*)?'.format(os_sep_class=os_sep_class)
    elif subentries_match:
        # Should match directory, but might also match sub entries:
        res += translated_parts[-1]
        res += '({os_sep_class}?.*)?'.format(os_sep_class=os_sep_class)
    else:
        res += translated_parts[-1]
        # Allow trailing slashes
        # Note: This indicates that the last part whould be a directory, but
        # we explictly say that we don't consult the filesystem, so there is
        # no way for us to know.
        res += '{os_sep_class}?'.format(os_sep_class=os_sep_class)
    return res
def translate_glob_part(pat):
    """Translate a glob PATTERN PART to a regular expression."""
    # Code modified from Python 3 standard lib fnmatch:
    if pat == '**':
        return double_start_re
    i, n = 0, len(pat)
    res = []
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            # Match anything but path separators:
            res.append('[^%s]*' % SEPARATORS)
        elif c == '?':
            # '?' matches at most one non-separator character.
            res.append('[^%s]?' % SEPARATORS)
        elif c == '[':
            # Scan ahead for the closing ']' of a character class.
            j = i
            # '[!...]' negates; a ']' directly after the (optional) '!' is literal.
            if j < n and pat[j] == '!':
                j = j+1
            if j < n and pat[j] == ']':
                j = j+1
            while j < n and pat[j] != ']':
                j = j+1
            if j >= n:
                # Unterminated class: treat the '[' as a literal character.
                res.append('\\[')
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j+1
                if stuff[0] == '!':
                    # Glob negation becomes regex class negation.
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # A leading '^' has no glob meaning; escape it for the regex.
                    stuff = '\\' + stuff
                res.append('[%s]' % stuff)
        else:
            res.append(re.escape(c))
    return ''.join(res)
| true
|
e43687fae2932c964e15335ae6a7d9f618c40622
|
Python
|
sabarish-gurajada/python
|
/date_file_check.py
|
UTF-8
| 659
| 3.046875
| 3
|
[] |
no_license
|
import subprocess
from datetime import date
today = date.today()
# dd/mm/YY
today_date = today.strftime("%d/%m/%Y")
record_date = subprocess.check_output(['head', '-1', 'test.txt'])
record_size_line = subprocess.check_output(['tail', '-1', 'test.txt'])
record_size = record_size_line.split(':')[1]
print(record_size)
file = open("test.txt","r")
Counter = 0
# Reading from file
Content = file.read()
CoList = Content.split("\n")
for i in CoList:
if i:
Counter += 1
Required_lines = Counter -2
print(Required_lines)
if [(record_date == today_date) and (record_size == Required_lines)]:
print("all good")
else:
print("something fishy")
| true
|
22b13c74941e482915076eaa70e425bc38d3d219
|
Python
|
LukasErekson/pyrisk
|
/ai/mctsstate.py
|
UTF-8
| 4,198
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/python3
import math
import random
from itertools import islice
from world import AREAS
import hashlib
def define_neighbours(world):
    """Populate MCTSState.TERRITORIES_NEIGHBOUR with name -> adjacent territory names.

    Must be called once before MCTSState.values() is used, since that
    method reads the mapping. (Cleanup: the original wrapped both the
    iterable and the comprehension in redundant list() calls.)
    """
    for territory in list(world.territories.values()):
        MCTSState.TERRITORIES_NEIGHBOUR[territory.name] = [
            t.name for t in territory.adjacent(None, None)
        ]
class MCTSState(object):
    """One territory-assignment state in the Monte-Carlo tree search for
    the Risk claim phase. Tracks who owns what and scores each player's
    position with hand-tuned heuristic weights.
    """
    # CONSTS
    # Heuristic bonus per continent, indexed by how many of its territories
    # the player holds (index 0 = none held).
    AREA_WEIGHT = {"Australia": [2.97, 0, 8.45, 9.99, 10.71],
                   "South America": [0.69, 1.23, 3.90, 0, 17.72],
                   "Africa": [14.40, 12.87, 10.72, 7.16, 1.23, 0, 29.80],
                   "North America": [3.11, 0.98, 0, 2.17, 7.15, 19.35, 24.82, 24.10, 36.15, 48.20],
                   "Europe": [42.33, 45.11, 43.11, 43.77, 41.35, 50.77, 43.85, 36.93],
                   "Asia": [27.10, 23.90, 23.61, 23.10, 23.61, 23.68, 19.32, 15.63, 17.43, 13.84, 10.25, 6.66, 3.07]}
    # Penalty per distinct enemy-held neighbouring territory.
    UNIQUE_ENEMY_WEIGHT = -0.07
    # Reward per pair of adjacent friendly territories.
    PAIR_FRIENDLY_WEIGHT = 0.96
    # Continent name -> list of territory names (from world.AREAS).
    AREA_TERRITORIES = {key: value[1] for (key, value) in AREAS.items()}
    # Filled by define_neighbours() before any state is evaluated.
    TERRITORIES_NEIGHBOUR = {}
    def __init__(self, player, territories, action=None):
        # TODO: clean up, and revisit the gym integration
        self.territories = territories
        self.player = player
        self.players = player.ai.game.players
        # Territories still unclaimed in this state.
        self.empty = [name for name, owner in self.territories.items() if owner == None]
        self.value = 0
        self.all_values = []
        self.action = action
        # 0 if first, 1 if second etc
        self.play_order = player.ai.game.turn_order.index(self.player.name)
    # NOTE(translated): could perhaps be replaced by a "monte-carlo rollout"
    # function inside MCTS that just adds a territory, recurses, then
    # removes it again.
    def next_random_state(self):
        """Claim a random empty territory for the current player and hand
        the turn to the next player in turn order."""
        terri = self.territories.copy()
        empt = [name for name, owner in self.territories.items() if owner == None]
        action = random.choice(empt)
        empt.remove(action)
        terri[action] = self.player.name
        return MCTSState(self.players[self.player.ai.game.turn_order[(self.play_order+1)%len(self.players)]], terri, action)
    def softmax(self, vector):
        """Return the softmax-normalized copy of *vector* (currently unused by values())."""
        total = sum([math.exp(x) for x in vector])
        return list([math.exp(x)/total for x in vector])
    def values(self):
        """Score every player and return the normalized reward values.

        Also caches the full reward dict on self.all_values and this
        player's reward on self.value.
        """
        player_scores = {}
        for player in self.players.values():
            score = 0
            unique_enemy = set()
            allied_pairs = 0
            # NOTE(review): this scans the neighbours of EVERY territory on
            # the map, not only the ones owned by `player` -- confirm intent.
            for t in self.territories.keys():
                for u in MCTSState.TERRITORIES_NEIGHBOUR[t]:
                    if self.territories[u] is not None and self.territories[u] != player.name:
                        unique_enemy.add(u)
                    elif self.territories[u] == player.name:
                        # Each adjacency is seen from both sides, hence 0.5 per sighting.
                        allied_pairs = allied_pairs + 0.5
            score = len(unique_enemy) * MCTSState.UNIQUE_ENEMY_WEIGHT + allied_pairs * MCTSState.PAIR_FRIENDLY_WEIGHT
            for area, list_terri in MCTSState.AREA_TERRITORIES.items():
                count = 0
                for terri in list_terri:
                    # NOTE(review): uses self.player.name inside the per-player
                    # loop (`player`); looks like a bug -- confirm.
                    if self.territories[terri] == self.player.name:
                        count = count + 1
                score = score + MCTSState.AREA_WEIGHT[area][count]
            # just for 3 players
            if self.play_order == 0:
                score = score + 13.38
            elif self.play_order == 1:
                score = score + 5.35
            player_scores[player.name] = max(score, 0)
        # Normalize scores into rewards that sum to 1.
        player_rewards = {}
        for player in self.players.values():
            player_rewards[player.name] = player_scores[player.name] / sum(player_scores.values())
        self.all_values = player_rewards
        self.value = player_rewards[self.player.name]
        # .values() ?
        return self.all_values.values()
    def terminal(self):
        """True once every territory has been claimed."""
        if len(self.empty) == 0:
            return True
        return False
    def __hash__(self):
        # Hash only the ownership map, so equal boards compare equal
        # regardless of which player is to move.
        return int(hashlib.md5((str(self.territories)).encode('utf-8')).hexdigest(), 16)
    def __eq__(self, other):
        if hash(self) == hash(other):
            return True
        return False
    def __repr__(self):
        s = "Empty=%s;Action=%s" % (str(len(self.empty)), str(self.action))
        return s
| true
|
85584f86d704f1f6b7c8e07ad8ab2e345ec96103
|
Python
|
luismi-gamo/Lambda_Arch_Spark
|
/Query.py
|
UTF-8
| 3,074
| 2.671875
| 3
|
[] |
no_license
|
import sqlite3
import threading
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import time
from pymongo import MongoClient
class QueryClass(threading.Thread):
    """Serving-layer thread of a lambda architecture (Python 2 code).

    Periodically merges the batch view (PowerCount_bv, MongoDB) with the
    active real-time view (PowerCount_rt1/2, MongoDB) and writes the merged
    totals to the PowerCount_Total table in SQLite.
    """
    # Singleton for DB connections
    bd = None
    mongodb = None
    def __init__(self, db, mongodb, sleeptime):
        threading.Thread.__init__(self)
        self.name = "query"
        # Sets the DB parameters
        self.db = db
        if QueryClass.bd is None:
            QueryClass.bd = db
        self.mongodb = mongodb
        if QueryClass.mongodb is None:
            QueryClass.mongodb = mongodb
        # Stream 1 is the active real-time view at start-up.
        self.active = 1
        # Time to be inactive before querying the database for changes
        self.sleeptime = sleeptime
    def run(self):
        """Merge loop: batch view + active RT view -> PowerCount_Total, forever."""
        while True:
            bview = QueryClass.readBView(QueryClass.dbConnection())
            rtview = QueryClass.readRTView(QueryClass.dbConnection(), self.active)
            if bview.size != 0 and rtview.size != 0:
                # Left-join the RT counts onto the batch counts; missing RT rows count 0.
                total_df = bview.merge(rtview, on=['meridian', 'lab', 'index'], how='left').fillna(0)
                total_df['count'] = total_df['count_x'] + total_df['count_y']
                #print total_df
                total_df.to_sql("PowerCount_Total", QueryClass.dbConnection(), if_exists = 'replace', index = False)
                print "\nwrote PowerCount_Total = PowerCount_bv + PowerCount_rt" + str(self.active)
            else:
                print "\nQuery: No data available"
            time.sleep(self.sleeptime)
    def changeTable(self):
        """Flip which real-time table (1 or 2) is considered active."""
        if self.active == 1:
            self.active = 2
        else:
            self.active = 1
    @staticmethod
    def dbConnection():
        """Open a fresh SQLite connection to the configured database."""
        return sqlite3.connect(QueryClass.bd)
    @staticmethod
    def mongodbConnection():
        """Open a MongoDB client against the configured URI."""
        return MongoClient(QueryClass.mongodb)
    @staticmethod
    def readBView(conn):
        """Load the batch view (PowerCount_bv collection) as a DataFrame.

        NOTE(review): the *conn* SQLite argument is unused -- data now
        comes from MongoDB (see the commented-out read_sql_query).
        """
        mongoconn = QueryClass.mongodbConnection()
        db = mongoconn.lambdaDB
        # Make a query to the specific DB and Collection
        cursor = db['PowerCount_bv'].find({})
        # Expand the cursor and construct the DataFrame
        df = pd.DataFrame(list(cursor))
        # Delete the _id
        if df.size != 0:
            del df['_id']
        #print df
        return df
        #return pd.read_sql_query("SELECT * from PowerCount_bv", conn)
    @staticmethod
    def readRTView(conn, activeTable):
        """Load the active real-time view (PowerCount_rt<activeTable>) as a DataFrame."""
        collection = "PowerCount_rt" + str(activeTable)
        mongoconn = QueryClass.mongodbConnection()
        db = mongoconn.lambdaDB
        # Make a query to the specific DB and Collection
        cursor = db[collection].find({})
        # Expand the cursor and construct the DataFrame
        df = pd.DataFrame(list(cursor))
        # Delete the _id field
        if df.size != 0:
            del df['_id']
        #print df
        return df
        # query = "SELECT * from PowerCount_rt" + str(activeTable)
        # return pd.read_sql_query(query, conn)
# consulta = QueryClass(DB_LOCATION)
# consulta.start()
| true
|
3ddd0d4df4b526d0d5a061bfe923a068047e856a
|
Python
|
doom2020/doom
|
/one_csv.py
|
UTF-8
| 13,747
| 2.640625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Author:doom
datetime:2019.2.26
email:408575225@qq.com
function:pdf file change csv file
comment:if you update my code,please update comment as too,thanks
this is format case 12:
format:
author
position
email
"""
import codecs
import re
import csv
import time
import os
import glob
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
# Source directory holding the original PDF files.
basic_pdf_file_path = r'C:\Users\Administrator\Desktop\pdf'
# Collect every PDF in the source directory.
pdf_files = glob.glob("{}/*.pdf".format(basic_pdf_file_path))
# Directory for the parsed txt files.
basic_txt1_file_path = r'C:\Users\Administrator\Desktop\txt'
# Collect every txt produced so far.
txt_files = glob.glob("{}/*.txt".format(basic_txt1_file_path))
# Directory for the final csv output.
basic_csv_file_path = r'C:\Users\Administrator\Desktop\csv'
def parse_pdf2txt():
    """Parse each PDF's text with pdfminer and append it to a same-named TXT file."""
    count = 0
    for pdf_file in pdf_files:
        try:
            fp = open(pdf_file, 'rb')
            # Absolute path of the output txt ("<txt dir>/<pdf name>.txt").
            txt_file = os.path.join(basic_txt1_file_path, pdf_file.replace(basic_pdf_file_path, "")[1:-4] + ".txt")
            count += 1
            # Create a PDF document analyzer from the file object.
            parser = PDFParser(fp)
            # Create a PDF document.
            doc = PDFDocument()
            # Connect the analyzer with the document object.
            parser.set_document(doc)
            doc.set_parser(parser)
            # Provide the initialization password (empty string when none).
            doc.initialize()
            # Skip documents that do not allow text extraction.
            if not doc.is_extractable:
                raise PDFTextExtractionNotAllowed
            else:
                # Create a PDF resource manager to share resources.
                rsrcmgr = PDFResourceManager()
                # Create a PDF device (layout aggregator) object.
                laparams = LAParams()
                device = PDFPageAggregator(rsrcmgr, laparams=laparams)
                # Create a PDF interpreter object.
                interpreter = PDFPageInterpreter(rsrcmgr, device)
                # Iterate over the page list, handling one page per pass.
                for page in doc.get_pages():
                    interpreter.process_page(page)
                    # Fetch the LTPage object for this page; it holds the parsed
                    # objects (LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal, ...).
                    layout = device.get_result()
                    # To get text, read the horizontal text boxes' text attribute.
                    for x in layout:
                        if (isinstance(x, LTTextBoxHorizontal)):
                            # Some files contain unusual characters; write as UTF-8.
                            with open(txt_file, 'a', encoding="UTF-8") as f:
                                results = x.get_text()
                                print(results)
                                f.write(results + "\n")
        except Exception as e:
            print(e)
            continue
        print("็ฌฌ{}ไธชpdfๆไปถ่ฝฌๅtxtๅฎๆ".format(count))
        time.sleep(1)
def txt_dealwith_second():
    """Extract (title, author, position, email) rows from the parsed txt and write them to csv.

    NOTE(review): the .replace("โ", ...) chains strip mojibake'd smart-quote
    characters left over from the PDF extraction; the literals are kept
    byte-identical on purpose.
    """
    info_list = []
    # Input: the txt produced from the conference PDF.
    txt_file = r'C:\Users\Administrator\Desktop\txt_new\ICPAM 2015.txt'
    with open(txt_file, "r", encoding="utf-8-sig") as fr:
        # Read the whole file into one string.
        src_file = fr.read()
        # print(src_file)
        # Split the file (one big string) into paragraph blocks on blank lines.
        result_list = src_file.split("\n\n")
        for result in result_list:
            if "@" in result:
                index_b = result_list.index(result)
                index_e = index_b
                try:
                    # Grow the window until the next "Abstract" block.
                    while "Abstract" not in result_list[index_e+1]:
                        index_e += 1
                except IndexError:
                    info = result_list[index_b:index_e+1]
                    if "International Conference on Pure and Applied" in info[2]:
                        # print(info)
                        info_list.append(info)
                info = result_list[index_b:index_e+2]
                if "International Conference on Pure and Applied" in info[2]:
                    # print(info)
                    info_list.append(info)
    count = len(info_list)
    for i in range(count):
        try:
            title = "".join(info_list[i][3:5])
            # Layout variant A: block 4 carries an affiliation marker ("1").
            if "1" in info_list[i][4]:
                title = "".join(info_list[i][3:4])
                author = info_list[i][4]
                position = info_list[i][5]
                email = info_list[i + 1][0]
                info_first = [title, author, position, email]
                print(info_first)
                count_e = info_first[-1].count("@")
                # case: exactly one email address
                if count_e == 1:
                    title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    author = info_first[1].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    position = info_first[2].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    email = info_first[-1]
                    result_info = [title, author, position, email]
                    if author and "@" in email:
                        info_list.append(result_info)
                # case: 2-3 emails, either comma-separated or one per line
                elif 1 < count_e < 4:
                    count_d = len(info_first[-1].split(","))
                    if count_d > 1:
                        # Comma-separated emails share one author/position.
                        title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        author = info_first[1].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        position = info_first[2].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        email_ls = info_first[-1].split(",")
                        for z in range(count_d):
                            email = email_ls[z]
                            result_info = [title, author, position, email]
                            if author and "@" in email:
                                info_list.append(result_info)
                    else:
                        # Newline-separated emails pair up with comma-separated authors.
                        title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        author_ls = info_first[1].split(",")
                        position = info_first[2].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        email_ls = info_first[-1].split("\n")
                        for x in range(len(author_ls)):
                            author = author_ls[x].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                            email = email_ls[x].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                            result_info = [title, author, position, email]
                            if author and "@" in email:
                                info_list.append(result_info)
                # case: 4+ emails -- positions unknown, pair authors to email lines
                elif count_e > 3:
                    count_d = len(info_first[-1].split("\n"))
                    title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    author_ls = info_first[1].split(",")
                    position = "NA"
                    if len(author_ls) < count_d:
                        author_ls = "".join(info_first[1:3]).split(",")
                    email_ls = info_first[-1].split("\n")
                    for y in range(count_d):
                        author = author_ls[y].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        email = email_ls[y].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        result_info = [title, author, position, email]
                        if author and "@" in email:
                            info_list.append(result_info)
            # Layout variant B: author/position shifted one block down.
            else:
                author = info_list[i][5]
                position = info_list[i][6]
                email = info_list[i + 1][0]
                info_first = [title, author, position, email]
                print(info_first)
                count_e = info_first[-1].count("@")
                # case: exactly one email address
                if count_e == 1:
                    title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    author = info_first[1].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    position = info_first[2].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    email = info_first[-1]
                    result_info = [title, author, position, email]
                    if author and "@" in email:
                        info_list.append(result_info)
                elif 1 < count_e < 4:
                    count_d = len(info_first[-1].split(","))
                    if count_d > 1:
                        title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        author = info_first[1].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        position = info_first[2].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        email_ls = info_first[-1].split(",")
                        for z in range(count_d):
                            email = email_ls[z]
                            result_info = [title, author, position, email]
                            if author and "@" in email:
                                info_list.append(result_info)
                    else:
                        title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        author_ls = info_first[1].split(",")
                        position = info_first[2].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        email_ls = info_first[-1].split("\n")
                        for x in range(len(author_ls)):
                            author = author_ls[x].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                            email = email_ls[x].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                            result_info = [title, author, position, email]
                            if author and "@" in email:
                                info_list.append(result_info)
                elif count_e > 3:
                    count_d = len(info_first[-1].split("\n"))
                    title = info_first[0].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                    author_ls = info_first[1].split(",")
                    position = "NA"
                    if len(author_ls) < count_d:
                        author_ls = "".join(info_first[1:3]).split(",")
                    email_ls = info_first[-1].split("\n")
                    for y in range(count_d):
                        author = author_ls[y].strip().replace("โ", "").replace("โ", "").replace("โ", "").replace("โ", "")
                        email = email_ls[y]
                        result_info = [title, author, position, email]
                        if author and "@" in email:
                            info_list.append(result_info)
        except IndexError:
            continue
    txt2csv(info_list)
    print("ๅญๅ
ฅcsvๅฎๆ")
def txt2csv(info_list):
    """Append the extracted rows to the output csv file, with a header row."""
    csv_file = r'C:\Users\Administrator\Desktop\csv_new\ICPAM 2015aa.csv'
    """Write the txt-derived rows into the csv file."""
    # Column headers.
    first_line = ["TITLE", "AUTHOR", "POSITION", "EMAIL"]
    # Plain "utf-8" still renders garbled in Excel; use "utf_8_sig" (BOM) instead.
    with open(csv_file, 'a', newline="", encoding="utf-8-sig") as csvf:
        # csvf.write(codecs.BOM_UTF8)
        writer = csv.writer(csvf)
        # Write the header row first.
        writer.writerow(first_line)
        writer.writerows(info_list)
if __name__ == "__main__":
    txt_dealwith_second()
| true
|
3da6db15dffa30dc962a5679e7680d7ff3a81454
|
Python
|
THODESAIPRAJWAL/candlestick_model
|
/code/model_paper.py
|
UTF-8
| 901
| 2.546875
| 3
|
[] |
no_license
|
"""
่ซๆใฎcnnใขใใซๅ็พ
https://www.arxiv-vanity.com/papers/1903.12258/
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
def create_paper_cnn(input_shape=(80, 80, 3), num_classes=3, activation="softmax"):
    """Build the candlestick CNN from https://www.arxiv-vanity.com/papers/1903.12258/.

    Four conv/ReLU/pool/dropout stages (widths 32, 48, 64, 96) followed by
    a 256-unit dense layer and the classification head.
    """
    inputs = layers.Input(input_shape)
    x = inputs
    for width in (32, 48, 64, 96):
        x = layers.Conv2D(width, 3, padding="same")(x)
        x = layers.ReLU()(x)
        x = layers.MaxPool2D()(x)
        x = layers.Dropout(0.2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256)(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(num_classes, activation=activation)(x)
    return tf.keras.models.Model(inputs, x)
if __name__ == "__main__":
    model = create_paper_cnn()
    model.summary()
| true
|
f3c30f9889682cf61e395839b16264c34b0a0a4e
|
Python
|
lrondi/Python-challenge
|
/PyBank/pybank.py
|
UTF-8
| 2,096
| 3.203125
| 3
|
[] |
no_license
|
import os
import csv
month=0
date=[]
total_amount=0
total_change=[]
total_change_av=[]
total_change_av_sum=0
max_inc=0
min_inc=0
file_path=os.path.join("..","Resources","budget_data.csv")
with open(file_path,newline='') as budget_csv:
budget_reader=csv.reader(budget_csv,delimiter=',')
#remove header
budget_header=next(budget_reader)
#calculate total number of months, total amount of prof/loss and make lists of dates and prof/loss
for line in budget_reader:
month+=1
total_amount+=int(line[1])
total_change.append(int(line[1]))
date.append(line[0])
#iterate through list of profit/losses to calculate change
for i in range(1,len(total_change)):
total_change_av.append(total_change[i]-total_change[i-1])
#calculate sum of changes
for i in total_change_av:
total_change_av_sum+=i
#calculate avg of changes
total_change_av_value=round(total_change_av_sum/(month-1),2)
#calculate max and min of profit/losses
max_inc=max(total_change_av)
min_inc=min(total_change_av)
#search for dates of max and min
for i in range(len(total_change_av)):
if total_change_av[i]==max_inc:
max_date=date[i+1]
for i in range(len(total_change_av)):
if total_change_av[i]==min_inc:
min_date=date[i+1]
print("Financial Analysis")
print("--------------------------")
print(f"Total Months: {month}")
print(f"Total: ${total_amount}")
print(f"Average Change: ${total_change_av_value}")
print(f"Greatest Increase in Profits: {max_date} $({max_inc})")
print(f"Greatest Decrease in Profits: {min_date} $({min_inc})")
#create txt file with results
file=open("financial_anaylisis.txt","w")
file.write("Financial Analysis\n")
file.write("--------------------------\n")
file.write(f"Total Months: {month}\n")
file.write(f"Total: ${total_amount}\n")
file.write(f"Average Change: ${total_change_av_value}\n")
file.write(f"Greatest Increase in Profits: {max_date} $({max_inc})\n")
file.write(f"Greatest Decrease in Profits: {min_date} $({min_inc})\n")
file.close()
| true
|
e1468d56ada1b788182f3a59b40dfb623212ff12
|
Python
|
moritz-wundke/hg-incpush
|
/hgincpush/__init__.py
|
UTF-8
| 6,636
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Just a simple script that adds/commits/pushes all changes by parts.
# Used to push very large changesets in an incremental manner
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Moritz Wundke
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import time
import subprocess
import argparse
__license__ = 'MIT'
__version__ = '1.0'
__author__ = 'Moritz Wundke'
start_time = []
def split(arr, size):
    """Split *arr* into consecutive chunks of at most *size* elements.

    Returns [] for an empty input. (The original returned [[]], which made
    hg_add run an argument-less `hg add` -- adding every untracked file --
    whenever the file list was empty. It also re-sliced the remainder each
    pass, which was quadratic.)
    """
    if not arr:
        return []
    return [arr[i:i + size] for i in range(0, len(arr), size)]
def time_push():
    """
    Push a start time mark
    """
    # Marks nest: each call pushes time.time() onto the module-level stack.
    global start_time
    start_time.append(time.time())
def time_pop():
    """
    Calculate elapsed seconds since the matching time_push(); 0 if there
    is no pending mark.
    """
    global start_time
    # Guard: the original popped unconditionally, raising IndexError when
    # time_pop() was called without a matching time_push().
    if not start_time:
        return 0
    start = start_time.pop()
    if start >= 0:
        return time.time() - start
    return 0
def hg_status(repo_path, is_dry_run):
    """Run `hg status` in *repo_path* and return its output split into lines.

    Raises RuntimeError if hg exits non-zero; echoes the raw output first
    when *is_dry_run* is true.
    NOTE(review): this is Python-2-era code (2014) -- on Python 3,
    communicate() returns bytes here, so splitting on a str separator
    would fail; confirm the target interpreter before modernizing.
    """
    proc = subprocess.Popen(
        ["hg", "status"],
        stdout=subprocess.PIPE,
        cwd=repo_path
    )
    stdout = proc.communicate()[0]
    if proc.returncode != 0:
        raise RuntimeError("Status command failed to succeed! {error}".format(error=stdout))
    if is_dry_run:
        print(stdout)
    return stdout.split('\n')
def hg_push(repo_path, is_dry_run):
    """Push accumulated commits to the default remote via `hg push --chunked`.

    Only logs the intent (no subprocess) when *is_dry_run* is true; raises
    RuntimeError if hg exits non-zero.
    """
    print(" - Pushing to default branch")
    if not is_dry_run:
        proc = subprocess.Popen(
            ["hg", "push", "--chunked"],
            stdout=subprocess.PIPE,
            cwd=repo_path
        )
        stdout = proc.communicate()[0]
        if proc.returncode != 0:
            print(stdout)
            raise RuntimeError("Push command failed to succeed! {error}".format(error=stdout))
def hg_commit(repo_path, is_dry_run, msg):
    """Commit the currently added files with message *msg* (no-op on dry runs).

    Raises:
        RuntimeError: if the ``hg commit`` process exits with a non-zero code.
    """
    print(" - Commiting")
    if is_dry_run:
        return
    proc = subprocess.Popen(
        ["hg", "commit", "-m", msg],
        stdout=subprocess.PIPE,
        cwd=repo_path
    )
    output = proc.communicate()[0]
    if proc.returncode != 0:
        raise RuntimeError("Commit command failed to succeed! {error}".format(error=output))
def hg_add(repo_path, is_dry_run, files):
    """Schedule *files* for addition, invoking ``hg add`` in batches of ten.

    Batching keeps each command line short. On a dry run each batch is
    printed instead of executed.

    Raises:
        RuntimeError: if any ``hg add`` process exits with a non-zero code.
    """
    print(" - Adding {num}".format(num=len(files)))
    for batch in split(files, 10):
        if is_dry_run:
            print("     > {files}".format(files=batch))
            continue
        proc = subprocess.Popen(
            ["hg", "add"] + batch,
            stdout=subprocess.PIPE,
            cwd=repo_path
        )
        output = proc.communicate()[0]
        if proc.returncode != 0:
            raise RuntimeError("Add command failed to succeed! {error}".format(error=output))
def get_buckets(repo_path, files, max_files=200, max_bucket_size=200):
    """Group changed files into numbered commit buckets.

    Args:
        repo_path: Root of the working copy; used to stat each file.
        files: Lines from ``hg status`` (``"<code> <path>"``); lines that
            do not split into exactly two fields are ignored.
        max_files: Maximum number of files per bucket.
        max_bucket_size: Maximum accumulated file size per bucket, in MB.

    Returns:
        dict mapping bucket number (starting at 1) to a list of file paths.
    """
    buckets = {}
    bucket_num = 1
    bucket_size = 0
    for file_status in files:
        status = file_status.split()
        if len(status) != 2:
            continue
        bucket = buckets.setdefault(bucket_num, [])
        path = os.path.join(repo_path, status[1])
        file_size = 0
        if os.path.exists(path):
            # Size in megabytes; missing files (e.g. removals) count as 0.
            file_size = os.path.getsize(path) / float(1024 * 1024)
        bucket.append(status[1])
        # Count the file's size toward the bucket it was placed in. (The
        # previous version added it after the rollover check, so the size of
        # a bucket-closing file leaked into the next bucket's budget.)
        bucket_size += file_size
        if len(bucket) >= max_files or bucket_size >= max_bucket_size:
            bucket_num += 1
            bucket_size = 0
    return buckets
def push_buckets(repo_path, is_dry_run, buckets, template_msg):
    """Commit each bucket with a formatted message, then push everything.

    *template_msg* may reference ``{bucket}``, ``{total}`` and ``{files}``.
    """
    total = len(buckets)
    for number, bucket_files in buckets.items():
        msg = template_msg.format(bucket=number, total=total, files=len(bucket_files))
        print("> {msg}".format(msg=msg))
        time_push()
        commit_bucket(repo_path, is_dry_run, bucket_files, msg)
        print(" - Completed in {secs}s".format(secs=time_pop()))
    # Now push all changes
    hg_push(repo_path, is_dry_run)
def commit_bucket(repo_path, is_dry_run, files, msg):
    """Add *files* and commit them as a single changeset with message *msg*."""
    hg_add(repo_path, is_dry_run, files)
    hg_commit(repo_path, is_dry_run, msg)
def do_push_buckets(repo_path, is_dry_run, max_files, max_bucket_size, template_msg):
    """Compute commit buckets from ``hg status`` output and commit/push them all."""
    buckets = get_buckets(repo_path, hg_status(repo_path, is_dry_run), max_files=max_files, max_bucket_size=max_bucket_size)
    push_buckets(repo_path, is_dry_run, buckets, template_msg)
def main():
    """Parse command-line options and run the incremental bucket push."""
    cli = argparse.ArgumentParser(description='Mercurial incremental push helper (%s). By %s' % (__version__, __author__))
    cli.add_argument('-v', '--version', action='version', version=__version__)
    cli.add_argument('-s', help='Maximum size in MB a bucket can hold', default=200, type=int)
    cli.add_argument('-f', help='Number of max file a bucket can hold', default=2000000, type=int)
    cli.add_argument('-p', '--path', help='Path to the mercurial clone', required=True)
    cli.add_argument('-m', '--msg', help='Custom msg to be used when performing the comments. Use {bucket}, {total} and {files} for extra information.', default='Commiting bucket {bucket}/{total} with {files} files.')
    cli.add_argument('-d', '--dry', help='Perform a dryrun printing into the log the content of the possible buckets', action="store_true", default=False)
    options = cli.parse_args()
    time_push()
    do_push_buckets(options.path, options.dry, options.f, options.s, options.msg)
    print("Total time: {secs}s".format(secs=time_pop()))
# Entry point guard: run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| true
|
2d0633a5fa2973642f44bd75e9e742203f3c9f10
|
Python
|
lkjhgff11/BaekJoon
|
/2614์ ์๊ถ.py
|
UTF-8
| 54
| 3.0625
| 3
|
[] |
no_license
|
# Read two integers a and b from one input line and print a*(b-1) + 1.
a, b = map(int, input().split())
print(a * (b - 1) + 1)
| true
|
a9f5fba7fbd4a7a1f9ac46665d9d45a9a4a25094
|
Python
|
fhafb/glyphosate
|
/get_ades_by_station.py
|
UTF-8
| 850
| 2.71875
| 3
|
[] |
no_license
|
import sys
import requests
import csv
# Collect unique station ids (column 0) from the semicolon-separated input
# CSV (path given as the first CLI argument) whose column 10 matches one of
# the target codes.
# NOTE(review): "35", "39", "63" look like French department codes - confirm
# against the input file's schema.
stations=set()
with open(sys.argv[1],newline='') as f:
    reader=csv.reader(f,delimiter=';',quotechar='"')
    for row in reader:
        if len(row)>=11 and row[10] in ("35","39","63"):
            stations.add(row[0])
total=len(stations)
i=0
# Query the Hub'Eau API once per station and stream each CSV response to
# stdout, keeping the CSV header row only from the first request. Progress
# and errors go to stderr so stdout stays valid CSV.
# NOTE(review): if the *first* request fails, every later response has its
# header stripped and the output ends up with no header row at all.
for sta in stations:
    i=i+1
    print("{}/{} - {}".format(i,total,sta),file=sys.stderr)
    params={'bss_id':sta,'code_param':'1506,1907','size':'20000','code_unite':"133"}
    res=requests.get("http://hubeau.eaufrance.fr/api/v1/qualite_nappes/analyses.csv",params=params)
    if res.status_code==requests.codes.ok:
        if i==1:
            print(res.text,end='')
        else:
            # Drop the duplicate CSV header line on subsequent responses.
            content=res.text
            print(content[content.find('\n')+1:],end='')
    else:
        print('Erreur {} dans "{}"'.format(res.status_code,res.url),file=sys.stderr)
| true
|
23521cea2aad55d18f1af7c15263ed0513f2a50a
|
Python
|
DxW9617888/count_networks
|
/count_netmask.py
|
UTF-8
| 1,349
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python3
import sys
# Guarded import: bail out with a failure status if the stdlib ipaddress
# module is unavailable (the bound exception `e` is unused by design).
try:
    import ipaddress
except Exception as e:
    exit(-1)
def count_networks(netmask):
    """Yield the starting offset of each subnet of the given prefix length.

    Prints the number of addresses per subnet, then yields the offsets
    0, count, 2*count, ... that fall below 255 (i.e. one offset per subnet
    in the final octet).
    """
    # 1 << (32 - m) is the address count of a /m subnet. The previous form,
    # 2 << (32 - m - 1), is identical for m <= 31 but raised a
    # negative-shift ValueError for m == 32.
    count = 1 << (32 - int(netmask))
    print('ip address count:', count)
    n = 0
    while n < 255:
        yield n
        n += count
def hosts(network):
    """Yield the final octet (as int) of every usable host address in *network*."""
    for host in network.hosts():
        yield int(str(host).rsplit('.', 1)[1])
if __name__ == '__main__':
    # Exactly one CLI argument is required: the prefix length (e.g. 24).
    if len(sys.argv) == 2:
        nmk = sys.argv[1]
    else:
        print('Usage: %s <netmask(ex: 24)>' %sys.argv[0])
        exit(-1)
    try:
        int(nmk)
    # int() raises ValueError (not TypeError) for non-numeric strings, so
    # the original `except TypeError` handler never fired and bad input
    # crashed with an unhandled traceback instead of this message.
    except ValueError:
        print('Specify netmask(%s) is TypeError!' %(nmk))
        exit(-1)
    exIP = '192.168.1'  # fixed /24 base; only the last octet varies below
    num = 1
    # For each subnet offset, print its network/host-range/netmask/broadcast
    # last octets on one ' / '-separated line.
    for n in count_networks(nmk):
        fromIP = "%s.%s" %(exIP,n)
        print('network-ec:', fromIP.split('.')[-1], end=' / ')
        try:
            nets = ipaddress.ip_network("%s/%d" %(fromIP,int(nmk)))
        except ValueError:
            print ("value error!")
            exit(-1)
        _min, _max = (min(hosts(nets)), max(hosts(nets)))
        print('range of %d: (%s~%s)' %(num, _min, _max), end=' / ')
        print('mask-ec:', str(nets.netmask).split('.')[-1], end=' / ')
        print('broadcast-ec:', str(nets.broadcast_address).split('.')[-1])
        num += 1
| true
|
e8e6075b933bbfd8c953e8d2ad528a72ac5e0024
|
Python
|
gen4438/vtk-python-stubs
|
/typings/vtkmodules/vtkIOExportPDF.pyi
|
UTF-8
| 21,261
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
This type stub file was generated by pyright.
"""
import vtkmodules.vtkIOExport as __vtkmodules_vtkIOExport
import vtkmodules.vtkRenderingContext2D as __vtkmodules_vtkRenderingContext2D
# NOTE(review): auto-generated pyright stub (see the module docstring).
# Several signatures below repeat the placeholder parameter name `p_float`
# within one def (e.g. ComputeJustifiedStringBounds), which is not valid
# Python syntax - regenerate the stub rather than hand-editing these defs.
class vtkPDFContextDevice2D(__vtkmodules_vtkRenderingContext2D.vtkContextDevice2D):
    """
    vtkPDFContextDevice2D - vtkContextDevice2D implementation for use
    with vtkPDFExporter.
    Superclass: vtkContextDevice2D
    Quirks:
    - Libharu does not support RGBA images. If an alpha channel is
    present in any drawn images, it will be blended into an opaque
    background filled with the active Brush color to produce a flat RGB
    image.
    """
    def ComputeJustifiedStringBounds(self, string, p_float=..., p_float=..., p_float=..., p_float=...):
        """
        V.ComputeJustifiedStringBounds(string, [float, float, float,
            float])
        C++: void ComputeJustifiedStringBounds(const char *string,
            float bounds[4]) override;
        Compute the bounds of the supplied string while taking into
        account the justification of the currently applied text property.
        Simple rotations (0, 90, 180, 270) are also correctly taken into
        account.
        """
        ...
    def ComputeStringBounds(self, string, p_float=..., p_float=..., p_float=..., p_float=...):
        """
        V.ComputeStringBounds(string, [float, float, float, float])
        C++: void ComputeStringBounds(const vtkStdString &string,
            float bounds[4]) override;
        V.ComputeStringBounds(unicode, [float, float, float, float])
        C++: void ComputeStringBounds(const vtkUnicodeString &string,
            float bounds[4]) override;
        Compute the bounds of the supplied string. The bounds will be
        copied to the supplied bounds variable, the first two elements
        are the bottom corner of the string, and the second two elements
        are the width and height of the bounding box. NOTE: This function
        does not take account of the text rotation or justification.
        """
        ...
    def DrawColoredPolygon(self, *float, **kwargs):
        """
        V.DrawColoredPolygon([float, ...], int, [int, ...], int)
        C++: void DrawColoredPolygon(float *points, int numPoints,
            unsigned char *colors=nullptr, int nc_comps=0) override;
        """
        ...
    def DrawEllipseWedge(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5, p_float_6, p_float_7):
        """
        V.DrawEllipseWedge(float, float, float, float, float, float,
            float, float)
        C++: void DrawEllipseWedge(float x, float y, float outRx,
            float outRy, float inRx, float inRy, float startAngle,
            float stopAngle) override;
        Draw an elliptic wedge with center at x, y, outer radii outRx,
        outRy, inner radii inRx, inRy between angles startAngle and
        stopAngle (expressed in degrees).
        \pre positive_outRx: outRx>=0
        \pre positive_outRy: outRy>=0
        \pre positive_inRx: inRx>=0
        \pre positive_inRy: inRy>=0
        \pre ordered_rx: inRx<=outRx
        \pre ordered_ry: inRy<=outRy
        """
        ...
    def DrawEllipticArc(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):
        """
        V.DrawEllipticArc(float, float, float, float, float, float)
        C++: void DrawEllipticArc(float x, float y, float rX, float rY,
            float startAngle, float stopAngle) override;
        Draw an elliptic arc with center at x,y with radii rX and rY
        between angles startAngle and stopAngle (expressed in degrees).
        \pre positive_rX: rX>=0
        \pre positive_rY: rY>=0
        """
        ...
    def DrawImage(self, p_float=..., p_float=..., *args, **kwargs):
        """
        V.DrawImage([float, float], float, vtkImageData)
        C++: void DrawImage(float p[2], float scale, vtkImageData *image)
            override;
        V.DrawImage(vtkRectf, vtkImageData)
        C++: void DrawImage(const vtkRectf &pos, vtkImageData *image)
            override;
        Draw the supplied image at the given x, y (p[0], p[1]) (bottom
        corner), scaled by scale (1.0 would match the image).
        """
        ...
    def DrawLines(self, *float, **kwargs):
        """
        V.DrawLines([float, ...], int, [int, ...], int)
        C++: void DrawLines(float *f, int n,
            unsigned char *colors=nullptr, int nc_comps=0) override;
        Draw lines using the points - memory layout is as follows:
        l1p1,l1p2,l2p1,l2p2... The lines will be colored by colors array
        which has nc_comps components (defining a single color).
        \sa DrawPoly()
        """
        ...
    def DrawMarkers(self, p_int, bool, *float, **kwargs):
        """
        V.DrawMarkers(int, bool, [float, ...], int, [int, ...], int)
        C++: void DrawMarkers(int shape, bool highlight, float *points,
            int n, unsigned char *colors=nullptr, int nc_comps=0)
            override;
        Draw a series of markers centered at the points supplied. The
        shape argument controls the marker shape, and can be one of
        - VTK_MARKER_CROSS
        - VTK_MARKER_PLUS
        - VTK_MARKER_SQUARE
        - VTK_MARKER_CIRCLE
        - VTK_MARKER_DIAMOND
        \param colors is an optional array of colors.
        \param nc_comps is the number of components for the color.
        """
        ...
    def DrawMathTextString(self, *float, **kwargs):
        """
        V.DrawMathTextString([float, ...], string)
        C++: void DrawMathTextString(float *point,
            const vtkStdString &str) override;
        Draw text using MathText markup for mathematical equations. See
        http://matplotlib.sourceforge.net/users/mathtext.html for more
        information.
        """
        ...
    def DrawPoints(self, *float, **kwargs):
        """
        V.DrawPoints([float, ...], int, [int, ...], int)
        C++: void DrawPoints(float *points, int n,
            unsigned char *colors=nullptr, int nc_comps=0) override;
        Draw a series of points - fastest code path due to memory layout
        of the coordinates. The colors and nc_comps are optional - color
        array.
        """
        ...
    def DrawPointSprites(self, vtkImageData, *float, **kwargs):
        """
        V.DrawPointSprites(vtkImageData, [float, ...], int, [int, ...],
            int)
        C++: void DrawPointSprites(vtkImageData *sprite, float *points,
            int n, unsigned char *colors=nullptr, int nc_comps=0)
            override;
        Draw a series of point sprites, images centred at the points
        supplied. The supplied vtkImageData is the sprite to be drawn,
        only squares will be drawn and the size is set using
        SetPointSize.
        \param colors is an optional array of colors.
        \param nc_comps is the number of components for the color.
        """
        ...
    def DrawPoly(self, *float, **kwargs):
        """
        V.DrawPoly([float, ...], int, [int, ...], int)
        C++: void DrawPoly(float *points, int n,
            unsigned char *colors=nullptr, int nc_comps=0) override;
        Draw a poly line using the points - fastest code path due to
        memory layout of the coordinates. The line will be colored by the
        colors array, which must be have nc_comps components (defining a
        single color).
        \sa DrawLines()
        """
        ...
    def DrawPolyData(self, p_float=..., p_float=..., *args, **kwargs):
        """
        V.DrawPolyData([float, float], float, vtkPolyData,
            vtkUnsignedCharArray, int)
        C++: void DrawPolyData(float p[2], float scale,
            vtkPolyData *polyData, vtkUnsignedCharArray *colors,
            int scalarMode) override;
        Draw the supplied PolyData at the given x, y (p[0], p[1]) (bottom
        corner), scaled by scale (1.0 would match the actual dataset).
        Only lines and polys are rendered. Only the x/y coordinates of
        the polydata are used.
        @param p Offset to apply to polydata.
        @param scale Isotropic scale for polydata. Applied after offset.
        @param polyData Draw lines and polys from this dataset.
        @param colors RGBA for points or cells, depending on value of
        scalarMode.
        Must not be NULL.
        @param scalarMode Must be either VTK_SCALAR_MODE_USE_POINT_DATA
        or
        VTK_SCALAR_MODE_USE_CELL_DATA.
        The base implementation breaks the polydata apart and renders
        each polygon individually using the device API. Subclasses should
        override this method with a batch-drawing implementation if
        performance is a concern.
        """
        ...
    def DrawPolygon(self, *float, **kwargs):
        """
        V.DrawPolygon([float, ...], int)
        C++: void DrawPolygon(float *, int) override;
        """
        ...
    def DrawQuad(self, *float, **kwargs):
        """
        V.DrawQuad([float, ...], int)
        C++: void DrawQuad(float *, int) override;
        Draw a quad using the specified number of points.
        """
        ...
    def DrawQuadStrip(self, *float, **kwargs):
        """
        V.DrawQuadStrip([float, ...], int)
        C++: void DrawQuadStrip(float *, int) override;
        Draw a quad using the specified number of points.
        """
        ...
    def DrawString(self, *float, **kwargs):
        """
        V.DrawString([float, ...], string)
        C++: void DrawString(float *point, const vtkStdString &string)
            override;
        V.DrawString([float, ...], unicode)
        C++: void DrawString(float *point, const vtkUnicodeString &string)
            override;
        Draw some text to the screen.
        """
        ...
    def EnableClipping(self, bool):
        """
        V.EnableClipping(bool)
        C++: void EnableClipping(bool enable) override;
        Enable or disable the clipping of the scene.
        """
        ...
    def GetMatrix(self, vtkMatrix3x3):
        """
        V.GetMatrix(vtkMatrix3x3)
        C++: void GetMatrix(vtkMatrix3x3 *m) override;
        Set the model view matrix for the display
        """
        ...
    def GetNumberOfGenerationsFromBase(self, string):
        """
        V.GetNumberOfGenerationsFromBase(string) -> int
        C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
            override;
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def GetNumberOfGenerationsFromBaseType(self, string):
        """
        V.GetNumberOfGenerationsFromBaseType(string) -> int
        C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
            const char *type)
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def IsA(self, string):
        """
        V.IsA(string) -> int
        C++: vtkTypeBool IsA(const char *type) override;
        Return 1 if this class is the same type of (or a subclass of) the
        named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def IsTypeOf(self, string):
        """
        V.IsTypeOf(string) -> int
        C++: static vtkTypeBool IsTypeOf(const char *type)
        Return 1 if this class type is the same type of (or a subclass
        of) the named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def MultiplyMatrix(self, vtkMatrix3x3):
        """
        V.MultiplyMatrix(vtkMatrix3x3)
        C++: void MultiplyMatrix(vtkMatrix3x3 *m) override;
        Multiply the current model view matrix by the supplied one
        """
        ...
    def NewInstance(self):
        """
        V.NewInstance() -> vtkPDFContextDevice2D
        C++: vtkPDFContextDevice2D *NewInstance()
        """
        ...
    def PopMatrix(self):
        """
        V.PopMatrix()
        C++: void PopMatrix() override;
        Pop the current matrix off of the stack.
        """
        ...
    def PushMatrix(self):
        """
        V.PushMatrix()
        C++: void PushMatrix() override;
        Push the current matrix onto the stack.
        """
        ...
    def SafeDownCast(self, vtkObjectBase):
        """
        V.SafeDownCast(vtkObjectBase) -> vtkPDFContextDevice2D
        C++: static vtkPDFContextDevice2D *SafeDownCast(vtkObjectBase *o)
        """
        ...
    def SetClipping(self, *int):
        """
        V.SetClipping([int, ...])
        C++: void SetClipping(int *x) override;
        Supply an int array of length 4 with x1, y1, width, height
        specifying clipping region for the device in pixels.
        """
        ...
    def SetColor4(self, p_int=..., p_int=..., p_int=..., p_int=...):
        """
        V.SetColor4([int, int, int, int])
        C++: void SetColor4(unsigned char color[4]) override;
        Set the color for the device using unsigned char of length 4,
        RGBA.
        """
        ...
    def SetHaruObjects(self, void, void_1):
        """
        V.SetHaruObjects(void, void)
        C++: void SetHaruObjects(void *doc, void *page)
        Set the HPDF_Doc and HPDF_Page to use while exporting the scene.
        The type is void* to keep the libharu opaque types from leaking
        into headers. This function expects HPDF_Document* and HPDF_Page*
        as the arguments.
        """
        ...
    def SetLineType(self, p_int):
        """
        V.SetLineType(int)
        C++: void SetLineType(int type) override;
        Set the line type type (using anonymous enum in vtkPen).
        """
        ...
    def SetLineWidth(self, p_float):
        """
        V.SetLineWidth(float)
        C++: void SetLineWidth(float width) override;
        Set the line width.
        """
        ...
    def SetMatrix(self, vtkMatrix3x3):
        """
        V.SetMatrix(vtkMatrix3x3)
        C++: void SetMatrix(vtkMatrix3x3 *m) override;
        Set the model view matrix for the display
        """
        ...
    def SetPointSize(self, p_float):
        """
        V.SetPointSize(float)
        C++: void SetPointSize(float size) override;
        Set the point size for glyphs/sprites.
        """
        ...
    def SetRenderer(self, vtkRenderer):
        """
        V.SetRenderer(vtkRenderer)
        C++: void SetRenderer(vtkRenderer *)
        """
        ...
    def SetTexture(self, vtkImageData, p_int):
        """
        V.SetTexture(vtkImageData, int)
        C++: void SetTexture(vtkImageData *image, int properties)
            override;
        Set the texture for the device, it is used to fill the polygons
        """
        ...
    def __delattr__(self, *args, **kwargs):
        """ Implement delattr(self, name). """
        ...
    def __getattribute__(self, *args, **kwargs):
        """ Return getattr(self, name). """
        ...
    def __init__(self, *args, **kwargs) -> None:
        ...
    @staticmethod
    def __new__(*args, **kwargs):
        """ Create and return a new object. See help(type) for accurate signature. """
        ...
    def __repr__(self, *args, **kwargs):
        """ Return repr(self). """
        ...
    def __setattr__(self, *args, **kwargs):
        """ Implement setattr(self, name, value). """
        ...
    def __str__(self, *args, **kwargs) -> str:
        """ Return str(self). """
        ...
    # Opaque attributes emitted by the stub generator for VTK wrapping.
    __this__ = ...
    __dict__ = ...
    __vtkname__ = ...
# NOTE(review): auto-generated pyright stub; bodies are ellipses by design -
# the behavior lives in the wrapped C++ implementation.
class vtkPDFExporter(__vtkmodules_vtkIOExport.vtkExporter):
    """
    vtkPDFExporter - Exports vtkContext2D scenes to PDF.
    Superclass: vtkExporter
    This exporter draws context2D scenes into a PDF file.
    If ActiveRenderer is specified then it exports contents of
    ActiveRenderer. Otherwise it exports contents of all renderers.
    """
    def GetFileName(self):
        """
        V.GetFileName() -> string
        C++: virtual char *GetFileName()
        """
        ...
    def GetNumberOfGenerationsFromBase(self, string):
        """
        V.GetNumberOfGenerationsFromBase(string) -> int
        C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
            override;
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def GetNumberOfGenerationsFromBaseType(self, string):
        """
        V.GetNumberOfGenerationsFromBaseType(string) -> int
        C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
            const char *type)
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def GetTitle(self):
        """
        V.GetTitle() -> string
        C++: virtual char *GetTitle()
        """
        ...
    def IsA(self, string):
        """
        V.IsA(string) -> int
        C++: vtkTypeBool IsA(const char *type) override;
        Return 1 if this class is the same type of (or a subclass of) the
        named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def IsTypeOf(self, string):
        """
        V.IsTypeOf(string) -> int
        C++: static vtkTypeBool IsTypeOf(const char *type)
        Return 1 if this class type is the same type of (or a subclass
        of) the named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def NewInstance(self):
        """
        V.NewInstance() -> vtkPDFExporter
        C++: vtkPDFExporter *NewInstance()
        """
        ...
    def SafeDownCast(self, vtkObjectBase):
        """
        V.SafeDownCast(vtkObjectBase) -> vtkPDFExporter
        C++: static vtkPDFExporter *SafeDownCast(vtkObjectBase *o)
        """
        ...
    def SetFileName(self, string):
        """
        V.SetFileName(string)
        C++: virtual void SetFileName(const char *_arg)
        """
        ...
    def SetTitle(self, string):
        """
        V.SetTitle(string)
        C++: virtual void SetTitle(const char *_arg)
        """
        ...
    def __delattr__(self, *args, **kwargs):
        """ Implement delattr(self, name). """
        ...
    def __getattribute__(self, *args, **kwargs):
        """ Return getattr(self, name). """
        ...
    def __init__(self, *args, **kwargs) -> None:
        ...
    @staticmethod
    def __new__(*args, **kwargs):
        """ Create and return a new object. See help(type) for accurate signature. """
        ...
    def __repr__(self, *args, **kwargs):
        """ Return repr(self). """
        ...
    def __setattr__(self, *args, **kwargs):
        """ Implement setattr(self, name, value). """
        ...
    def __str__(self, *args, **kwargs) -> str:
        """ Return str(self). """
        ...
    # Opaque attributes emitted by the stub generator for VTK wrapping.
    __this__ = ...
    __dict__ = ...
    __vtkname__ = ...
__loader__ = ...
__spec__ = ...
| true
|
704e0d2accbd7565e8e7563e31a1ba721ba600ac
|
Python
|
open-mmlab/mmengine
|
/mmengine/optim/optimizer/base.py
|
UTF-8
| 4,472
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List
import torch
class BaseOptimWrapper(metaclass=ABCMeta):
    """Abstract wrapper around a ``torch.optim.Optimizer``.

    Declares the interface (``update_params``/``backward``/``zero_grad``/
    ``step``) that concrete wrappers must implement, and provides shared
    state-dict, param-group, learning-rate and momentum accessors.
    """
    def __init__(self, optimizer):
        # The wrapped torch optimizer; all accessors below delegate to it.
        self.optimizer = optimizer

        # The Following code is used to initialize `base_param_settings`.
        # `base_param_settings` is used to store the parameters that are not
        # updated by the optimizer.
        # The `base_param_settings` used for tracking the base learning in the
        # optimizer. If the optimizer has multiple parameter groups, this
        # params will not be scaled by the loss factor.
        if len(optimizer.param_groups) > 1:
            self.base_param_settings = {
                'params': torch.tensor([0.0], dtype=torch.float)
            }
            # Inherit lr/momentum/etc. from the optimizer's defaults.
            self.base_param_settings.update(**self.optimizer.defaults)
        else:
            self.base_param_settings = None  # type: ignore

    @abstractmethod
    def update_params(self, *args, **kwargs):
        """Update parameters in :attr:`optimizer`."""

    @abstractmethod
    def backward(self, loss: torch.Tensor, **kwargs) -> None:
        """Perform gradient back propagation."""

    @abstractmethod
    def zero_grad(self, **kwargs) -> None:
        """A wrapper of ``Optimizer.zero_grad``."""

    @abstractmethod
    def step(self, **kwargs):
        """Call the step method of optimizer."""

    def state_dict(self) -> dict:
        """A wrapper of ``Optimizer.state_dict``."""
        state_dict = self.optimizer.state_dict()
        # Persist the extra tracking group alongside the optimizer state so
        # that load_state_dict() can restore it.
        if self.base_param_settings is not None:
            state_dict['base_param_settings'] = self.base_param_settings
        return state_dict

    def load_state_dict(self, state_dict: dict) -> None:
        """A wrapper of ``Optimizer.load_state_dict``. load the state dict of
        :attr:`optimizer`.
        Provide unified ``load_state_dict`` interface compatible with automatic
        mixed precision training. Subclass can overload this method to
        implement the required logic. For example, the state dictionary of
        GradScaler should be loaded when training with ``torch.cuda.amp``.
        Args:
            state_dict (dict): The state dictionary of :attr:`optimizer`.
        """
        # Extract our own entry first; note that pop() mutates the caller's
        # dict before it is handed to the wrapped optimizer.
        base_param_settings = state_dict.pop('base_param_settings', None)
        if base_param_settings is not None:
            self.base_param_settings = base_param_settings
        # load state_dict of optimizer
        self.optimizer.load_state_dict(state_dict)

    @property
    def param_groups(self) -> List[dict]:
        """A wrapper of ``Optimizer.param_groups``.
        Make OptimizeWrapper compatible with :class:`_ParamScheduler`.
        Returns:
            dict: the ``param_groups`` of :attr:`optimizer`.
        """
        # Append the tracking group (when present) so schedulers see it too.
        if self.base_param_settings is not None:
            return self.optimizer.param_groups + [self.base_param_settings]
        else:
            return self.optimizer.param_groups

    @property
    def defaults(self) -> dict:
        """A wrapper of ``Optimizer.defaults``.
        Make OptimizeWrapper compatible with :class:`_ParamScheduler`.
        Returns:
            dict: the ``param_groups`` of :attr:`optimizer`.
        """
        return self.optimizer.defaults

    def get_lr(self) -> Dict[str, List[float]]:
        """Get the learning rate of the optimizer.
        Provide unified interface to get learning rate of optimizer.
        Returns:
            Dict[str, List[float]]:
            param_groups learning rate of the optimizer.
        """
        res = {}
        if self.base_param_settings is not None:
            res['base_lr'] = [self.base_param_settings['lr']]
        res['lr'] = [group['lr'] for group in self.optimizer.param_groups]
        return res

    def get_momentum(self) -> Dict[str, List[float]]:
        """Get the momentum of the optimizer.
        Provide unified interface to get momentum of optimizer.
        Returns:
            Dict[str, List[float]]: Momentum of the optimizer.
        """
        momentum = []
        for group in self.optimizer.param_groups:
            # Get momentum of SGD.
            if 'momentum' in group.keys():
                momentum.append(group['momentum'])
            # Get momentum of Adam.
            elif 'betas' in group.keys():
                momentum.append(group['betas'][0])
            else:
                momentum.append(0)
        return dict(momentum=momentum)
| true
|
268fa2d9d2d059f3d71bff57cb3387558ef31e53
|
Python
|
phousanakhan/CMPUT355_ASN4
|
/main.py
|
UTF-8
| 2,824
| 3.421875
| 3
|
[] |
no_license
|
import numpy as np
import connect4 as c4
import os
boardWidth = 7 # number of board columns
boardHeight = 6 # number of board rows
quit = False # loop flag; never set True here - games end via exit(0) on a win
def main():
    """Run the interactive two-player Connect4 loop on a 6x7 board."""
    print("Welcome to the Connect4 game!\n")
    print("Type 'quit' if you want to quit\n")
    print("Type 'man' if you want to see the manual\n")
    print("Type 'hist' if you want to see the a brief history of the game\n")
    print("Type 'strat' for strategy\n")
    # NOTE(review): afplay is macOS-only, so this is a no-op/error elsewhere.
    # The advertised 'quit'/'man'/'hist'/'strat' commands are not handled in
    # this file - confirm whether connect4.validate_input() handles them.
    os.system("afplay -v 0.5 beat.mp3&")
    board = np.zeros((boardHeight,boardWidth)) # 0 marks an empty cell
    player_turn = 1
    c4.board_strip_print(board)
    c4.show_col_num()
    # `quit` is a module-level flag that is never set True; games end via
    # exit(0) inside the player handlers' win branches.
    while not quit:
        #--player 1
        if player_turn == 1:
            player_one(board, player_turn)
        else: #player2
            player_two(board, player_turn)
        c4.board_strip_print(board)
        c4.show_col_num()
        player_turn += 1
        player_turn = player_turn % 2 # alternate between 1 and 0 (player two)
def player_one(board, player_turn):
    """Prompt player 1 for a column, drop their piece (value 1), handle a win.

    NOTE(review): if the chosen column is full, only "ILLEGAL MOVE!!" is
    printed and the function returns, so the player's turn is forfeited -
    confirm that is intended. `player_turn` is accepted but unused.
    """
    usr_input_col = input("Player 1: Enter a column number between 0-6: ")
    # Re-prompt (redrawing the board) until the raw input passes validation.
    while c4.validate_input(usr_input_col) == False:
        c4.board_strip_print(board)
        c4.show_col_num()
        usr_input_col = input("Player 1: Enter a column number between 0-6: ")
    usr_input_col = int(usr_input_col)
    if c4.legal_move(board, usr_input_col) == True:
        row = c4.get_open_row(board, usr_input_col)
        board[row][usr_input_col] = 1 #dropping the piece
        if c4.check_for_win(board, 1) == True:
            c4.board_strip_print(board)
            c4.show_col_num()
            print("Player 1 Win!")
            print("Player 2 Lose!")
            again = input("Do you want to play again y/n ? ")
            if again == "y" or again == "Y":
                # Restart by re-entering main(); note this recurses rather
                # than returning to the caller's loop.
                main()
            else:
                print("Quitting!")
                exit(0)
    else:
        print("ILLEGAL MOVE!!")
def player_two(board, player_turn):
    """Prompt player 2 for a column, drop their piece (value 2), handle a win.

    NOTE(review): near-duplicate of player_one (piece value and messages
    differ); consider factoring a shared helper. As in player_one, a full
    column forfeits the turn and `player_turn` is unused.
    """
    usr_input_col = input("Player 2: Enter a column number between 0-6: ")
    # Re-prompt (redrawing the board) until the raw input passes validation.
    while c4.validate_input(usr_input_col) == False:
        c4.board_strip_print(board)
        c4.show_col_num()
        usr_input_col = input("Player 2: Enter a column number between 0-6: ")
    usr_input_col = int(usr_input_col)
    if c4.legal_move(board, usr_input_col) == True:
        row = c4.get_open_row(board, usr_input_col)
        board[row][usr_input_col] = 2 #dropping the piece
        if c4.check_for_win(board, 2) == True:
            c4.board_strip_print(board)
            c4.show_col_num()
            print("Player 2 Win!")
            print("Player 1 Lose!\n")
            again = input("Do you want to play again y/n ? ")
            if again == "y" or again == "Y":
                # Restart by re-entering main(); recursion, not a loop.
                main()
            else:
                print("Quitting!")
                exit(0)
    else:
        print("ILLEGAL MOVE!!")
# Start the game only when run as a script.
if __name__ == '__main__':
    main()
| true
|
ec8e11f5fdc43c5e13b92064d25d598de300a4e4
|
Python
|
LoveMuzi/statsvninfoparser
|
/svnfileinfo.py
|
UTF-8
| 1,704
| 2.796875
| 3
|
[] |
no_license
|
# coding:utf-8
class SvnFileInfo:
    """Value object holding the "file info" figures parsed from an SVN report.

    Scalar fields are stored as strings (as parsed); the *_dict_list fields
    hold one dict per report table row.

    NOTE(review): these Java-style get_/set_ accessor pairs would be plain
    attributes or @property in idiomatic Python; kept as-is because callers
    depend on the get_/set_ names.
    """
    def __init__(self):
        # Scalar summary values.
        self._total_files = ''
        self._average_file_size = ''
        self._average_revision_per_file = ''
        # Table sections (lists of per-row dicts).
        self._file_types_summary_dict_list = []
        self._largest_files_detail_dict_list = []
        self._files_with_most_revisions_dict_list = []
    def get_total_files(self):
        return self._total_files
    def set_total_files(self, total_files):
        self._total_files = total_files
    def get_average_file_size(self):
        return self._average_file_size
    def set_average_file_size(self, average_file_size):
        self._average_file_size = average_file_size
    def get_average_revision_per_file(self):
        return self._average_revision_per_file
    def set_average_revision_per_file(self, average_revision_per_file):
        self._average_revision_per_file = average_revision_per_file
    def get_file_types_summary_dict_list(self):
        return self._file_types_summary_dict_list
    def set_file_types_summary_dict_list(self, file_type_summary_dict_list):
        self._file_types_summary_dict_list = file_type_summary_dict_list
    def get_largest_files_detail_dict_list(self):
        return self._largest_files_detail_dict_list
    def set_largest_files_detail_dict_list(self, largest_files_detail_dict_list):
        self._largest_files_detail_dict_list = largest_files_detail_dict_list
    def get_files_with_most_revisions_dict_list(self):
        return self._files_with_most_revisions_dict_list
    def set_files_with_most_revisions_dict_list(self, files_with_most_revisions_dict_list):
        self._files_with_most_revisions_dict_list = files_with_most_revisions_dict_list
| true
|
58fc0301a8168e32cb671aa45cf9c5a16c257a78
|
Python
|
MadMrCrazy/ChocolateAgeCounter
|
/chocolate.py
|
UTF-8
| 999
| 4.25
| 4
|
[] |
no_license
|
# Classic "chocolate math" parlor trick:
#   (c*2 + 5) * 50 = 100*c + 250; adding (year - 250) [birthday passed] or
#   (year - 251) [not yet] gives 100*c + current-or-previous year; subtracting
#   the birth year leaves 100*c + age. So the final number reads as the
#   chocolate count followed by the person's two-digit age (breaks for ages
#   >= 100, where the digits overlap).
year = input("What is the year?:")
year = int(year)
chocolate = input("pick the number of times a week that you would like to have chocolate. Cannot be 0:")
chocolate = int(chocolate)
print("Multiplying the number by 2")
print(str(chocolate) + " times 2")
print(chocolate * 2)
chocolate = chocolate * 2
print("adding 5...")
chocolate = chocolate + 5
print(chocolate)
print("Multiplying " + str(chocolate) + " by 50")
chocolate = chocolate * 50
print("Have you had your birthday this year? (Use 1 for yes, 0 for no please")
x = input(":")
if x == "1":
    y = -250 + year
    chocolate = chocolate + y
elif x == "0":
    y = -251 + year
    chocolate = chocolate + y
else:
    # Deliberate crash on invalid input: `end` is undefined, so this
    # raises NameError ("forcefully crashing script").
    print("error, forcefully crashing script")
    print(end)
age = input("What was the year you were born?:")
print(str(chocolate) + " - " +str(age))
chocolate = chocolate - int(age)
print(chocolate)
print("Now you have a number.")
print("The first is the amount of chocolate you wanted")
print("The next, is your age.")
| true
|
05e8bf1407813aa20d0ba69cfeb6175b1620212a
|
Python
|
Barud21/ActivityMonitor
|
/tests/testsHelper.py
|
UTF-8
| 1,651
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
import datetime
import json
import os
import ApplicationObjects as Ao
import jsonFormatter as jF
def createTimestamp(timeDigitsTup):
    """Build an Ao.TimeStamp from a pair of (hour, minute, second) triples."""
    begin, end = timeDigitsTup[0], timeDigitsTup[1]
    start = datetime.time(begin[0], begin[1], begin[2])
    stop = datetime.time(end[0], end[1], end[2])
    return Ao.TimeStamp(start, stop)
def createDetailedInstance(timeDigitsTupList, instanceName):
timestamps = []
for tdt in timeDigitsTupList:
timestamps.append(createTimestamp(tdt))
return Ao.DetailedInstance(instanceName, timestamps)
def createBasicApp(timeDigitsTupList, instanceName, appName):
detailed = createDetailedInstance(timeDigitsTupList, instanceName)
return Ao.ApplicationWithInstances(appName, [detailed])
# fileAtr here so we can build an absolute path based on the file when call this function
# so we can specifiy paths in test file, relatively to the test file itself, for a better readability
def getAbsPath(filename, fileAtr):
dirAbsPath = os.path.dirname(os.path.abspath(fileAtr))
return os.path.join(dirAbsPath, filename)
def getResultFromFileInString(fileRelativePath, fileAtr):
with open(getAbsPath(fileRelativePath, fileAtr), encoding='utf8') as result_file:
return result_file.read()
def getJsonObjectsFromFile(filePath, fileAtr):
with open(getAbsPath(filePath, fileAtr), 'r', encoding='utf8') as input_file:
return json.load(input_file, cls=jF.CustomJsonDecoder)
def dumpObjectsToJsonString(objects):
return json.dumps(objects, cls=jF.CustomJsonEncoder, ensure_ascii=False)
def removeWhitespacesFromString(s):
return ''.join(s.split())
| true
|
5779cc1cd677941c20da6c261a3ce38fa5683b01
|
Python
|
jar398/tryphy
|
/tests/test_ts_all_species.py
|
UTF-8
| 4,216
| 2.53125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# 6. ts/all_species
# Get all species that belong to a particular Taxon.
import sys, unittest, json
sys.path.append('./')
sys.path.append('../')
import webapp
service = webapp.get_service(5004, 'ts/all_species')
class TestTsAllSpecies(webapp.WebappTestCase):
@classmethod
def get_service(self):
return service
def test_no_parameter(self):
request = service.get_request('GET', {})
x = self.start_request_tests(request)
self.assertTrue(x.status_code >= 400)
self.assertTrue(u'taxon' in x.json()[u'message'], #informative?
'no "taxon" in "%s"' % x.json()[u'message'])
def test_bad_name(self):
request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})
x = self.start_request_tests(request)
m = x.json().get(u'message')
self.assertTrue(x.status_code >= 400, '%s: %s' % (x.status_code, m))
self.assertTrue(u'No ' in m, #informative?
'%no "No" in "%s"' % x.status_code)
# TBD: maybe try a very long name?
def taxon_tester(self, name):
request = service.get_request('GET', {u'taxon': name})
x = self.start_request_tests(request)
self.assert_success(x, name)
print '%s: %s %s' % (name, len(x.json()[u'species']), x.time)
# Found this taxon lineage sequence using the 'lineage' script in
# opentreeoflife/reference-taxonomy/bin
def test_nested_sequence(self):
"""Try progressively larger taxa to see when the service breaks."""
self.taxon_tester('Apis mellifera')
self.taxon_tester('Apis')
self.taxon_tester('Apini')
self.taxon_tester('Apinae')
# Apidae at 5680 species is a struggle
self.taxon_tester('Apidae')
if False:
# Apoidea: 19566 takes 223 seconds
# Doc says "maximum taxonomic rank allowed: family" so why did it work at all?
# Doc says "depending on rank" which isn't right, it depends on
# the number of species in the taxon. TBD: note it.
self.taxon_tester('Apoidea')
# Aculeata fails after 339 seconds
self.taxon_tester('Aculeata')
self.taxon_tester('Apocrita')
self.taxon_tester('Hymenoptera')
self.taxon_tester('Endopterygota')
self.taxon_tester('Neoptera')
self.taxon_tester('Pterygota')
self.taxon_tester('Dicondylia')
self.taxon_tester('Insecta')
self.taxon_tester('Hexapoda')
self.taxon_tester('Pancrustacea')
self.taxon_tester('Mandibulata')
self.taxon_tester('Arthropoda')
self.taxon_tester('Panarthropoda')
self.taxon_tester('Ecdysozoa')
self.taxon_tester('Protostomia')
self.taxon_tester('Bilateria')
self.taxon_tester('Eumetazoa')
self.taxon_tester('Metazoa')
self.taxon_tester('Holozoa')
self.taxon_tester('Opisthokonta')
self.taxon_tester('Eukaryota')
@unittest.skip("takes too long")
def test_big_family(self):
"""The documentation suggests that you can use the service on families.
So try it on a big family (>60,000 species) to what happens.
As of 2017-11-05, this fails after crunching for 22 minutes -
returns with a non-200 status code."""
self.taxon_tester('Staphylinidae')
# Insert here: edge case tests
# Insert here: inputs out of range, leading to error or long delay
# Insert here: error-generating conditions
# (See ../README.md)
def test_example_15(self):
x = self.start_request_tests(example_15)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
def test_example_16(self):
x = self.start_request_tests(example_16)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
null=None; false=False; true=True
example_15 = service.get_request('GET', {u'taxon': u'Vulpes'})
example_16 = service.get_request('GET', {u'taxon': u'Canidae'})
if __name__ == '__main__':
webapp.main()
| true
|
5f4438087e49d3e023ec2ed400edfaa5cb206103
|
Python
|
JonathanLoscalzo/catedra-big-data
|
/spark/Entrega3/02/02.py
|
UTF-8
| 1,618
| 2.90625
| 3
|
[] |
no_license
|
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
import sys
if len(sys.argv) < 2:
persistent = "/tmp/data/Entrega3/02/"
else:
persistent = sys.argv[1]
conf = SparkConf().setMaster("local[2]").setAppName("ContarDestinos")
sc = SparkContext(conf=conf)
sc.setLogLevel("OFF")
ssc = StreamingContext(sc, 5)
stream = ssc.socketTextStream("localhost", 7777)
ssc.checkpoint(persistent + "counts")
# rdd inicial con todos los posibles destinos.
# la usamos para el history.
initialStateRDD = sc.parallelize(
[
(place, 0)
for place in ["Zoologico", "Shopping", "Plaza", "Museo", "Cine", "Teatro"]
]
)
counts = (
stream.map(lambda line: line.split(";"))
.map(lambda x: (x[4], 1)) # (lugar, 1)
.filter(
lambda a: a[0] != "" and a[0] != "Otro"
) # filtramos los que no son lugares y Otros
.reduceByKey(lambda a, b: a + b) # Sumarizamos
)
def fUpdate(newValues, history):
if history == None:
history = 0
if newValues == None:
newValues = 0
else:
newValues = sum(newValues) # viene un arreglo con los nuevos valores
return newValues + history
history = counts.updateStateByKey(fUpdate, initialRDD=initialStateRDD)
# https://spark.apache.org/docs/latest/streaming-programming-guide.html#dataframe-and-sql-operations
# fue la manera que encontramos para mostrar los datos.
# tomamos los 3 lugares mรกs visitados
history.foreachRDD(
lambda time, rdd: print(
" {} -- {}".format(time, rdd.takeOrdered(3, key=lambda a: -a[1]))
)
)
ssc.start()
ssc.awaitTermination()
| true
|
dc6a79eb649df55c526833471cbf82c16c983feb
|
Python
|
allanlykkechristensen/random_survey_results
|
/random_survey_results.py
|
UTF-8
| 3,484
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
from faker import Faker
import numpy as np
import pandas as pd
import datetime
import argparse
import json
fake = Faker()
def init_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
usage="%(prog)s [OPTION]",
description="Generate random survey dataset."
)
parser.add_argument("-v", "--version", action="version",
version=f"{parser.prog} version 1.0.0")
parser.add_argument("-c", "--config", required="true", type=str,
help="JSON file containing the configuration of the survey")
parser.add_argument(
"--header", help="Include header a the top of the file", action="store_true")
parser.add_argument("-o", "--out", type=str,
help="Name of the file to output the results. If this is not specified the output will be printed on the screen")
parser.add_argument(
"-a", "--append", help="If the output is redirected to a file, use this flag to if you want to append to the file, otherwise the file will be overwritten", action="store_true")
return parser
def random_date(start, end, entries):
'''Returns a list of random dates between the start and end dates provided.
Parameters
----------
start : datetime
Earliest possible date
end : datetime
Latest possible date
entries : int
Number of entries to generate
Returns
-------
list
Random dates between the start and end dates
'''
random_dates = list()
while 0 < entries:
random_dates.append(fake.date_time_between(
start_date=start, end_date=end))
entries -= 1
return random_dates
def generate_entries(config):
entries = config['entries']
columns = []
rows = []
for q in config['questions']:
columns.append(q['question'])
for q in config['questions']:
if q['answers']['type'] == 'int':
decoded_start = int(q['answers']['start'])
decoded_end = int(q['answers']['end'])
row_column_int = np.random.randint(
decoded_start, decoded_end + 1, size=entries)
rows.append(row_column_int)
elif q['answers']['type'] == "datetime":
decoded_start_date = datetime.datetime.strptime(
q['answers']['start'], "%Y%m%d")
decoded_end_date = datetime.datetime.strptime(
q['answers']['end'], "%Y%m%d")
row_column_datetime = random_date(
decoded_start_date, decoded_end_date, entries)
rows.append(row_column_datetime)
elif q['answers']['type'] == "choice":
choices = q['answers']['choices']
row_column_choice = np.random.choice(choices, size=entries)
rows.append(row_column_choice)
data = list(zip(*rows))
df = pd.DataFrame(data, columns=columns)
return df
def main() -> None:
parser = init_argparse()
args = parser.parse_args()
config = args.config
with open(config) as json_file:
data = json.load(json_file)
random_entries = generate_entries(data)
result = pd.concat([random_entries])
if not args.out:
print(result.to_csv(index=False))
else:
mode = 'w'
if (args.append):
mode = 'a'
result.to_csv(args.out, mode=mode, index=False, header=args.header)
if __name__ == "__main__":
main()
| true
|
7ce2e6bb940bd0d79116099b589c9bd69c398469
|
Python
|
brandonholderman/snakes-cafe
|
/snakes_cafe.py
|
UTF-8
| 5,133
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
import uuid
menu = {
'Appetizers': {
'Wings': 8.00,
'Spring Rolls': 5.00,
'Cookies': 2.00,
'Grilled Squid': 8.00,
'Crab Wonton': 6.00,
'Satay': 7.00
},
'Entrees': {
'Salmon': 15.00,
'Steak': 20.00,
'Meat Tornado': 25.00,
'A Literal Garden': 12.00,
'Pad Thai': 10.00,
'Spicy Meatballs': 12.00
},
'Desserts': {
'Ice Cream': 6.00,
'Cake': 6.00,
'Pie': 7.00,
'Mango Sicky Rice': 6.00,
'Mushroom Yogurt': 5.00,
'Popsicle': 3.00
},
'Drinks': {
'Coffee': 3.00,
'Tea': 2.00,
'Innocent Blood': 50.00,
'Champagne': 8.00,
'Martini': 11.00,
'Italian Lemondrop': 10.00
},
'Sides': {
'Bread': 2.00,
'Hot Peppers': 1.00,
'Potatoes': 3.00,
'Bacon': 5.00,
'Apples': 1.00,
'Rice': 4.00
}
}
user_order = {}
tax = .101
def welcome_message():
'''
Prints the welcome message to the user
'''
welcome = print('**************************************\n\
** Welcome to the Snakes Cafe! **\n\
** Please see our menu below. **\n\
** To quit at any time, type "quit" **\n\
**************************************')
return welcome
def print_menu():
'''
Prints the menu to the user
'''
return_value = ''
for key, value in menu.items():
print('\n{}\n----------\n' .format(key))
'''
Input from the user for menu item
'''
for item, price in value.items():
item_str = item.ljust(20)
price_str = ('$' + str(price) + '0').rjust(15)
new_str = item_str + price_str
print(new_str)
return_value += '{}: ${p:0.2f}\n' .format(item, p=price)
return return_value
def get_user_input():
'''
Input from the user for menu item
'''
return input('***********************************\n\
** What would you like to order? ''**\n\
***********************************\n->')
def exit_program():
'''
Will exit program when called
'''
exit(0)
def check_user_input():
'''
Prompts user for input and listens for input
'''
user_input = input('->')
if user_input == 'quit':
user_quit()
elif user_input == 'order':
place_order()
elif user_input.startswith('remove'):
removed_item = user_input.split(' ')[-1]
remove_item(removed_item)
elif user_input == 'menu':
print_menu()
elif user_input.title() in menu:
categories_items(user_input)
else:
add_order(user_input)
return user_input
def user_quit():
print('Order Complete')
exit_program()
def place_order():
print(print_order(user_order))
def categories_items(category):
category = category.title()
for key in menu[category]:
print(key)
def print_order(user_order):
'''
Prints order when user is finished selecting items
'''
sub_total = calculate_total()
order_tax_total = calculate_tax()
final_total = order_tax_total + sub_total
order_summary = 'Order #{}\n'.format(uuid.uuid4())
for item, quantity in user_order.items():
item = item.title()
for category in menu.values():
if item in category:
order_summary += '\n{}: {} ${}'.format(quantity, item, category[item])
order_summary += '\nSubtotal: ${}'.format(sub_total)
order_summary += '\nTax: ${t:0.2f}'.format(t = order_tax_total)
order_summary += '\nTotal: ${h:0.2f}'.format(h = final_total)
return order_summary
def calculate_total():
order_total = 0
for item, quantity in user_order.items():
item = item.title()
for category in menu.values():
if item in category:
item_price = category[item] * quantity
order_total += item_price
return order_total
def calculate_tax():
tax_total = calculate_total() * tax
return tax_total
def add_order(item):
'''
Adds items to users total order
'''
for course in menu:
item = item.title()
if item in menu[course]:
if item in user_order:
user_order[item] = user_order[item] + 1
else:
user_order[item] = 1
print('{} has been added to your order' .format(item))
print(print_order(user_order))
return item
def remove_item(item):
'''
Will remove items when called
'''
item = item.title()
if item in user_order:
user_order[item] -= 1
if user_order[item] == 0:
del user_order[item]
print('{} has been removed from your order' .format(item))
else:
print('{} not found' .format(item))
def main():
welcome_message()
print_menu()
while True:
user_input = check_user_input()
if user_input is None:
print('Not on the Menu. Try again...')
continue
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| true
|
c872adb28f24d8d62744c9bf6b8a1fe70840bcdc
|
Python
|
lf2225/Blackjack
|
/Deck.py
|
UTF-8
| 929
| 3.640625
| 4
|
[] |
no_license
|
import random
import itertools
Suits = 'cdhs'
Ranks = '23456789TJQKA'
class Deck(object):
def __init__(self):
#print 'I am entering the init routine of Deck'
self.CardShoe = tuple(''.join(card) for card in itertools.product(Ranks, Suits))
self.NumberOfCards = len(self.CardShoe)
self.shuffle()
#print 'I am exiting the init routine of Deck'
#will be a helper method to be called during gameplay over and over again
#shuffle all 52 cards, return shuffled deck (new CardShoe)
def shuffle(self):
self.ShuffledDeck = random.sample(self.CardShoe, 52)
#interaction of the deck class, namely dealing cards to the assigned players
def deal_one_card(self):
#print 'I am at the start of the DealOneCard routine'
self.OneCard = (random.sample(self.ShuffledDeck, 1))[0]
#print "Deal card", self.OneCard
self.ShuffledDeck.remove(self.OneCard)
#print "deck with removed card", self.ShuffledDeck
return self.OneCard
| true
|
0330589ed4653be6b59e00fe84bf9f7199f0cb96
|
Python
|
dosart/Graph_algorithms
|
/tests/test_kruskal.py
|
UTF-8
| 2,139
| 3.078125
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
"""Tests of Kruskal's algorithm."""
from graph_algorithms.algorithms.kruskal.kruskal import kruskal
from graph_algorithms.data_structure.graph.graph import Graph
def test_kruskal1():
graph = Graph()
graph.create_vertex_by_id('A')
graph.create_vertex_by_id('B')
graph.create_vertex_by_id('C')
graph.create_vertex_by_id('D')
graph.create_vertex_by_id('E')
graph.create_vertex_by_id('F')
graph.add_edge('A', 'B', 2)
graph.add_edge('A', 'C', 1)
graph.add_edge('B', 'A', 2)
graph.add_edge('B', 'C', 2)
graph.add_edge('B', 'D', 1)
graph.add_edge('D', 'C', 2)
graph.add_edge('D', 'E', 3)
graph.add_edge('D', 'F', 4)
graph.add_edge('C', 'D', 2)
graph.add_edge('C', 'E', 3)
graph.add_edge('C', 'A', 1)
graph.add_edge('C', 'B', 2)
graph.add_edge('E', 'D', 3)
graph.add_edge('E', 'C', 3)
graph.add_edge('E', 'F', 2)
graph.add_edge('F', 'E', 2)
graph.add_edge('F', 'D', 4)
edges = kruskal(graph)
assert len(edges) == 5
assert find_edge('A', 'B', edges) is not None
assert find_edge('A', 'C', edges) is not None
assert find_edge('B', 'D', edges) is not None
assert find_edge('D', 'E', edges) is not None
assert find_edge('E', 'F', edges) is not None
def test_kruskal2():
graph = Graph()
edges = kruskal(graph)
assert len(edges) == 0
def test_kruskal3():
graph = Graph()
graph.create_vertex_by_id('A')
graph.create_vertex_by_id('B')
graph.create_vertex_by_id('C')
graph.add_edge('A', 'C', 5)
graph.add_edge('B', 'C', 2)
graph.add_edge('C', 'A', 5)
graph.add_edge('C', 'B', 2)
edges = kruskal(graph)
assert len(edges) == 2
assert find_edge('A', 'C', edges)
assert find_edge('B', 'C', edges)
def find_edge(first_id, second_id, edges):
res_one = next((edge for edge in edges if edge.first.identifier == first_id and edge.second.identifier == second_id), None)
res_two = next((edge for edge in edges if edge.first.identifier == second_id and edge.second.identifier == first_id), None)
return res_one or res_two
| true
|
6e6561f2c85cb0cb4fb0f0359992bde5b61b8da6
|
Python
|
ELORCHI/python-bootcamp
|
/ex04/operations.py
|
UTF-8
| 1,247
| 3.828125
| 4
|
[] |
no_license
|
import sys
def elementary_operations(a, b):
summ = a + b
Difference = a - b
Product = a * b
if b == 0:
Quotient = "err"
Remainder = "err"
else:
Quotient = a / b
Remainder = a % b
return (summ, Difference, Product, Quotient, Remainder)
error = False
nb_args = len(sys.argv) - 1
arg1 = 0
arg2 = 0
tup = ()
if nb_args > 2:
print("InputError: too many arguments")
error = True
elif nb_args < 2:
print("InputError: few arguments")
error = True
elif (isinstance(sys.argv[1], int)) or (isinstance(sys.argv[2], int)):
print("InputError: only numbers")
error = True
if error:
print("Usage: python operations.py <number1> <number2> \nExample:\n\tpython operation.py 10 3")
else:
arg1 = sys.argv[1]
arg2 = sys.argv[2]
tup = elementary_operations(int(arg1), int(arg2))
print("SUM: " + str(tup[0]))
print("Difference: " + str(tup[1]))
print("Product: " + str(tup[2]))
print("Quotient: " , end="")
if tup[3] != "err":
print(str(tup[3]))
else:
print("ERROR (div by zero)")
print("Remainder: " , end="")
if tup[3] != "err":
print(str(tup[4]))
else:
print("ERROR (modulo by zero)")
| true
|
8f94bd3b14aa7657dd22e692cb374999b50bf519
|
Python
|
msjuck/DC_INSIDE_VR
|
/VR_WASABI.PY
|
UTF-8
| 3,221
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
from bs4 import BeautifulSoup
import requests
import time
def get_doc():
BASE_URL = "https://gall.dcinside.com/mgallery/board/view/"
url = 'https://gall.dcinside.com/mgallery/board/lists?id=vr_games_xuq'
# ํ๋ผ๋ฏธํฐ ์ค์
params = {'id' : 'vr_games_xuq'}
# ํค๋ ์ค์
headers = {
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding' : 'gzip, deflate, br',
'Connection' : 'keep-alive',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36',
}
resp = requests.get(url, params=params, headers=headers)
print(resp)
#print(resp.content)
try:
soup = BeautifulSoup(resp.content, 'html.parser')
contents = soup.find('tbody').find_all('tr')
#print(contents)
except:
print('Fail')
return 0
doc = []
# ํ ํ์ด์ง์ ์๋ ๋ชจ๋ ๊ฒ์๋ฌผ์ ๊ธ์ด์ค๋ ์ฝ๋
for i in contents:
print('-'*15)
# ์ ๋ชฉ ์ถ์ถ
title_tag = i.find('a')
title = title_tag.text
print("์ ๋ชฉ: ", title)
# ๊ธ์ด์ด ์ถ์ถ
writer_tag = i.find('td', class_='gall_writer ub-writer').find('span', class_='nickname')
if writer_tag is not None: # None ๊ฐ์ด ์์ผ๋ฏ๋ก ์กฐ๊ฑด๋ฌธ์ ํตํด ํํผ
writer = writer_tag.text
print("๊ธ์ด์ด: ", writer)
else:
print("๊ธ์ด์ด: ", "์์")
writer = '์์'
# ์ ๋์ด๋ ๊ณ ๋์ด ์๋ ๊ธ์ด์ด ์์ ์๋ ip ์ถ์ถ
ip_tag = i.find('td', class_='gall_writer ub-writer').find('span', class_='ip')
if ip_tag is not None: # None ๊ฐ์ด ์์ผ๋ฏ๋ก ์กฐ๊ฑด๋ฌธ์ ํตํด ํํผ
ip = ip_tag.text
print("ip: ", ip)
else:
ip = None
# ๋ ์ง ์ถ์ถ
date_tag = i.find('td', class_='gall_date')
date_dict = date_tag.attrs
if len(date_dict) is 2:
print("๋ ์ง: ", date_dict['title'])
date = date_dict['title']
else:
print("๋ ์ง: ", date_tag.text)
date = date_tag.text
pass
# ์กฐํ ์ ์ถ์ถ
views_tag = i.find('td', class_='gall_count')
views = views_tag.text
print("์กฐํ์: ", views)
# ์ถ์ฒ ์ ์ถ์ถ
recommend_tag = i.find('td', class_='gall_recommend')
recommend = recommend_tag.text
print("์ถ์ฒ์: ", recommend)
article = {'title': title, 'writer':writer, 'ip':ip, 'date':date, 'views':views, 'recommend':recommend}
doc.append(article)
for article in doc:
if article['writer'] == '์์ฌ๋น':
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
while(True):
get_doc()
time.sleep(6)
| true
|
ae821c002801d5c14276234b3603a2e8e62415e8
|
Python
|
olugboyegaonimole/machine_learning
|
/supervised learning/regression/Random Forest Regression/random_forest_regression.py
|
UTF-8
| 6,413
| 3.0625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 00:15:18 2018
@author: onimo
"""
### ### PLAN ### ###
### FIND ###
# import libraries
# load dataset
# database
# nosql
# csv
# spreadsheet
# web service
# web socket
# api
### EXPLORE ###
# summarize
# shape
# head
# describe
# groupby.size
# visualize
# univariate - box, histogram
# multivariate - scatter
### PREPARE ###
# clean
# missing
# invalid
# infinity
# duplicate
# outlier
# transform
# add features
# reduce dimensions
# aggregate features eg create aggregates to remove noise and variability
# disaggregate features eg from daily totals, segment into categories (oranges, apples, bananas) and create categorical totals (total yearly oranges, total yearly apples, total yearly bananas)
# encode features - label encode, one hot encode, dummy variable trap
# scale features
### ANALYSE ###
# CROSS VALIDATION
# extract features, extract target variable
# create train_test_split
# calculate (mean, std) and visualize (box plot) cross_val_score for a selection of relevant algorithms with a view to choosing the best algorithm to solve this problem
# using boxplot visualize cross_val_score array for each of the algorithms selected
# choose model with highest cross_val_score
# MODEL BOOSTING (define a function for this purpose)
# create gridsearchCV object
# fit to training data
# return object.best_estimator_
# REGRESSION
# create object (ONLY IF MODEL BOOSTING NOT USED ABOVE)
# fit (ONLY IF MODEL BOOSTING NOT USED ABOVE)
# call model boosting function (IF MODEL BOOSTING WAS USED ABOVE)
# predict
### REPORT ###
# test performance
# accuracy score
# confusion matrix
# classification report
# visualise results
# matplotlib
### ### IMPLEMENTATION ### ###
### FIND ###
# import libraries
import sys
import pandas as pd
import numpy
import sklearn
import scipy
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# load dataset
dataset = pd.read_csv('Position_Salaries.csv')
### EXPLORE ###
# summarize
print(dataset.shape)
print(dataset.head(10))
print(dataset.describe())
print(dataset.groupby('Level').size())
# visualize
dataset.plot(kind='box', subplots='True', layout=(2,2), sharex = False, sharey = False)
plt.show()
dataset.hist()
plt.show()
scatter_matrix(dataset)
plt.show()
### PREPARE ###
# clean
# missing
# invalid
# infinity
# duplicate
# outlier
# transform
# add features
# reduce dimensions
# aggregate features eg create aggregates to remove noise and variability
# disaggregate features eg from daily totals, segment into categories (oranges, apples, bananas) and create categorical totals (total yearly oranges, total yearly apples, total yearly bananas)
# encode features - label encode, one hot encode, dummy variable trap
# scale features
### ANALYSE ###
# CROSS VALIDATION
# extract features, extract target variable
X = dataset.iloc[:, 1:2 ].values
y = dataset.iloc[:, 2 ].values
""" OR
array = dataset.values
X = array[, ]
y = array[, ]
"""
# create train_test_split
from sklearn.model_selection import train_test_split
validation_size = 0.2
seed = 0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = validation_size, random_state = seed)
""" # calculate cross_val_score (mean, std) for a selection of algorithms
from sklearn import model_selection
estimators = []
estimators.append(("linear regression", LinearRegression()))
estimators.append((,))
cv_score_arrays = []
names = []
mean_cv_scores = []
seed = 7
scoring = 'accuracy'
best_estimator = ""
counter = 0
for name, estimator in estimators:
cross_validator = model_selection.KFold(n_splits = 10, random_state = 7)
cv_score_array = model_selection.cross_val_score(estimator, X_train, y_train, cv = cross_validator, scoring = 'accuracy')
names.append(name)
cv_score_arrays.append(cv_score_array)
mean_cv_scores.append(cv_score_array.mean())
msg = "%s: %f (%f)" % (name, cv_score_array.mean(), cv_score_array.std())
if cv_score_array.mean() > counter:
best_estimator = name
counter = cv_score_array.mean()
print(msg)"""
""" # using boxplot visualize cross_val_score array for each of the algorithms selected
figure1 = plt.figure()
figure1.suptitle()
ax = figure1.add_subplot(111)
plt.boxplot(cv_score_arrays)
ax.set_xticklabels(names)
plt.show()"""
""" # choose model with highest cross_val_score
print("choice of algorithm is {} with a mean cv_score of {}".format(best_estimator, max(mean_cv_scores)))
"""
# MODEL BOOSTING (define a function for this purpose)
# create gridsearchCV object
# fit to training data
# return object.best_estimator_
# REGRESSION
# create object (ONLY IF MODEL BOOSTING NOT USED ABOVE)
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 300, random_state =0)
# fit (ONLY IF MODEL BOOSTING NOT USED ABOVE)
regressor.fit(X_train, y_train)
# call model boosting function (IF MODEL BOOSTING WAS USED ABOVE)
# predict
y_predicted = regressor.predict (X_test)
### REPORT ###
# test performance
print(accuracy_score(y_test, y_predicted))
print(confusion_matrix(y_test, y_predicted))
print(classification_report(y_test, y_predicted))
# visualise results
# plt.scatter()
# plt.plot()
| true
|
b8bc713c5f760995eef235140af2fb0b72c2d4d4
|
Python
|
AndresBena19/rolly_interpreter
|
/XETLast/ast.py
|
UTF-8
| 8,045
| 2.6875
| 3
|
[] |
no_license
|
from __future__ import division
from datetime import datetime
from XETLlexer.tokens import DATE_FORMATS_VALUES
from uuid import uuid4
class Equality:
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
class Statement(Equality):
pass
class Aexp(Equality):
pass
class Bexp(Equality):
pass
class Sexp(Equality):
pass
class AssignStatement(Statement):
def __init__(self, name, aexp):
self.name = name
self.aexp = aexp
self.key = uuid4()
def __repr__(self):
return 'AssignStatement(%s, %s)' % (self.name, self.aexp)
def eval(self, env):
value = self.aexp.eval(env)
env[self.name] = value
class CompoundStatement(Statement):
def __init__(self, first, second):
self.first = first
self.second = second
self.key = uuid4()
def __repr__(self):
return 'CompoundStatement(%s, %s)' % (self.first, self.second)
def eval(self, env):
self.first.eval(env)
self.second.eval(env)
class IfStatement(Statement):
def __init__(self, condition, true_stmt, false_stmt):
self.condition = condition
self.true_stmt = true_stmt
self.false_stmt = false_stmt
self.key = uuid4()
def __repr__(self):
return 'IfStatement({}, {}, {})'.format(self.condition, self.true_stmt, self.false_stmt)
def eval(self, env):
condition_value = self.condition.eval(env)
if condition_value:
self.true_stmt.eval(env)
else:
if self.false_stmt:
self.false_stmt.eval(env)
class IntAexp(Aexp):
def __init__(self, value):
self.value = value
self.key = uuid4()
def __repr__(self):
return 'IntAexp({})'.format(self.value)
def eval(self, env):
return self.value
class DateAexp(Aexp):
def __init__(self, value, format=None):
self.value = value
self.format = format
self.key = uuid4()
def __repr__(self):
return 'DateAexp({})'.format(self.value)
def eval(self, env):
if self.format in env:
self.format = env[self.format]
if self.value in env:
self.value = env[self.i]
return datetime.strptime(self.value, DATE_FORMATS_VALUES.get(self.format))
class FloatAexp():
def __init__(self, value):
self.value = value
self.key = uuid4()
def __repr__(self):
return 'FloatAexp({})'.format(self.value)
def eval(self, env):
return self.value
class BoolAexp():
def __init__(self, value):
self.value = value
self.key = uuid4()
def __repr__(self):
return 'BoolAexp({})'.format(self.value)
def eval(self, env):
return self.value
class StringAexp():
def __init__(self, value):
self.value = value
self.key = uuid4()
def __repr__(self):
return 'StringAexp({})'.format(self.value)
def eval(self, env):
return self.value
class VarAexp(Aexp):
def __init__(self, value):
self.value = value
self.key = uuid4()
def __repr__(self):
return 'VarAexp({})'.format(self.value)
def eval(self, env):
if self.value in env:
return env[self.value]
else:
return 0
class BinopAexp(Aexp):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
self.key = uuid4()
def __repr__(self):
return 'BinopAexp({}, {}, {})'.format(self.op, self.left, self.right)
def eval(self, env):
left_value = self.left.eval(env)
right_value = self.right.eval(env)
if self.op == '+':
value = left_value + right_value
elif self.op == '-':
value = left_value - right_value
elif self.op == '*':
value = left_value * right_value
elif self.op == '/':
value = left_value / right_value
else:
raise RuntimeError('unknown operator: ' + self.op)
return value
class RelopBexp(Bexp):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def __repr__(self):
return 'RelopBexp({}, {}, {})'.format(self.op, self.left, self.right)
def eval(self, env):
left_value = self.left.eval(env)
right_value = self.right.eval(env)
if self.op == '<':
value = left_value < right_value
elif self.op == '<=':
value = left_value <= right_value
elif self.op == '>':
value = left_value > right_value
elif self.op == '>=':
value = left_value >= right_value
elif self.op == '=':
value = left_value == right_value
elif self.op == '!=':
value = left_value != right_value
else:
raise RuntimeError('unknown operator: ' + self.op)
return value
class AndBexp(Bexp):
def __init__(self, left, right):
self.left = left
self.right = right
self.key = uuid4()
def __repr__(self):
return 'AndBexp({}, {})'.format(self.left, self.right)
def eval(self, env):
left_value = self.left.eval(env)
right_value = self.right.eval(env)
return left_value and right_value
class OrBexp(Bexp):
def __init__(self, left, right):
self.left = left
self.right = right
self.key = uuid4()
def __repr__(self):
return 'OrBexp({}, {})'.format(self.left, self.right)
def eval(self, env):
left_value = self.left.eval(env)
right_value = self.right.eval(env)
return left_value or right_value
class NotBexp(Bexp):
    """Logical negation; evaluation errors are propagated unchanged."""

    def __init__(self, exp):
        self.exp = exp
        self.key = uuid4()

    def __repr__(self):
        return 'NotBexp({})'.format(self.exp)

    def eval(self, env, memorized=None):
        value = self.exp.eval(env, memorized)
        # An ErrorExpr result is passed through rather than negated.
        return value if isinstance(value, ErrorExpr) else not value
class SliceExpr(Sexp):
    """String-slice expression node: value[start:end].

    eval() is a stub (not implemented yet); it returns None.
    """
    def __init__(self, value, start, end):
        self.value = value
        self.start = start
        self.end = end
        self.key = uuid4()
    def __repr__(self):
        return 'SlicedSexp({})'.format(self.value)
    def eval(self, env, memorized=None):
        # TODO: implement slicing semantics.
        pass
class ConcatExpr(Sexp):
    """Concatenation of three string sub-expressions.

    eval() is a stub (not implemented yet); it returns None.
    """
    def __init__(self, string_1, string_2, string_3):
        self.string_1 = string_1
        self.string_2 = string_2
        self.string_3 = string_3
        self.key = uuid4()
    def __repr__(self):
        return 'ConcatSexp({})'.format(self.string_1)
    def eval(self, env, memorized=None):
        # TODO: implement concatenation semantics.
        pass
    def transform_number(self, value):
        """Normalize a numeric-looking value.

        Whole floats become int; other floats are returned as their
        str(); values that cannot be parsed as float are returned
        unchanged.

        NOTE(review): the non-integer branch returns a *string*, not
        a float — confirm callers expect that.
        """
        try:
            value_decimal = float(value)
            if value_decimal.is_integer():
                return int(value_decimal)
            else:
                return str(value_decimal)
        except Exception as e:
            return value
class SplitExtractExpr(Sexp):
    """Split *data_text* on *simbol* and extract segment *segment*.

    eval() is a stub (not implemented yet); it returns None.
    """
    def __init__(self, data_text, simbol, segment):
        self.data_text = data_text
        self.simbol = simbol
        self.segment = segment
        self.key = uuid4()
    def __repr__(self):
        return 'SplitExtractExpr({})'.format(self.data_text)
    def eval(self, env, memorized=None):
        # TODO: implement split/extract semantics.
        pass
class LenExpr(Sexp):
    """String-length expression node.

    eval() is a stub (not implemented yet); it returns None.
    """
    def __init__(self, value):
        self.value = value
        self.key = uuid4()
    def __repr__(self):
        return 'LenSexp({})'.format(self.value)
    def eval(self, env, memorized=None):
        # TODO: implement length semantics.
        pass
class ErrorExpr(Sexp):
    """Sentinel node representing an evaluation error.

    Other nodes (e.g. NotBexp.eval) test for this type and propagate
    it instead of operating on it. eval() is a stub.
    """
    def __init__(self, value):
        self.value = value
        # Tag used to distinguish error values downstream.
        self.type = "ERROR"
        self.key = uuid4()
    def __repr__(self):
        return 'ErrorSexp({})'.format(self.value)
    def eval(self, env, memorized=None):
        # An error evaluates to nothing further.
        pass
| true
|
3f430e64e1c86f07c721293e3d3f46fb8a0c36d0
|
Python
|
shuaiweixiaozi/sklearn_examples_experiment
|
/data_clear/nan_value_clear/nan_value_clear.py
|
UTF-8
| 1,175
| 3.6875
| 4
|
[] |
no_license
|
from io import StringIO
import pandas as pd
csv_data = '''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
0.0,11.0,12.0,'''
csv_data = pd.read_csv(StringIO(csv_data))
# isnull() yields a boolean DataFrame flagging each missing element
# (True where the value is NaN); sum() then counts the missing
# values per column.
print(csv_data.isnull().sum())
# Drop rows that contain any NaN element.
print(csv_data.dropna())
# Drop columns that contain any NaN element.
print(csv_data.dropna(axis=1))
# ๅชๅปๆ้ฃไบๆๆๅผไธบnan็่ก
print(csv_data.dropna(how='all'))
# ๅปๆ้ฃไบ้็ผบๅคฑๅผๅฐไบ4ไธช็่ก
print(csv_data.dropna(thresh=4))
# ๅปๆ้ฃไบๅจ็นๅฎๅๅบ็ฐnan็่ก
print(csv_data.dropna(subset=['C']))
# ไฝฟ็จๅๅผๆฟไปฃ็ผบๅคฑๅผ
from sklearn.preprocessing import Imputer
# axis=0 computes the statistic per column (feature); strategy may
# also be 'median' or 'most_frequent' — the latter is the useful
# choice for categorical data.
imr = Imputer(missing_values='NaN', strategy='mean', axis=0)
imr = imr.fit(csv_data)
imputed_data = imr.transform(csv_data)
print(imputed_data)
| true
|
28b7d4331021833857dabd53d23a91ae0f0bc3db
|
Python
|
sunnyyeti/Leetcode-solutions
|
/1320 Minimum Distance to Type a Word Using Two Fingers.py
|
UTF-8
| 2,833
| 3.984375
| 4
|
[] |
no_license
|
# You have a keyboard layout as shown above in the XY plane, where each English uppercase letter is located at some coordinate, for example, the letter A is located at coordinate (0,0), the letter B is located at coordinate (0,1), the letter P is located at coordinate (2,3) and the letter Z is located at coordinate (4,1).
# Given the string word, return the minimum total distance to type such string using only two fingers. The distance between coordinates (x1,y1) and (x2,y2) is |x1 - x2| + |y1 - y2|.
# Note that the initial positions of your two fingers are considered free so don't count towards your total distance, also your two fingers do not have to start at the first letter or the first two letters.
# Example 1:
# Input: word = "CAKE"
# Output: 3
# Explanation:
# Using two fingers, one optimal way to type "CAKE" is:
# Finger 1 on letter 'C' -> cost = 0
# Finger 1 on letter 'A' -> cost = Distance from letter 'C' to letter 'A' = 2
# Finger 2 on letter 'K' -> cost = 0
# Finger 2 on letter 'E' -> cost = Distance from letter 'K' to letter 'E' = 1
# Total distance = 3
# Example 2:
# Input: word = "HAPPY"
# Output: 6
# Explanation:
# Using two fingers, one optimal way to type "HAPPY" is:
# Finger 1 on letter 'H' -> cost = 0
# Finger 1 on letter 'A' -> cost = Distance from letter 'H' to letter 'A' = 2
# Finger 2 on letter 'P' -> cost = 0
# Finger 2 on letter 'P' -> cost = Distance from letter 'P' to letter 'P' = 0
# Finger 1 on letter 'Y' -> cost = Distance from letter 'A' to letter 'Y' = 4
# Total distance = 6
# Example 3:
# Input: word = "NEW"
# Output: 3
# Example 4:
# Input: word = "YEAR"
# Output: 7
# Constraints:
# 2 <= word.length <= 300
# Each word[i] is an English uppercase letter.
class Solution:
    def minimumDistance(self, word: str) -> int:
        """Minimum total finger travel to type *word* on the 6-column
        A..Z keyboard, using two fingers whose starting positions are
        free (distance is Manhattan)."""
        self.cache = {}

        def coord(ch):
            # Letters are laid out row-major, six per row.
            return divmod(ord(ch) - ord('A'), 6)

        def best(finger1, finger2, idx):
            # finger1/finger2: letters currently under each finger;
            # idx: index of the next character to type.
            if idx == len(word):
                return 0
            if finger1 > finger2:
                # Normalize so the memo key is order-independent.
                finger1, finger2 = finger2, finger1
            state = (finger1, finger2, idx)
            if state in self.cache:
                return self.cache[state]
            target = word[idx]
            tr, tc = coord(target)
            r1, c1 = coord(finger1)
            r2, c2 = coord(finger2)
            via_first = abs(r1 - tr) + abs(c1 - tc) + best(target, finger2, idx + 1)
            via_second = abs(r2 - tr) + abs(c2 - tc) + best(target, finger1, idx + 1)
            result = min(via_first, via_second)
            self.cache[state] = result
            return result

        # The second finger's free start is modeled by trying every
        # distinct letter of the word as its initial position.
        return min(best(word[0], ch, 0) for ch in set(word))
| true
|
51002c6c090d96eb31eb3e1ec55e6728557c8918
|
Python
|
kazuhumikobayashi/tp-paperwork
|
/application/domain/model/immutables/status.py
|
UTF-8
| 1,263
| 2.90625
| 3
|
[] |
no_license
|
from enum import Enum
class Status(Enum):
    """Contract status lifecycle (display labels below are Japanese):
    start -> placed -> received -> done, with `failure` (99) for a
    lost deal."""
    start = 1
    placed = 2
    received = 3
    done = 4
    failure = 99
    @property
    def name(self):
        # Overrides Enum's built-in `name` to return a human-readable
        # display label instead of the member identifier.
        if self._value_ == self.start.value:
            return '01:ๅฅ็ด้ๅง'
        elif self._value_ == self.placed.value:
            return '02:็บๆณจๅฎไบ'
        elif self._value_ == self.received.value:
            return '03:ๅๆณจๅฎไบ'
        elif self._value_ == self.done.value:
            return '04:ๅฅ็ดๅฎไบ'
        else:
            # Any other value (i.e. failure) falls through here.
            return '99:ๅคฑๆณจ'
    @staticmethod
    def get_status_for_select():
        # Single-select form choices: empty option first, then all members.
        ret = [('', '')]
        type_list = Status.get_status_for_multi_select()
        ret.extend(type_list)
        return ret
    @staticmethod
    def get_status_for_multi_select():
        # (value-as-string, display-label) pairs for every member.
        return [(str(status.value), status.name) for status in Status]
    @staticmethod
    def parse(value):
        """Return the member whose value equals *value* (accepts str
        or int); None when nothing matches."""
        if isinstance(value, str):
            try:
                value = int(value)
            except ValueError:
                pass
        for status in Status:
            if status.value == value:
                return status
        return None
    def is_done(self):
        # True only for the `done` member.
        return self == Status.done
    def __str__(self):
        return str(self._value_)
| true
|
29d36678f5df361bfa4dca642c81d88a0f91a8b7
|
Python
|
shaunrong/image-fun
|
/colorHistograms/grayScale.py
|
UTF-8
| 491
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import cv2
import matplotlib.pyplot as plt

__author__ = 'Shaun Rong'
__version__ = '0.1'
__maintainer__ = 'Shaun Rong'
__email__ = 'rongzq08@gmail.com'

# Load the image, display it, convert to grayscale, and plot the
# grayscale intensity histogram.
image = cv2.imread('grant.jpg')
cv2.imshow('image', image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)

# 256 bins spanning the full 8-bit intensity range.
hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
# Bug fix: `print hist` is Python-2-only syntax; print(hist) behaves
# identically for a single argument on both Python 2 and Python 3.
print(hist)
plt.figure()
plt.title('Grayscale Histogram')
plt.xlabel('Bins')
plt.ylabel('# of Pixels')
plt.plot(hist)
plt.xlim([0, 256])
plt.show()
| true
|
354c8b70812e06e8d16f31e6c2add4b4ecaefcff
|
Python
|
maluethi/laser_plot
|
/plot_test_template.py
|
UTF-8
| 294
| 2.9375
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np

# Apply the thesis-wide matplotlib style sheet from the working dir.
plt.style.use('./mythesis.mplstyle')

# One full period sampled at 100 points.
x = np.linspace(0,2*np.pi, 100)
# NOTE(review): `y` is computed but never used below.
y = np.sin(x)

# Two stacked subplots sharing the same x range.
fig, ax = plt.subplots(2, 1)
ax[0].plot(x, np.sin(x))
ax[1].plot(x, np.cos(x))
ax[1].set_xlabel('Phase')
for a in ax:
    a.set_ylabel('Ampli')
plt.show()
| true
|
e2e65a94b402a76356404975a85245e69f462356
|
Python
|
mrliuzhao/OpenCVNotebook-Python
|
/22TemplateMatch.py
|
UTF-8
| 1,590
| 2.9375
| 3
|
[] |
no_license
|
import cv2
import numpy as np
img = cv2.imread(r".\resources\dog1.jpg", cv2.IMREAD_COLOR)
template = cv2.imread(r".\resources\dog1Face.png", cv2.IMREAD_COLOR)
h, w = template.shape[:2] # rows->h, cols->w
# ๅจๅพ็ไธ่ฟ่กๆจกๆฟๅน้
ใๅน้
่ฟ็จ็ฑปไผผValidๆจกๅผ็ๅท็งฏ๏ผๅณๅฐๆจกๆฟไฝไธบๆ ธๅจๅพ็ไธ่ฟ่กๆปๅจ๏ผไฝฟ็จไธๅๆนๆณ่ฎก็ฎๆปๅจ็ชๅฃไธๅๅพไธๆจกๆฟ็็ธไผผๅบฆ๏ผๅ
ทไฝ่ฎก็ฎๆนๆณๆๅค็ง๏ผ
# TM_SQDIFFๆนๆณ๏ผๅๅพไธๆจกๆฟๆฏไธชๅ็ด ็ๅนณๆนๅทฎไนๅ๏ผ่ฟๅ็ๅผ่ถๅฐๅน้
็จๅบฆ่ถๅฅฝ๏ผ
# TM_CCORRๆนๆณ๏ผๅๅพไธๆจกๆฟๆฏไธชๅ็ด ็ไน็งฏไนๅ๏ผ่ฟๅๅผ่ถๅคง่กจๆๅน้
็จๅบฆ่ถๅฅฝ๏ผ
# TM_CCOEFFๆนๆณ๏ผๅๅพไธๆจกๆฟไน้ด็็ธๅ
ณ็ณปๆฐ๏ผ่ฟๅๅผๅจ-1,1ไน้ด๏ผ่ถๅคงๅ่ฏฅๅบๅ่ถ็ธไผผ
# ไธ็งๆนๆณ้ฝๆๅฏนๅบ็ๅฝไธๅๆนๆณ๏ผๅณๅฐ็ปๆๅฝไธๅ่ณ[0,1]ๅบ้ด
# ็ฑไบ็ฑปไผผValidๆจกๅผ็ๅท็งฏ๏ผๅ ๆญคๅพ็ๅคงๅฐไธบ(W,H)๏ผๆจกๆฟๅคงๅฐไธบ(w,h)๏ผ่ฟๅ็็ฉ้ตๅคงๅฐไธบ(W-w+1, H-h+1)
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF)
# minMaxLoc finds the extreme values of the match-result matrix and
# their positions.
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# For TM_CCOEFF the maximum's position is the top-left corner of the
# matched region in the source image.
left_top = max_loc
right_bottom = (left_top[0] + w, left_top[1] + h)
cv2.rectangle(img, left_top, right_bottom, color=(0, 255, 255), thickness=2)
tempImg = np.zeros(img.shape, np.uint8)
tempImg[:template.shape[0], :template.shape[1], :] = template
cv2.imshow('Image Template Match', cv2.hconcat((img, tempImg)))
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
9dfb635cf74278a19764867bdb80c8a9a3de178a
|
Python
|
decebel/dataAtom_alpha
|
/bin/plug/py/sources/weby/WikipediaDataCommand.py
|
UTF-8
| 2,608
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import os, sys
basePlugPath = os.path.join("..", "..")
sys.path.insert(0, os.path.join(basePlugPath, "api"))
sys.path.insert(0, os.path.join(basePlugPath, "external"))
# TODO - know difference between module import vs package import?
import drawingboard
from pattern.web import Wikipedia
import pprint
pp = pprint.PrettyPrinter(indent=4)
print sys.modules["pattern.web"]
#print sys.modules["DataCommand"]
#pp.pprint(sys.modules)
class WikipediaDataCommand(drawingboard.DataCommand):
    """DataCommand wrapper around pattern.web's Wikipedia engine.

    NOTE(review): this class uses Python 2 print statements; it will
    not run under Python 3 as written.
    """
    def __init__(self, **kwargs):
        drawingboard.DataCommand.__init__(self, **kwargs)
        print "init called "
        #self.drawingboard.DataCommand
        # Holds per-command state; the search engine is stored here
        # by start().
        self.args = {}
    def load_resources(self, **kwargs):
        """sets initial status of loading icon. then loads the icon. then sets various other things and
        as it does this, it will keep calling status message.
        """
        print "loading "
        #self.set_icon("GUI/icons/blogger.png")
        # trying to figure out the icon to use based on display name
        self.set_display_name("blogger")
        self.set_initialized(True)
    #def start(self, **kwargs):
    def start(self, **kwargs):
        """Configures the command.
        - sets the display name of the command
        - sets initial status string
        - sets a default icon - NO. Default Icon setup should happen well before this stage. Maybe a load api.
        - sets is_initialized to return true, once all is well. TODO: Should we check for a net connection?
        Note: all these arguments can also be set by callin set_params with key=value pairs.
        is_initialized will return true when all the required argum(ents are ready
        """
        #self.set_display_name(self, kwargs.get("name", "wikipedia"))
        # English-language Wikipedia engine from pattern.web.
        self.args["engine"] = Wikipedia(language="en")
    def set_params(self, **kwargs):
        # Stub: parameters are currently passed directly to execute().
        pass
    def get_params(self, **kwargs):
        # Stub.
        pass
    #def submit_command(self, port, **commandArgs):
    def execute(self, **commandArgs):
        """Run a Wikipedia search for commandArgs['search'].

        NOTE(review): everything after `return "skip skip"` below is
        unreachable — the real search is short-circuited, presumably
        left in for debugging.
        """
        searchString = commandArgs.get("search", "life of pi") #from:decebel (from:username is also supported)
        print("searching for {0}: ").format(searchString)
        timeout = commandArgs.get("timeout", 25)
        cached = commandArgs.get("cached", False)
        engine = self.args["engine"]
        return "skip skip"
        article = engine.search(searchString, cached=cached, timeout=timeout)
        print article.title
        for s in article.sections:
            print s.title.upper()
            print
            print s.content
            print
        return article.title
def main():
    # Smoke test: build the command, start it, and run one search
    # (currently short-circuited inside execute()).
    wp = WikipediaDataCommand(dummy="dummy")
    wp.start(en="english")
    res = wp.execute(search="Like of Pi")
    #for k in res:
    #    print "key={0}".format(k)
    pp.pprint(res)
if __name__ == '__main__':
main()
| true
|
b7912fd6a64924a429a419fda080a58c92ce5d00
|
Python
|
wooloba/LeetCode861Challenge
|
/650. 2 Keys Keyboard.py
|
UTF-8
| 685
| 3.328125
| 3
|
[] |
no_license
|
####################
# Yaozhi Lu #
# Aug 19 2018 #
####################
# Origin: https://leetcode.com/problems/2-keys-keyboard/description/
class Solution(object):
    def minSteps(self, n):
        """Fewest copy-all/paste operations to reach exactly n 'A's
        starting from a single 'A'.

        :type n: int
        :rtype: int
        """
        if n == 0:
            return n
        # steps[v-1] = minimum operations to produce v 'A's.
        # Worst case for any v: copy once then paste v-1 times.
        steps = list(range(1, n + 1))
        steps[0] = 0  # one 'A' is already on screen
        for i in range(1, n):
            value = i + 1
            # Find the largest proper divisor > 1: build `value` by
            # copying a block of `divisor` characters and pasting it
            # value // divisor - 1 more times (copy counts as one op).
            for divisor in range(i - 1, 1, -1):
                if value % divisor == 0:
                    steps[i] = steps[divisor - 1] + value // divisor
                    break
        return steps[n - 1]
def main():
    """Smoke test: prints 7 (10 = 2 * 5, so 2 + 5 operations)."""
    so = Solution()
    # Bug fix: `print so.minSteps(10)` is Python-2-only syntax;
    # print(...) works identically on Python 2 and 3 for one argument.
    print(so.minSteps(10))
| true
|
6d1ff823af557831a46afed102666c564448f626
|
Python
|
Hubert-Guzowski/RealTimePoseEstimation
|
/src/mesh.py
|
UTF-8
| 1,153
| 3
| 3
|
[] |
no_license
|
from csv_reader import CsvReader
import numpy as np
class Triangle:
    """A triangle described by its three vertex coordinate arrays."""

    def __init__(self, V0: np.ndarray, V1: np.ndarray, V2: np.ndarray):
        # Store the vertices in declaration order.
        self.V0, self.V1, self.V2 = V0, V1, V2
class Ray:
    """A ray/segment described by two endpoint coordinate arrays."""

    def __init__(self, P0: np.ndarray, P1: np.ndarray):
        # Store the endpoints in declaration order.
        self.P0, self.P1 = P0, P1
class Mesh:
    """Triangle mesh: a vertex array plus a triangle index list,
    loadable from a PLY file through CsvReader."""

    def __init__(self, num_vertex=0, num_triangles=0, list_vertex=None, list_triangles=None):
        # None defaults avoid the shared-mutable-default pitfall;
        # materialize fresh empty lists per instance.
        self.num_vertex = num_vertex
        self.num_triangles = num_triangles
        self.list_vertex = [] if list_vertex is None else list_vertex
        self.list_triangles = [] if list_triangles is None else list_triangles

    def load(self, path):
        """Populate the mesh from the PLY file at *path*."""
        reader = CsvReader(path)
        vertices, triangles = reader.read_ply()
        # Convert to numpy arrays and cache the counts.
        self.list_vertex = np.array(vertices)
        self.list_triangles = np.array(triangles)
        self.num_vertex = len(self.list_vertex)
        self.num_triangles = len(self.list_triangles)
# M = Mesh()
# M.load('test.ply')
# print(M.list_vertex)
# print(M.list_triangles)
| true
|
f1f3ee8f560fe8cf1a419c6742335dea133ca1c2
|
Python
|
MitsurugiMeiya/Leetcoding
|
/้ฎ้ข/bishi.py
|
UTF-8
| 236
| 2.921875
| 3
|
[] |
no_license
|
import sys

if __name__ == "__main__":
    # NOTE(review): the original comment said "read n from the first
    # line", but the loop actually collects *every* stripped stdin
    # line into `data`, stopping at the first blank line or EOF.
    data =[]
    while True:
        line = sys.stdin.readline().strip()
        if not line:
            break
        data.append(line)
| true
|
c81b3c09cedfcda0476684b9fe1c19de3cd85282
|
Python
|
Gedanke/graduationProject
|
/data/dealData/car.py
|
UTF-8
| 695
| 2.796875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from core.dealData import *
"""
Convert the txt file at original_path (split on `separator`, columns
named by attribute_name, label column included) into a csv in the same
directory, using the TransformData class.
"""
original_path = "../originalDataSet/car/car.txt"
separator = " "
attribute_name = [
"buying", "maint", "doors", "persons", "lug_boot", "safety", "Label"
]
def fun1():
    """Run the txt-to-csv conversion once via TransformData.

    Uses the module-level original_path / separator / attribute_name.
    """
    t = TransformData(original_path, separator, attribute_name)
    # Either mine_deal() or standard_data() works here (per original note).
    t.mine_deal()
if __name__ == "__main__":
''''''
# fun1()
| true
|
be022b2e43b7fc0dfbf1ac314df17a06d877071b
|
Python
|
manuelpepe/FacebookWorkPoster
|
/FacebookWorkPoster_en.py
|
UTF-8
| 4,139
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
import os
import time
import fb
# You can get a token on https://developers.facebook.com/tools/explorer
TOKEN = 'YourToken'
def is_valid_line(line):
    """A projects.txt line is valid unless it is a comment ('//') or
    starts with a stray ':' separator."""
    return not line.startswith(('//', ':'))
def read_projects():
    """Open projects.txt and return the list of project names.

    Lines have the form 'name: hours'; comment ('//') and ':'-prefixed
    lines are skipped.

    Fix: the original tested `line != ''`, which never matched because
    readlines/iteration keeps the trailing newline — blank lines
    produced empty project names. `line.strip()` filters them properly.
    """
    with open('projects.txt', 'r') as f:
        return [line.split(':')[0] for line in f
                if line.strip() and is_valid_line(line)]
def print_projects(projects):
    """Display the project list, one numbered line per project,
    framed by blank lines."""
    print()
    for position, project_name in enumerate(projects):
        print("{0} - {1}".format(position, project_name))
    print()
def save_progress(project_name, time):
    """Add *time* hours to *project_name* in projects.txt.

    Rewrites the matching 'name: hours' line in place and returns the
    data share_facebook needs: {'name': ..., 'total': ...}.

    Fixes over the original (which crashed on every call):
    - the loop body used an undefined variable ``i`` (NameError);
    - membership was tested against the whole ``content`` list
      instead of the current line, so no line ever matched;
    - the file was rewritten without truncate() after seek(0);
    - '\\n'.join on lines that already end in '\\n' doubled newlines.

    Raises ValueError if the project is not present in the file.
    """
    line_no = None
    total_hours = 0
    with open('projects.txt', 'r+') as f:
        content = f.readlines()
        for index, line in enumerate(content):
            if is_valid_line(line) and project_name in line:
                total_hours = int(line.split(':')[1].strip()) + time
                # Keep the trailing newline so following lines stay intact.
                content[index] = "{0}: {1}\n".format(project_name, total_hours)
                line_no = index
        if line_no is None:
            raise ValueError("project {0!r} not found in projects.txt".format(project_name))
        f.seek(0)
        f.writelines(content)
        f.truncate()
    print("Project '{0}' saved in line {1}. Total hours: {2}".format(project_name, line_no, total_hours))
    return {'name': project_name,
            'total': total_hours}
def share_facebook(data):
    """Publish the progress message to the user's facebook feed.

    data: dict with 'hours', 'name' and 'total' keys (see save_progress).
    """
    msg = 'I\'ve been working in the {1} project for the last {0} hours.\n I worked {2} hours in this project!'.format(data['hours'], data['name'], data['total'])
    facebook = fb.graph.api(TOKEN)
    facebook.publish(cat = 'feed', id = 'me', message = msg)
    # Bug fix: the original used '\n %' % msg, which raises TypeError
    # ("not all arguments converted during string formatting");
    # the conversion specifier '%s' was missing.
    print('\n %s' % msg)
    print(' Shared! \n')
def main():
    # NOTE(review): leading indentation was lost in this copy of the
    # file; the nesting below is a reconstruction of the most plausible
    # structure — verify against the original before relying on it.
    exit = False
    print("Welcome to Facebook Simple Work Poster")
    print("\nYour projects are:")
    projects = read_projects()
    while not exit:
        print_projects(projects)
        project_selection = input('In with project are you going to work? (number): ')
        project_name = projects[int(project_selection)]
        user_input = input('Do you want to work on "{0}" ? (Y/n): '.format(project_name)).strip().lower()
        if user_input == 'y' or user_input == 'yes':
            print("The counter has started! Get to work now!")
            start_time = time.time()
            # Wait until the user types 'stop'; the waiting time itself
            # is still counted (the timer keeps running).
            while not exit:
                user_input = input('When you want to stop just write stop (don\'t worry, i won\'t count that time): ').strip().lower()
                if user_input == 'stop':
                    exit = True
            elapsed = time.gmtime(time.time() - start_time)
            elapsed_hours = elapsed.tm_hour
            elapsed_minutes = elapsed.tm_min
            # Round half-hours up to a full hour.
            if elapsed_minutes >= 30:
                elapsed_hours += 1
            print("You have been working in this project {0} hours.".format(elapsed_hours))
            user_input = input('Do you want to save your progress? (Y/n): ').strip().lower()
            if user_input == 'y' or user_input == 'yes':
                facebook_data = save_progress(project_name, elapsed_hours)
                user_input = input('Want to share the progress on facebook? (Y/n): ').strip().lower()
                if user_input == 'y' or user_input == 'yes':
                    facebook_data['hours'] = elapsed_hours
                    share_facebook(facebook_data)
                exit = True
            elif user_input == 'n' or user_input == 'no':
                print('Ok!')
                exit = True
            else:
                print('Command not valid.')
        else:
            # User declined this project; offer to pick another one.
            exit = False
            user_input = input('Do you want to select another project? (Y/n): ').strip().lower()
            exit = (user_input == 'n' or user_input == 'no')
    print("Have a good day!")
if __name__ == '__main__':
main()
| true
|
8b82a39aa2900f064ec79da0f17fc9d63ef929bb
|
Python
|
wlgn123/pcap_parser
|
/python/pcap_parser.py
|
UTF-8
| 22,862
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# os ํจํค์ง
import os
# ์์คํ
ํจํค์ง
import sys
# ํ์ด์ฌ ์๊ท๋จผํธ๋ฅผ ์ํด์ฌ์ฉ
import argparse
# ์์ผ ํต์ ๋ชจ๋ ๋ถ๋ฌ์ค๊ธฐ
from socket import AF_INET, socket, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
# ํ
์คํธ ํฌ๋งท ๊ด๋ จ import
from TextFormat import bcolors, MENU_PRINT_FORMAT, TITLE_PRINT_FORMAT
# Pcap ํด๋์ค
from Pcap import Pcap, PcapPacketHeader, PcapPacketData
import traceback
from tqdm import tqdm
# In[245]:
# ์์ผ ์๋ฒ ํด๋์ค(๋ฆฌ์๋ฒ)
class SocketServer:
sock = None
ip = None
connected = False
def __init__(self, ip = None, port=59595):
try:
if(ip is None):
print("IP๋ฅผ ์
๋ ฅํด์ฃผ์ธ์.")
else:
self.ip = ip
# socket ์ค์
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((self.ip, port))
sock.settimeout(60)
self.sock = sock
print("ํต์ ๋๊ธฐ๋ฅผ ์ํ ์์ผ์ด {}:{} ๋ฅผ ํตํด ์ด๋ ธ์ต๋๋ค.".format(self.ip, port))
except Exception:
print("์์ผ์ด ์ข
๋ฃ๋์์ต๋๋ค.")
def connect(self):
try:
client = None
self.sock.listen(0)
print("CONNECT WAIT...")
client, addr = self.sock.accept()
client.settimeout(60)
print("CONNECTED FROM {}".format(addr))
self.connected = True
return client
except Exception:
print("์๋ชป๋ ์ฃผ์์
๋๋ค.")
self.connected = False
return None
def send(self, client, data):
client.sendall(data.encode())
def wait_pcap(self):
file_pcap = Pcap(None)
client = self.connect()
if(not(self.connected)):
return
# ๋ฐ์ json ํ์ผ์ ์ ์ฅํ ํ์ผ๋ช
file_name = None
try:
BUF_SIZE = 1024
command = client.recv(4)
if(command == b'FILE'):
data = client.recv(BUF_SIZE)
file_name = data.decode('utf-8')
if(b'{' in data):
file_name = file_name.split('[')[0]
data = data[len(file_name):]
else:
data = b''
print("## " + file_name)
file_name = "recived_"+ file_name
with open(file_name, 'wb') as f:
f.write(data)
while True:
data = client.recv(BUF_SIZE)
if not data:
break
if(b'EOF' in data):
f.write(data[:-3])
break
f.write(data)
print("ํ์ผ ์์ ์ฑ๊ณต - ์ ์ฅ๋ ํ์ผ๋ช
: {}".format(file_name))
file_pcap.json_to_pcap(file_name=file_name)
file_pcap.loaded = True
client.send(b"EOF")
check = int(client.recv(1))
if(check == 1):
# prograss Bar๋ฅผ ์ํ ๊ฐ์ฒด
pbar = tqdm(total=file_pcap.cnt)
with open(file_name, 'r') as f:
line = f.readlines()
count = 1
un_match_cnt = 0
un_match_index = []
while(True):
count = count + 1
recv = client.recv(10)
if(b'EOF' in recv):
break
packet_len = int(recv.strip())
recv_json = client.recv(packet_len).decode('utf-8')
now_json = line[count]
while(len(recv_json) < packet_len):
recv_json += client.recv(packet_len- len(recv_json)).decode('utf-8')
recv_header = PcapPacketHeader()
recv_header.json_to_obj(recv_json)
recv_data = PcapPacketData(recv_header.incl_len)
recv_data.json_to_obj(recv_json)
now_header = PcapPacketHeader()
now_header.json_to_obj(now_json)
now_data = PcapPacketData(now_header.incl_len)
now_data.json_to_obj(now_json)
header_check_result, header_check_dict = now_header.get_diff(recv_header)
data_check_result, data_check_dict = now_data.get_diff(recv_data)
if(header_check_result or data_check_result):
un_match_cnt = un_match_cnt + 1
un_match_index.append(count-1)
client.send(b"1")
self.send(client, "{:<10}".format(len(line[count])))
self.send(client, line[count])
recv_header.print_info()
if(header_check_result):
now_header.print_info()
for key in header_check_dict:
if(header_check_dict[key]):
print("### {0} ๋ถ์ผ์น ###".format(key))
if(data_check_result):
recv_data.print_info()
now_data.print_info()
for key in data_check_dict:
if(data_check_dict[key]):
print("### {0} ๋ถ์ผ์น ###".format(key))
continue_check = client.recv(1)
if(continue_check == b'0'):
# EOF ์ ํธ๋ฐ๊ธฐ
EOF = client.recv(3)
break
else:
client.send(b"0")
pbar.update()
print("###########################################")
print("### ํจํท ๋น๊ต ๊ฒฐ๊ณผ ###")
print("ํจํท ๊ฐฏ์ {}, ๋ถ์ผ์น ํจํท {} , ๋ถ์ผ์น ํจํท ๋ฒํธ {}".format(count-2, un_match_cnt, un_match_index))
return file_pcap
except ConnectionAbortedError as e:
print("์ฐ๊ฒฐ ์ค๋จ")
print(str(e))
except ConnectionRefusedError as e:
print("์ฐ๊ฒฐ ๋์ค ๋ฌธ์ ๊ฐ ๋ฐ์ํ์ต๋๋ค. : ConnectionRefusedError")
print(str(e))
except ConnectionResetError as e:
print("์ฐ๊ฒฐ์ด ์ด๊ธฐํ ๋์์ต๋๋ค. : ConnectionResetError")
print(str(e))
except ConnectionError:
print("์ฐ๊ฒฐ ๋์ค ๋ฌธ์ ๊ฐ ๋ฐ์ํ์ต๋๋ค. : ConnextionError")
print(str(e))
except Exception as e:
print(str(e))
print(traceback.format_exc())
finally:
client.close()
self.sock.close()
self.connected = False
return file_name
# ์์ผ ํด๋ผ์ด์ธํธ ํด๋์ค(์ผ๋)
class SocketClient:
sock = None
connected = False
def __init__(self, host, port=59595):
    """Sender side of the pcap-comparison link.

    host/port: peer address; the TCP socket is created here but the
    connection itself happens in connect().
    """
    sock = socket(AF_INET, SOCK_STREAM)
    self.host = host
    self.port = port
    self.sock = sock
def connect(self):
try:
print("WAIT CONNECTION")
self.sock.connect((self.host, self.port))
self.connected = True
print("CONNECTION SUCCESS")
except Exception as e:
print("์ฐ๊ฒฐํ ์ ์๋ ์ฃผ์์
๋๋ค.")
def send(self, data):
    # Push the whole string out as UTF-8 (str.encode default);
    # silently does nothing if connect() has not succeeded yet.
    if(self.connected):
        self.sock.sendall(data.encode())
def send_file(self, file_name):
if(self.connected):
try:
# ๋ฒํผ ์ฌ์ด์ฆ ์ง์
BUF_SIZE = 1024
# ํ์ผ ์ ์ก ์ ํธ ๋ณด๋ด๊ธฐ
self.send("FILE")
# ํ์ผ ์ด๋ฆ ์ ์ก
self.send(file_name)
# ํ์ผ ์ด๊ธฐ
f = open(file_name, 'rb')
l = f.read(BUF_SIZE)
# ํ์ผ์ ๋ด์ฉ์ ๋ฒํผ์ฌ์ด์ฆ ๋งํผ ๋ฐ๋ณต ํต์ , EOF(End Of File)์ผ ๊ฒฝ์ฐ ๋ฐ๋ณต๋ฌธ ์ข
๋ฃ
while(l):
self.sock.send(l)
l = f.read(BUF_SIZE)
# ํ์ผ ์ ์ก ์ข
๋ฃ ์ ํธ ๋ณด๋ด๊ธฐ
self.sock.send(b"EOF")
# ์๋ฒ์ธก์ผ๋ก๋ถํฐ ํ์ผ์ ์ก์ด ์๋ฃ๋์๋์ง ์ ํธ ๋ฐ๊ธฐ
eof = self.sock.recv(BUF_SIZE)
# ์๋ฒ์ธก์ผ๋ก๋ถํฐ ํ์ผ์ ์ ์๋ฃ ์ ํธ๊ฐ ์ ๋์ฐฉํ๋์ง ํ์ธ
if(eof == b'EOF'):
print("ํ์ผ ์ก์ ์ฑ๊ณต")
else:
print("ํ์ผ ์ก์ ์คํจ, ๋ค์ ์๋ํด์ฃผ์ธ์.")
raise Exception()
check = True
while(True):
select = input("ํจํท ๋น๊ต 1, ํต์ ์ข
๋ฃ 0 : ")
if(select == "1"):
break
elif(select == "0"):
check = False
break
else:
print("์๋ชป๋ ์
๋ ฅ์
๋๋ค. ๋ค์ ์
๋ ฅํด์ฃผ์ธ์.")
continue
if(not(check)):
self.send("0")
else:
self.send("1")
with open(file_name, 'r') as f:
line = f.readlines()
count = 1
un_match_cnt = 0
un_match_index = []
for i in range(2, len(line)-1):
count = count + 1
self.send("{:<10}".format(len(line[i])))
self.send(line[i])
check = int(self.sock.recv(1))
if(check == 1):
recv = self.sock.recv(10)
packet_len = int(recv.strip())
now_json = self.sock.recv(packet_len).decode('utf-8')
while(len(now_json) < packet_len):
now_json += self.sock.recv(packet_len- len(now_json)).decode('utf-8')
recv_json = line[i]
recv_header = PcapPacketHeader()
recv_header.json_to_obj(recv_json)
recv_data = PcapPacketData(recv_header.incl_len)
recv_data.json_to_obj(recv_json)
now_header = PcapPacketHeader()
now_header.json_to_obj(now_json)
now_data = PcapPacketData(now_header.incl_len)
now_data.json_to_obj(now_json)
header_check_result, header_check_dict = now_header.get_diff(recv_header)
data_check_result, data_check_dict = now_data.get_diff(recv_data)
if(header_check_result or data_check_result):
un_match_cnt = un_match_cnt + 1
un_match_index.append(count-1)
recv_header.print_info()
if(header_check_result):
now_header.print_info()
for key in header_check_dict:
if(header_check_dict[key]):
print("{0} ๋ถ์ผ์น".format(key))
if(data_check_result):
recv_data.print_info()
now_data.print_info()
for key in data_check_dict:
if(data_check_dict[key]):
print("{0} ๋ถ์ผ์น".format(key))
continue_check = True
while(True):
continue_select = input("๊ณ์ํ๊ธฐ 1, ํต์ ์ข
๋ฃ 0 : ")
if(continue_select == "1"):
self.send("1")
break
elif(continue_select == "0"):
self.send("0")
continue_check = False
break
else:
print("์๋ชป๋ ์
๋ ฅ์
๋๋ค. ๋ค์ ์
๋ ฅํด์ฃผ์ธ์.")
continue
if(not(continue_check)):
break
print("###########################################")
print("### ํจํท ๋น๊ต ๊ฒฐ๊ณผ ###")
print("ํจํท ๊ฐฏ์ {}, ๋ถ์ผ์น ํจํท {} , ๋ถ์ผ์น ํจํท ๋ฒํธ {}".format(count-2, un_match_cnt, un_match_index))
self.send("EOF")
except ConnectionAbortedError as e:
print("์ฐ๊ฒฐ ์ค๋จ")
print(str(e))
except ConnectionRefusedError as e:
print("์ฐ๊ฒฐ ๋์ค ๋ฌธ์ ๊ฐ ๋ฐ์ํ์ต๋๋ค. : ConnectionRefusedError")
print(str(e))
except ConnectionResetError as e:
print("์ฐ๊ฒฐ์ด ์ด๊ธฐํ ๋์์ต๋๋ค. : ConnectionResetError")
print(str(e))
except ConnectionError:
print("์ฐ๊ฒฐ ๋์ค ๋ฌธ์ ๊ฐ ๋ฐ์ํ์ต๋๋ค. : ConnextionError")
print(str(e))
except Exception as e:
print(str(e))
print(traceback.format_exc())
except Exception as e:
print(str(e))
finally:
f.close()
self.close()
def close(self):
self.sock.close()
self.connected = False
# In[245]:
# ์ฌ์ฉ์ ํ
์คํธ ์ธํฐํ์ด์ค ํด๋์ค
class Tui:
# ๋ฉ์ธ๋ฉ๋ด ๋์
๋๋ฆฌ
MENU = {
'1':'1. ํต์ ๋๊ธฐ(์์ )',
'2':'2. ํต์ ํ๊ธฐ(์ก์ )',
'3':'3. ํ์ผ ๋ด์ฉ ํ์ธ',
'4':'4. ํ๋ก๊ทธ๋จ ์ข
๋ฃ'
}
# pcap ํด๋์ค
pcap = ''
def __init__(self, file_path):
# pcap ํด๋์ค ์ด๊ธฐํ
self.pcap = Pcap(file_path)
# ๋ฉ์ธ๋ฃจํ ์งํ
self.main()
# ๋ฉ๋ด ์ถ๋ ฅ
def show_menus(self, menus, use_menus=['1','2','3','4']):
# ํ์ดํ ์ถ๋ ฅ
title_str = MENU_PRINT_FORMAT.format(" ๋ฉ๋ด๋ฅผ ์ ํํ์ธ์. ")
print(title_str)
# ๊ฐ ๋ฉ๋ด๋ค์ ํฌ๋งท์ ๋ง๊ฒ ์ถ๋ ฅ
for menu in menus:
if(menu not in use_menus):
continue
print("{1}#{2}{0}{1}#{2}".format(menus[menu].center(len(title_str)-9),bcolors.OKBLUE, bcolors.ENDC))
print("{1}{0}{2}".format("-" * (len(title_str)-1),bcolors.OKBLUE, bcolors.ENDC))
print()
# ๋ฉ๋ด ์ ํ
def select_menu(self, menu_list, desc='๋ฉ๋ด๋ฅผ ์ ํํ์ธ์.'):
# ์ ๋๋ก๋ input์ด ๋ค์ด์ฌ๋ ๊น์ง ๋ฐ๋ณต
while(True):
# ์
๋ ฅ ๋ฐ๊ธฐ
# desc ๊ฐ None์ผ๊ฒฝ์ฐ ํ
์คํธ ์ฌ์ง์
if(desc is None):
desc = "๋ฉ๋ด๋ฅผ ์ ํํ์ธ์."
select = input("{} : ".format(desc))
# ๋ง์ฝ ์ ํด์ ธ์๋ ๋ฉ๋ด๋ฆฌ์คํธ์ ํฌํจ๋์ง์๋ ๊ฐ์ด ๋ค์ด์ฌ ๊ฒฝ์ฐ ๋ฐ๋ณต
if(select not in menu_list):
print("๋ค์ ์ ํํด ์ฃผ์ธ์.")
continue
break
# ์ ํ๋ ๋ฉ๋ด๋ฒํธ ๋ฐํ
return select
# ๋ฉ์ธ ๊ธฐ๋ฅ
def main(self):
if(self.pcap.loaded):
print("")
# self.pcap.save()
else:
print()
print("--pcap ๋ช
๋ น์ ํตํด pcapํ์ผ์ ๋ถ๋ฌ์ค์ง ์์์ต๋๋ค.")
print("'ํต์ ํ๊ธฐ(์ก์ )'์ 'ํ์ผ ๋ด์ฉ ํ์ธ' ๋ฉ๋ด๋ฅผ ์ด์ฉํ ์ ์์ต๋๋ค.")
# ์ฌ์ฉ์๊ฐ ํ๋ก๊ทธ๋จ์ ์ข
๋ฃํ ๋ ๊น์ง ๋ฐ๋ณต
while(True):
# ๋ฉ๋ด ์ ํ
if(self.pcap.loaded):
# ๋ฉ์ธ ๋ฉ๋ด ์ถ๋ ฅ
self.show_menus(self.MENU)
select = self.select_menu(['1','2','3','4'])
else:
# ๋ฉ์ธ ๋ฉ๋ด ์ถ๋ ฅ (1๋ฒ ๋ฉ๋ด์ 4๋ฒ ๋ฉ๋ด๋ง)
self.show_menus(self.MENU, ['1','4'])
select = self.select_menu(['1', '4'])
# ํต์ ๋๊ธฐ(์์ )
if(select == '1'):
ip = input("ํ์ฌ ์ปดํจํฐ์ IP๋ฅผ ์
๋ ฅํด์ฃผ์ธ์. : ")
reciver = SocketServer(ip)
pcap = reciver.wait_pcap()
if(pcap is not None):
self.pcap = pcap
del reciver
# ํต์ ํ๊ธฐ(์ก์ )
if(select == '2'):
# IP ์
๋ ฅ ์์ฒญ
ip = input("์๋ ์ปดํจํฐ์ IP๋ฅผ ์
๋ ฅํด์ฃผ์ธ์. : ")
# ์์ผ ํด๋ผ์ด์ธํธ ์์ฑ ( IP: ์ฌ์ฉ์ ์
๋ ฅ, ํฌํธ : 59595๋ก ํต์ผ)
sender = SocketClient(ip)
# ์์ผ ์ฐ๊ฒฐ
sender.connect()
# json ํ์ผ ์ ์ก
sender.send_file(self.pcap.json_file_name)
del sender
# ํ์ผ ๋ด์ฉํ์ธ
if(select == '3'):
self.show_pcap_data()
# ํ๋ก๊ทธ๋จ ์ข
๋ฃ
if(select == '4'):
sys.exit(1)
# pcap ํ์ผ ๋ด์ฉ ํ์ธ ๋ฉ๋ด
def show_pcap_data(self):
# binaryํ์ผ ์ ๋ณด ์ถ๋ ฅ
self.pcap.binary.print_info()
# ๊ธ๋ก๋ฒํค๋ ์ ๋ณด ์ถ๋ ฅ
self.pcap.global_header.print_info()
# ์๋ธ๋ฉ๋ด ๋์
๋๋ฆฌ
SUB_MENU = {
'1':'1. ํจํท ํ์ธํ๊ธฐ',
'2':'2. ์ด์ ๋ฉ๋ด๋ก ์ด๋'
}
page_per_packets = 4
now_page = 1
tot_len = self.pcap.cnt
tot_page = int(tot_len / page_per_packets)
# ๋ง์ง๋ง ํ์ด์ง๊ฐ ํ์ด์ง๋ณ ํจํท ๊ฐฏ์๋ก ๋๋๋์ง ํ์ธ( 3๊ฐ, 2๊ฐ, ๋ฑ..)
last = page_per_packets % tot_page
if(last > 0):
tot_page += 1
# ํ์ด์ง ์์ ๋ฒํธ
start = 0
# ํ์ด์ง ์ข
๋ฃ ๋ฒํธ
end= page_per_packets
# ์ฌ์ฉ์๊ฐ ์ด์ ๋ฉ๋ด๋ก ๋ณต๊ทํ๊ธฐ ์ ๊น์ง ๋ฌดํ๋ฐ๋ณต
while(True):
# ์๋ธ๋ฉ๋ด ์ถ๋ ฅ
self.show_menus(SUB_MENU)
# ์๋ธ๋ฉ๋ด ์ ํ
select = self.select_menu(['1','2'])
# ํจํทํ์ธ ๋ฉ๋ด
if(select == '1'):
# ์ฌ์ฉ์๊ฐ ์ด์ ๋ฉ๋ด๋ก ๋ณต๊ทํ๊ธฐ ์ ๊น์ง ๋ฌดํ๋ฐ๋ณต
while(True):
# ๋ฒ์ ์ถ๋ ฅ
self.pcap.print_packet_range(start, end)
# ์ด์ ํ์ด์ง, ๋ค์ํ์ด์ง, ์ด์ ๋ฉ๋ด์ค ์ ํ
select = self.select_menu(menu_list=['1','2','3','4','5'], desc="({} / {}) 1: ์ฒซ๋ฒ์งธํ์ด์ง 2: ์ด์ ํ์ด์ง 3: ๋ค์ํ์ด์ง 4: ๋ง์ง๋งํ์ด์ง 5: ์ด์ ๋ฉ๋ด".format(now_page, tot_page))
os.system('cls')
# ์ฒซ๋ฒ์งธ ํ์ด์ง ์ด๋
if(select == '1'):
now_page = 1
start = 0
end = page_per_packets
# ๋ง์ง๋ง ํ์ด์ง ์ด๋
elif(select == '4'):
now_page = tot_page
start = ((tot_page * page_per_packets) - page_per_packets)
end = (tot_page * page_per_packets) -1
# ์ด์ ํ์ด์ง์ผ ๊ฒฝ์ฐ
elif(select == '2'):
# ๋งจ์ฒซ ํ์ด์ง ์ผ ๊ฒฝ์ฐ
if(start == 0):
print("์ฒซ๋ฒ์งธ ํ์ด์ง ์
๋๋ค")
else:
start -= page_per_packets
end -= page_per_packets
now_page -= 1
# ๋ค์ํ์ด์ง์ผ ๊ฒฝ์ฐ
elif(select == '3'):
if(now_page >= tot_page):
print("๋ง์ง๋ง ํ์ด์ง ์
๋๋ค.")
else:
start += page_per_packets
end += page_per_packets
if(end >= tot_len):
end = tot_len
now_page += 1
# ์ด์ ๋ฉ๋ด์ผ ๊ฒฝ์ฐ
else:
break
# ์ด์ ๋ฉ๋ด๋ก ์ด๋
if(select == '2'):
return
# In[250]:
# argsparse ์์ฑ
parser = argparse.ArgumentParser(description="pcap File Parser v0.3, create by ํ์งํ, ์ ๋ค์ด, ์ก์ํ, ๊น๊ฐ๊ฒธ, ๊ณ ์ฑํ, ์ฅ์ธ๊ธฐ")
# pcap ํ์ผ ์ธ์ ์ถ๊ฐ
parser.add_argument('--pcap', metavar='pcap_file_path', type=str, required=False, help='pcapํ์ผ์ ๊ฒฝ๋ก๋ฅผ ์
๋ ฅํด์ฃผ์ธ์.')
# json ํ์ผ ์ธ์ ์ถ๊ฐ
parser.add_argument('--json', metavar='json_file_path', type=str, required=False, help='jsonํ์ผ์ ๊ฒฝ๋ก๋ฅผ ์
๋ ฅํด์ฃผ์ธ์.')
# ์ฌ์ฉ์๋ก๋ถํฐ ์ ๋ฌ๋ฐ์ args
args = parser.parse_args()
# pcap ์ธ์๋ฅผ ํตํด Tui ๊ฐ์ฒด ์์ฑ
Tui(args.pcap)
| true
|
b4c5dffa22ceb9fc4ee2eb19aff5e7539213cf17
|
Python
|
Sel14/Proyecto-
|
/Carpeta/proyecto clienteCasiFinal.py
|
UTF-8
| 3,913
| 2.578125
| 3
|
[] |
no_license
|
from Tkinter import *
import socket
import threading
from threading import *
mi_socket=socket.socket()
raiz=Tk()
raiz.title("Cocina")
frame=Frame(raiz, width=500, height=400)
frame.pack(fill="both",expand="True")
#---import socket---#
while True:
try:
mi_socket.connect(("localhost",8000))
break
except:
continue
#-------------------------------Funciones--------------------------------#
def listen():
global mi_socket
da=StringVar()
while True:
try:
da=mi_socket.recv(1024)
while True:
if da=="1":
while True:
try:
da=mi_socket.recv(1024)
if da=="2" or da=="3" or da=="4" or da=="5":
break
else:
impresion.insert(END,da)
except:
continue
elif da=="2":
while True:
try:
da=mi_socket.recv(1024)
if da=="1" or da=="3" or da=="4" or da=="5":
break
else:
impresion2.insert(END,da)
except:
continue
elif da=="3":
while True:
try:
da=mi_socket.recv(1024)
if da=="2" or da=="1" or da=="4" or da=="5":
break
else:
impresion3.insert(END,da)
except:
continue
elif da=="4":
while True:
try:
da=mi_socket.recv(1024)
if da=="2" or da=="3" or da=="1" or da=="5":
break
else:
impresion4.insert(END,da)
except:
continue
elif da=="5":
while True:
try:
da=mi_socket.recv(1024)
if da=="2" or da=="3" or da=="4" or da=="1":
break
else:
impresion5.insert(END,da)
except:
continue
else:
continue
except:
continue
def remove(listbox):
    """Clear every order line from *listbox*, keeping the table-number
    header that lives at index 0.

    (Parameter renamed from ``list``, which shadowed the builtin; all
    call sites pass it positionally.)
    """
    listbox.delete(1, END)
def send(numero):
    """Notify the server that the order for table *numero* is ready.

    Retries until the message is handed to the socket. Fix vs. original:
    the bare ``except:`` was narrowed to ``socket.error`` so keyboard
    interrupts are no longer swallowed by the retry loop.
    """
    global mi_socket
    while True:
        try:
            mi_socket.send("La orden de la mesa " + numero + " esta lista")
            break
        except socket.error:
            continue
#----------- --------------------grafica--------------------------------#
comanda = Label(frame, text="comandas")
comanda.grid(row=0, column=0)

def _panel_mesa(numero, fila, columna):
    """Build one table panel at grid position (fila, columna): a listbox
    headed by the table number, a "Notificar" button that reports the order
    ready, and a "remove" button that clears the listbox.

    Returns the listbox so listen() can append incoming order lines to it.

    Fix vs. original: each button called both .pack() and .grid() on the
    same container -- mixing geometry managers inside one master is a Tk
    error -- so only grid() is used here. The five copy-pasted panel
    blocks (which also kept reassigning the same `boton` name) collapse
    into this single helper; the layout rows/columns are unchanged.
    """
    lista = Listbox(frame)
    lista.grid(row=fila, column=columna)
    lista.insert(0, numero)
    notificar = Button(frame, text="Notificar", command=lambda: send(numero))
    notificar.config(cursor="hand2")
    notificar.grid(row=fila + 1, column=columna)
    limpiar = Button(frame, text="remove", command=lambda: remove(lista))
    limpiar.config(cursor="hand2")
    limpiar.grid(row=fila + 2, column=columna)
    return lista

# Tables 1-3 on the upper band (rows 1-3), tables 4-5 below (rows 4-6).
impresion = _panel_mesa("1", 1, 1)
impresion2 = _panel_mesa("2", 1, 2)
impresion3 = _panel_mesa("3", 1, 3)
impresion4 = _panel_mesa("4", 4, 1)
impresion5 = _panel_mesa("5", 4, 2)
if __name__ == "__main__":
    # Fix: run the receiver as a daemon thread so the process can exit when
    # the window closes -- listen() loops forever, and the original
    # non-daemon thread kept the interpreter alive after mainloop returned.
    receptor = Thread(target=listen)
    receptor.daemon = True
    receptor.start()
    raiz.mainloop()
| true
|
5bb5e42e02a13a83d2840bfd5a3600780c61f7ad
|
Python
|
selvesandev/python-ess
|
/input-output/io.py
|
UTF-8
| 339
| 3.390625
| 3
|
[] |
no_license
|
# Demonstrate re-reading a file: rewind with seek(0) between reads.
# Fix vs. original: the first handle was opened with plain open()/close()
# (and the name `file` shadowed the Python 2 builtin); a `with` block now
# closes it even if a read raises. Output order is unchanged.
with open('file.txt') as reader:
    print(reader.read())
    reader.seek(0)
    print(reader.read())  # second read works because we rewound
    reader.seek(0)
    print(reader.readlines())
# No need to close the file -- the context manager handles it.
with open('file.txt') as myNewFile:
    contents = myNewFile.read()
    print(contents)
# Append mode: adds a line to the end of the same file.
with open('file.txt', mode='a') as myFileWrite:
    myFileWrite.write('Hello from python\n')
| true
|