blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
78f87abe8dbfb1e6419d09859f01e53b757a3276 | Python | TuGiu/DATA_Analysis | /surface_wave_rate.py | UTF-8 | 3,478 | 3.171875 | 3 | [] | no_license | # 函数插值--波动率曲面构造
from scipy import interpolate
#dir(interpolate)[:5]
# print interpolate.spline.__doc__
import numpy as np
from matplotlib import pylab
import seaborn as sns
from CAL.PyCAL import *
font.set_size(20)
# Sample f(x) = sin(x) at 7 evenly spaced points on [1, 13].
x = np.linspace(1.0, 13.0, 7)
y = np.sin(x)
#pylab.figure(figsize = (12,6))
# pylab.scatter(x,y, s = 85, marker='x', color = 'r')
# pylab.title(u'$f(x)$离散点分布', fontproperties = font)
# Dense grid on which the interpolants are compared against the true curve.
xnew = np.linspace(1.0, 13.0, 500)
# NOTE(review): scipy.interpolate.spline was removed in SciPy 1.3; this
# script needs an old SciPy (or make_interp_spline on modern versions).
ynewLinear = interpolate.spline(x,y,xnew,order = 1)
ynewLinear[:5]
ynewCubicSpline = interpolate.spline(x,y,xnew,order = 3)
ynewCubicSpline[:5]
ynewReal = np.sin(xnew)
ynewReal[:5]
#pylab.figure(figsize = (16,8))
#pylab.plot(xnew,ynewReal)
#pylab.plot(xnew,ynewLinear)
#pylab.plot(xnew,ynewCubicSpline)
#pylab.scatter(x,y, s = 160, marker='x', color = 'k')
#pylab.legend([u'真实曲线', u'线性插值', u'样条曲线', u'$f(x)$离散点'], prop = font)
#pylab.title(u'$f(x)$不同插值方法拟合效果:线性插值 v.s 样条插值', fontproperties = font)
# Volatility matrix: Black vols, one row per strike, one column per expiry.
import pandas as pd
pd.options.display.float_format = '{:,>.2f}'.format
dates = [Date(2015,3,25), Date(2015,4,25), Date(2015,6,25), Date(2015,9,25)]
strikes = [2.2, 2.3, 2.4, 2.5, 2.6]
blackVolMatrix = np.array([[0.32562851, 0.29746885, 0.29260648, 0.27679993],
[ 0.28841840, 0.29196629, 0.27385023, 0.26511898],
[ 0.27659511, 0.27350773, 0.25887604, 0.25283775],
[ 0.26969754, 0.25565971, 0.25803327, 0.25407669],
[ 0.27773032, 0.24823248, 0.27340796, 0.24814975]])
table = pd.DataFrame(blackVolMatrix * 100, index = strikes, columns = dates, )
table.index.name = u'行权价'
table.columns.name = u'到期时间'
#print u'2015.3.3-10:00波动率矩阵'
#table
# Variance matrix: total variance = sigma**2 * time-to-maturity (in years).
evaluationDate = Date(2015,3,3)
ttm = np.array([(d - evaluationDate) / 365.0 for d in dates])
varianceMatrix = (blackVolMatrix**2) * ttm
varianceMatrix
# Interpolate TOTAL VARIANCE bilinearly over (maturity, strike), which is
# the quantity that interpolates sensibly, then convert back to volatility.
interp = interpolate.interp2d(ttm, strikes, varianceMatrix, kind = 'linear')
interp(ttm[0], strikes[0])
sMeshes = np.linspace(strikes[0], strikes[-1], 400)
tMeshes = np.linspace(ttm[0], ttm[-1], 200)
interpolatedVarianceSurface = np.zeros((len(sMeshes), len(tMeshes)))
for i, s in enumerate(sMeshes):
    for j, t in enumerate(tMeshes):
        interpolatedVarianceSurface[i][j] = interp(t,s)
# vol = sqrt(total variance / maturity); Python 2 print statements below.
interpolatedVolatilitySurface = np.sqrt((interpolatedVarianceSurface / tMeshes))
print u'新权价方向网格数:', np.size(interpolatedVolatilitySurface, 0)
print u'到期时间方向网格数:', np.size(interpolatedVarianceSurface, 1)
#pylab.figure(figsize = (16,8))
#pylab.plot(sMeshes, interpolatedVarianceSurface[:, 0])
#pylab.scatter(x = strikes, y = blackVolMatrix[:,0], s = 160,marker = 'x', color = 'r')
#pylab.legend([u'波动率(线性插值)', u'波动率(离散)'], prop = font)
#pylab.title(u'到期时间为2015.3.25期权波动率', fontproperties = font)
# 3-D surface plot of the interpolated volatility (scaled to percent).
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
maturityMesher, strikeMesher = np.meshgrid(tMeshes, sMeshes)
pylab.figure(figsize =(16,9))
ax = pylab.gca(projection = '3d')
surface = ax.plot_surface(strikeMesher, maturityMesher, interpolatedVolatilitySurface*100, cmap = cm.jet)
pylab.colorbar(surface,shrink=0.75)
pylab.title(u'', fontproperties = font)
pylab.xlabel('strike')
pylab.ylabel("maturity")
ax.set_zlabel(r"volatility(%)") | true |
44bb6e35c4adcee71e46a08133e671cf0a9cfba9 | Python | rwillingeprins/advent_of_code | /2020/day16.py | UTF-8 | 3,011 | 3.078125 | 3 | [] | no_license | def parse_train_ticket_notes(file_path):
    """Parse the AoC 2020 day-16 input into (field ranges, my ticket, others).

    Returns:
      ranges_per_field -- {field name: [(min, max), ...]}
      my_ticket        -- list of ints
      other_tickets    -- list of lists of ints
    """
    with open(file_path) as file:
        # The three sections of the input are separated by blank lines.
        rules_string, my_ticket_string, other_tickets_string = file.read().split('\n\n', 3)
    ranges_per_field = {}
    for rule_line in rules_string.splitlines():
        field, ranges_string = rule_line.split(': ', 2)
        ranges = []
        for range_string in ranges_string.split(' or '):
            minimum, maximum = range_string.split('-')
            ranges.append((int(minimum), int(maximum)))
        ranges_per_field[field] = ranges
    # The first line of each ticket section is its header ("your ticket:" /
    # "nearby tickets:"), so it is skipped.
    my_ticket = [int(value) for value in my_ticket_string.splitlines()[1].split(',')]
    other_tickets = [[int(value) for value in line.split(',')] for line in other_tickets_string.splitlines()[1:]]
    return ranges_per_field, my_ticket, other_tickets
def value_in_any_range(value, ranges):
    """Return True when *value* lies inside at least one (min, max) pair."""
    for lower, upper in ranges:
        if lower <= value <= upper:
            return True
    return False
def get_error_rate_per_ticket(tickets, valid_ranges):
    """Return, for each ticket, the sum of its values valid under no range."""
    return [
        sum(value for value in ticket
            if not value_in_any_range(value, valid_ranges))
        for ticket in tickets
    ]
def day16a():
    """Part 1: the scanning error rate -- the sum of every value on the
    nearby tickets that fits no field's ranges."""
    ranges_per_field, _, other_tickets = parse_train_ticket_notes('input/day16.txt')
    # Field names do not matter for part 1; pool every range together.
    all_ranges = {min_max for ranges in ranges_per_field.values() for min_max in ranges}
    return sum(get_error_rate_per_ticket(other_tickets, all_ranges))
def day16b():
    """Part 2: deduce which ticket position holds which field.

    Only tickets whose every value fits some range are used for deduction;
    candidate fields are eliminated per position, then positions with a
    single remaining candidate are locked in until the assignment is
    complete.  Returns the product of this ticket's 'departure*' values.
    """
    ranges_per_field, my_ticket, other_tickets = parse_train_ticket_notes('input/day16.txt')
    all_ranges = {min_max for ranges in ranges_per_field.values() for min_max in ranges}
    ticket_error_rates = get_error_rate_per_ticket(other_tickets, all_ranges)
    field_value_indices = range(len(my_ticket))
    # Initially every field is a candidate for every position.
    possible_field_sets = [set(ranges_per_field.keys()) for _ in field_value_indices]
    for ticket_index, ticket_values in enumerate(other_tickets):
        if ticket_error_rates[ticket_index] > 0:
            # Invalid ticket: ignore it for deduction.
            continue
        for value_index, value in enumerate(ticket_values):
            for field in list(possible_field_sets[value_index]):
                if not value_in_any_range(value, ranges_per_field[field]):
                    possible_field_sets[value_index].remove(field)
    # Fixpoint elimination: repeatedly assign positions with one candidate.
    unknown_field_value_indices = set(field_value_indices)
    value_index_per_field = {}
    while unknown_field_value_indices:
        for value_index in list(unknown_field_value_indices):
            possible_field_sets[value_index] -= set(value_index_per_field.keys())
            if len(possible_field_sets[value_index]) == 1:
                (field,) = possible_field_sets[value_index]
                value_index_per_field[field] = value_index
                unknown_field_value_indices.remove(value_index)
    answer = 1
    for field, value_index in value_index_per_field.items():
        if field.startswith('departure'):
            answer *= my_ticket[value_index]
    return answer
print(day16a())
print(day16b())
| true |
5d90764d8aa8088db5c62c4b14d40ed92516061d | Python | jilingyu/homework4 | /script3.py | UTF-8 | 168 | 3.390625 | 3 | [] | no_license | my_list = [1,2,3,4,5,6,7,8,9]
# Partition my_list into even and odd values.
# (my_list is redeclared here with the same literal so the block is
# self-contained; the value is identical to the original declaration.)
my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
even_list = []
odd_list = []
for i in my_list:
    if i % 2 == 0:
        # Bug fix: was even_list.append([{i}]), which stored a one-element
        # list containing a set instead of the number itself.
        even_list.append(i)
    else:
        odd_list.append(i)
| true |
4f35959a404dc31d4311b67a9f815fd93c083ac7 | Python | mzvk/590ideas | /python/simple/snow.py | UTF-8 | 1,113 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# Snow in the CLI!
# Issues: if screen has big height, when flakes are colored
# screen starts to jitter.
# intro weight to slow-start
# shading for slow-start?
# MZvk 2019
import sys, time, os, random
flaketype = '*#.,` -~oO0'
flakeclr = ['97', '37', '90', '30', '96', '94']

def getflake():
    """Return one 4-character cell: a random flake glyph at a random offset."""
    shift = random.randint(0, 3)
    # randint's upper bound is inclusive, so the index may equal
    # len(flaketype); the modulo wraps that case back to 0.
    glyph = flaketype[random.randint(0, len(flaketype)) % len(flaketype)]
    return '{}{}{}'.format(' ' * shift, glyph, ' ' * (3 - shift))
def trimline(line):
    """Truncate *line* to the terminal width and wrap each non-space
    character in a randomly chosen ANSI colour escape."""
    pieces = []
    for chrs in line[:cols]:
        if chrs != ' ':
            code = flakeclr[random.randint(0, len(flakeclr)) % len(flakeclr)]
            pieces.append('\033[{}m{}\033[0m'.format(code, chrs))
        else:
            pieces.append(chrs)
    return ''.join(pieces)
def genline():
    # One screen row of flakes: (cols + 1) / 4 cells of four characters each.
    # NOTE: xrange (and the print statements below) make this Python 2 only.
    return ''.join([getflake() for flake in xrange((cols + 1)/4)])
# Terminal dimensions from stty (rows, columns).
rows, cols = [int(x) for x in os.popen('stty size', 'r').read().split()]
if rows * cols > 8000:
    # Large screens make the full-redraw animation stutter (see header note).
    print "Screen to big to display without jitter"
    sys.exit()
# Blank screen-sized buffer; each tick drops the oldest row and appends a
# fresh row of flakes, printed bottom-up so the snow appears to fall.
linebuffer = [' ' * cols] * rows
try:
    while 1:
        linebuffer.pop(0)
        linebuffer.append(trimline(genline()))
        print '\n'.join(reversed(linebuffer))
        time.sleep(0.25)
except KeyboardInterrupt:
    sys.exit()
| true |
b4312e5e4de1590eea1afa5bd1759f96daa24ea5 | Python | Enselic/git-repo-language-trends | /src/git_repo_language_trends/_internal/progress.py | UTF-8 | 1,712 | 2.921875 | 3 | [
"MIT"
] | permissive | import sys
import time
# Minimum interval between progress prints; writing to stderr too often can
# slow execution down significantly.
RATE_LIMIT_INTERVAL_SECONDS = 0.1

class Progress:
    """Prints a one-line, self-overwriting progress indicator to stderr.

    Tracks the current commit out of ``total_commits``; ``print_state`` is
    called once per file and is rate-limited so stderr I/O stays cheap.
    """

    def __init__(self, args, total_commits):
        self.args = args                  # parsed CLI options; uses .no_progress
        self.current_commit = 1           # 1-based index of the commit in progress
        self.total_commits = total_commits
        self.last_print = None            # timestamp of the most recent print

    def print_state(self, current_file, total_files):
        """Print "Counting lines in commit i/N file j/M" over the current line.

        Skipped when progress output is disabled, when stderr is not a
        terminal, or when rate-limited -- except for the last file of a
        commit, which is always printed so output looks complete.
        """
        if self.args.no_progress:
            return
        # Bug fix: isatty is a method.  The original tested the bound method
        # object itself ("not sys.stderr.isatty"), which is always falsy, so
        # the non-tty guard could never fire.
        if not sys.stderr.isatty():
            return
        # If we recently printed, bail out. Always print if this is the last
        # file we are processing however, since otherwise output seems
        # "incomplete" to a human.
        if self.is_rate_limited() and current_file < total_files:
            return

        if self.total_commits == 1:
            commit_part = ""
        else:
            # "commit 12/345 "
            commit_part = f"commit {padded_progress(self.current_commit,self.total_commits)} "

        # "file 67/890"
        file_part = f"file {padded_progress(current_file,total_files)}"

        # "Counting lines in commit 12/345 file 67/890"
        print(
            f"Counting lines in {commit_part}{file_part}\r",
            file=sys.stderr,
            end='',
        )

    def is_rate_limited(self):
        """Return True when the last print happened less than
        RATE_LIMIT_INTERVAL_SECONDS ago; otherwise record now as the last
        print time and return False."""
        now = time.time()
        if self.last_print is not None and now < self.last_print + RATE_LIMIT_INTERVAL_SECONDS:
            return True
        self.last_print = now
        return False

    def commit_processed(self):
        """Advance to the next commit."""
        self.current_commit += 1
def padded_progress(current_commit, total_commits):
    """Return "current/total" with current right-aligned to total's width."""
    width = len(str(total_commits))
    return str(current_commit).rjust(width) + "/" + str(total_commits)
| true |
b3c535a5cfc747a1859ecfb16c0496e3804dab80 | Python | asyaasha/strategyLearner | /experiment1.py | UTF-8 | 2,727 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Student Name: Asiya Gizatulina (replace with your name)
"""
import ManualStrategy as mst
import StrategyLearner as stg
from marketsimcode import compute_portvals
import numpy as np
import random
def author():
    """Return the author's username for grading."""
    return "agizatulina3"
if __name__ == '__main__':
    # Fix both RNG seeds so the experiment is reproducible.
    random.seed(300)
    np.random.seed(300)
    start_value = 100000
    start = '01-01-2008'
    end = '12-31-2009'
    symbol = 'JPM'
    manual_str = mst.ManualStrategy()
    learner_str = stg.StrategyLearner()
    # Train the learner in-sample, then query both strategies over the
    # same period.
    learner_str.addEvidence(symbol, start, end, start_value)
    manual_trades = manual_str.testPolicy(symbol, start, end, start_value)
    learner_trades = learner_str.testPolicy(symbol, start, end, start_value)
    # Modify to make it work with compute_portvals
    learner_trades['Date'] = learner_trades.index
    learner_trades['Symbol'] = symbol
    learner_trades['Order'] = 'BUY'
    learner_trades['Shares'] = learner_trades[symbol]
    learner_trades.reset_index(drop = True, inplace = True)
    learner_df = learner_trades.copy()
    # Negative share counts are sells: flip the sign and relabel the order.
    # (column 0 = share delta, column 3 = 'Order', column 4 = 'Shares')
    for i in range(0, len(learner_df)):
        if learner_df.iloc[i, 0] < 0:
            amount = learner_df.iloc[i, 4] * -1
            learner_df.iloc[i, 3] = 'SELL'
            learner_df.iloc[i, 4] = amount
    # Print learner_trades
    # Get portfolio values
    manualVal = compute_portvals(manual_trades, start_val = start_value)
    learnerVal = compute_portvals(learner_df, start_val = start_value)
    # Normalize
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- this script
    # needs an old pandas (or a port to .iloc) to run.
    manualVal = manualVal / manualVal.ix[0]
    learnerVal = learnerVal / learnerVal.ix[0]
    # COMMENT OUT FOR PLOT
    # Plot
    manual_str.plot(manualVal, learnerVal, 'experiment1.pdf', 'Manual Strategy vs Learner Strategy')
    # Calculate stats manual
    port_val = start_value * manualVal
    cum_return = manualVal.ix[-1] / manualVal.ix[0] - 1
    daily_ret = manualVal / manualVal.shift(1) - 1
    std = daily_ret.std()
    avg = daily_ret.mean()
    print 'Manual Strategy Portfolio Cumulative: %f' % cum_return
    print 'Manual Strategy Portfolio Std of Daily Returns: %f' % std
    print 'Manual Strategy Portfolio Mean of Daily Returns: %f' % avg
    # Calculate stats learner
    port_val = start_value * learnerVal
    cum_return = learnerVal.ix[-1] / learnerVal.ix[0] - 1
    daily_ret = learnerVal / learnerVal.shift(1) - 1
    std = daily_ret.std()
    avg = daily_ret.mean()
    print 'Strategy Learner Portfolio Cumulative: %f' % cum_return
    print 'Strategy Learner Portfolio Std of Daily Returns: %f' % std
    print 'Strategy Learner Portfolio Mean of Daily Returns: %f' % avg
| true |
fbf5232c6a3ccdab138105d713f10837a2100e88 | Python | liqMix/Multilayer-MNIST-Fashion | /Dataset.py | UTF-8 | 1,235 | 2.765625 | 3 | [] | no_license | import numpy as np
import struct as st
# Copies the MNIST data into memory
class Dataset():
    """Loads the MNIST training images and labels (IDX files) into memory.

    After construction:
      - self.data   : one row per image -- the flattened pixels scaled to
                      [0, 1] with a constant 1 appended as a bias input.
      - self.labels : (n, 1) array of digit labels.
    """
    def __init__(self):
        self.data = []
        self.labels = None
        with open('train-images-idx3-ubyte', mode='rb') as file:
            file.seek(0)
            # IDX header; all fields are big-endian ('>').
            magic = st.unpack('>4B', file.read(4))
            nImg = st.unpack('>I', file.read(4))[0]
            nR = st.unpack('>I', file.read(4))[0]
            nC = st.unpack('>I', file.read(4))[0]
            nBytesTotal = nImg * nR * nC * 1 # since each pixel data is 1 byte
            data = np.asarray(st.unpack('>' + 'B' * nBytesTotal, file.read(nBytesTotal))).reshape((nImg, nR, nC))
            for d in data:
                # Flatten, scale pixels to [0, 1], append the bias input 1.
                # NOTE: relies on true division (Python 3) for /255.
                self.data.append(np.append((np.ravel(d) / 255), 1))
        self.data = np.array(self.data)
        with open('train-labels-idx1-ubyte', mode='rb') as file:
            file.seek(0)
            magic = st.unpack('>4B', file.read(4))
            nR = st.unpack('>I', file.read(4))[0]
            self.labels = np.zeros((nR, 1))
            nBytesTotal = nR * 1
            self.labels = np.asarray(st.unpack('>' + 'B' * nBytesTotal, file.read(nBytesTotal))).reshape((nR, 1))
| true |
ff0d318f866a16f74df514136fd68351e4be7204 | Python | Norbaeocystin/Controlberry | /Controlberry/camera.py | UTF-8 | 1,950 | 2.78125 | 3 | [
"MIT"
] | permissive | '''
short snippet
to take photos
to disable red light
add this line :
disable_camera_led=1
to this file
sudo nano /boot/config.txt
'''
import logging
from io import BytesIO
from time import sleep
from picamera import PiCamera
from picamera.exc import PiCameraMMALError, PiCameraError
logging.basicConfig(level=logging.INFO, format = '%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
def get_image_as_bytes():
    '''
    returns image as bytes from Pi Camera
    there few options how to process it
    one is to get stored image (which is stored as bytes) as string
    for example by function
    from bson.json_util import dumps
    def get_picture():
        picture = Pictures.find_one().get('PICTURE')
        return dumps(picture).replace('{"$binary": ','').replace("}",'').replace('"','')
    Returned string can be directly used for example as src for image if you add this:
    .src = "data:image/jpeg;base64," + your string
    In python you can get image with this short code from string:
    import base64
    import io
    from PIL import Image
    # to show it in Jupyter notebook
    from IPython.display import display
    # code below is important
    msg = base64.b64decode(pict)
    buf = io.BytesIO(msg)
    img = Image.open(buf)
    # to show it in jupyter notebook inline
    display(img)
    Directly from binary field:
    picture_bytes = Pictures.find().skip(40).limit(1).next().get('PICTURE')
    buf = io.BytesIO(picture_bytes)
    img = Image.open(buf)
    Returns None when the camera cannot be opened.
    '''
    stream = BytesIO()
    try:
        camera = PiCamera()
        camera.start_preview()
        # Give the sensor time to adjust exposure before capturing.
        sleep(1.3)
        camera.capture(stream, format='jpeg')
        stream.seek(0)
        camera.close()
        return stream.getvalue()
    except (PiCameraMMALError, PiCameraError):
        # NOTE(review): camera.close() is skipped on this path, so the
        # camera object may be left open; consider a finally block.
        logger.error('Enable Camera in raspi-cnfig or check if camera is connected')
        pass
| true |
75500975b2e537fd1aa19b72fb182be7b959e404 | Python | bajracae/CS325-GroupAssignment2 | /temp.py | UTF-8 | 747 | 3.734375 | 4 | [] | no_license | def change_greedy(coins, value):
    # Greedy coin change: walk the (ascending) coin list from largest to
    # smallest, taking each coin as many times as it fits into value.
    n = len(coins)
    # coin_counts is the counts of coins you are using
    # coin_counts[i] = count(coin[i])
    coin_counts = [0]*len(coins)
    # Implement the greedy version as described in pdf
    # Remember to add to coin_counts[i] for coins[i], when appropriate
    ######################
    ### YOUR CODE HERE ###
    ######################
    i = n - 1;
    while (i >= 0):
        while(value >= coins[i]):
            value -= coins[i];
            # NOTE(review): this APPENDS the coin's value to the list instead
            # of incrementing coin_counts[i] as the instructions above say,
            # so the result is the zero prefill followed by the coin values
            # used -- verify against get_result_greedy()'s expectations.
            coin_counts.append(coins[i]);
        i -= 1
    # return the counts
    # will be converted in get_result_greedy()
    return coin_counts
# Demo: make change for 40 from coins [1, 10, 25, 50].
coins = [1, 10, 25, 50]
value = 40
print(change_greedy(coins, value));
| true |
09d724054a9a32d82e8b376cc3b75a1d0a3a385a | Python | Ing-Josef-Klotzner/python | /_fast_sort.py | UTF-8 | 520 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python3
from sys import stdin
def main ():
    """Sort words under a custom alphabet ordering, read from stdin.

    Input: t test cases; each case is a 26-letter ordering line followed by
    the word to sort.  Prints each word sorted by the given ordering.
    """
    # 'a'..'z' in standard order.
    alp = "".join (chr (x) for x in range (97, 123))
    read = stdin.readline
    t = int (read ())
    for t_ in range (t):
        order = read ().rstrip ()
        word = read ().rstrip ()
        # ao: standard letter -> its stand-in in the custom order;
        # oa: the inverse mapping.
        ao = {a : o for a, o in zip (alp, order)}
        oa = {o : a for a, o in ao.items ()}
        # Translate into "standard" space, sort there, translate back.
        wordoa = "".join (oa [x] for x in word)
        srtword = "".join (sorted (wordoa))
        print ("".join (ao [x] for x in srtword))
if __name__ == "__main__": main () | true |
3d6bf78707ce6ce82bb56272c7d31d765e30fc85 | Python | shehla/house-traffic-profiler | /googlemaps/delay_controller.py | UTF-8 | 2,846 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | from termcolor import colored
import Queue
import time
import statistics
import googlemaps
import googlemaps.route_manager as route_manager
traffic_models = ['optimistic', 'pessimistic', 'best_guess']
# NOTE(review): hard-coded Google Maps API key committed to source control;
# it should be loaded from configuration/environment and this key revoked.
gmaps = googlemaps.Client(key='AIzaSyDNIxQQlAu-LzbpCQhvJDMKtPgborYIO7w')
# Python 2 module: uses the Queue module and a print statement below.
class DelayController(object):
    """Polls travel times for one route and adapts its own polling delay:
    poll faster while travel times vary, back off when they are stable."""
    def __init__(self, route, min_delay=60.0, max_delay=900.0, max_elements=5, THRESHOLD=5, traffic_modes=traffic_models):
        # route is a dict providing 'route_id', 'from' and 'to'.
        # NOTE(review): the traffic_modes parameter is accepted but never
        # used; self.traffic_models is always bound to the module-level list.
        self.route_id = route['route_id']
        self.route = route
        self.min_delay = min_delay
        self.max_delay = max_delay
        self.current_delay = min_delay
        self.queue = Queue.Queue()
        self.max_elements = max_elements
        self.THRESHOLD = THRESHOLD
        self.run_at = time.time()
        self.traffic_models = traffic_models
    def check_and_run(self):
        """When due, fetch the travel time for every traffic model and
        schedule the next run; returns [{model: minutes}, ...]."""
        resp = []
        if time.time() >= self.run_at:
            print colored('\nRoute:{0}'.format(self.route['route_id']), 'red')
            for model in traffic_models:
                time_in_mins = route_manager.get_time(self.route['from'], self.route['to'], model)
                if not time_in_mins:
                    # if you didn't get a response, wait and then continue
                    time.sleep(10)
                    continue
                resp.append({
                    model: time_in_mins,
                })
                # Only best-guess figures feed the delay-adjustment window.
                if model == 'best_guess':
                    self.update_queue(time_in_mins)
            self.run_at += self.current_delay
        return resp
    def update_queue(self, travel_time):
        """Push a travel time into the bounded FIFO window, then re-tune."""
        if self.queue.qsize() == self.max_elements:
            # remove the oldest record
            self.queue.get()
        # add the new record
        self.queue.put(travel_time)
        self.adjust_delay()
    def adjust_delay(self):
        """Halve the delay when variation exceeds THRESHOLD percent of the
        mean; double it when the window is essentially flat (std <= 0.01).
        Both directions are clamped to [min_delay, max_delay]."""
        std_dev = statistics.pstdev(list(self.queue.queue))
        mean = statistics.mean(list(self.queue.queue))
        header = '*** DelayController:{0} mean:{1} std:{2} '.format(
            self.route_id,
            mean,
            std_dev,
        )
        if std_dev / mean * 100.0 > self.THRESHOLD:
            if self.current_delay / 2 >= self.min_delay:
                self.current_delay /= 2.0
                header = '{0} HIGH ACTIVITY delay:{1}'.format(header, self.current_delay)
            else:
                self.current_delay = self.min_delay
                header = '{0} MAXED out delay:{1}'.format(header, self.current_delay)
        if std_dev <= 0.01:
            if self.current_delay * 1.5 <= self.max_delay:
                self.current_delay *= 2.0
                header = '{0} low activity delay:{1}'.format(header, self.current_delay)
            else:
                self.current_delay = self.max_delay
                header = '{0} NO activity delay:{1}'.format(header, self.current_delay)
        print(header)
| true |
7ebbe24844196367151c04ecf5551402219fbc76 | Python | arpitrajput/20_Days_of_Code | /Day_7_Palindromic_Substrings_Count.py | UTF-8 | 269 | 3.125 | 3 | [] | no_license | class Solution:
# @param A : string
# @return an integer
def solve(self, s):
count = 0
for i in range(len(s)):
for j in range(i,len(s)):
if s[i:j] == s[j:i:-1]:
count += 1
return count
| true |
a53a00d729ff85e32f4fd2b776acec2b2219d4a2 | Python | AlexandreInsua/ExerciciosPython | /exercicios_parte06/exercicio02.py | UTF-8 | 490 | 4.25 | 4 | [] | no_license | # 2) Realiza una función llamada area_circulo() que devuelva el área de un círculo a partir de un radio.
# Calcula el área de un círculo de 5 de radio:
# Nota: El área de un círculo se obtiene al elevar el radio a dos y
# multiplicando el resultado por el número pi. Puedes utilizar el valor 3.14159 como pi o
# importarlo del módulo math:
import math
def area_circulo(radio):
    """Return the area of a circle with the given radius (pi * r**2)."""
    return math.pi * radio ** 2

print("A área do círculo é {:.2f}".format(area_circulo(5)))
| true |
6f5abc22170e5e286017c201a05168ad5dc458cb | Python | skcsteven/Python-Team-Project | /Teamproject_edgeblurnoise.py | UTF-8 | 1,718 | 3.203125 | 3 | [] | no_license |
'''
===============================================================================
ENGR 133 Fa 2020
Assignment Information
Assignment: Python Team Project (edge blur/smoothing)
Author(s): Steve Chen, chen3626@purdue.edu
Eric Mesina, emesina@purdue.edu
Danny Mcnulty, mcnulty1@purdue.edu
Kyle Nematz, knematz@purdue.edu
Team ID: LC4-09
===============================================================================
'''
import numpy as np
''' function that will smooth image'''
def blur(image):
    """Apply 5x5 Gaussian (binomial-weight) smoothing to a 2-D image array.

    Border pixels within 2 of an edge, where the full 5x5 window does not
    fit, are left at 0 in the returned array.
    """
    # Zero-filled output so untouched border pixels are deterministic
    # (np.empty returned uninitialized memory at the borders).
    result = np.zeros(image.shape)
    # 5x5 binomial weight matrix approximating a Gaussian kernel.
    gauss_matrix = np.array([[1, 4, 6, 4, 1],
                             [4, 16, 24, 16, 4],
                             [6, 24, 36, 24, 6],
                             [4, 16, 24, 16, 4],
                             [1, 4, 6, 4, 1]])
    # Every position where the full 5x5 window fits: indices 2 .. n-3.
    # (The original range(3, n - 3) skipped the first and last valid centre.)
    for col in range(2, len(image[0]) - 2):
        for row in range(2, len(image) - 2):
            # Weighted average over the 5x5 neighbourhood.
            result[row][col] = np.average(image[row - 2:row + 3, col - 2:col + 3],
                                          weights=gauss_matrix, axis=(0, 1))
    return result
'''
===============================================================================
ACADEMIC INTEGRITY STATEMENT
I have not used source code obtained from any other unauthorized
source, either modified or unmodified. Neither have I provided
access to my code to another. The project I am submitting
is my own original work.
===============================================================================
'''
| true |
4f21855c9233d6edf07933e434c5fe089d5023ec | Python | InFinity54/CESI_Python_LoLDDragon | /riotapi/config/apikey.py | UTF-8 | 968 | 3.03125 | 3 | [] | no_license | import os
from assets.colors.fore import ForeColor
from assets.font import FontStyle
# Returns the key used to call the Riot Games APIs, read from the dedicated
# configuration file (config/apikey.txt); returns False when unavailable.
def get_riot_api_key():
    file_path = os.path.join(os.path.abspath(os.getcwd()), "config/apikey.txt")
    if os.path.exists(file_path):
        # NOTE(review): the file handle is never closed; a `with` block
        # would be safer.
        apikey_file = open(file_path, "r")
        apikey_file_lines = apikey_file.readlines()
        if len(apikey_file_lines) > 0:
            # NOTE(review): readlines() keeps the trailing newline, so the
            # returned key likely ends with '\n' -- confirm callers strip it.
            apikey = apikey_file_lines[0]
            if (apikey.startswith("RGAPI-")):
                return apikey
            else:
                print(ForeColor.Yellow + "Clé d'API Riot Games incorrecte." + FontStyle.Normal)
        else:
            print(ForeColor.Red + "Aucune clé d'API Riot Games renseignée." + FontStyle.Normal)
    else:
        print(ForeColor.Red + "Fichier de configuration de la clé d'API Riot Games introuvable." + FontStyle.Normal)
    return False
| true |
953140f24bd48ed948bff4fdbb4dccaeec8f5976 | Python | VijiKrishna3/100_Days_of_Coding_Other | /Day 002 - Medium/py_solution.py | UTF-8 | 1,127 | 3.96875 | 4 | [
"MIT"
] | permissive | # Node class
class Node:
    """Binary-tree node: a payload plus optional left/right children."""

    def __init__(self, data):
        self.data = data
        self.left = self.right = None
def countSingleRec(root, c):
    """Post-order walk counting unival (single-valued) subtrees.

    c is a one-element list acting as a mutable counter: c[0] is bumped for
    every subtree whose nodes all share one value.  Returns True when the
    subtree rooted at *root* is itself unival (an empty tree counts).
    """
    if root is None:
        return True
    left_unival = countSingleRec(root.left, c)
    right_unival = countSingleRec(root.right, c)
    # A subtree can only be unival when both children's subtrees are.
    if not (left_unival and right_unival):
        return False
    # Either child disagreeing with this node's value breaks univalness.
    if root.left is not None and root.left.data != root.data:
        return False
    if root.right is not None and root.right.data != root.data:
        return False
    c[0] += 1
    return True
# countSingle function
def countSingle(root):
    """Return how many subtrees of *root* are unival (all one value)."""
    tally = [0]
    countSingleRec(root, tally)
    return tally[0]
# Driver Code
if __name__ == "__main__":
    """ Testing out tree:
            0
           /   \
          1     0
               / \
              1   0
             / \
            1   1 """
    root = Node(0)
    root.left = Node(1)
    root.right = Node(0)
    root.right.left = Node(1)
    root.right.right = Node(0)
    root.right.left.left = Node(1)
    root.right.left.right = Node(1)
    # Expected: 5 unival subtrees (the four single-node leaves plus the
    # all-ones subtree rooted at root.right.left).
    print("Unival tree count: " , countSingle(root))
| true |
3546522dbaef527dcd72c9fa62f90275271838c1 | Python | ta09472/algo | /binary_search_01.py | UTF-8 | 827 | 3.03125 | 3 | [] | no_license | # import sys
# n = int(input())
# lst = set(list(map(int, input().split())))
# m = int(input())
# request = sorted(list(map(int, input().split())))
#
# for i in request:
# if i in lst:
# print("yes", end = " ")
# else:
# print("no", end = " ")
# Read: item count, the items (sorted so binary search applies), the query
# count, and the query values.
# NOTE(review): m is read but never used; the loop below iterates over all
# of request regardless.
n = int(input())
w_item = sorted(list(map(int, input().split())))
m = int(input())
request = list(map(int, input().split()))
def binary_search(array, target, start, end):
    """Classic binary search over the sorted slice array[start:end + 1].

    Returns the index of *target*, or None when it is absent.
    """
    lo, hi = start, end
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = array[middle]
        if candidate == target:
            return middle
        if candidate > target:
            hi = middle - 1
        else:
            lo = middle + 1
    return None
# For each query, report whether it is present in the sorted item list.
for i in request:
    ans = binary_search(w_item, i,0,n-1)
    # Idiom fix: identity comparison ("is not None") is the correct way to
    # test for the None sentinel (was: ans != None).
    if ans is not None:
        print("yes", end = " ")
    else:
        print("no", end = " ")
| true |
77849a31c3c77d270334f31cf1b4891d3e930706 | Python | jinurajan/Datastructures | /LeetCode/top_interview_qns/trees/symmetric_tree.py | UTF-8 | 1,939 | 4.59375 | 5 | [] | no_license | """
Symmetric Tree
Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
For example, this binary tree [1,2,2,3,4,4,3] is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following [1,2,2,null,3,null,3] is not:
1
/ \
2 2
\ \
3 3
Follow up: Solve it both recursively and iteratively.
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution1:
    """Recursive check: a tree is symmetric iff its left and right subtrees
    are mirror images of each other."""

    def isSymmetric(self, root: TreeNode) -> bool:
        # An empty tree, or a lone root, is trivially symmetric.
        if not root or (not root.left and not root.right):
            return True

        def mirrored(a, b):
            # Both missing -> mirrors; exactly one missing -> not.
            if not a and not b:
                return True
            if not a or not b:
                return False
            return (a.val == b.val
                    and mirrored(a.left, b.right)
                    and mirrored(a.right, b.left))

        return mirrored(root.left, root.right)
class Solution:
    """Iterative check: pairs of nodes that must mirror each other are kept
    on a stack and compared until exhausted or a mismatch is found."""

    def isSymmetric(self, root: TreeNode) -> bool:
        if not root or (not root.left and not root.right):
            return True
        pending = [root.left, root.right]
        while pending:
            first = pending.pop()
            second = pending.pop()
            if not first and not second:
                continue
            if not first or not second:
                return False
            if first.val != second.val:
                return False
            # Push outer and inner child pairs, matching the original order.
            pending.append(first.left)
            pending.append(second.right)
            pending.append(first.right)
            pending.append(second.left)
        return True
# Build the symmetric example tree [1,2,2,3,4,4,3] and check it; the
# iterative Solution should print True.
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(2)
root.left.left = TreeNode(3)
root.left.right = TreeNode(4)
root.right.right = TreeNode(3)
root.right.left = TreeNode(4)
print(Solution().isSymmetric(root))
| true |
87f42f803e3d399ab8b6adca0edcf952337cbe99 | Python | testerkurio/python_learning | /Automate_the_boring_stuff_with_Python/3.11.2_collatzTry.py | UTF-8 | 396 | 4 | 4 | [] | no_license | def collatz(number):
    """One Collatz step: print and return number // 2 when even, else 3n+1."""
    if (number%2) == 0:
        print(number//2)
        return(number//2)
    else:
        print(3*number+1)
        return(3*number+1)
# The while loop is inside the try as well, so any error skips the loop and
# jumps straight out to the except handler.
try:
    number = int(input('Enter number: \n'))
    while True:
        number = collatz(number)
        if number == 1:
            break
except ValueError:
    print('You must enter a int number.') | true |
b09c13488d8b379e6508fc7123236cc86f4b9a54 | Python | quangbk2010/SelfTraining | /MachineLearning/MLCoban/GradientDescent/GradientDescent.py | UTF-8 | 1,197 | 4 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 25 10:57:35 2017
@author: quang
Input: f(x) = x**2 + 5*sin (x)
Ouput: find the global minimum by using Gradient descent (based on local minimum)
- More reference:
+ https://phvu.net/2012/07/08/gradient-based-learning/
"""
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals # Eg. in python2: 8/7 = 1, but in python 3: 8/7=1.142857
#import math
import numpy as np
import matplotlib.pyplot as plt
def grad(x):
    """Gradient of f(x) = x**2 + 5*sin(x)."""
    linear = 2 * x
    wave = 5 * np.cos(x)
    return linear + wave
def cost(x):
    """Objective value f(x) = x**2 + 5*sin(x)."""
    quadratic = x**2
    wave = 5 * np.sin(x)
    return quadratic + wave
# Gradient descent main
def myGD1(eta, x0):
    """Gradient descent on f with step size *eta*, starting at *x0*.

    Runs at most 100 steps, stopping early once |f'(x)| < 1e-3.  Returns
    (list of visited points, index of the last iteration).
    """
    path = [x0]
    for step in range(100):
        nxt = path[-1] - eta * grad(path[-1])
        if abs(grad(nxt)) < 1e-3:
            break
        path.append(nxt)
    return (path, step)
# Two starting points on either side of the minimum; both runs should
# converge near the global minimum of f(x) = x**2 + 5*sin(x).
(x1, it1) = myGD1(.1, -5)
print ('With x1 = %f, cost = %f, obtained after %d iterations' %(x1[-1], cost(x1[-1]), it1))
(x2, it2) = myGD1(.1, 5)
print ('With x1 = %f, cost = %f, obtained after %d iterations' %(x2[-1], cost(x2[-1]), it2))
| true |
702c4cdc113956d21ad45c488123290469da7610 | Python | xingdashuai/fdg | /lianxi02_fixture_method.py | UTF-8 | 714 | 2.9375 | 3 | [] | no_license | import unittest
import time
# Test class definition --- it must inherit from unittest.TestCase.
class TestFixture(unittest.TestCase):
    # Override the parent setUp method -- a method-level fixture that runs
    # automatically BEFORE each test method.
    def setUp(self):
        # (The tearDown override below is the method-level fixture that runs
        # automatically AFTER each test method.)
        # Test methods must have names starting with "test".
        print("打开浏览器...")  # "Opening browser..."
    def tearDown(self):
        # print("用例1_验证码不能为空验证")
        print("关闭浏览器...")  # "Closing browser..."
    # def test_02(self):
    #     print("打开浏览器...")
    def test_01(self):
        print("用例2_密码不能为空验证")  # "Case 2: password must not be empty"
        print("关闭浏览器...")
| true |
1d8deb40a861a0ac5eb67a25b3a62c23399a4ebc | Python | ssshhhrrr/Network_Formation_Game | /games/products/score_system/AbstractScoreComputationSystem.py | UTF-8 | 738 | 3 | 3 | [] | no_license | from abc import ABC, abstractmethod
from ..evaluation_system import AbstractNodeEvaluationSystem
class AbstractScoreComputationSystem(ABC):
    """
    Strategy interface for computing a score for every node of a graph.
    """
    @abstractmethod
    def computeNodeScore(self, graph, node, evalSytem : AbstractNodeEvaluationSystem):
        # Compute and return the score of a single node.
        # NOTE(review): the parameter name "evalSytem" is a typo for
        # "evalSystem"; renaming would break keyword-argument callers, so it
        # is only flagged here.
        raise NotImplementedError
    def computeAllScores(self, graph, evalSystem : AbstractNodeEvaluationSystem):
        """ Compute the score of each node of the graph and set nodes "score" attribute
        Parameters
        -----
        graph(nx.Graph) : graph at which the score of each node is computed
        """
        nodes = list(graph.nodes)
        for n in nodes:
            # Delegates the per-node computation to the subclass.
            graph.nodes[n]['score'] = self.computeNodeScore(graph, n, evalSystem) | true |
8ca5e72f92c7a4bc0ecb95088ccd0cb34a492b13 | Python | ulyssesz/Old-Website | /handle_incoming_email.py | UTF-8 | 2,418 | 2.515625 | 3 | [] | no_license | import logging, email
import wsgiref.handlers
import exceptions
from google.appengine.api import mail
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
class LogSenderHandler(InboundMailHandler):
    """GAE inbound-mail handler that logs every field of a received message.

    Python 2 / webapp-era code: relies on the `exceptions` module and on
    `basestring`, both removed in Python 3.
    """
    def receive(self, mail_message):
        """Log sender, recipients, date, HTML bodies and attachment names."""
        logging.info("================================")
        logging.info("Received a mail_message from: " + mail_message.sender)
        #logging.info("The email subject: " + mail_message.subject)
        # NOTE(review): str.join(s, ', ') is s.join(', '), i.e. it inserts
        # the recipient string BETWEEN ',' and ' '.  The intent was almost
        # certainly ', '.join(mail_message.to).
        logging.info("The email was addressed to: " + str.join(str(mail_message.to), ', '))
        try:
            logging.info("The email was CC-ed to: " + str.join(mail_message.cc, ', '))
        except exceptions.AttributeError :
            logging.info("The email has no CC-ed recipients")
        try:
            logging.info("The email was send on: " + str(mail_message.date))
        except exceptions.AttributeError :
            logging.info("The email has no send date specified!!!")
        plaintext_bodies = mail_message.bodies('text/plain')
        html_bodies = mail_message.bodies('text/html')
        for content_type, body in html_bodies:
            decoded_html = body.decode()
            logging.info("content_type: " + content_type)
            logging.info("decoded_html: " + decoded_html)
        attachments = []
        # hasattr(a, 'property')
        # http://stackoverflow.com/questions/610883/how-to-know-if-an-object-has-an-attribute-in-python
        try:
            # Normalise to a list of (filename, content) pairs; a single
            # attachment arrives as a bare pair.
            if mail_message.attachments :
                if isinstance(mail_message.attachments[0], basestring):
                    attachments = [mail_message.attachments]
                else:
                    attachments = mail_message.attachments
        except exceptions.AttributeError :
            logging.info("This email has no attachments.")
        logging.info("number of attachments: " + str(len(attachments)))
        for filename, content in attachments:
            #logging.info("plaintext_bodies: " + plaintext_bodies)
            logging.info("filename: " + filename)
            # NOTE(review): bare expression -- the attachment content is
            # evaluated but never logged or otherwise used.
            content
        logging.info("--------------------------------")
def main():
    """Wire the inbound-mail route and hand the WSGI app to the CGI runner."""
    application = webapp.WSGIApplication([LogSenderHandler.mapping()], debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main() | true |
2b5bcd4492f473f823a6eb04792c13d2765aacf8 | Python | KhanZaeem/Random-Forest | /RandomForest3.py | UTF-8 | 1,011 | 3.125 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import seaborn as sn
# Train and evaluate a random forest on train.csv.
data = read_csv('train.csv')
#Extract attribute names from the data frame
feat = data.keys()
print(feat)
#feat_labels = feat.get_values()
#Extract data values from the data frame
dataset = data.values
print('Shape = ',dataset.shape)
# Columns 0..93 are the features, column 94 is the class label.
X = dataset[:,0:94]
y = dataset[:,94]
# Hold out 20% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)
model = RandomForestClassifier(n_estimators=20)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print('Score = ', score)
y_predicted = model.predict(X_test)
cm = confusion_matrix(y_test, y_predicted)
print('Confusion Matrix')
print(cm)
# Render the confusion matrix as an annotated heatmap.
plt.figure(figsize=(10,7))
sn.heatmap(cm, annot=True)
plt.xlabel('Predicted')
plt.ylabel('Truth')
plt.show()
| true |
45d330cf99464cda94b073db8c251271c79b5a5d | Python | oscar60310/GoofyBot | /irc/room.py | UTF-8 | 1,701 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import random
class room:
    """Chat-room command handler for the Twitch IRC bot (Python 2)."""

    def __init__(self,twitch):
        # Facade object used to send messages and reach bot-wide settings.
        self.twitch = twitch
    def handle_msg(self,room,confrom,msgs):
        """Dispatch a chat message: handles the "!暱稱" (nickname) and
        "!猜拳" (rock-paper-scissors) commands; everything else is ignored."""
        if msgs[0] == '!':
            args = msgs.split(' ')
            if args[0] == '!暱稱':
                # "!nickname <name>": remember a per-user display name.
                if len(args) != 2:
                    self.twitch.send_to_room(room,"!暱稱 [你想要的名子]")
                    return
                if self.twitch.botroom.setting.canEditNickInRoom(room):
                    self.twitch.botroom.setting.change_nick(room,confrom,args[1])
                    self.twitch.send_to_room(room,"Hi %s,我記得你了!" % args[1])
                else:
                    self.twitch.send_to_room(room,"這個聊天室目前不開放自由修改暱稱喔")
            elif args[0] == '!猜拳':
                # Rock-paper-scissors, only if enabled for this room.
                if not self.twitch.botroom.setting.cando(room,'guess'):
                    return
                if len(args) != 2:
                    self.twitch.send_to_room(room,":p")
                    return
                # Bot picks rock / scissors / paper at random.
                com = random.choice(['石頭','剪刀','布'])
                if args[1] == '石頭' or args[1] == '剪刀' or args[1] == '布':
                    if com == args[1]:
                        self.twitch.send_to_room(room,'我出 %s ,平手QQ' % com)
                    elif com == '石頭':
                        # win=True means the bot's throw beats the user's.
                        self.guess(args[1] == '剪刀',com,room,confrom)
                    elif com == '剪刀':
                        self.guess(args[1] == '布',com,room,confrom)
                    else:
                        self.guess(args[1] == '石頭',com,room,confrom)
                else:
                    # Anything that is not rock/scissors/paper counts as cheating.
                    self.twitch.send_to_room(room,"%s 有人作弊啦" % room)
    def guess(self,win,com,room,who):
        # Announce the round's outcome; `win` is True when the bot won
        # (losing branch continues on the next line to time the user out).
        if win:
            self.twitch.send_to_room(room,'我出 %s ,輸了吧XD' % com)
        else:
            self.twitch.send_to_room(room,'我出 %s ,可惡...' % com)
self.twitch.send_to_room(room,'/timeout %s 1' % who) | true |
9b830da82718ff7a4d13f5d6a605adce2d8fa304 | Python | mcurry51/EBEC | /sum_average.py | UTF-8 | 1,039 | 4.34375 | 4 | [] | no_license | ################################################################################
# Author: Michael Curry
# Date: 02/28/2021
# This program sums up the value given by the individual,
# then sums and takes the average of all the inputted values.
################################################################################
# Read non-negative numbers until a negative sentinel, then print their
# sum and average formatted to two decimal places.
starting_num = float(input('Enter a non-negative number (negative to quit): '))
sum_list = [] # Accumulates every accepted (non-negative) value
if starting_num < 0: # First input already negative: nothing to sum
    print('No input.')
else:
    while starting_num >= 0: # Keep reading until the negative sentinel
        sum_list.append(starting_num)
        starting_num = float(input('Enter a non-negative number (negative to quit): '))
    sum_total = sum(sum_list) # Sum of all accepted values
    # NOTE(review): the sum/average are recomputed inside the f-strings below;
    # sum_total / average could be reused instead.
    sum_total_float = f'{float(sum(sum_list)):.2f}'
    average = sum_total / len(sum_list) # Average of the list
    average_float = f'{float(sum_total / len(sum_list)):.2f}'
    print(f'Sum = {sum_total_float}')
    print(f'Average = {average_float}')
| true |
2c72dc037b62b5cfbe7e26f9a4421151d24f6ad3 | Python | bendmorris/rosalind | /stronghold/fib.py | UTF-8 | 121 | 2.875 | 3 | [] | no_license | data = raw_input()
# Rosalind FIB (Python 2): rabbit pairs after n months, where each mature
# pair produces k new pairs per month: F(n) = F(n-1) + k*F(n-2).
# `data` holds the raw input line read above: "n k".
n, k = [int(x) for x in data.split()]
a, b, = 1, 1
for _ in range(n-2):
    a, b = b, (a*k)+b
print b
| true |
9c2ff3459f71a2cb1a4e50e2f65b5e55f1dfd4e6 | Python | rheehot/ProblemSolving_Python | /baekjoon_1072.py | UTF-8 | 595 | 3.390625 | 3 | [] | no_license | '''
Problem Solving Baekjoon 1072
Author: Injun Son
Date: October 18, 2020
'''
from collections import deque
import sys
import math
# X = games played, Y = games won; Z = current win rate in whole percent.
X, Y = map(int, input().split())
Z = math.floor(100 * Y / X)
low, high = 0, 1000000000
result = 0
# If the win rate is already 99%, it can never reach 100% because at least
# one loss is on record — answer is -1.
if Z >= 99:
    print(-1)
else:
    # Binary search the minimum number of consecutive wins needed to raise
    # the (floored) win-rate percentage above Z.
    while low <= high:
        mid = (low + high) //2
        tx, ty = X+mid, Y+mid
        if math.floor(100*ty / tx) > Z:
            high = mid -1
            result = mid
        else:
            low = mid +1
    print(result)
| true |
b95ffd970b175ec220f668bf23ae55fe6ad14fcf | Python | kujaw/Rosalind | /RNA.py | UTF-8 | 290 | 2.609375 | 3 | [] | no_license | __author__ = 'kujaw'
import time
# Rosalind RNA: transcribe DNA to RNA (replace every T with U) and report timing.
start = time.perf_counter()
start2 = time.process_time()
with open('rosalind_rna.txt', 'r') as f:
    rna = str(f.read())
print(str.replace(rna, 'T', 'U'))
print("Timer: ", time.perf_counter() - start)
print("Timer2: ", time.process_time() - start2) | true |
def prime_factors(n):
    """Return the prime factorization of n as an ascending list with multiplicity.

    Trial division up to sqrt(n): once i*i > n, whatever remains of n is prime.
    This turns the worst case (n prime) from O(n) divisions into O(sqrt(n)).
    Returns [] for n <= 1.
    """
    result = []
    i = 2
    while i * i <= n:
        while n % i == 0:
            result.append(i)
            n //= i
        i += 1
    if n > 1:
        result.append(n)  # leftover n is the last (largest) prime factor
    return result
def factors(n):
    """Return every positive divisor of n in ascending order."""
    divisors = set()
    d = 1
    # Each divisor d <= sqrt(n) pairs with its cofactor n // d.
    while d * d <= n:
        if n % d == 0:
            divisors.add(d)
            divisors.add(n // d)
        d += 1
    return sorted(divisors)
if __name__ == "__main__":
    # Quick smoke demo of both helpers.
    print(prime_factors(10))
    print(prime_factors(128))
    print(factors(128))
| true |
5491368ebe165a4f551f69bacd3bc61ce79238ec | Python | edithabofra/Python-HyperionDev | /Loop1.py | UTF-8 | 239 | 4.3125 | 4 | [] | no_license | # Prompt user to enter a number
num = int(input("Enter a number: "))
# Start the counter at zero
i = 0
# Print every even number from 0 up to and including num (steps of 2)
while i <= num:
    print(str(i))
    i += 2
c931017280525856f6a718bda55b319624848a15 | Python | yiqin/HH-Coding-Interview-Prep | /Use Python/ContainsDuplicateII.py | UTF-8 | 542 | 3.1875 | 3 | [] | no_license | class Solution(object):
"""docstring for Solution"""
def containsNearbyDuplicate(self, nums, k):
numsDict = dict()
for i in range(len(nums)):
if nums[i] in numsDict:
indexes = numsDict[nums[i]]
# print(indexes, i)
for j in range(len(indexes)):
diff = i - indexes[j]
# print(diff)
if diff <= k:
return True
indexes.append(i)
else:
numsDict[nums[i]] = [i]
return False
nums = [1, 2, 3, 4, 5, 6, 4]
solution = Solution()
# Fix: the method is named containsNearbyDuplicate; the original call used
# "containsNearByDuplicate" (capital B), which raised AttributeError at runtime.
result = solution.containsNearbyDuplicate(nums, 5)
print(result) | true |
b8759acc17e78f432b329a57e50aca893ab2aca0 | Python | jrsaavedra1022/jrsaavedra-python | /clase5-tipo-datos.py | UTF-8 | 234 | 4.09375 | 4 | [] | no_license | #con la opcio type pueda base el tipo d dato
# Show the Python type of a few literals via type().
print(f"Tipo entero: {type(2)}")
print(f"Tipo string: {type('hola')}")
print(f"Tipo float: {type(2.5)}")
print(f"Tipo boolean: {type(True)}")
# print(f"Tipo object: {type(["hola", 12])}") | true |
66346dfa62c54d4f012a38bb129e2014145279dc | Python | cosmozhang/ACE-KL | /lime_ae.py | UTF-8 | 6,578 | 2.515625 | 3 | [] | no_license | # lime_ae.py
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import lime
from lime import lime_tabular
from ae_tf import AutoEncoder_tf
from sklearn import preprocessing
import pandas as pd
import sys
from scipy.stats import spearmanr, describe
def plot_ranking(filename, tuples1, tuples2):
    """Scatter LIME vs AE variable rankings (12 features) and save to
    <filename>_pt.png; the title shows their Spearman rank correlation."""
    fig, ax = plt.subplots()
    ax.scatter(tuples1, tuples2, c = range(12), marker='s')
    # Label each point with its feature index, slightly offset for legibility.
    for i in range(12):
        ax.annotate(i, (tuples1[i]+0.25, tuples2[i]+0.25))
    plt.xlim((-1,12))
    plt.ylim((-1,12))
    # plt.legend(loc = 0)
    plt.xticks(np.arange(-1, 13))
    plt.yticks(np.arange(-1, 13))
    plt.xlabel("Lime Variable Ranking")
    plt.ylabel("AE Variable Ranking")
    # Spearman rho quantifies agreement between the two rankings.
    rho, p = spearmanr(tuples1, tuples2)
    plt.title('rho: ' + str(rho) + ', p: '+str(p))
    plt.savefig(filename+'_pt.png', format = 'png')
def plot_magnitude(lime_res, direc_res, scale_value, filename):
    """Grouped bar chart per feature (12 features): normalised absolute LIME
    weights, AE per-feature reconstruction error (sqrt), and the absolute
    scaled input values; saved to <filename>_mag.png."""
    # Normalise |LIME weight| so the 12 bars sum to 1.
    lime_mag = np.asarray([np.absolute(lime_res[i][1]) for i in range(12)])/np.sum([np.absolute(lime_res[i][1]) for i in range(12)])
    direc_mag = np.asarray([np.sqrt(direc_res[i][1]) for i in range(12)])
    # value_mag = np.asarray([np.absolute(scale_value[i]) for i in range(12)])/np.sum([np.absolute(scale_value[i]) for i in range(12)])
    fig, ax = plt.subplots()
    index = np.arange(12)
    bar_width = 0.33
    opacity = 0.8
    rects1 = plt.bar(index, lime_mag, bar_width,
                     alpha=opacity,
                     color='b',
                     label='Lime')
    rects2 = plt.bar(index + bar_width, direc_mag, bar_width,
                     alpha=opacity,
                     color='g',
                     label='Direc')
    rects3 = plt.bar(index + 2*bar_width, np.abs(scale_value), bar_width,
                     alpha=opacity,
                     color='r',
                     label='scale')
    plt.xlabel('Variable Index')
    plt.ylabel('Contribution')
    # plt.title('Scores by person')
    plt.xticks(index + 1.5*bar_width, index)
    plt.legend()
    plt.tight_layout()
    plt.savefig(filename+'_mag.png', format = 'png')
def gen_syndata(rng, input_dim):
    """Draw 4000 synthetic count samples: one fixed Poisson rate per feature,
    shifted by +1 so every entry is strictly positive.

    NOTE(review): the rate vector has 12 entries, so this broadcasts only
    when input_dim == 12 — confirm before calling with other widths.
    """
    rates = np.asarray([2, 0, 53, 185, 27, 15172, 195, 29, 16166, 13, 2, 793])
    samples = rng.poisson(rates, size=(4000, input_dim))
    return samples + 1
def perturb(rng, input_dim, data, perturb_ind=1):
    """Return data plus Poisson noise that is zero in every column except
    column perturb_ind, whose noise rate is 10000 (1000 rows of noise)."""
    rates = np.zeros(input_dim)
    rates[perturb_ind] += 10000
    noise = rng.poisson(rates, size=(1000, input_dim)).astype("float32")
    return data + noise
def main():
    """Train an auto-encoder on the CSV given in argv[1], then explain one
    artificially perturbed row (argv[2]) with LIME and compare LIME's feature
    ranking against the AE's per-feature reconstruction error (Python 2)."""
    # Hyper-parameters for the auto-encoder and training loop.
    epochs = 100
    batch_size = 400
    input_dim = 12
    hidden_dim = 6
    rng = np.random.RandomState(12345)
    # CLI: <csv path> <row id to explain> <feature index to perturb>
    csv_in_file_name = sys.argv[1]
    test_id = int(sys.argv[2])
    perturb_ind = int(sys.argv[3])
    # Prefer GPU placement, fall back to CPU on any failure.
    try:
        with tf.device("/gpu:0"):
            print "Using gpu!"
            ae = AutoEncoder_tf(rng, input_dim, hidden_dim)
    except:
        with tf.device("/cpu:0"):
            print "Using cpu!"
            ae = AutoEncoder_tf(rng, input_dim, hidden_dim)
    # Train
    # min_max_scaler = preprocessing.MinMaxScaler()
    # rawdataX = gen_syndata(rng, input_dim)
    # NOTE(review): DataFrame.as_matrix() and np.float below are deprecated in
    # newer pandas/numpy; this file targets the older Python 2 stack.
    rawdataX = pd.read_csv(csv_in_file_name, header=None).as_matrix()
    # '''
    # print 'before', rawdataX[10]
    raw_testX_positive = perturb(rng, input_dim, rawdataX[3000:], perturb_ind)
    # dataX = preprocessing.scale(np.concatenate((rawdataX, raw_testX_positive), axis = 0))
    # Standardise features using statistics of the raw training data only.
    mean_std_scaler = preprocessing.StandardScaler().fit(rawdataX.astype(np.float))
    trainX = mean_std_scaler.transform(rawdataX.astype(np.float))
    # dataX = preprocessing.normalize(rawdataX, norm='l2')
    # trainX = dataX[:4000]
    # testX_positive = dataX[4000:]
    testX_positive = mean_std_scaler.transform(raw_testX_positive)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        print 'Training AutoEncoder...'
        # Mini-batch training: reshuffle each epoch, 10 batches per epoch.
        for epoch in range(epochs):
            rng.shuffle(trainX)
            for batch_ind in range(10):
                batch_xs = trainX[batch_ind*batch_size: (batch_ind+1)*batch_size]
                # print batch_xs[0]
                train_loss = ae.train(batch_xs, sess)
                # print 'epoch, loss = {}: {}'.format(epoch, train_loss)
        print 'Trained AutoEncoder.'
        # print 'loss (train) = ', ae.predict([trainX[0]])
        feature_names = [str(x) for x in range(input_dim)]
        explainer = lime_tabular.LimeTabularExplainer(trainX, feature_names = feature_names, class_names=['Normal'], verbose=True)
        # test_id = 8
        # examed_example = trainX[3000+test_id]
        # examed_example = testX_positive[test_id]
        # Build the anomalous example: raw row plus a fixed offset vector.
        examed_example = rawdataX[test_id] + np.asarray([100, 8, 100, 172, 30, 30000, 200, 31, 1000, 14, 0, 800])
        scaled_examed_example = mean_std_scaler.transform(examed_example.reshape(1, -1).astype(np.float)).flatten()
        print scaled_examed_example
        print 'Training LIME...'
        exp = explainer.explain_instance(scaled_examed_example, ae.calas, labels=[0], num_features=12)
        print 'Trained LIME.'
        # print exp.as_map()[0]
        # LIME attributions sorted by feature index, then by |weight| desc.
        lime_res = sorted(exp.as_map()[0], key=lambda x: x[0])
        sorted_lime_res = sorted(lime_res, key=lambda x: np.absolute(x[1]), reverse = True)
        print "lime", sorted_lime_res
        lime_ind_ord = [ele[0] for ele in sorted_lime_res]
        # print lime_ind_ord
        lime_to_figure = [lime_ind_ord.index(u) for u in range(12)]
        # print lime_to_figure
        # print scaled_examed_example
        # print ae.predict(np.asarray([scaled_examed_example]))[0]
        # AE attribution: squared per-feature reconstruction error.
        direc_res = [(i, v) for i, v in enumerate((scaled_examed_example-ae.predict(np.asarray([scaled_examed_example]))[0])**2)]
        sorted_direc_res = sorted(direc_res, key=lambda x: x[1], reverse = True)
        print "AE", sorted_direc_res
        direc_ind_ord = [ele[0] for ele in sorted_direc_res]
        # print direc_ind_ord
        direc_to_figure = [direc_ind_ord.index(u) for u in range(12)]
        # print direc_to_figure
        # plot_ranking(str(test_id), lime_to_figure, direc_to_figure)
        plot_magnitude(lime_res, direc_res, scaled_examed_example, str(test_id))


if __name__ == '__main__':
    main()
| true |
cbdd3493a109ec0868fc615c467a824fb6b144ef | Python | Jiwon0801/MachineLearning | /Keras/ANN.py | UTF-8 | 840 | 2.921875 | 3 | [] | no_license | from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import sim_data
x_data, y_data = sim_data.load_data()
# Classifier: single softmax layer mapping 2 inputs to 3 classes.
model = Sequential()
model.add(Dense(3, input_shape=(2,), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Training
history= model.fit(x_data, y_data, epochs=100, verbose=0)
# Evaluation — returns the metrics declared in compile()
loss, accuracy = model.evaluate(x_data, y_data, batch_size=100)
print(loss, accuracy)
# Manual evaluation: compare argmax of one-hot targets and predictions
targets = np.argmax(y_data, axis=1)
predicts = np.argmax(model.predict(x_data), axis=1)
print('Targets:', targets )
print('Predictions:', predicts )
print('accuracy : ', np.equal(targets, predicts).astype('float32').mean()) | true |
d39352375fe3acbede7298bb2d22bbad3d7aed77 | Python | Fabrolly/TrenoBot | /telegram-bot/telegram_bot/tests_integration/test_trip_search.py | UTF-8 | 9,734 | 2.890625 | 3 | [] | no_license | import re
import datetime
import unittest
from telegram_bot.tests_integration.test_utility_methods import *
from telegram_bot.bot import create_db
def extract_assert_hour(text, time):
    """Check that *text* holds exactly two '!'-separated solutions whose
    departure datetimes (first dd-mm-YYYY date + second HH:MM time found
    in each solution) are not earlier than today at *time* ("H:M")."""
    solutions = text.split("!")
    ok = len(solutions) == 2
    today = datetime.datetime.today()
    threshold = datetime.datetime.strptime(
        "{}-{}-{} {}".format(today.day, today.month, today.year, time),
        "%d-%m-%Y %H:%M",
    )
    for solution in solutions:
        found_dates = re.findall(r"\d{2}-\d{2}-\d{4}", solution)
        found_times = re.findall(r"\d{2}:\d{2}", solution)
        candidate = datetime.datetime.strptime(
            found_dates[0] + " " + found_times[1], "%d-%m-%Y %H:%M"
        )
        ok = ok and candidate >= threshold
    return ok
def extract_assert_day(text, msg_date):
    """Check that *text* contains exactly two dd-mm-YYYY dates, none of them
    earlier than *msg_date* ("d-m", current year assumed)."""
    found = re.findall(r"\d{2}-\d{2}-\d{4}", text)
    ok = len(found) == 2
    # Zero-pad a single-digit day ("5-2" -> "05-2").
    if msg_date[0] != "0" and msg_date[1] == "-":
        msg_date = "0" + msg_date
    # Zero-pad a single-digit month ("05-2" -> "05-02").
    if msg_date[-2] == "-":
        msg_date = msg_date[:-1] + "0" + msg_date[-1]
    threshold = datetime.datetime.strptime(
        msg_date + "-" + str(datetime.datetime.today().year), "%d-%m-%Y"
    )
    for raw in found:
        ok = ok and threshold <= datetime.datetime.strptime(raw, "%d-%m-%Y")
    return ok
class TestTripSearch(unittest.TestCase):
    def setUp(self):
        # Recreate the bot database from scratch so every test starts isolated.
        create_db(drop_tables=True)
# TEST FUNCTIONALITY - RICERCA UN TRENO
    def test_ricerca_treno(self):
        """A plain search returns up to two solutions mentioning the stations
        and an 'add to list' button per train."""
        test_search_msg = ["Ricerca da Milano a Roma", "Ricerca Milano Roma"]
        for msg in test_search_msg:
            response = call_mute_mp(msg)
            # response is (list of message texts, list of button rows).
            self.assertTrue(isinstance(response, tuple))
            self.assertTrue(isinstance(response[0], list))
            self.assertTrue(len(response[0]) <= 2)
            self.assertTrue(isinstance(response[1], list))
            self.assertTrue(
                text_in_msg(
                    " ".join(response[0]),
                    ["Soluzione", "Treno", "Durata", "Milano", "Roma"],
                )
            )
            self.assertTrue(
                text_in_buttons(
                    response[1],
                    ["Aggiungi 1111 alla lista", "Aggiungi 2222 alla lista"],
                    True,
                )
            )
# TEST FUNCTIONALITY - RICERCA UN TRENO ERROR
    def test_ricerca_treno_error_stazione(self):
        """When the viaggiatreno API cannot answer a valid command, the bot
        apologises and offers the main-menu button."""
        test_search_msg = [
            "Ricerca da Cosenza a Oggiono il 31-12",
            "Ricerca da Milano a Roma il 31-12",
        ]
        for msg in test_search_msg:
            response = call_mute_mp(msg)
            self.assertTrue(isinstance(response, tuple))
            key_word = [
                "Le API di viaggiotreno non sono in grado di rispondere a questa richeiesta anche se il tuo comando e' valido.",
                "Il motivo e' sconosciuto e da attributire a viaggiatreno.it",
                "Cerca qui il tuo treno: http://www.trenitalia.com/",
                "Quando sai il numero del tuo treno torna qui!",
            ]
            self.assertTrue(text_in_msg(response[0], key_word))
            self.assertTrue(text_in_buttons(response[1], ["Menu' principale"]))
# TEST FUNCTIONALITY - RICERCA UN TRENO CON ERROR IN STAZIONE PARTENZA O ARRIVO
    def test_ricerca_treno_error_partenza_arrivo(self):
        """An unknown departure or destination station yields an explicit
        error plus retry / main-menu buttons."""
        test_msg = ["Ricerca da Peslago a Lecco", "Ricerca da Lecco a Peslago"]
        for msg in test_msg:
            response = call_mute_mp(msg)
            self.assertTrue(isinstance(response, tuple))
            cond_departure = text_in_msg(
                response[0], ["Departure station not existing"]
            )
            cond_destination = text_in_msg(
                response[0], ["Destination station not existing"]
            )
            self.assertTrue(cond_departure or cond_destination)
            self.assertTrue(
                text_in_buttons(response[1], ["Riprova", "Menu' principale"],)
            )
# TEST FUNCTIONALITY - RICERCA UN TRENO CON DATA E/O ORA SBAGLIATE
    def test_ricerca_treno_data_ora_error(self):
        """Malformed dates/times are rejected with a warning and no buttons."""
        test_msg = [
            "Ricerca da Milano a Roma il 30-2",
            "Ricerca da Milano a Roma il 31-11",
            "Ricerca da Milano a Roma il 0-8",
            "Ricerca da Milano a Roma il 8-0",
            "Ricerca da Milano a Roma il 15-13",
            "Ricerca da Milano a Roma il 1013",
            "Ricerca da Milano a Roma alle 25:30",
            "Ricerca da Milano a Roma alle 20:65",
            "Ricerca da Milano a Roma alle 1530",
        ]
        for msg in test_msg:
            response = call_mute_mp(msg)
            self.assertTrue(isinstance(response, tuple))
            key_word = [
                u"Attenzione, l'ora o la data inserita è <b>errata</b>",
                "Input ricevuto =",
                "La preghiamo di riprovare.",
            ]
            self.assertTrue(text_in_msg(response[0], key_word))
            # No keyboard is attached to a validation error.
            self.assertEqual(response[1], "")
# TEST FUNCTIONALITY - RICERCA UN TRENO CON ORA
    def test_ricerca_treno_ora(self):
        """Searching with a start time only returns solutions departing at or
        after that time today (checked via extract_assert_hour)."""
        test_times = ["15:00", "00:00", "9:15", "07:10", "13:47", "20:00", "23:55"]
        test_msg = ["Ricerca da Milano a Roma alle ", "Ricerca Milano Roma alle "]
        for msg in test_msg:
            for start_time in test_times:
                response = call_mute_mp(msg + start_time)
                self.assertTrue(isinstance(response, tuple))
                self.assertTrue(isinstance(response[0], list))
                self.assertTrue(len(response[0]) <= 2)
                self.assertTrue(isinstance(response[1], list))
                self.assertTrue(extract_assert_hour("!".join(response[0]), start_time))
                self.assertTrue(
                    text_in_msg(
                        " ".join(response[0]),
                        ["Soluzione", "Treno", "Durata", "Milano", "Roma"],
                    )
                )
                self.assertTrue(
                    text_in_buttons(
                        response[1],
                        ["Aggiungi 1111 alla lista", "Aggiungi 2222 alla lista"],
                        True,
                    )
                )
# TEST FUNCTIONALITY - RICERCA UN TRENO CON GIORNO
# def test_ricerca_treno_giorno(self):
# test_dates = [
# "5-2",
# "05-8",
# "6-09",
# "07-04",
# "10-3",
# "4-10",
# "11-12",
# "10-04",
# "03-12",
# "10-10",
# "28-2",
# "30-11",
# "15-08",
# ]
# test_msg = ["Ricerca da Milano a Roma il ", "Ricerca Milano Roma il "]
# if datetime.datetime.now().hour <= 15:
# test_msg.append("Ricerca da Roma Tiburtina a Milano Lambrate il ")
# for msg in test_msg:
# for date in test_dates:
# response = call_mute_mp(msg + date)
# self.assertTrue(isinstance(response, tuple))
# self.assertTrue(isinstance(response[0], list))
# self.assertTrue(len(response[0]) <= 2)
# self.assertTrue(isinstance(response[1], list))
# self.assertTrue(extract_assert_day(" ".join(response[0]), date))
# self.assertTrue(
# text_in_msg(
# " ".join(response[0]),
# ["Soluzione", "Treno", "Durata", "Milano", "Roma"],
# )
# )
# self.assertTrue(
# text_in_buttons(
# response[1],
# ["Aggiungi 1111 alla lista", "Aggiungi 2222 alla lista"],
# True,
# )
# )
# # TEST FUNCTIONALITY - RICERCA UN TRENO CON ORA & GIORNO
# def test_ricerca_treno_ora_giorno(self):
# start_time = "10:30"
# day = (datetime.datetime.now() + datetime.timedelta(days=1)).day
# date = str(day) + "-" + str(datetime.datetime.now().month)
# test_msg = [
# "Ricerca da Milano a Roma alle " + start_time + " il " + date,
# "Ricerca da Milano a Roma il " + date + " alle " + start_time,
# "Ricerca Milano Roma alle " + start_time + " il " + date,
# "Ricerca Milano Roma il " + date + " alle " + start_time,
# "Ricerca Da Roma Tiburtina a Milano Lambrate alle "
# + start_time
# + " il "
# + date,
# "Ricerca Da Roma Tiburtina a Milano Lambrate il "
# + date
# + " alle "
# + start_time,
# ]
# for msg in test_msg:
# response = call_mute_mp(msg)
# self.assertTrue(isinstance(response, tuple))
# self.assertTrue(isinstance(response[0], list))
# self.assertTrue(len(response[0]) <= 2)
# self.assertTrue(isinstance(response[1], list))
# self.assertTrue(extract_assert_hour("!".join(response[0]), start_time))
# self.assertTrue(extract_assert_day(" ".join(response[0]), date))
# self.assertTrue(
# text_in_msg(
# " ".join(response[0]),
# ["Soluzione", "Treno", "Durata", "Milano", "Roma"],
# )
# )
# self.assertTrue(
# text_in_buttons(
# response[1],
# ["Aggiungi 1111 alla lista", "Aggiungi 2222 alla lista"],
# True,
# )
# )
# launch unit test cases
if __name__ == "__main__":
unittest.main()
| true |
bb89184da25eb94859f11f247b20e94dd93565fa | Python | ericchen15/wizard-ordering | /input_generator.py | UTF-8 | 846 | 2.90625 | 3 | [] | no_license | from string import ascii_lowercase
from random import shuffle
from numpy.random import choice
# Build the wizard-name pool: each letter in lower- and upper-case (52 names),
# then keep the first `size` of them in random order.
names = []
for c in ascii_lowercase:
    names.append(c)
    names.append(c.upper())

size = 50
num_constraints = 366
names = names[:size]
shuffle(names)
def random_constraint(names):
    """Pick three distinct wizards; order them so that the third one does NOT
    sit strictly between the first two in the `names` ordering."""
    picked = choice(len(names), 3, replace=False)
    first, second, third = picked[0], picked[1], picked[2]
    if first < third < second or first > third > second:
        # The constraint target landed between the endpoints — swap it out.
        first, third = third, first
    return [names[first], names[second], names[third]]
# Write the generated instance: first line is the wizard count, second line
# the space-separated names, then the constraint count and one constraint
# per line (written by the fused final line below).
with open('input50.in', 'w') as f:
    f.write(str(size) + '\n')
    f.write(names[0])
    for name in names[1:]:
        f.write(' ' + name)
    f.write('\n')
    f.write(str(num_constraints))
    for _ in range(num_constraints):
        constraint = random_constraint(names)
f.write('\n' + constraint[0] + ' ' + constraint[1] + ' ' + constraint[2]) | true |
def spin_words(sentence):
    """Reverse every word of five or more letters, preserving word order.

    Uses str.join instead of repeated string concatenation (the original
    accumulated `result += ...` and sliced off a leading space), which is
    both linear-time and removes the slice hack. Output is identical,
    including for the empty string and multiple consecutive spaces.
    """
    return " ".join(
        word[::-1] if len(word) >= 5 else word
        for word in sentence.split(" ")
    )
| true |
da2d7b030f16fd2348d9fba9572b6e516c3d69d9 | Python | meshalalsultan/Extrct_Keyword_from_text | /extract.py | UTF-8 | 429 | 3.015625 | 3 | [] | no_license | from rake_nltk import Rake
# Extract keyword phrases from a sample paragraph with RAKE (rake_nltk).
rake_nltk_var = Rake()
text = """ I am a programmer from India, and I am here to guide you
with Data Science, Machine Learning, Python, and C++ for free.
I hope you will learn a lot in your journey towards Coding,
Machine Learning and Artificial Intelligence with me."""
rake_nltk_var.extract_keywords_from_text(text)
# Phrases ranked from most to least relevant.
keyword_extracted = rake_nltk_var.get_ranked_phrases()
print(keyword_extracted) | true |
06b796e773eabde42d738c0c9f749afab444e882 | Python | gutentag1026/HackHighSchool | /Parseltongue_02/02_numtypes.py | UTF-8 | 650 | 3.65625 | 4 | [] | no_license | import sys
# Two integers from the command line; their quotient and remainder.
num_one = (int)(sys.argv[1])
num_two = (int)(sys.argv[2])
# NOTE(review): `/` is true division, so `dividend` is a float; the %i format
# used when printing it later truncates toward zero.
dividend = num_one / num_two
remainder = num_one % num_two
# Sample values of each numeric type for the demo prints below.
a = 5
b = 56.99
c = 9.322e-36j
def data_type(x):
    """Return a human-readable name for x's exact numeric type, or None
    for any other type (including bool, whose type is not int itself)."""
    names = {int: "Integer", float: "Float", complex: "Complex"}
    return names.get(type(x))
# Report the division result and the detected type of each sample value.
print("%i divided by %i equals %i remainder %i" % (num_one, num_two, dividend, remainder))
print("Variable a contains : %i which is of type: %s" % (a, data_type(a)))
print("Variable b contains :", b, "which is of type:", data_type(b))
print("Variable c contains :", complex(c), "which is of type:", data_type(c))
| true |
ea47f10433d4acce7000e6be85337e01ceb90364 | Python | texttxet/code | /hashlib练习/md5爆破弱口令.py | UTF-8 | 1,458 | 3.296875 | 3 | [] | no_license | # conding:utf-8
#! /bin/bash/python3
import hashlib
def ruo_md5():
    """Print the MD5 digest of every weak password listed in password.txt.

    Bug fix: the original called text.splitlines(s) where `s` was not yet
    defined, raising NameError on the first call; splitlines() takes no
    such argument here.
    """
    text = open("password.txt").read()
    passwords = text.splitlines()
    for s in passwords:
        md5 = hashlib.md5()
        md5.update(bytes(s, encoding="utf-8"))
        en_pd = md5.hexdigest()
        print("{:>16}".format(s), ":", en_pd)
def de_md5(known_md5):
    """Look up known_md5 against the password dictionary and print the match.

    Bug fix: the "not in dictionary" message used to print unconditionally,
    even after a successful match; it now lives in the loop's else clause so
    it only runs when no candidate matched. Also uses splitlines() (matching
    ruo_md5) instead of split("\\n"), which produced a spurious empty-string
    candidate when the file ended with a newline.
    """
    text = open("password.txt").read()
    candidates = text.splitlines()
    for s in candidates:
        md5 = hashlib.md5()
        md5.update(bytes(s, encoding="utf-8"))
        if known_md5 == md5.hexdigest():
            print("密文".rjust(len(known_md5), " "), ":", "明文".ljust(len(s)))
            print(known_md5, ":", "{:<16}".format(s))
            break
    else:
        print("该md5值不在本人字典里")
def en_md5(pd):
    """MD5-hash the given plaintext and print plaintext and digest side by side."""
    digest = hashlib.md5(pd.encode("utf-8")).hexdigest()
    print("明文".rjust(16, " "), ":", "密文".ljust(len(digest), " "))
    print("{:>16}".format(pd), ":", digest)
if __name__ == "__main__":
    # ruo_md5() # print the MD5 of every weak password in the dictionary
    # known_md5 = input("请输入已知弱口令的md5值:")
    # de_md5(known_md5) # these two lines reverse a known MD5 via the dictionary
    pd = input("请输入想要md5加密的密码:")
    en_md5(pd) # hash the password entered above
| true |
42d3c528da95b38499ad2688dd9cbf047feb5399 | Python | franksun319/JointPatentsSpider4CNKI | /produce_adjacent_matrix.py | GB18030 | 958 | 2.5625 | 3 | [] | no_license | # -*- coding: cp936 -*-
"""
ɸרڽӾ
"""
import os
import sys
from csv_reader import CsvReader
from graph_maker import GraphMaker
# Python 2 default-encoding hack so the Chinese file names below round-trip.
# NOTE(review): the comment/string bytes in this file were decoded with the
# wrong codec upstream (GB18030 source) and are preserved as-is.
default_encoding = "utf-8"
if default_encoding != sys.getdefaultencoding():
    reload(sys)
    sys.setdefaultencoding(default_encoding)

# One adjacency matrix is produced per year in this range.
YEAR = ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016']
INPUT_CSV = u'2007-2016ҽҩҵ/2007-2016ȫҽҩר.csv'
PREFIX_LABEL = 'AM_ҽҩ'
if __name__ == '__main__':
    assert (os.path.exists(INPUT_CSV)), 'Such file \"' + INPUT_CSV + '\" does not exists!'
    # For each year, read that year's joint-applicant patent records and
    # write the co-application adjacency matrix to AM_<prefix>_<year>.csv.
    for y in YEAR:
        y = str(y)
        my_reader = CsvReader(INPUT_CSV, start_time=y + '-01-01', end_time=y + '-12-31')
        print u'' + y + u'ڽӾ'
        my_graph_maker = GraphMaker(my_reader.joint_applicant_list())
        my_graph_maker.write_adjacent_matrix(PREFIX_LABEL + '_' + y + '.csv')
| true |
c84043191fe8d3e458eadc4cee5b2453b4a127a0 | Python | Suja-K/Spo2_evaluation | /spo2evaluation/modelling/healthwatcher/healthwatcher.py | UTF-8 | 2,343 | 2.78125 | 3 | [] | no_license | #method of Scully et al.: "Physiological Parameter monitoring from optical recordings with a mobile phone"
import torchvision
from matplotlib import pyplot as plt
def health_watcher(ppg_blue, ppg_blue_std, ppg_red, ppg_red_std, fps, smooth=True):
    """Estimate mean SpO2 from per-frame PPG channel statistics (Scully et al.,
    "Physiological Parameter Monitoring from Optical Recordings with a Mobile
    Phone").

    :param ppg_blue / ppg_blue_std: per-frame mean and std of the blue channel
    :param ppg_red / ppg_red_std:   per-frame mean and std of the red channel
    :param fps: video frame rate, used to size the smoothing window
    :param smooth: apply a 10-second moving average before taking the mean
    :return: scalar mean SpO2 estimate
    """
    # Calibration constants from "Determination of SpO2 and heart-rate using
    # smartphone camera"; TODO: fit A and B per device/patient via regression.
    A = 100
    B = 5
    spo2 = A - B * (ppg_red_std / ppg_red) / (ppg_blue_std / ppg_blue)
    if smooth:
        frames_to_smooth = int(10 * fps)  # 10-second window
        # Bug fix: the smoothed series was a plain Python list, so the
        # .mean() call below raised AttributeError whenever smooth=True.
        # Keep it as an ndarray so the final mean works for both paths.
        spo2 = np.asarray([spo2[i:i + frames_to_smooth].mean()
                           for i in range(len(spo2) - frames_to_smooth)])
    return spo2.mean()
def health_watcher_old(video, meta):
    """Legacy variant: compute and plot smoothed SpO2 directly from a decoded
    video tensor (frames x H x W x RGB) and its torchvision metadata dict."""
    #v, _, meta = torchvision.io.read_video('../data/S98T89.mp4', pts_unit="sec") ## assumes it's being run from `healthwatcher` directory
    print(meta)
    fps = meta['video_fps']
    # Channel indices after decoding; here blue=0, red=2.
    blue=0
    green=1
    red=2
    print(meta)
    video.resize_(video.shape[0], video.shape[1]*video.shape[2], video.shape[3]) # smash width and height together
    bc, gc, rc = video[:,:,blue].float(), video[:,:,green].float(), video[:,:,red].float() # get separate channels
    bc_mean = bc.mean(dim=1) # calc mean and std for each channel
    bc_std = bc.std(dim=1)
    rc_mean=rc.mean(dim=1)
    rc_std = rc.std(dim=1)
    A=100 # From "determination of spo2 and heart-rate using smartphone camera
    B=5
    #TODO curve fitting for A and B. If we have more data, we can do a linear regression that best fits all patients
    #TODO Add all mp4 from figshare
    #TODO Add all ground truth from figshare
    # Ratio-of-ratios SpO2 estimate, one value per frame.
    spo2 = (A - B*(rc_std / rc_mean )/(bc_std / bc_mean)).numpy()
    secs_to_smooth = 10
    frames_to_smooth = int(10*fps)
    # 10-second moving average of the per-frame estimates.
    spo2_smooth = [spo2[i:i+frames_to_smooth].mean() for i in range(len(spo2)-frames_to_smooth)]
    x = [i for i in range(len(spo2_smooth))]
    plt.figure()
    plt.plot(x, spo2_smooth)
    plt.show()
| true |
b0a5835621e3acf2d793f81003051e98078ebbbe | Python | fhan90521/algorithm | /leetcode/leetcode-743.py | UTF-8 | 824 | 2.75 | 3 | [] | no_license | class Solution:
def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
graph = collections.defaultdict(list)
for a,b,c in times:
graph[a].append((b,c))
queue=[(k,0)]
total_fee={}
total_fee[k]=0
while queue:
v = queue.pop(0)
for w in graph[v[0]]:
if w[0] not in total_fee:
fee=total_fee[v[0]]+w[1]
total_fee[w[0]]=fee
queue.append(w)
else:
fee=total_fee[v[0]]+w[1]
if(total_fee[w[0]]>fee):
total_fee[w[0]]=fee
queue.append(w)
print(total_fee)
if(len(total_fee)==n): return max(total_fee.values())
return -1 | true |
7a02507bde927a142a137d1597e1b6732ccc3d9a | Python | jia80H/python-learning-experience | /06 函数和装饰器/06 装饰器.py | UTF-8 | 1,935 | 4.1875 | 4 | [] | no_license | """ 装饰器的基本使用 """
def cal_time(fn):
    # Decorator: wraps fn so that calling it also reports its running time.
    print('我是外部函数,我被调用了')
    print('fn={}'.format(fn))

    def inner():
        import time
        start = time.time()
        fn()
        end = time.time()
        print('耗时', (end - start), 's')
    return inner


@cal_time
# Decorating does two things: (1) it calls cal_time immediately,
# (2) it passes the decorated function in as `fn`.
def cal():
    x = 0
    for i in range(1, 10000000):
        x += i
    print(x)

# Third effect: from now on the name `cal` no longer refers to the original
# function but to the `inner` wrapper returned by cal_time.
print('装饰后的cal={}'.format(cal))
# e.g. 装饰后的cal=<function cal_time.<locals>.inner at 0x00000223FD0369D0>
cal()
""" 装饰器详解 """
# 看懂这个
def cal_time(fn):
    # Variant: the wrapper accepts one argument and forwards fn's return value.
    print('我是外部函数,我被调用了')
    print('fn={}'.format(fn))
    def inner(s):
        import time
        start = time.time()
        k = fn(s)
        end = time.time()
        print('耗时', (end - start), 's')
        return k
    return inner
@cal_time
def cal(n):
    x = 0
    for i in range(1, n):
        x += i
    return x # unlike before, the result is returned instead of printed
print(cal(1000000))
""" 装饰器的使用 """
# 更改需求 但不改变源代码
# 开放封闭原则 :
# 软件实体应该是可扩展,而不可修改的。
# 也就是说,对扩展是开放的,而对修改是封闭的。
# 新需求,需要防沉迷
# 先把装饰器结构搭建出来
def can_play(fn):
    # Skeleton of the age-gate decorator (filled in below).
    def inner():
        pass
    return inner
# The original requirement: just play the game.
def play_game(name, game):
    print('{}正在玩儿{}'.format(name, game))
# Final version: extra positional args carry the age; under-18 users are blocked.
def can_play(fn):
    def inner(x, y, *args, **kwargs):
        print(args)
        if args[0] >= 18:
            fn(x, y)
        else:
            print('未成年')
    return inner
@can_play
def play_game(name, game):
    print('{}正在玩儿{}'.format(name, game))
play_game('Mlfoy', 'TFT', 22)
| true |
02acdb14880402ba131d06ecd0f63e714c12b25a | Python | ezinall/StellarPy | /stellarpy/core.py | UTF-8 | 5,822 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from datetime import datetime
class Star:
    """A central star: fixed at the origin with a mass and a display colour."""

    def __init__(self, object_name, m, color=(1, 1, 0, 1)):
        self.name = object_name
        self.m = m  # mass, kg
        # The star sits at the origin; single-point trajectory lists.
        self.X = [0]
        self.Y = [0]
        self.Z = [0]
        self.color = color
        # pos = gl.GLScatterPlotItem(pos=array([0, 0, 0]), color=color, size=10)
        # pos.setGLOptions('translucent')
        # plot_wid.addItem(pos)
class Body:
    """An orbiting body: integrates its position from Keplerian orbital
    elements and picks a display style based on its mass."""

    def __init__(self, object_name, major, m, a, e, i, w, O=0, M=0, at=0, JD=2451545.0, color=(.5, .5, .5, 1)):
        """
        :param str object_name: name of the body
        :param major: primary body being orbited (supplies the central mass)
        :param float m: mass, kg
        :param float a: semi-major axis, km
        :param float e: eccentricity (e = c / a)
        :param float i: inclination, degrees
        :param float w: argument of periapsis, degrees
        :param float O: longitude of the ascending node, degrees
        :param float M: mean anomaly, degrees (M = E - e*sin(E))
        :param float at: axial tilt, degrees
        :param JD: Julian date of the elements' epoch (default J2000 = 2451545.0)
        :param tuple color: RGBA point colour
        """
        self.name = object_name
        self.color = color
        self.major = major
        self.m = m
        self.at = at
        self.JD = JD
        self.orbit = True
        self.guide = True
        self.size = None
        self.width = None
        k = G * (self.major.m + m)  # mu, standard gravitational parameter
        n = np.sqrt(k / a ** 3)  # mean motion
        self.T = 2 * np.pi / n  # orbital period: sqrt(((4 * pi**2)/(G * (SUN.M + m))) * a**3), Kepler's 3rd law
        x, y, z = [], [], []
        E = np.radians(M)
        # Number of daily integration steps from the epoch JD until today.
        self.age = int(999 / 365 * (datetime.now() - get_g_d(JD)).days)
        for count in range(self.age):
            # Fixed-point iteration for the eccentric anomaly (Kepler's equation).
            # NOTE(review): M stays in degrees while E is in radians here —
            # the units look mixed; verify against the intended formulation.
            while abs((M + e * np.sin(E)) - E) > 0.00001:
                E = M + e * np.sin(E)
            M += n * 1  # M = n(t−t0)+M0
            r = a * (1 - e * np.cos(E))  # radius vector
            sin_v = (np.sqrt(1 - e ** 2) * np.sin(E)) / (1 - e * np.cos(E))
            cos_v = (np.cos(E) - e) / (1 - e * np.cos(E))
            sin_u = np.sin(np.radians(w)) * cos_v + np.cos(np.radians(w)) * sin_v
            cos_u = np.cos(np.radians(w)) * cos_v - np.sin(np.radians(w)) * sin_v
            # Rotate from the orbital plane into heliocentric coordinates.
            x.append(r * (cos_u * np.cos(np.radians(O)) - sin_u * np.sin(np.radians(O)) * np.cos(np.radians(i))))
            y.append(r * (cos_u * np.sin(np.radians(O)) + sin_u * np.cos(np.radians(O)) * np.cos(np.radians(i))))
            z.append(r * (sin_u * np.sin(np.radians(i))))
            # V1 = sqrt(r / p) * e * sinv
            # V2 = sqrt(r / p) * (1 + e * cosv)
        # Retrograde bodies (axial tilt > 90°) traverse the path backwards.
        self.X = list(reversed(x)) if self.at > 90 else x
        self.Y = list(reversed(y)) if self.at > 90 else y
        self.Z = list(reversed(z)) if self.at > 90 else z
        # self.X = x
        # self.Y = y
        # self.Z = z
        # F = G * SUN.M * self.m / r ** 2 # gravitational force
        # p = a * (1 - e ** 2) # semi-latus rectum
        # b = sqrt(a * p) # semi-minor axis
        # Rper = (1 - e) * a # perihelion radius
        # Rafe = (1 + e) * a # aphelion radius
        # φ = (24 * pi**3 * a**2) / (T**2 * C**2 * (1 - e**2))
        # φ = (6 * pi * G * SUN.M) / (C**2 * a * (1 - e**2))
        # Choose a display preset by mass class.
        if self.m > 1e25:
            self.outer_planets()
        elif self.m > 1e23:
            self.planet()
        elif self.m > 1e20:
            self.dwarf_planet()
        else:
            self.small_body()
def star(self):
pass
def planet(self, orbit=True, guide=True, size=8, width=1):
return self.paint(orbit=orbit, guide=guide, size=size, width=width)
def outer_planets(self, orbit=True, guide=True, size=10, width=1):
return self.paint(orbit=orbit, guide=guide, size=size, width=width)
def dwarf_planet(self, orbit=True, guide=True, size=6, width=1):
return self.paint(orbit=orbit, guide=guide, size=size, width=width)
def small_body(self, orbit=False, guide=False, size=1.5, width=.25):
return self.paint(orbit=orbit, guide=guide, size=size, width=width)
def satellite(self, orbit=False, guide=False, size=3, width=1):
pass
def paint(self, orbit=True, guide=True, size=4.0, width=1.0):
self.orbit = orbit
self.guide = guide
self.size = size
self.width = width
def get_g_d(j_d):
a = int(j_d + 32044)
b = int((4 * a + 3) / 146097)
c = a - int((146097 * b) / 4)
d = int((4 * c + 3) / 1461)
e = c - int((1461 * d) / 4)
m = int((5 * e + 2) / 153)
day = e - int((153 * m + 2) / 5) + 1
month = m + 3 - 12 * int((m / 10))
year = 100 * b + d - 4800 + int((m / 10))
return datetime(year, month, day)
def get_j_d(day, month, year):
a = int((14 - month) / 12)
b = year + 4800 - a
c = month + 12 * a - 3
return day + int((153 * c + 2) / 5) + 365 * b + int(b / 4) - int(b / 100) + int(b / 400) - 32045
G = 6.67408e-11 # граивтационная постоянная м3·с−2·кг−1 или Н·м2·кг−2
au = 149597870.700 # а.е. астрономическая единица
C = 299792458 # скорость света м/с
| true |
e052dda6beecdb2a8047bf87eb0c343f7c5e3fab | Python | qmnguyenw/python_py4e | /geeksforgeeks/python/python_all/74_16.py | UTF-8 | 1,989 | 4.03125 | 4 | [] | no_license | Python – Key with all Characters in String
Sometimes, while working with Python Strings, we can have problem in which we
need to extract all the keys that have all the characters in the character
value list. This kind of problem has application has many domains such as day-
day programming. Let’s discuss a way in which this problem can be solved.
**Method : Usingall() \+ dictionary comprehension**
The combination of above functionalities can be used to solve this problem. In
this, we use all() to perform check in whole of dictionary and extraction of
items using items().
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Key with all Characters in String
# Using all() + dictionary comprehension
# initializing dictionary
test_dict = { 'gfg' : ['a', 'b', 'c', 'd', 'g'],
'is' : ['b', 'f', 'e'],
'best' : ['c', 'd', 'g'],
'for' : ['n', 'z'],
'CS' : ['g', 'd'] }
# printing original dictionary
print("The original dictionary is : " + str(test_dict))
# initializing keys
test_str = 'gd'
# Key with all Characters in String
# Using all() + dictionary comprehension
res = list({key for key, val in test_dict.items()
if all(chr in val for chr in test_str)})
# printing result
print("The keys list : " + str(res))
---
__
__
**Output :**
> The original dictionary is : {‘is’: [‘b’, ‘f’, ‘e’], ‘best’: [‘c’, ‘d’,
> ‘g’], ‘for’: [‘n’, ‘z’], ‘CS’: [‘g’, ‘d’], ‘gfg’: [‘a’, ‘b’, ‘c’, ‘d’, ‘g’]}
> The keys list : [‘best’, ‘CS’, ‘gfg’]
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| true |
8d968802872aed2943cf77089505141fa323f17b | Python | liqkjm/scrapyd-GUI | /ScrapydTools.py | UTF-8 | 5,418 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import json
import sqlite3
import os
import shutil
__author__ = "chenyansu"
class ScrapydTools(object):
""" 后端:Scrapyd API 的封装调用
日后修改方向:函数打散,不使用类继承。
"""
def __init__(self, baseUrl ='http://127.0.0.1:6800/'):
self.baseUrl = baseUrl
def get_server_status(self):
""" 获取服务器状态 """
r = requests.get(self.baseUrl + 'daemonstatus.json')
print(r.text)
return eval(r.text)
def get_project_list(self):
""" 获取项目列表 """
r = requests.get(self.baseUrl + 'listprojects.json')
print(r.text)
return eval(r.text)
def get_project_spider(self,project):
""" 获取某项目的所有爬虫 """
listspdUrl = self.baseUrl + 'listspiders.json?project=%s' % project
r = requests.get(listspdUrl)
print(r.text)
return eval(r.text)
def get_project_version(self, project):
""" 获取某项目所有版本(重复提交项目会增加版本) """
listspdvUrl=self.baseUrl + 'listversions.json?project=%s' % project
r = requests.get(listspdvUrl)
print(r.text)
return eval(r.text)
def get_job_list(self, project):
""" 获取所有爬虫(各种状态) """
listjobUrl=self.baseUrl + 'listjobs.json?project=%s' % project
r=requests.get(listjobUrl)
print(r.text)
return eval(r.text)
def start_spider(self, project, spider):
""" 开始爬虫,返回jobid """
schUrl = self.baseUrl + 'schedule.json'
dictdata ={ "project":project,"spider":spider}
r= requests.post(schUrl, data= dictdata)
print(r.text)
return eval(r.text)
def stop_spider(self, project, jobid):
""" 根据jobid停止爬虫 """
cancelUrl = self.baseUrl + 'cancel.json'
dictdata = {"project":project ,"job":jobid}
r = requests.post(cancelUrl, data=dictdata)
print(r.text)
return eval(r.text)
def del_project_by_version(self, project, version):
""" 根据版本删除项目"""
delverUrl = self.baseUrl + 'delversion.json'
dictdata={"project":project ,"version": version }
r = requests.post(delverUrl, data= dictdata)
print(r.text)
return eval(r.text)
def del_project(self, project):
""" 删除项目 """
delProUrl = self.baseUrl + 'delproject.json'
dictdata = {"project":project}
r = requests.post(delProUrl, data= dictdata)
print(r.text)
return eval(r.text)
def server_manager(self, action="server_list", name=None, address=None):
"""
将利用sqlite创建并使用server表,对此增删改查
提供 server_select,server_list,server_add, server_del 方法
"""
# 数据库自检
if os.path.exists("tool.db") == False:
conn = sqlite3.connect('tool.db')
cursor = conn.cursor()
# 如果表不存在则创建表
create_tb_cmd = """
CREATE TABLE IF NOT EXISTS SERVER
(NAME TEXT PRIMARY KEY,
ADDRESS TEXT);
"""
cursor.execute(create_tb_cmd)
print("创建数据库tool.db并生成SERVER表")
else:
# 开启sqlite3链接
conn = sqlite3.connect('tool.db')
cursor = conn.cursor()
# 通过action设定获取键值,获取所有值,插入键值,删除功能
if action == "server_select":
cursor.execute("SELECT ADDRESS FROM SERVER WHERE NAME=='%s';" %name)
result = cursor.fetchone()[0] #<class 'str'>
elif action == "server_list":
cursor.execute("SELECT NAME FROM SERVER")
result = [x[0] for x in cursor.fetchall()] #<class 'list'>
elif action == "server_add":
try:
cursor.execute("INSERT INTO SERVER (NAME, ADDRESS) VALUES ('%s', '%s');" %(name, address))
result = None #<class 'NoneType'>
except sqlite3.IntegrityError:
result = "EXIST" #<class 'str'>
elif action == "server_del":
cursor.execute("DELETE from SERVER WHERE NAME=='%s';" %name)
result = None #<class 'NoneType'>
else:
result = "ILLEGAL OPERATION" #<class 'str'>
# 关闭链接
cursor.close()
conn.commit()
conn.close()
return result
def project_add(self, project_name, project_address):
if os.path.exists(project_address+"scrapyd-deploy") == False:
try:
shutil.copy("scrapyd-deploy", project_address)
except:
print("没有复制权限")
old_address = os.getcwd()
os.chdir(project_address)
os.system("scrapyd-deploy -p %s" %project_name)
os.chdir(old_address)
if __name__ == "__main__":
st = ScrapydToolsNet()
# st.get_server_status()
# st.get_project_list()
# st.get_project_spider("tutorial")
# st.get_project_spider_version("tutorial")
# st.get_job_list("tutorial")
# st.start_spider(project = "tutorial", spider="author")
# st.del_project_by_version(project="tutorial",version="1520788750" )
# st.del_project("tutorial") | true |
44de9fe78c21992e43834aa98460c96471589517 | Python | Krst0o/backpropagation | /main.py | UTF-8 | 2,722 | 2.71875 | 3 | [] | no_license | import matplotlib.pyplot as plt
from Examples import *
from variables import *
pygame.init()
button_text = pygame.font.SysFont('Comic Sans MS', 20)
ex = Examples()
e = ex.generate(5000)
x_min = np.min(e[0])
x_max = np.max(e[0])
x_train = (np.array(e[0]) - x_min) / (x_max - x_min) * 0.8 + 0.1
y_train = np.array(e[1]) / np.pi * 0.8 + 0.1
NN = Neural_Network(hidden_size=5)
screen.fill(WHITE)
pygame.display.flip()
while running:
screen.blit(robot_img, (0, 0))
learn_text = button_text.render(learn_button_text, False, (0, 0, 0))
learn_button = pygame.draw.rect(screen, (200, 200, 200), (screen.get_width()/2-32, 360, 64, 32))
screen.blit(learn_text, learn_button)
pygame.draw.line(screen, BLACK, (0, 351), (440, 351), width=1)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
if 0 <= pygame.mouse.get_pos()[1] <= 350:
screen.fill(WHITE)
pygame.draw.circle(screen, (255, 0, 0), (pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]), 4)
clicked_x = pygame.mouse.get_pos()[0] - img_width
clicked_y = pygame.mouse.get_pos()[1] - img_height / 2
clicked_y *= -1.0
clicked_x = (clicked_x + arm_length * 2) / (arm_length * 4)
clicked_x *= 0.8 + 0.1
clicked_y = (clicked_y + arm_length * 2) / (arm_length * 4)
clicked_y *= 0.8 + 0.1
predicted_angles = NN.forward((clicked_x, clicked_y))
arm_pts = find_line_points(predicted_angles[0], predicted_angles[1])
pygame.draw.line(screen, BLACK, (img_width, img_height / 2), (arm_pts[0].x, arm_pts[0].y), width=3)
pygame.draw.line(screen, BLACK, (arm_pts[0].x, arm_pts[0].y), (arm_pts[1].x, arm_pts[1].y), width=3)
if 360 <= pygame.mouse.get_pos()[1] <= 392 and (screen.get_width()/2-32) <= pygame.mouse.get_pos()[0] <= (screen.get_width()/2+32):
for i in range(15000):
NN.train(x_train, y_train)
err = NN.errors
plt.plot(range(len(err)), err)
plt.savefig('errors.png')
fig, ax = plt.subplots()
ax.axis('equal')
for (x, y) in e[0]:
plt.scatter(x, y, marker='o')
plt.savefig('e_0.png')
fig, ax = plt.subplots()
ax.axis('equal')
for (x, y) in e[1]:
plt.scatter(x, y, marker='o')
plt.savefig('e_1.png')
learn_button_text = "LEARNED"
| true |
8b4128184838d5d0e37eb9c41a97ff1bc2610426 | Python | utsavmajhi/CS384_1801ME61 | /Assignments/Assignment_5/tutorial05.py | UTF-8 | 11,818 | 2.734375 | 3 | [] | no_license | import os
import re
os.system("cls")
def rename_FIR(folder_name):
# rename Logic
if(os.path.exists('Subtitles/'+folder_name)):
print("Season Number Padding:")
seasonpad=int(input())
print("Episode Number Padding")
episodepad=int(input())
listoldname=[]
listnewnames=[]
listfiletype=[]
for f in os.scandir('Subtitles/'+folder_name):
if(f.is_file()):
originalname=f.name
split=re.findall(r'\d+',originalname)
#epiosde number fetched
episodeno=split[0]
pattern=re.compile(r'.mp4')
if(re.search(pattern,originalname)):
filetype='.mp4'
else:
filetype='.srt'
if(episodepad-len(split[0])>=0):
for i in range(0,episodepad-len(split[0])):
episodeno='0'+str(episodeno)
finaltitle=folder_name+' - '+"Episode "+str(episodeno)
listoldname.append(originalname)
listnewnames.append(finaltitle)
listfiletype.append(filetype)
for i in range(0,len(listoldname)):
if(os.path.exists('Subtitles/'+folder_name+'/'+listnewnames[i]+listfiletype[i])):
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i]+'2'+listfiletype[i])
else:
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i]+listfiletype[i])
print("Done renaming")
pass
def rename_Game_of_Thrones(folder_name):
# rename Logic
if(os.path.exists('Subtitles/'+folder_name)):
print("Season Number Padding:")
seasonpad=int(input())
print("Episode Number Padding")
episodepad=int(input())
listoldname=[]
listnewnames=[]
for f in os.scandir('Subtitles/'+folder_name):
if(f.is_file()):
originalname=f.name
#to get all the relevanr info about the string by regex
split=re.split(r'[ - ]',originalname)
fursplit=re.split(r'[x]',split[4])
#got season no and episode no
episodeno=fursplit[1]
seasonno=fursplit[0]
namesplit=re.split(r'[-]',originalname)
#print(namesplit[2])
pattern=re.compile(r'.mp4')
if(re.search(pattern,namesplit[2])):
prsplit=re.split(r'[.]',namesplit[2])
#fetched prime name of episode
episodename=prsplit[0]
filetype='.mp4'
else:
prsplit=re.split(r'[.]',namesplit[2])
#fetched prime name of episode
episodename=prsplit[0]
filetype='.srt'
#Forming filename accrding to instructions
if(seasonpad-len(fursplit[0])>=0):
for i in range(0,seasonpad-len(fursplit[0])):
seasonno='0'+str(seasonno)
if(episodepad-len(fursplit[1])>=0):
for i in range(0,episodepad-len(fursplit[1])):
episodeno='0'+str(episodeno)
if(seasonpad==1):
seasonno=int(seasonno)
if(episodepad==1):
episodeno=int(episodeno)
finaltitle=folder_name+' - '+"Season "+str(seasonno)+" Episode "+str(episodeno)+' -'+episodename+filetype
#print(finaltitle)
listoldname.append(originalname)
listnewnames.append(finaltitle)
#APPLYING RENAMING FUNCTION TO ACTUAL FILE
for i in range(0,len(listoldname)):
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i])
print("Done renaming")
pass
def rename_Sherlock(folder_name):
# rename Logic
if(os.path.exists('Subtitles/'+folder_name)):
print("Season Number Padding:")
seasonpad=int(input())
print("Episode Number Padding")
episodepad=int(input())
listoldname=[]
listnewnames=[]
for f in os.scandir('Subtitles/'+folder_name):
if(f.is_file()):
originalname=f.name
split=re.split(r'[.E]',originalname)
#for season number
fursplit=re.split(r'[S]',split[1])
seasonno=fursplit[1]
#for episode no
article = re.sub(r'[E]', '(', originalname)
splitepi=re.split(r'[(]',article)
fursplit2=re.split(r'[.]',splitepi[1])
#fetched episode number
episodeno=fursplit2[0]
if(seasonpad-len(fursplit[0])>=0):
for i in range(0,seasonpad-len(fursplit[1])):
seasonno='0'+str(seasonno)
if(episodepad-len(fursplit[1])>=0):
for i in range(0,episodepad-len(fursplit2[0])):
episodeno='0'+str(episodeno)
if(seasonpad==1):
seasonno=int(seasonno)
if(episodepad==1):
episodeno=int(episodeno)
#fetching filetype
filetype=fursplit2[len(fursplit2)-1]
finaltitle=folder_name+' - '+"Season "+str(seasonno)+" Episode "+str(episodeno)+'.'+filetype
listoldname.append(originalname)
listnewnames.append(finaltitle)
for i in range(0,len(listoldname)):
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i])
print("Done renaming")
pass
def rename_Suits(folder_name):
# rename Logic
if(os.path.exists('Subtitles/'+folder_name)):
print("Season Number Padding:")
seasonpad=int(input())
print("Episode Number Padding")
episodepad=int(input())
listoldname=[]
listnewnames=[]
listfiletype=[]
for f in os.scandir('Subtitles/'+folder_name):
if(f.is_file()):
originalname=f.name
split=re.split(r'[-]',originalname)
#get season no & episode no
seasplit=re.split(r'[x]',split[1])
seno1=re.split(r'[ ]',seasplit[1])
episodeno=seno1[0]
seno2=re.split(r'[ ]',seasplit[0])
seasonno=seno2[1]
newlist=[]
for i in range(2,len(split)):
if(i==2):
newlist.append(split[i])
else:
newlist[0]=str(newlist[0])+str(split[i])
advsplit=re.split(r'\.HDTV|\.720p|\.en|TBA',newlist[0])
#fetched episode name
episodename=advsplit[0]
pattern=re.compile(r'.mp4')
if(re.search(pattern,originalname)):
filetype='.mp4'
else:
filetype='.srt'
#print(seasplit[0])
if(seasonpad-len(seno2[1])>=0):
for i in range(0,seasonpad-len(seno2[1])):
seasonno='0'+str(seasonno)
if(episodepad-len(seno1[0])>=0):
for i in range(0,episodepad-len(seno1[0])):
episodeno='0'+str(episodeno)
if(seasonpad==1):
seasonno=int(seasonno)
if(episodepad==1):
episodeno=int(episodeno)
if(episodename!=' '):
finaltitle=folder_name+' - '+"Season "+str(seasonno)+" Episode "+str(episodeno)+' -'+episodename
else:
finaltitle=folder_name+' - '+"Season "+str(seasonno)+" Episode "+str(episodeno)
listoldname.append(originalname)
listnewnames.append(finaltitle)
listfiletype.append(filetype)
for i in range(0,len(listoldname)):
if(os.path.exists('Subtitles/'+folder_name+'/'+listnewnames[i]+listfiletype[i])):
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i]+'2'+listfiletype[i])
else:
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i]+listfiletype[i])
print("Done renaming")
pass
def rename_How_I_Met_Your_Mother(folder_name):
# rename Logic
if(os.path.exists('Subtitles/'+folder_name)):
print("Season Number Padding:")
seasonpad=int(input())
print("Episode Number Padding")
episodepad=int(input())
listoldname=[]
listnewnames=[]
listfiletype=[]
for f in os.scandir('Subtitles/'+folder_name):
if(f.is_file()):
originalname=f.name
split=re.split(r'[-]',originalname)
#extract season no
seasplit=re.split(r'[x]',split[1])
seasonno=int(seasplit[0])
seasonno=str(seasonno)
episodeno=int(seasplit[1])
episodeno=str(episodeno)
reqname=split[len(split)-1]
advsplit=re.split(r'\.HDTV|\.720p|\.en|.1080p',reqname)
episodename=advsplit[0].strip()
if(seasonpad-len(seasonno)>=0):
for i in range(0,seasonpad-len(seasonno)):
seasonno='0'+str(seasonno)
if(episodepad-len(episodeno)>=0):
for i in range(0,episodepad-len(episodeno)):
episodeno='0'+str(episodeno)
if(seasonpad==1):
seasonno=int(seasonno)
if(episodepad==1):
episodeno=int(episodeno)
pattern=re.compile(r'.mp4')
if(re.search(pattern,originalname)):
filetype='.mp4'
else:
filetype='.srt'
finaltitle=folder_name+' - '+"Season "+str(seasonno)+" Episode "+str(episodeno)+' - '+episodename
listoldname.append(originalname)
listnewnames.append(finaltitle)
listfiletype.append(filetype)
for i in range(0,len(listoldname)):
if(os.path.exists('Subtitles/'+folder_name+'/'+listnewnames[i]+listfiletype[i])):
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i]+str(i+1)+listfiletype[i])
else:
os.rename('Subtitles/'+folder_name+'/'+listoldname[i], 'Subtitles/'+folder_name+'/'+listnewnames[i]+listfiletype[i])
print("Done renaming")
pass
print("Enter the no corresponding to the Webseries:")
print("1.FIR")
print("2.Game of Thrones")
print("3.How I met Your Mother")
print("4.Sherlock")
print("5.Suits")
name=int(input())
if(name==2):
rename_Game_of_Thrones("Game of Thrones")
else:
if(name==4):
rename_Sherlock("Sherlock")
else:
if(name==5):
rename_Suits('Suits')
else:
if(name==3):
rename_How_I_Met_Your_Mother('How I Met Your Mother')
else:
if(name==1):
rename_FIR("FIR")
else:
print("Not Found in Present Database or wrong input")
| true |
ed9716cd2e4b8ef963180380b4afbe85202227e0 | Python | robertdigital/sorting | /animation.py | UTF-8 | 3,455 | 3.578125 | 4 | [
"MIT"
] | permissive | import os
import shutil
import imageio as imo
import matplotlib.pyplot as plt
def arrays_2_images(arrays: list, title: str, frame_path: str='data/') -> list:
"""Converts a list of arrays to a list of images.
These are temporarily stored in the directory 'frame_path' and are created
from the matplotlib library.
Params:
arrays (list[float or int]): a
title: title of the matplotlib plot
frame_path: directory in which the images will be stored.
Return:
images (list[str]): returns a list of filenames at which the images are located.
"""
indices = range(len(arrays[0]))
m = len(arrays)
images = []
for i in range(m):
values = arrays[i]
filename = 'frame_' + str(i) + '.png'
path = frame_path + filename
array_2_barplot(list(indices), values, title, path=path)
images.append(path)
return images
def array_2_barplot(x: list, y: list, title: str, path: str='/array.png') -> None:
"""Generates a barplot from an array and saves it in the given path.
Given a list x and y values, a title of the plot, and a place to save the
plot this function creates a barplot and saves it in the given path.
Params:
x (list[int or float]): a list of numbers for the x-axis
y (list[int or float]): a list of numbers for the y-axis
title (str): a title for the barplot
path: a path to save the barplot to
"""
plt.clf()
plt.bar(x, y)
plt.xlabel('Index')
plt.ylabel('Value')
plt.title(title)
plt.legend()
plt.savefig(path)
def images_2_gif(frames: list, path: str='') -> None:
"""Creates a gif from a list of images.
Given a list of paths for frame files and a location to store the gif, this
function creates a gif from the images and saves the gif to the argument
'path'.
Params:
frames (list[str]): a list of filenames
path: a path to save the gif to
"""
with imo.get_writer(path + '.gif', mode='I') as writer:
for frame in frames:
image = imo.imread(frame)
writer.append_data(image)
def arrays_2_gif(arrays: list, title: str='', path: str='', frame_path: str='') -> None:
"""Creates a gif out of a list of arrays.
Given a list of arrays, a title, a path, and a frame path, this function
creates a gif with the given title saved to the argument 'path' and uses
the 'frame_path' argument to store temporary frames of the barplots for
gif.
Params:
arrays (list[list[float or int]]): a list of arrays
title: a title for the gif
path: a path to save the gif to
frame_path: a path to temporarily the frames for the gif.
"""
images = arrays_2_images(arrays, title=title, frame_path=frame_path)
images_2_gif(images, path=path)
clear_folder(folder=frame_path)
def clear_folder(folder: str) -> None:
"""Clears an entire folder.
Given a folder name, this function will delete all the files that are in
that folder.
Params:
folder (str): the path for the folder to be deleted.
"""
for file in os.listdir(folder):
file_path = os.path.join(folder, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
| true |
55f42f75f1e457a875d1862ce90ab16744d9fb40 | Python | Bhasheyam/ALgorithms-PythonSolved | /SubArrayMatcher.py | UTF-8 | 1,051 | 3.984375 | 4 | [] | no_license | # given two set of array- Array A is greater than Array B if the first non-match element of the array A is greater than the element of the Array B.
# if the length of the substring is given then we need to use a sliding window to find all the possible consecutive sub-set of the given array.
# for the given example it is [1,4,3,2] and [4,3,2,5]
# then according to first point all the sub-set need to be solved and one greatest subset should be returned
def solution(A, K):
# write your code in Python 2.7
i=0
collection=[]
while(i<len(A)-K+1):
collection.append(A[i:i+K])
i+=1
k=1
print(collection)
greater=collection[0]
while(k<len(collection)):
b=0
while(b<len(collection[k])):
if(collection[k][b]==greater[b]):
b+=1
continue
elif(collection[k][b]>greater[b]):
greater=collection[k]
break
else:
break
k+=1
return greater
print(solution([1,4,3,2,5], 4))
| true |
1c24b9cbb473c2df10064906ed092c44c6efdb12 | Python | yanggali/group_model | /vertex.py | UTF-8 | 387 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: UTF-8 -*-
class Vertex:
verCount = 0
def __init__(self, name, type):
self.name = name
self.type = type
Vertex.verCount += 1
def __hash__(self):
seed = 131
result = 0
key = str(self.type)+"_"+str(self.name)
for k in key:
result = result*seed + ord(k)
return result | true |
98d36b83e5b3b675e389c80d5525ffb9e93738d8 | Python | jaredgrambihler/NumberNet | /importData.py | UTF-8 | 810 | 3.296875 | 3 | [] | no_license | """
Module to import data from MNIST dataset.
Dependencies:
python-mnist
"""
from mnist import MNIST
def importData(dir = './mnist'):
"""
Returns data of MNIST dataset.
Uses the python-mnist module to import this data.
If the directory of your data is different than /mnist,
this method call can be edited.
"""
try:
#creates mndata object from mnist data
mndata = MNIST(dir)
#loads train data
trainImages, trainLabels = mndata.load_training()
#loads test data
testImages, testLabels = mndata.load_testing()
#returns as 4 lists
return trainImages, trainLabels, testImages, testLabels
except FileNotFoundError:
print('Need to get MNIST data or change directory.')
return None
return None | true |
6a3be82cfeb20b5cecdc6fb88a4f970291905aa6 | Python | j6mes/springer2018-hatespeech-bridging-gaps | /src/hatemtl/features/preprocessing.py | UTF-8 | 1,170 | 3.140625 | 3 | [] | no_license | import re
def pp_lowercase(text):
return text.lower()
def pp_strip_hashtags(text):
return ' '.join(re.sub("(\#[A-Za-z0-9]+)"," HASHTAG ",text).split())
def pp_strip_usernames(text):
return ' '.join(re.sub("(@[A-Za-z0-9\_]+)"," USERNAME ",text).split())
def pp_strip_url(text):
return ' '.join(re.sub("(https?\:\/\/[^\s]+)"," URL ",text).split())
def pp_replace_numbers(text):
return re.sub(r'[0-9]+', 'NUMBER', text)
def pp_strip_nonalphanum(text):
return re.sub(r'[\W\s]+', ' ', text)
def pp_placeholders_singlechar(text):
text = re.sub('HASHTAG', '#', text)
text = re.sub('USERNAME', '@', text)
text = re.sub('URL', '$', text)
text = re.sub('NUMBER', 'D', text)
return text
def preprocess(text,tools=list([pp_lowercase,
pp_strip_hashtags,
pp_strip_usernames,
pp_strip_url,
pp_strip_nonalphanum,
pp_replace_numbers,
pp_placeholders_singlechar])):
for tool in tools:
text = tool(text)
return text
| true |
52e5b29cbe0a3339b38f8bea7e8546de2c8b45d8 | Python | spacetelescope/stistools | /stistools/calstis.py | UTF-8 | 7,283 | 2.75 | 3 | [
"BSD-2-Clause"
] | permissive | #! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
from stsci.tools import parseinput, teal
__doc__ = """
Calibrate STIS data.
The input raw files should be in the default directory. This is not
always necessary, but it will always work. For spectroscopic data, if
a path is specified for the input file, the wavecal file may not be
found unless the wavecal file name (including path) was explicitly
specified.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.calstis.calstis("o66p01020_raw.fits", verbose=True,
... trailer="o66p01020.trl")
In Python with TEAL:
>>> from stistools import calstis
>>> from stsci.tools import teal
>>> teal.teal("calstis")
From command line::
% ./calstis.py -v -s o66p01020_raw.fits out/
% ./calstis.py -r
"""
__taskname__ = "calstis"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
def main(args):
if len(args) < 1:
prtOptions()
print("At least a raw file name must be specified.")
sys.exit()
try:
(options, pargs) = getopt.getopt(args, "srtvw:",
["version"])
except Exception as error:
prtOptions()
sys.exit()
outroot = ""
wavecal = ""
verbose = False
timestamps = False
savetmp = False
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs0.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs0.e", "-r"])
return 0
if options[i][0] == "-v":
verbose = True
if options[i][0] == "-t":
timestamps = True
if options[i][0] == "-s":
savetmp = True
if options[i][0] == "-w":
wavecal = options[i][1]
nargs = len(pargs)
if nargs < 1 or nargs > 2:
prtOptions()
sys.exit()
input = pargs[0]
if nargs == 2:
outroot = pargs[1]
status = calstis(input, wavecal=wavecal, outroot=outroot,
savetmp=savetmp,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print(" -s (save temporary files)")
print(" -w wavecal")
print("")
print("Following the options, list one or more input raw file names,")
print(" enclosed in quotes if more than one file name is specified")
print(" and/or if wildcards are used.")
print("An output directory (include a trailing '/') or a root name for")
print(" the output files may be specified.")
def calstis(input, wavecal="", outroot="", savetmp=False,
verbose=False, timestamps=False,
trailer="", print_version=False, print_revision=False):
"""Calibrate STIS data.
Parameters
----------
input: str
Name of the input file.
wavecal: str
Name of the input wavecal file, or "" (the default). This is
only needed if the name is not the "normal" name
(rootname_wav.fits).
outroot: str
Root name for the output files, or "" (the default). This can
be a directory name, in which case the string must end in '/'.
savetmp: bool
True if calstis should not delete temporary files.
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
1 is returned if cs0.e (the calstis host executable) returned a
non-zero status. If verbose is True, the value returned by cs0.e
will be printed.
2 is returned if the specified input file or files were not found.
"""
if print_version:
status = subprocess.call(["cs0.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs0.e", "-r"])
return 0
cumulative_status = 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
for infile in infiles:
arglist = ["cs0.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
if savetmp:
arglist.append("-s")
arglist.append(infile)
if outroot:
arglist.append(outroot)
if wavecal:
arglist.append("-w")
arglist.append("%s" % wavecal)
if verbose:
print("Running calstis on {}".format(infile))
print(" {}".format(str(arglist)))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
cumulative_status = 1
if verbose:
print("Warning: status = {}".format(status))
if f_trailer is not None:
f_trailer.close()
return cumulative_status
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the calstis function."""
return calstis.__doc__
def run(configobj=None):
"""TEAL interface for the calstis function."""
calstis(input=configobj["input"],
wavecal=configobj["wavecal"],
outroot=configobj["outroot"],
savetmp=configobj["savetmp"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| true |
a99c5166d8cbc51448db5d0dbab2d5e168424241 | Python | jakejhansen/Advanced_Image_Analysis_02503 | /Final_Project/object_detection/images/distribute_train_test.py | UTF-8 | 326 | 2.734375 | 3 | [] | no_license | import glob
import numpy as np
from shutil import copy2
images = glob.glob('*.png')
print("Copying images")
for img in images:
if np.random.rand() < 0.85:
copy2(img, 'train/')
copy2(img[:-4] + '.xml', 'train/')
else:
copy2(img, 'test/')
copy2(img[:-4] + '.xml', 'test/')
print("Done") | true |
cde57f4487c710e0c3d6fd43d9b9e86bc143b7a9 | Python | thunderflash/drl | /exps/synthetic.py | UTF-8 | 2,268 | 2.65625 | 3 | [] | no_license | from sys import path
path.append('src/')
import matplotlib.pyplot as plt
from numpy import append, zeros
from trainer import ValidatingTrainer
from models import Nonlinear, Linear
from dataset import Dataset
from utils import synthetic, wealth, sharpe
import plotter
#series = synthetic(4001, seed=1)
#data = Dataset(series[0:-1], [series[1:]])
series = synthetic(4000, seed=1)
data = Dataset(series, [])
#for window in [200]:
# for slide in [5]:
# for lookback in [10]:
# for delta in [0.001]:
#
# models = []
# for lmb in [0.0, 0.0001, 0.001, 0.01]:
# models.append(Linear(delta=delta, lmb=lmb))
#
# trainer = ValidatingTrainer(data, models)
# returns, decisions = trainer.train(window=window, lookback=lookback,
# slide=slide, maxiter=20)
#
# filename = 'figures/synthetic_noside_single_w_%i_s_%i_l_%i_d_%f.pdf' % (
# window, slide, lookback, delta)
# title = 'Single Layer - Synthetic (No Side Information)'
# print "%s\tWealth: %f\tSharpe: %f" % (filename,
# wealth(returns)[-1], sharpe(returns)[-1])
# plotter.save(filename, title, series, returns, decisions)
# Grid-search driver (Python 2): for each hyper-parameter combination, train
# an ensemble of multi-layer models on the synthetic series with a sliding
# validation window, then report final wealth / Sharpe and save the figure.
for window in [200]:
    for slide in [5]:
        for lookback in [10]:
            for delta in [0.001]:
                # Candidate models over hidden-layer size and L2 penalty;
                # ValidatingTrainer picks among them per window.
                models = []
                for hidden in [4, 6, 8]:
                    for lmb in [0.0, 0.0001, 0.001, 0.01]:
                        models.append(Nonlinear(delta=delta, lmb=lmb,
                                                hidden=hidden))
                trainer = ValidatingTrainer(data, models)
                returns, decisions = trainer.train(window=window, lookback=lookback,
                                                   slide=slide, maxiter=20)
                filename = 'figures/synthetic_noside_multi_w_%i_s_%i_l_%i_d_%f.pdf' % (
                    window, slide, lookback, delta)
                title = 'Multiple Layer - Synthetic (No Side Information)'
                print "%s\tWealth: %f\tSharpe: %f" % (filename,
                    wealth(returns)[-1], sharpe(returns)[-1])
                plotter.save(filename, title, series, returns, decisions)
| true |
5feefe3b0feec7e52f2e819ddf6ec437bece5255 | Python | miararoy/ttt3d | /ttt3d/enums.py | UTF-8 | 252 | 2.8125 | 3 | [] | no_license | from enum import IntEnum
class Symbol(IntEnum):
    """Cell symbol on the board: E = empty, O and X = the two players."""
    E = 0
    O = 1
    X = 2
    def __repr__(self):
        # Render as the bare member name (e.g. "X") instead of "<Symbol.X: 2>".
        return self.name
class GameResult(IntEnum):
    """Outcome of a game: NA = undecided, O/X = that player won, TIE = draw."""
    NA = 0
    O = 1
    X = 2
    TIE = 3
    def __repr__(self):
        # Render as the bare member name rather than the default enum repr.
        return self.name
| true |
0d078bbb9bc523baa1e2916963770ea6c18dd660 | Python | mantianwuming/work_test | /work/shunfeng_test1.py | UTF-8 | 660 | 2.90625 | 3 | [] | no_license | num = int(input())
line = input()
pass_self = []
for i in range(len(line)):
pass_self.append(line[i])
line = input()
pass_peo = list(map(int, line.split()))
def get_ans(pass_self, pass_peo):
    """Return the letter ('A' + index) of the best remaining choice.

    pass_self -- iterable of letters already taken (e.g. "AC"); their
                 indices (A->0, B->1, ...) are excluded from consideration.
    pass_peo  -- list of integer scores, one per letter.

    Bug fix: the original found the maximum among the remaining scores but
    then located it with pass_peo.index(), which returns the FIRST position
    of that value in the full list -- possibly an excluded one when the same
    score also occurs at a taken index.  This version selects the first
    non-excluded index directly.
    """
    taken = {ord(letter) - ord('A') for letter in pass_self}
    best_index = max(
        (i for i in range(len(pass_peo)) if i not in taken),
        key=lambda i: pass_peo[i],
    )
    return chr(ord('A') + best_index)
max_num = get_ans(pass_self, pass_peo)
print(max_num) | true |
95772ad22795d87017ab6d2e626a5285a323c34a | Python | Arpit-Bajgoti/bounce_game | /main.py | UTF-8 | 5,642 | 3.109375 | 3 | [] | no_license | import pygame
from enum import Enum
from collections import namedtuple
import keyboard
import time
import random
# 1536 x 864 is the current screen resolution of my pc
pygame.init()
radius = 10
width = 836
height = 664
block_size = 20
# rgb colors
DARK_GREEN = (0, 100, 0)
RED = (200, 0, 0)
BLUE1 = (0, 0, 255)
BLUE2 = (0, 100, 255)
BLACK = (0, 0, 0)
MAGENTA = (255, 0, 255)
GREEN = (0, 255, 0)
font = pygame.font.SysFont('arial', 25)
class Direction(Enum):
    """Horizontal movement direction for the player's bat."""
    LEFT = 1
    RIGHT = 2
Point = namedtuple("Point", "w, h")
color_dict = {1: DARK_GREEN, 0: GREEN}
class TurboGame:
    """Breakout-style game: a bat at the bottom, a bouncing ball and bricks.

    Coordinates are pygame screen pixels with (0, 0) at the top-left.
    """
    def __init__(self, w=width, h=height):
        self.w = w
        self.h = h
        # Bat top-left corner: 25% across, 90% down the window.
        self.w_initial = int(0.25 * self.w)
        self.h_initial = int(0.9 * self.h)
        self.counter = 3
        self.display = pygame.display.set_mode((self.w, self.h))
        pygame.display.set_caption("Turbo")
        self.score = 0
        self.bat = None
        self.update_bat()
        self.direction = None
        self.ball = None
        self._place_ball()
        self.game_over = False
        # Ball velocity [dx, dy] in pixels per frame.
        self.update = [6, 6]
        self.radius = radius
        # Brick width/height in pixels (distinct from the window's w/h).
        self.width = 120
        self.height = 30
        self.lst = []  # active bricks as [x, y, hit_points]
        self.stone = []  # every candidate brick position on the grid
        self.bat_length = 60
        # Overwrites the earlier counter=3; counts live bricks down toward the
        # respawn threshold checked in the main loop below.
        self.counter = 5
    def update_bat(self):
        """Rebuild the bat rectangle from the current w_initial/h_initial."""
        self.bat = [Point(self.w_initial, self.h_initial),
                    Point(int(self.w * 0.2), int(0.01 * self.h))]
    def play_step(self):
        """Advance one frame: read keys, move, redraw; returns (game_over, score)."""
        # 1. collect user input
        if keyboard.is_pressed("left arrow"):
            self.direction = Direction.LEFT
        elif keyboard.is_pressed("right arrow"):
            self.direction = Direction.RIGHT
        self.update_bat()
        self.update_ball()
        # self.stone_conditions()
        self._move(self.direction)
        self.direction = None
        # 2. update ui
        self._update_ui()
        return self.game_over, self.score
    def _place_ball(self):
        """Spawn the ball at a random point in the upper part of the window."""
        x = random.randint(40, self.w)
        y = random.randint(100, int(0.5 * self.h_initial))
        self.ball = [x, y]
    def _update_ui(self):
        """Redraw background, bat, ball, bricks and the score text."""
        self.display.fill(BLACK)
        pygame.draw.rect(self.display, BLUE1,
                         pygame.Rect(self.bat[0].w, self.bat[0].h, self.bat[1].w, 30))
        pygame.draw.circle(self.display, RED, tuple(self.ball), self.radius)
        self.draw_brick()
        text = font.render("Score: " + str(self.score), True, MAGENTA)
        self.display.blit(text, [0, 0])
        pygame.display.flip()
    def _move(self, direction):
        """Shift the bat one block left/right, clamped inside the window."""
        if direction == Direction.RIGHT and self.w_initial < (self.w - self.bat[1].w - 20):
            self.w_initial += block_size
        elif direction == Direction.LEFT and self.w_initial > 10:
            self.w_initial -= block_size
    def update_ball(self):
        """Bounce the ball off bat/walls/bricks and advance its position."""
        # ball hitting bat
        if (self.ball[1] + self.radius) > self.h_initial - 10:
            if (self.ball[0] - self.radius > self.w_initial - 10) and self.ball[0] + self.radius < self.w_initial + \
                    self.bat[1].w + 10:
                # Bounce upward with a randomized vertical speed; reward the hit.
                self.update[1] = random.randint(6, 10)
                self.update[1] = -self.update[1]
                self.score += 10
            else:
                # Ball passed below the bat: game over.
                self.game_over = True
        # ball hitting boundaries
        elif self.ball[0] + self.radius > self.w:
            self.update[0] = -self.update[0]
            # self.update[0] = random.randint(5, 10)
        elif self.ball[1] + self.radius < 20:
            self.update[1] = -self.update[1]
            self.update[1] = random.randint(5, 10)
        elif self.ball[0] + self.radius < 20:
            self.update[0] = -self.update[0]
            self.update[0] = random.randint(5, 10)
        self.stone_conditions()
        # updating ball at each while loop
        self.ball[0] += self.update[0]
        self.ball[1] += self.update[1]
    # TODO this code for bricks
    def brick(self):
        """Precompute every candidate brick position into self.stone."""
        for i in range(30, self.w - self.width - 10, self.width + 10):
            for j in range(10, self.h - (self.height * 5), self.height + 10):
                x = i
                y = j
                z = [x, y]
                self.stone.append(z)
    def draw_brick(self):
        """Draw active bricks; colour encodes remaining hit points (0 or 1)."""
        for value in self.lst:
            pygame.draw.rect(self.display, color_dict[value[2]],
                             pygame.Rect(value[0], value[1], self.width, self.height))
    def stone_conditions(self):
        """Handle ball/brick collisions: bounce, then damage or remove the brick."""
        for value in self.lst:
            if value[0] < (self.ball[0] - self.radius) < (value[0] + self.width) and \
                    value[1] + 20 > (
                    self.ball[1] - self.radius) > (value[1] - self.height + 10):
                self.update[1] = -self.update[1]
                x = self.lst.index(value)
                if value[2]:
                    # Hard brick: first hit downgrades it to a soft brick.
                    value[2] -= 1
                    self.score += 5
                else:
                    # Soft brick: remove it and count toward the respawn counter.
                    self.score += 10
                    self.lst.pop(x)
                    self.counter -= 1
if __name__ == '__main__':
    game = TurboGame()
    # game loop
    game.brick()
    # Start with 5-8 random bricks; the appended third element is the brick's
    # hit-point flag (1 = takes two hits, 0 = one hit).
    # NOTE(review): random.sample returns references into game.stone, so the
    # appended flag also mutates the master list -- verify respawned bricks
    # do not accumulate extra flag elements.
    game.lst = random.sample(game.stone, random.randint(5, 8))
    for val in game.lst:
        val.append(random.randint(0, 1))
    while True:
        # When the brick counter drops to 3, spawn 2-4 fresh bricks.
        if game.counter == 3:
            num = random.randint(2, 4)
            add = random.sample(game.stone, num)
            for val in add:
                val.append(random.randint(0, 1))
            game.lst.extend(add)
            game.counter = 3 + num
        game_over, score = game.play_step()
        if game_over:
            break
        time.sleep(0.03)  # ~33 FPS frame cap
    print('Final Score', score)
    pygame.quit()
| true |
27ef114d334ee0fdc58602b2ff52846eeecfe976 | Python | DanieleCalanna/PyTorchTrainer | /torchtrainer/utils/epochs_plotter.py | UTF-8 | 2,127 | 2.796875 | 3 | [] | no_license | import os
import glob
import pandas as pd
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
class EpochsPlotter:
    """Persists per-label training metrics as CSVs and renders per-column plots.

    Each label (e.g. "train"/"valid") maps to a DataFrame indexed by epoch;
    save() writes one <label>.csv and plot() one <column>.png into folder_path.
    """
    def __init__(self, folder_path, labels=None, columns=None, load=True):
        self.folder_path = folder_path
        self.exp_name = os.path.basename(os.path.normpath(folder_path))
        self.dataframes = {}
        # A missing folder forces a fresh (non-loading) start.
        if not os.path.exists(self.folder_path):
            load = False
        if load:
            if os.path.exists(self.folder_path):
                dataframe_paths = glob.glob(os.path.join(self.folder_path, "*.csv"))
                self.labels = [os.path.basename(os.path.splitext(x)[0]) for x in dataframe_paths]
            else:
                # NOTE(review): unreachable -- load was already forced to False
                # above when the folder does not exist.
                raise Exception("{} folder does not exists".format(self.folder_path))
            if not self.labels:
                raise Exception("{} folder does not contain any csv file".format(self.folder_path))
            for label, path in zip(self.labels, dataframe_paths):
                self.dataframes[label] = pd.read_csv(path, index_col='Epoch')
                self.columns = list(self.dataframes[label].columns)
        else:
            self.labels = labels
            self.columns = columns
            for label in self.labels:
                self.dataframes[label] = pd.DataFrame(columns=self.columns, dtype=float)
                self.dataframes[label].index.name = "Epoch"
    def save(self):
        """Write one <label>.csv per label into folder_path (created if needed)."""
        os.makedirs(self.folder_path, exist_ok=True)
        for label in self.labels:
            csv_path = os.path.join(self.folder_path, "{}.csv".format(label))
            self.dataframes[label].to_csv(csv_path)
    def plot(self, show=False):
        """Render one PNG per metric column, overlaying all labels."""
        for column in self.columns:
            fig, ax = plt.subplots()
            fig.suptitle(self.exp_name)
            for label in self.labels:
                ax.plot(self.dataframes[label][column], label=label)
            ax.set(xlabel='Epoch', ylabel=column, title=column)
            ax.grid()
            ax.legend()
            # Epochs are integers; avoid fractional x-axis ticks.
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            plot_path = os.path.join(self.folder_path, "{}.png".format(column))
            fig.savefig(plot_path)
            if show:
                plt.show()
            plt.close()
    def set_row(self, label, epoch, row):
        """Replace the whole metrics row for (label, epoch)."""
        self.dataframes[label].loc[epoch] = pd.Series(data=row, name=epoch)
    def set(self, label, epoch, column, value):
        """Set a single (label, epoch, column) cell."""
        self.dataframes[label].at[epoch, column] = value
| true |
0f7373786a9b371973b9709626dbdc903d9559b8 | Python | plast-lab/doop-mirror | /bin/log-analyzer.py | UTF-8 | 7,722 | 2.96875 | 3 | [
"UPL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #! /usr/bin/env python
#
# Script used to analyze log files created with BloxBatch.
#
# Currently supports log files created with debugDetail@factbus (the default) and
# debugDetail@benchmark. Has been tested with log files generated from LB version 3.8.
#
# Use -h for details on the options.
#
# Remember to keep the version of this script in sync with the supported LB engine versions.
#
import argparse
import sys
import re
import difflib
#
# Helper functions
#
def sort_dict_by_value(adict):
    """Return the dict's items as a list sorted by value (then key), descending.

    Uses an index-based key instead of Python 2 tuple-parameter unpacking so
    the helper works unchanged under both Python 2 and Python 3.
    """
    return sorted(adict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
def sort_tuple_list_by_value(alist):
    """Return a copy of a list of (key, value) tuples sorted by value, descending.

    Uses an index-based key instead of Python 2 tuple-parameter unpacking so
    the helper works unchanged under both Python 2 and Python 3.  The sort is
    stable: equal values keep their original relative order.
    """
    return sorted(alist, key=lambda kv: kv[1], reverse=True)
def print_predicates(predicates, unit = 's'):
    """Print each (predicate, measure) pair whose measure is non-zero.

    Python 2 print statement; `unit` is the measure's unit suffix ('s' for
    factbus seconds, 'ms' for benchmark milliseconds).
    NOTE(review): the loop variable shadows the builtin `tuple`.
    """
    for tuple in predicates:
        if tuple[1] != 0:
            print tuple[0] + ' => ' + str(tuple[1]) + unit + "\n"
#
# Log file iterators
#
# They return a tuple (predicate, time, facts) where:
#
# predicate - is a string representing the executed predicate
# time - is a float representing how long the execution took
# facts = is an int representing how many facts were derived
#
class Benchmark:
    """Iterator for looping over entries in a benchmark log file.

    Yields (predicate, time, facts) tuples parsed from lines produced with a
    debugDetail@benchmark configuration; fields are positional in the
    whitespace-split line (facts at index 4, predicate at 6, time at 7).
    """
    def __init__(self, f):
        self.file = f
    def __iter__(self):
        return self
    def next(self):
        # Scan forward to the next benchmark record, skipping other lines.
        for line in self.file:
            if re.search('DEBUG_DETAIL. benchmark cache predicate', line):
                l = line.split(' ')
                pred = l[6]
                facts = int(l[4])
                time = float(l[7])
                return (pred, time, facts)
        raise StopIteration
    # Alias so the class is also a valid iterator under Python 3.
    __next__ = next
class Factbus:
    """Iterator for looping over entries in a factbus log file.

    Yields (predicate, time, facts) tuples; time is in seconds.  Parsing is a
    small state machine: state 0 waits for an evaluation header, state 1
    accumulates the (possibly multi-line) predicate signature, state 2 reads
    the "new facts" summary line.  Parse logic is unchanged; only the
    Python 3 iterator alias and documentation were added.
    """
    def __init__(self, f):
        self.file = f
        self.state = 0
    def __iter__(self):
        return self
    def next(self):
        for line in self.file:
            # starting a factbus
            if self.state == 0 and (re.search('Full evaluation', line) or re.search('Full aggregation', line) or re.search('Putback evaluation', line) or re.search('Assertion evaluation', line) or re.search('Retraction evaluation', line)):
                self.state = 1
                pred = ''
            # getting the predicate signature (column 33 strips the log prefix)
            elif self.state == 1:
                if re.search('DEBUG_DETAIL', line):
                    self.state = 2
                else:
                    pred += line[33:]
            elif self.state == 2 and re.search('new facts', line):
                l = line.split(' ')
                if 'derived' == l[3]:
                    s = l[7]
                    facts = int(l[4])
                else:
                    s = l[6]
                    facts = 0
                # s will be '(xyz' so we have to remove the (
                time = float(s[1:])
                self.state = 0
                return (pred, time, facts)
        raise StopIteration
    # Alias so the class is also a valid iterator under Python 3.
    __next__ = next
#
# Functions to process files
#
def aggregate(iterator, only_no_facts=False):
    """Aggregate the records of this iterator.

    Returns a 3-tuple (dict, int, float) with predicate->time, count and
    total time.  When only_no_facts is set, records that derived at least
    one fact are skipped.
    """
    per_predicate = {}
    count = 0
    total = 0.0
    for pred, elapsed, facts in iterator:
        if only_no_facts and facts != 0:
            continue
        per_predicate[pred] = per_predicate.get(pred, 0) + elapsed
        count += 1
        total += elapsed
    return (per_predicate, count, total)
def collect(iterator, only_no_facts=False):
    """Collect the records of this iterator without aggregating.

    Returns a 3-tuple (list, int, float): the (predicate, time) pairs in
    order, how many there were, and the total time.  When only_no_facts is
    set, records that derived at least one fact are skipped.
    """
    records = []
    total = 0.0
    for pred, elapsed, facts in iterator:
        if only_no_facts and facts != 0:
            continue
        records.append((pred, elapsed))
        total += elapsed
    return (records, len(records), total)
def compare_exact(main, baseline):
    """Compare a main file's results with a baseline using exact predicate match.

    Returns a list of (predicate, measure difference) tuples; predicates
    absent from the baseline keep their full measure.  Uses dict.items()
    instead of the Python 2-only iteritems() and returns a real list, so
    the helper works under both Python 2 and Python 3.
    """
    diff = dict()
    for key, value in main.items():
        diff[key] = value - baseline[key] if key in baseline else value
    return list(diff.items())
def compare(main, baseline):
    """Compare a main file's results with a baseline using fuzzy matching.

    Returns a list of (predicate description, measure difference) tuples,
    where matched keys read "<main>Vs.\n<baseline>" and unmatched keys end
    with "UNMATCHED.\n".  Note: matched entries are deleted from `baseline`
    (the caller's dict is mutated) so each baseline predicate is consumed at
    most once.  Uses dict.items() instead of the Python 2-only iteritems()
    and returns a real list, so it works under both Python 2 and Python 3.
    """
    diff = dict()
    for key, value in main.items():
        matches = difflib.get_close_matches(key, list(baseline.keys()), 1, 0.5)
        if len(matches) > 0:
            new_key = key + "Vs.\n" + matches[0]
            diff[new_key] = value - baseline[matches[0]]
            del baseline[matches[0]]
        else:
            new_key = key + "UNMATCHED.\n"
            diff[new_key] = value
    return list(diff.items())
def process(args):
    """ Prints a sorted list of predicates that took more than 0 units to execute."""
    # Benchmark logs record milliseconds, factbus logs record seconds.
    unit = 'ms' if args.benchmark else 's'
    # process main file
    mainIter = Benchmark(args.file) if args.benchmark else Factbus(args.file)
    mainRecords = collect(mainIter, args.nofacts) if args.noagg else aggregate(mainIter, args.nofacts)
    if (args.baseline):
        # also process baseline file, compare and print
        baselineIter = Benchmark(args.baseline) if args.benchmark else Factbus(args.baseline)
        baselineRecords = aggregate(baselineIter, args.nofacts)
        compare_function = compare_exact if args.exact else compare
        print_predicates(
            sort_tuple_list_by_value(compare_function(mainRecords[0], baselineRecords[0])),
            unit)
    else:
        # simply print predicates and perhaps the total
        predicates = mainRecords[0] if args.noagg else mainRecords[0].items()
        print_predicates(sort_tuple_list_by_value(predicates), unit)
        if (args.total):
            print str(mainRecords[1]) + " records total " + str(mainRecords[2]) + unit
#
# Main script
#
parser = argparse.ArgumentParser(
description='Analyzes a bloxbatch log file and prints a sorted list of records.')
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r'),
help='the log file to analyze.')
parser.add_argument('-benchmark', '-bench', action='store_true',
help='flags that the log file was created with a debugDetail@benchmark configuration. The default is degubDetail@factbus.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-noagg', '-n', action='store_true',
help='do not aggregate time measures based on predicate definitions.')
group.add_argument('-baseline', '-b', type=argparse.FileType('r'),
help='a log file to compare against. Both files must have the same format and aggregation must be used.')
parser.add_argument('-nofacts', '-z', action='store_true',
help='only process predicate executions that derived no facts.')
parser.add_argument('-exact', '-e', action='store_true',
help='when comparing against a baseline, use exact predicate match. This is much faster than the default but will not match\
predicates that have variables with artificial names or that slightly changed.')
parser.add_argument('-total', '-t', action='store_true',
help='print the count of processed records and the total sum of measures.')
parser.add_argument('-version', action='version', version='%(prog)s 0.1 supports LB 3.8 ')
process(parser.parse_args())
| true |
19b8eea2edb53ed8eb8c603cc647411380219b8d | Python | iceshadows/CLASS-Assistant | /readxl.py | UTF-8 | 248 | 2.9375 | 3 | [] | no_license | import xlrd
# Read the first sheet of namelist.xlsx and print each row's name and id.
data = xlrd.open_workbook('namelist.xlsx')
table = data.sheets()[0]
rows = table.nrows
print(rows)
for row in range(rows):
    # print(row)
    # Column 0 holds the id, column 1 the name.
    idnum = table.cell(row,0).value
    name = table.cell(row,1).value
    # NOTE(review): '' joins name and id with no separator -- a space was
    # probably intended; also xlrd may return numeric cells as floats,
    # which would make this concatenation raise -- verify with the workbook.
    print(name +''+idnum)
fc1d91decf9b07c2eebfdb12451d1d68f423609b | Python | hn416784/fizzbuzz | /task0.py | UTF-8 | 425 | 4.125 | 4 | [] | no_license | #printing number from 1-100
def numbers(count):
    """Print count, count + 3, count + 6, ... capped at and ending with 100.

    Bug fix: the original clamped any overshoot back to 100 but never broke
    out, so once the counter reached 100 the loop printed 100 forever.  This
    version keeps the clamp (100 is still the last value printed when the
    sequence overshoots) but terminates.
    """
    while count <= 100:
        print(count)
        if count == 100:
            break
        count = min(count + 3, 100)
numbers(0)
#for multiples
def fizz_buzz(num):
    """Classic FizzBuzz: 'FizzBuzz' for multiples of 15, 'Fizz' for multiples
    of 3, 'Buzz' for multiples of 5, otherwise the number itself.

    Bug fix: the original tested `num % 3 == 0 and num % 5 == 0` after the
    individual checks, so that branch was unreachable and 15 returned 'Fizz';
    checking the combined case first restores the intended behaviour.
    """
    if num % 15 == 0:
        return 'FizzBuzz'
    elif num % 3 == 0:
        return 'Fizz'
    elif num % 5 == 0:
        return 'Buzz'
    else:
        return num
for n in range(1,100):
print(fizz_buzz(n))
| true |
5afd2ea4343983aae7a67b89df07b8ce9ee00a1a | Python | mattstoneham/PiBot | /examples/colour_sensor/rbg_values.py | UTF-8 | 1,917 | 2.84375 | 3 | [] | no_license | __author__ = 'Matt'
__author__ = 'Matt'
import RPi.GPIO as GPIO
import time
class RGBvalues(object):
    """Continuously reads red/blue/green frequencies from a colour sensor.

    NOTE(review): the s2/s3 filter-select pins plus a frequency-output pin
    match a TCS3200-style light-to-frequency colour sensor -- confirm
    against the actual wiring.
    """
    # BCM pin numbers for the filter-select lines and the frequency output.
    s2 = 20
    s3 = 16
    signal = 21
    # Number of falling edges timed per colour reading.
    NUM_CYCLES = 10
    def __init__(self):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.signal,GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.s2,GPIO.OUT)
        GPIO.setup(self.s3,GPIO.OUT)
        print("\n")
        try:
            self.loop()
        except KeyboardInterrupt:
            # Ctrl-C: release the GPIO pins before exiting.
            self.endprogram()
    def loop(self):
        """Forever: select each colour filter in turn and print its frequency."""
        temp = 1  # NOTE(review): unused
        while True:
            # s2/s3 LOW/LOW selects the red filter.
            GPIO.output(self.s2,GPIO.LOW)
            GPIO.output(self.s3,GPIO.LOW)
            time.sleep(0.3)
            start = time.time()
            for impulse_count in range(self.NUM_CYCLES):
                GPIO.wait_for_edge(self.signal, GPIO.FALLING)
            duration = time.time() - start #seconds to run for loop
            red = self.NUM_CYCLES / duration #in Hz
            print("red value - ",red)
            # s2/s3 LOW/HIGH selects the blue filter.
            GPIO.output(self.s2,GPIO.LOW)
            GPIO.output(self.s3,GPIO.HIGH)
            time.sleep(0.3)
            start = time.time()
            for impulse_count in range(self.NUM_CYCLES):
                GPIO.wait_for_edge(self.signal, GPIO.FALLING)
            duration = time.time() - start
            blue = self.NUM_CYCLES / duration
            print("blue value - ",blue)
            # s2/s3 HIGH/HIGH selects the green filter.
            GPIO.output(self.s2,GPIO.HIGH)
            GPIO.output(self.s3,GPIO.HIGH)
            time.sleep(0.3)
            start = time.time()
            for impulse_count in range(self.NUM_CYCLES):
                GPIO.wait_for_edge(self.signal, GPIO.FALLING)
            duration = time.time() - start
            green = self.NUM_CYCLES / duration
            print("green value - ",green)
            print('\n\n')
            time.sleep(2)
    def endprogram(self):
        """Reset all GPIO channels used by this program."""
        GPIO.cleanup()
if __name__=='__main__':
RGBvalues()
| true |
dfab9c1bce482aa88b5f1f5488c20c0f68c85a98 | Python | Riptide684/British-Informatics-Olympiads | /2016Q2b.py | UTF-8 | 3,112 | 3.53125 | 4 | [] | no_license | #Sean Morrell - Aylesbury Grammar School
def pos_to_coords(pos):
    """Convert a 1-based position on a 5x5 board (1..25, read left-to-right
    starting from the top row) into [x, y] coordinates, where x runs 1..5
    left-to-right and y runs 5 (top row) down to 1 (bottom row)."""
    row, col = divmod(pos - 1, 5)
    return [col + 1, 5 - row]
def do_overflow(board):
    """Perform one overflow pass over the board.

    `board` maps "[x, y]" coordinate strings to counter values.  Every cell
    holding at least 4 loses 4 and increments its four orthogonal
    neighbours; neighbours not yet on the board are created after the pass
    with value 1 (duplicates spawned in the same pass overwrite to 1,
    matching the original behaviour).  Returns [board, check] where check is
    True if any cell still holds >= 4 (i.e. another pass is needed).

    Refactor: replaces four copy-pasted neighbour blocks with a single loop
    over the (dx, dy) offsets; the offset order matches the original so
    dict insertion order -- and therefore later iteration order -- is
    preserved.
    """
    spawned = []
    for element in board:
        if board[element] >= 4:
            board[element] -= 4
            x, y = (int(part) for part in element.strip('][').split(', '))
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                neighbour = [x + dx, y + dy]
                if str(neighbour) in board:
                    board[str(neighbour)] += 1
                else:
                    spawned.append(neighbour)
    # New cells are only added after the pass so the dict does not change
    # size while it is being iterated.
    for coordinate in spawned:
        board[str(coordinate)] = 1
    check = any(value >= 4 for value in board.values())
    return [board, check]
entry = input("Enter the values: ")
values = entry.split(" ")
position = int(values[0])
sequence_length = int(values[1])
turns = int(values[2])
sequence = input("Enter the sequence: ").split(" ")
grid = {}
for i in range(turns):
coords = pos_to_coords(position)
if str(coords) in grid:
grid[str(coords)] += 1
else:
grid[str(coords)] = 1
if grid[str(coords)] >= 4:
overflow = True
while overflow:
tmp = do_overflow(grid)
grid = tmp[0]
overflow = tmp[1]
position = (position + int(sequence[i % sequence_length])) % 25
if position == 0:
position = 25
output = ""
for a in range(1, 6):
for b in range(1, 6):
try:
number = grid["[" + str(b) + ", " + str(6-a) + "]"]
except KeyError:
number = 0
output += str(number) + " "
output += "\n"
print(output)
print(grid)
| true |
f00515feb232f956a9419b3e169abb532a8743ba | Python | Aasthaengg/IBMdataset | /Python_codes/p03078/s830133898.py | UTF-8 | 442 | 2.71875 | 3 | [] | no_license | x,y,z,k=map(int,input().split())
# Each list is read from stdin and sorted descending.
a=sorted(list(map(int,input().split())),reverse=True)
b=sorted(list(map(int,input().split())),reverse=True)
c=sorted(list(map(int,input().split())),reverse=True)
# Print the k largest sums of one element from each of a, b, c.  Only the
# top min(k, x*y) a+b partial sums can appear in the answer, so the rest of
# the x*y*z combinations are pruned before adding c.
ab=[]
for i in range(x):
    for j in range(y):
        ab.append(a[i]+b[j])
ab.sort(reverse=True)
ans=[]
for i in range(min(k,x*y)):
    for j in range(z):
        ans.append(ab[i]+c[j])
ans.sort(reverse=True)
for i in range(k):
    print(ans[i])
60d7adefffef29a6dfbb29353f8c28c5b00e82bd | Python | pointOfive/TD | /MYLECTURES/baggingANDrfs/voting.py | UTF-8 | 2,291 | 2.96875 | 3 | [] | no_license | # run this with: bokeh serve --show voting.py
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.plotting import ColumnDataSource, Figure
from bokeh.models.widgets import Slider
from scipy import stats
my_plot = Figure(title="Binomial Distribution of Correct Votes", plot_height=400, plot_width=400, x_axis_label='Number of Experts Voting Correctly', y_axis_label='Binomial Probability')
my_plot_b = Figure(title="Power Calculation", plot_height=400, plot_width=400, x_axis_label='Number of Experts', y_axis_label='Probability Majority of Experts Are Correct')
p=.7
n=11
slider_p = Slider(start=.01, end=.99, step=.01, value=p, title="Chance Each Expert is Correct")
slider_n = Slider(start=1, end=99, step=2, value=n, title="Number of Experts")
# The datapoints
sup = range(n+1)
pmf = stats.binom.pmf(sup, n, p)
source_points = ColumnDataSource(data=dict(sup=sup, pmf=pmf))
my_plot.scatter(x='sup', y='pmf', source=source_points, color="#2222aa", line_width=3)
x=2*[n/2.]
y=[0,max(pmf)]
source_points_d = ColumnDataSource(data=dict(x=x,y=y))
my_plot.line(x='x', y='y', source=source_points_d, color="#ff0000", line_width=3)
sup = range(1,100,2)
cut = [i/2 for i in sup]
cdf = 1-stats.binom.cdf(cut, sup, p)
source_points_b = ColumnDataSource(data=dict(sup=sup, cdf=cdf))
my_plot_b.line(x='sup', y='cdf', source=source_points_b, color="#2222aa", line_width=3)
sup = [n]
cdf = [1-stats.binom.cdf(sup[0]/2, n, p)]
source_points_c = ColumnDataSource(data=dict(sup=sup, cdf=cdf))
my_plot_b.scatter(x='sup', y='cdf', source=source_points_c, color="#00ff00", line_width=3)
def update(attrname, old, new):
    """Bokeh slider callback: recompute both plots from the current n and p."""
    n=slider_n.value
    p=slider_p.value
    # Left plot: binomial pmf of correct votes, plus the n/2 majority line.
    sup = range(n+1)
    pmf = stats.binom.pmf(sup, n, p)
    source_points.data = dict(sup=sup, pmf=pmf)
    x=2*[n/2.]
    y=[0,max(pmf)]
    source_points_d.data = dict(x=x,y=y)
    # Right plot: P(majority correct) for every odd panel size 1..99.
    sup = range(1,100,2)
    cut = [i/2 for i in sup]
    cdf = 1-stats.binom.cdf(cut, sup, p)
    source_points_b.data = dict(sup=sup, cdf=cdf)
    # Highlighted marker at the currently selected number of experts.
    sup = [n]
    print(sup[0])
    cdf = [1-stats.binom.cdf(sup[0]/2, n, p)]
    source_points_c.data = dict(sup=sup, cdf=cdf)
for w in [slider_n, slider_p]:
w.on_change('value', update)
layout = column(row(my_plot, my_plot_b), slider_p, slider_n)
curdoc().add_root(layout)
| true |
aa5496731af22a1c42008ff45d08671da30853c4 | Python | vovuh/python-domino | /main.py | UTF-8 | 266 | 2.59375 | 3 | [] | no_license | # NOTE: you have to install keyboard module to run this project
# pip install keyboard
from Game import Game
if __name__ == '__main__':
while True:
game = Game()
need_to_continue = game.play()
if not need_to_continue:
break
| true |
aa393e385ed35517f9505915943ee662959fe61b | Python | btroisi/PythonProjects | /Project1/shapeE.py | UTF-8 | 353 | 2.875 | 3 | [] | no_license | from turtle import *
def shapeD(distance):
    """Draw one pass of the figure with the turtle: six segments of equal
    length separated by left turns of 90, 90, 90, 30 and 120 degrees."""
    for angle in (90, 90, 90, 30, 120):
        forward(distance)
        left(angle)
    forward(distance)
def shapeE():
    """Draw four nested copies of shapeD at increasing sizes."""
    for distance in (70, 80, 90, 100):
        shapeD(distance)
shapeE()
raw_input('Enter to continue')
| true |
354c4630a28da6c5b28cc8dfdd584c55caca643e | Python | akyruu/blender-cartography-addon | /drawing/drawer/drawer.py | UTF-8 | 1,028 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | """
Module for drawing
History:
2020/08/21: v0.0.1
+ add cartography drawing
+ add cartography room drawing (point + plane)
"""
import logging
import utils
from model import CartographyRoom
from templating import CartographyTemplate
# Classes =====================================================================
class CartographyDrawer:
    """Drawer of cartography: delegates each room to the registered room drawers."""
    # Fields ------------------------------------------------------------------
    __logger = logging.getLogger('CartographyDrawer')
    # Constructor -------------------------------------------------------------
    def __init__(self, template: CartographyTemplate, *room_drawers):
        self.__template = template
        self.__room_drawers = room_drawers
    # Methods -----------------------------------------------------------------
    def draw(self, room: CartographyRoom):
        """Create a Blender collection named after the room and let every
        registered room drawer draw the room into it."""
        collection = utils.blender.collection.create(room.name)
        for roomDrawer in self.__room_drawers:
            roomDrawer.draw(room, collection)
| true |
7a474ccc4e0a542841010485f7a38a9701794d20 | Python | TerrenceTong/kdd-hw | /sample.py | UTF-8 | 5,019 | 2.609375 | 3 | [] | no_license | import os
import gc
import sys
import pandas as pd
def typicalsamling(group, typicalNDict):
    """Sample exactly typicalNDict[group.name] rows from a groupby group,
    without replacement (raises if the group holds fewer rows)."""
    sample_size = typicalNDict[group.name]
    return group.sample(n=sample_size)
def replaced_typicalsamling(group, typicalNDict):
    """Sample typicalNDict[group.name] rows from a groupby group WITH
    replacement, so more rows than the group holds may be drawn."""
    return group.sample(n=typicalNDict[group.name], replace=True)
def sample_time(TICKERS_DIR,BETA_DIR,RESULT_DIR,TIME,NUM_XLSX_BASE):
    """For each fund's beta weights, sample tickers per Type and write mean returns.

    For every beta CSV in BETA_DIR: build a per-Type sample-size dict (100x
    each weight in columns 1..8), read 5 consecutive ticker workbooks from
    TICKERS_DIR starting at NUM_XLSX_BASE, group each by the TIME column,
    draw a stratified sample per time period (falling back to sampling with
    replacement when a stratum is too small), and save the mean
    'Returns without Dividends' per period to RESULT_DIR/<beta filename>.

    NOTE(review): df_ticker_groupby_time is `del`eted inside the inner
    for-loop, so the getrefcount/del after a non-empty loop should raise
    NameError -- verify this path actually runs.
    """
    fund_number_filenames = os.listdir(BETA_DIR)
    fund_number_filenames.sort()
    print("fundnumber_files: \n {}".format(fund_number_filenames))
    tickers_filenames = os.listdir(TICKERS_DIR)
    tickers_filenames.sort()
    print("tickers_files: \n {}".format(tickers_filenames))
    """ already = os.listdir(RESULT_DIR) """
    for fund_number_filename in fund_number_filenames:
        """ if(fund_number_filename in already):
            print("continue")
            continue """
        RESULT = pd.DataFrame(columns=['fund number','time','return','isreplace'])
        df_beta = pd.read_csv(BETA_DIR+fund_number_filename)
        for idx, row in df_beta.iterrows():
            #print(row)
            # Sample sizes per Type: 100 shares scaled by each beta weight.
            typicalNDict = {
                1: int(100*row[1]),
                2: int(100*row[2]),
                3: int(100*row[3]),
                4: int(100*row[4]),
                5: int(100*row[5]),
                6: int(100*row[6]),
                7: int(100*row[7]),
                8: int(100*row[8])
            }
            print(typicalNDict)
            num_xlsx = NUM_XLSX_BASE
            #num_xlsx = 3
            for i in range(5):
                df_ticker_groupby_time = pd.read_excel(TICKERS_DIR+tickers_filenames[num_xlsx]).groupby(TIME)
                print("open {}".format(tickers_filenames[num_xlsx]))
                num_xlsx = num_xlsx+1
                #df_ticker_groupby_time = df_ticker.groupby(TIME)
                """ del df_ticker
                gc.collect() """
                for name,group in df_ticker_groupby_time:
                    #fund_number = row[0]
                    return_bar = 0
                    isreplace = 0
                    try:
                        # Stratified sample without replacement; fall back to
                        # sampling with replacement if any stratum is too small.
                        sample_result = group.groupby('Type',group_keys=False).apply(typicalsamling,typicalNDict)
                        return_bar = round(sample_result['Returns without Dividends'].mean(),5)
                    except Exception as e:
                        #print("replaced sample.")
                        isreplace = 1
                        sample_result = group.groupby('Type',group_keys=False).apply(replaced_typicalsamling,typicalNDict)
                        return_bar = round(sample_result['Returns without Dividends'].mean(),5)
                    result_series = pd.Series({'fund number':row[0],'name':name,'return':return_bar,'isreplace':isreplace})
                    RESULT.loc[RESULT.shape[0]] = result_series
                    print('{},{},{},{} has done.'.format(row[0],name,return_bar,isreplace))
                    # Aggressive manual memory management (see NOTE above).
                    del name
                    del group
                    del df_ticker_groupby_time
                    del return_bar
                    del isreplace
                    del sample_result
                    del result_series
                    gc.collect()
                """ del df_ticker_groupby_time
                gc.collect() """
                print("--------df_ticker_groupby_time----countnumber:{}".format(sys.getrefcount(df_ticker_groupby_time)))
                del df_ticker_groupby_time
                gc.collect()
            del num_xlsx
            del typicalNDict
            del idx
            del row
            gc.collect()
            # Rotate the 5-workbook window through the 20 ticker files.
            NUM_XLSX_BASE = (NUM_XLSX_BASE+5)%20
        RESULT.to_csv(RESULT_DIR+fund_number_filename,index=False)
        print("--------df_beta----countnumber:{}".format(sys.getrefcount(df_beta)))
        del df_beta
        gc.collect()
#sample_time('Monthly Final Database/','result_monthly/','mean_of_sample_monthly/','Month',0)
sample_time('Daily Final Database/','result_daily/','mean_of_sample_daily/','Names Date',0)
| true |
f31f994771afe4880bd0622288e7ed4eb71e900c | Python | freddycra/proyecto_1_paradigmas | /proyecto/Model.py | UTF-8 | 334 | 2.84375 | 3 | [] | no_license | from Grammar import Grammar
class Model(object):
    """Holds a Grammar and feeds it rules parsed from newline-separated text."""

    def __init__(self):
        super(Model, self).__init__()
        self.grammar = Grammar()

    def addRules(self, rules):
        """Add every line of `rules` to the grammar as an individual rule."""
        for rule in rules.split('\n'):
            self.grammar.addRule(rule)

    def printInfo(self):
        """Delegate printing of grammar details to the Grammar itself."""
        self.grammar.printInfo()
| true |
1c5a0b6d7e8866df0bea1325c2d6753a90da80f1 | Python | spacewander/vim-snippets-paster | /vim_snippets_paster/converters/ultility.py | UTF-8 | 1,389 | 2.890625 | 3 | [] | no_license | import re
class NotImplementFeatureException(Exception):
    """Raised while parsing when a snippet feature is recognised but not
    implemented.  Either pass a ready message via `msg` or a feature name
    via `feature` (which formats "<feature> is not implemented")."""
    def __init__(self, msg='', feature=None):
        if feature is not None:
            self.message = "%s is not implemented" % feature
        else:
            self.message = msg
        # Also hand the text to Exception so args/repr/pickling behave
        # normally (the original left args empty).
        Exception.__init__(self, self.message)
    def __str__(self):
        return self.message
class UnsupportFeatureException(Exception):
    """Raised while building when a snippet feature is not supported by the
    target format.  Either pass a ready message via `msg` or a feature name
    via `feature` (which formats "<feature> is unsupport")."""
    def __init__(self, msg='', feature=None):
        if feature is not None:
            self.message = "%s is unsupport" % feature
        else:
            self.message = msg
        # Also hand the text to Exception so args/repr/pickling behave
        # normally (the original left args empty).
        Exception.__init__(self, self.message)
    def __str__(self):
        return self.message
embeded = re.compile('`(.*?)`', re.MULTILINE | re.DOTALL)
# It is not easy to handle nested brackets with regex in python
# See http://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
# Current version can only handle two-level nesting, but it may be enough
placeholder = re.compile('\${([^}]*\$\{[^}]*\}.*?|.*?)}|\$(\d+)(?!\w)', re.MULTILINE | re.DOTALL)
transformation = re.compile('\${(\d+/.*?)}', re.MULTILINE | re.DOTALL)
def format_placeholders(lines):
    """Normalize snippet tab stops in each line.

    1. convert $0 and $VISUAL to ${0} and ${VISUAL}
    2. convert $1, $2, ... into ${1}, ${2}, ...

    A backslash-escaped \\$N is left untouched.
    """
    digit_stop = re.compile(r'(?<!\\)\$(\d+)')
    result = []
    for line in lines:
        braced = digit_stop.sub(r'${\1}', line)
        result.append(braced.replace('$VISUAL', '${VISUAL}'))
    return result
| true |
56cf00c457bfc2053e9d8be9c0f3d49ae084c9ef | Python | Purposefully/TeachMe | /lms_app/management/commands/seed.py | UTF-8 | 3,859 | 3.0625 | 3 | [] | no_license | from django.core.management.base import BaseCommand
from ...models import Course, Question, Answer
import random
from django.utils.crypto import get_random_string
# python manage.py seed --mode=refresh
# Clears all data and creates questions and answers
MODE_REFRESH = 'refresh'
# Clears all data and does not create any object
MODE_CLEAR = 'clear'
class Command(BaseCommand):
    """Management command: `python manage.py seed --mode=refresh|clear`."""
    help = "seed database for testing and development."
    def add_arguments(self, parser):
        parser.add_argument('--mode', type=str, help="Mode")
    def handle(self, *args, **options):
        self.stdout.write('seeding data...')
        run_seed(self, options['mode'])
        self.stdout.write('done')
def clear_data():
    """Delete all Answer and Question rows (answers first, respecting the FK)."""
    # Deletes all the table data
    # logger.info("Delete instances")
    Answer.objects.all().delete()
    Question.objects.all().delete()
def create_course():
    """Create and persist one Course with a random topic and 6-char video id."""
    # logger.info("Creating a course")
    topics = ["dogs", "cats", "birds", "coding", "mental health"]
    title = random.choice(topics)
    course = Course(
        title=title,
        description=f"All about "+title,
        video_id = get_random_string(length=6)
    )
    course.save()
    # logger.info("{} course created.".format(course))
    # print("*************************************************************")
    # print(course.__dict__)
    return course
def create_quiz():
    """Create five shuffled Questions for the most recent Course, each with
    one correct and three wrong Answers.

    NOTE(review): ``correct_answer_index`` is drawn once before the loop,
    so every question in the quiz places its correct answer in the same
    slot -- confirm whether that is intended.
    """
    # logger.info("Creating a quiz")
    questions = [
        "What lorem ipsum dolor sit amet, consectetur adipiscing elit?",
        "Who lorem ipsum dolor sit amet, consectetur adipiscing elit?",
        "Where lorem ipsum dolor sit amet, consectetur adipiscing elit?",
        "How lorem ipsum dolor sit amet, consectetur adipiscing elit?",
        "When lorem ipsum dolor sit amet, consectetur adipiscing elit?"
    ]
    random.shuffle(questions)
    correct_answer_index = random.randint(1,4)
    # temporarily assign a correct_answer_id and then come back to update it
    for question in questions:
        item = Question(
            content = question,
            correct_answer_id = 2,
            course = Course.objects.last()
        )
        item.save()
        # a_num indexes the three distinct wrong-answer texts (0..2).
        a_num = 0
        # Answer slots 1..4: the chosen slot gets the correct answer, the
        # others get the wrong answers.
        for num in range(1,5):
            if num == correct_answer_index:
                correct_option = create_correct_answer()
                item.correct_answer_id = correct_option.id
                item.save()
            else:
                create_wrong_answer(a_num)
                a_num +=1
        # print("-----------------------------------------------------------------------")
        # print(item.__dict__)
def create_correct_answer():
    """Create and save the correct Answer for the most recent Question.

    Returns:
        The saved Answer instance (its id becomes Question.correct_answer_id).
    """
    # Creating a correct answer
    # logger.info("creating a correct answer")
    correct_answers = [
        "Pick me! I'm the right answer.",
        "Obviously the correct answer",
        "Pick me if you want to be right!",
        "I'm telling you: this is the correct answer.",
        "Hint: this is the correct answer!"
    ]
    correct_answer = Answer(
        content = random.choice(correct_answers),
        question = Question.objects.last()
    )
    correct_answer.save()
    # print(correct_answer.__dict__)
    return correct_answer
def create_wrong_answer(idx):
    """Create and save wrong answer number *idx* (0-2) for the most recent
    Question.

    Returns:
        The saved Answer instance.
    """
    # Creating a wrong answer (idx selects one of the three texts below)
    wrong_answers = [
        "An appealing but incorrect answer.",
        "Obviously NOT the correct answer",
        "Hint: this is an incorrect answer!"
    ]
    answer = Answer(
        content = wrong_answers[idx],
        question = Question.objects.last()
    )
    answer.save()
    # print(answer.__dict__)
    return answer
def run_seed(self, mode):
    """Wipe seeded rows, then (unless *mode* is 'clear') seed two courses,
    each with a quiz attached."""
    clear_data()
    if mode == MODE_CLEAR:
        return
    # Two courses, each followed by its quiz.
    for _ in range(2):
        create_course()
        create_quiz()
| true |
b398604ad3e1afce663de1636333f158de6285f5 | Python | bahattin-urganci/python-training | /dictionary.py | UTF-8 | 298 | 3.078125 | 3 | [
"MIT"
] | permissive | import pandas as pd
# Region name and the provinces that belong to it (Mediterranean Turkey).
bolge = 'Akdeniz'
sehirler = ['Burdur',
            'Isparta',
            'Antalya',
            'Mersin',
            'Adana',
            'Kahramanmaraş',
            'Osmaniye',
            'Hatay']
# A dict built straight from the name and the list -- no loop needed.
data = {'Bölge': bolge, 'Şehirler': sehirler}
# Wrap the dict in a DataFrame (the scalar region name is broadcast
# against the city list) and show it.
df = pd.DataFrame(data)
print(df)
63d86ed66e7ee222f24434a5caca5698f82d8f23 | Python | Fedy1661/Informatics-EGE-2022 | /23/137/code.py | UTF-8 | 279 | 3.171875 | 3 | [] | no_license | import math
def f(start, x):
    """Recursively count the sequences from *start* down to *x* under the
    task's rules (EGE informatics task 23)."""
    if start < x:
        return 0
    if start == x:
        return 1
    # Exact powers of two only step down by one.
    if math.log2(start) % 1 == 0:
        return f(start - 1, x)
    # Otherwise: step down by one, or drop to the largest power of two
    # below start (binary "1" followed by floor(log2(start)) zeros).
    power_floor = int('1' + '0' * int(math.log2(start)), 2)
    return f(start - 1, x) + f(power_floor, x)
print(f(int('1100', 2), int('100', 2)))
6dfd38e3bb26b0320145ef55199107288664a790 | Python | sellalab/HumanLinkedSelectionMaps | /likelihood/jackknife_params.py | UTF-8 | 2,090 | 2.59375 | 3 | [] | no_license | __author__ = 'davidmurphy'
import os
import numpy as np
from sys import argv
from classes.runstruct import ChromStruct, root_dir
init_dir = root_dir + '/result/init_files'
final_dir = root_dir + '/result/final_files'
ffmt = init_dir + '/YRI.{an}.BS1.6.CS0.0.NOT_STARTED.initial.txt'
def jack_params(anno):
    """Collect composite-run parameters across jackknife folds for *anno*.

    Scans final_dir for folders named '{anno}_jkidx_NNNN', loads the single
    'composite' ChromStruct file in each, and saves four numpy arrays
    (DFE pmf, total udel, pi0 ratio, best composite likelihood) into
    '{anno}_jackknife_results/'.

    NOTE: Python 2 code (xrange, print statements).
    """
    # set path for saving jackknife results
    save_dir = final_dir + '/{an}_jackknife_results/'.format(an=anno)
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)
    # create list of filenames for saving or loading presaved data
    ftokens = ['pmf.npy', 'udl.npy', 'pi0.npy', 'clh.npy']
    # save data arrays generated from original files if flagged
    pmf = []
    udl = []
    pi0 = []
    clh = []
    # 1441 jackknife leave-one-out indices, zero-padded to 4 digits
    for jkidx in xrange(1441):
        ji = '{:04}'.format(jkidx)
        # set foldrer path to current jackknife index
        fldr = '{an}_jkidx_{ji}'.format(an=anno, ji=ji)
        fpath = final_dir + '/' + fldr
        # skip jkidx folder paths that dont exist
        if (not os.path.isdir(fpath)) or (len(os.listdir(fpath)) == 0):
            continue
        # get list of files in the folder and find the "composite" file
        f_list = os.listdir(fpath)
        c = [f for f in f_list if 'composite' in f]
        # should only have one composite file
        if len(c) > 1:
            print "MULTIPLE COMPOSITES! {}".format(fpath)
        # initialize RunStruct with composite file
        f_jk = '{}/{}'.format(fpath, c[0])
        cst = ChromStruct('chr1', init=f_jk)
        # get dfe, udel, pi0 and CLLH from composite run
        # (1e8 rescales the u estimates; pi0 is params[-1]/tau_init)
        pmf.append(cst.uvec[0] * 1e8)
        udl.append(sum(cst.uvec[0]) * 1e8)
        pi0.append(cst.params[-1] / cst.fixed.tau_init)
        clh.append(cst.stat.best_lh)
    # convert to arrays and save
    dtlists = [pmf, udl, pi0, clh]
    for ft, dt in zip(ftokens, dtlists):
        fsave = save_dir + ft
        np.save(fsave, np.array(dt))
def main():
    """CLI entry point: ``jackknife_params <anno>`` (Python 2)."""
    if len(argv) != 2:
        print 'usage: jackknife_params <anno>'
        exit(1)
    jack_params(argv[1])
if __name__ == '__main__':
main() | true |
cfac587d1557d1bd44222d5c723f1499f2637cc6 | Python | xyths/trading-robot | /eval/GateIO/HttpUtil.py | UTF-8 | 1,248 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import http.client
import urllib
import json
import hashlib
import hmac
def getSign(params, secretKey):
    """Return the HMAC-SHA512 hex signature of *params* keyed by *secretKey*.

    The parameters are serialised as "k1=v1&k2=v2" in dict insertion order
    (values converted with str) before signing -- this is the Gate.io
    private-API signature scheme used by httpPost.
    """
    payload = '&'.join('{}={}'.format(k, params[k]) for k in params)
    digest = hmac.new(bytes(secretKey, encoding='utf8'),
                      bytes(payload, encoding='utf8'),
                      hashlib.sha512)
    return digest.hexdigest()
def httpGet(url, resource, params=''):
    """GET https://<url><resource>/<params> and return the decoded JSON.

    NOTE(review): with the default params='' the request path ends in a
    trailing '/' -- confirm the API accepts that.  The connection is not
    closed explicitly here (unlike httpPost).
    """
    conn = http.client.HTTPSConnection(url, timeout=10)
    conn.request("GET", resource + '/' + params)
    response = conn.getresponse()
    data = response.read().decode('utf-8')
    return json.loads(data)
def httpPost(url, resource, params, apiKey, secretKey):
    """POST form-encoded *params* to https://<url><resource> with KEY/SIGN
    authentication headers; return the raw response body as a string.

    Side effect: *params* is emptied via params.clear() after the request,
    so the caller's dict is mutated.
    """
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "KEY": apiKey,
        # Signature is computed over the *unencoded* params by getSign.
        "SIGN": getSign(params, secretKey)
    }
    conn = http.client.HTTPSConnection(url, timeout=10)
    tempParams = urllib.parse.urlencode(params) if params else ''
    # NOTE(review): debug print of the encoded body left in place.
    print(tempParams)
    conn.request("POST", resource, tempParams, headers)
    response = conn.getresponse()
    data = response.read().decode('utf-8')
    params.clear()
    conn.close()
    return data
| true |
0d8bd852714cc49a97711483bb918f6e0ca03a04 | Python | MatoPlus/ProjectWitchCraft | /main.py | UTF-8 | 27,329 | 3.21875 | 3 | [
"MIT"
] | permissive | """Author: Rixin Yang
Date: May 30, 2018
Description: Summative Bullet Hell Game - A bullet hell game created in
pygame with the game_sprites class.
Known bugs: Some sound do not play when they are supossed to.
*Please note that must instructions are in full detail in the Readme text
file in the same file directory as main.
"""
# I - IMPORT AND INITIALIZE
import pygame, game_sprites, random
#pre_init reduces sound delay (44.1kHz, signed 16-bit, mono, 512-byte buffer)
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
def main():
    '''This function defines the 'mainline logic' for PROJECT: Witchcraft.

    Menu/game protocol: game_intro returns 1 to start a game, 0 to quit;
    game_loop returns 1 to go back to the menu, 0 on window exit.
    '''
    # DISPLAY - set display resolution and caption.
    screen_size = (640, 480)
    screen = pygame.display.set_mode(screen_size)
    pygame.display.set_icon(pygame.image.load(
        "images/icon.png").convert_alpha())
    pygame.display.set_caption("PROJECT: Witchcraft")
    #Set up main menu loop
    while game_intro(screen):
        #If game loop if over via window exit, kill game. instead of loop back.
        if not game_loop(screen):
            break
    # Unhide the mouse pointer - before closing window
    pygame.mouse.set_visible(True)
    #Quit the game with delay to hear music fade
    pygame.mixer.music.fadeout(1000)
    pygame.time.delay(1000)
    pygame.quit()
def pause(screen):
    '''This function pauses the game loop with the darker paused frame as
    background. This function accepts the screen parameter to capture the
    paused frame from the game loop when the function is called.

    Returns 1 to resume, 0 to go back to the main menu, 2 on window exit.
    '''
    # E - Entities - background, buttons and sprite group set up
    background = screen
    #dark surface is a special surface that is blited to make background darker.
    dark = pygame.Surface((background.get_width()-200, background.get_height()),
                          flags=pygame.SRCALPHA)
    dark.fill((50, 50, 50, 0))
    # BLEND_RGBA_SUB subtracts the grey fill, dimming the captured frame.
    background.blit(dark, (0, 0), special_flags=pygame.BLEND_RGBA_SUB)
    paused = pygame.image.load("images/paused.png").convert_alpha()
    background.blit(
        paused, ((screen.get_width()-330)/2, screen.get_height()-300))
    screen.blit(background, (0, 0))
    resume_button = game_sprites.Button(
        ((screen.get_width()-200)/2, screen.get_height()-200), "Resume", (255,255,255))
    menu_button = game_sprites.Button(
        ((screen.get_width()-200)/2, screen.get_height()-150), "Main Menu", (255,255,255))
    #Buttons in order
    buttons = [resume_button, menu_button]
    #Set up sprite group.
    all_sprites = pygame.sprite.Group(buttons)
    #Sound effects
    select_sound = pygame.mixer.Sound("sounds/select.ogg")
    ok = pygame.mixer.Sound("sounds/ok.ogg")
    select_sound.set_volume(0.3)
    ok.set_volume(0.3)
    # A - Action (broken into ALTER steps)
    # A - Assign values to key variables
    clock = pygame.time.Clock()
    keep_going = True
    FPS = 30
    #Starting select.
    selected = [buttons[0]]
    # L - Loop
    while keep_going:
        # T - Timer to set frame rate
        clock.tick(FPS)
        # E - Event handling
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keep_going = False
                #Window exit return value from pause to game loop
                return 2
            #Navigate through buttons
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    if selected != [resume_button]:
                        select_sound.play()
                        selected = [buttons[(buttons.index(selected[0])-1)]]
                if event.key == pygame.K_DOWN:
                    if selected != [menu_button]:
                        select_sound.play()
                        selected = [buttons[(buttons.index(selected[0])+1)]]
                #Confirming button press on z.
                if event.key == pygame.K_z:
                    keep_going = False
                    ok.play()
                    if selected == [resume_button]:
                        #Retrun resume value
                        return 1
                    elif selected == [menu_button]:
                        #Return menu value
                        return 0
        #Select button highlight
        for select in selected:
            select.set_select()
        # R - Refresh display
        all_sprites.clear(screen, background)
        all_sprites.update()
        all_sprites.draw(screen)
        pygame.display.flip()
    pygame.display.flip()
def game_over(screen):
    '''This function pauses the game loop with a darker paused frame as
    background using the screen parameter after the game is over. This
    function to gives player choices to play again or go back to menu.

    Returns 1 to restart, 0 to go back to the main menu, 2 on window exit.
    '''
    # E - Entities - background, buttons and sprite group set up
    background = screen
    #dark surface is a special surface that is blited to make background darker.
    dark = pygame.Surface((background.get_width()-200, background.get_height()),
                          flags=pygame.SRCALPHA)
    dark.fill((50, 50, 50, 0))
    background.blit(dark, (0, 0), special_flags=pygame.BLEND_RGBA_SUB)
    # NOTE(review): local `game_over` shadows this function's name; harmless
    # here (the function is not re-entered) but confusing.
    game_over = pygame.image.load("images/game_over.png").convert_alpha()
    background.blit(
        game_over, ((screen.get_width()-400)/2, screen.get_height()-300))
    screen.blit(background, (0, 0))
    restart_button = game_sprites.Button(
        ((screen.get_width()-200)/2, screen.get_height()-200), "Restart",
        (255,255,255))
    menu_button = game_sprites.Button(
        ((screen.get_width()-200)/2, screen.get_height()-150), "Main Menu",
        (255,255,255))
    #Buttons in order
    buttons = [restart_button, menu_button]
    all_sprites = pygame.sprite.Group(buttons)
    #Sound effects
    select_sound = pygame.mixer.Sound("sounds/select.ogg")
    ok = pygame.mixer.Sound("sounds/ok.ogg")
    select_sound.set_volume(0.3)
    ok.set_volume(0.3)
    # A - Action (broken into ALTER steps)
    # A - Assign values to key variables
    clock = pygame.time.Clock()
    keep_going = True
    FPS = 30
    #Starting select.
    selected = [buttons[0]]
    # L - Loop
    while keep_going:
        # T - Timer to set frame rate
        clock.tick(FPS)
        # E - Event handling
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keep_going = False
                #Window exit return value
                return 2
            #Navigate through buttons
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    if selected != [restart_button]:
                        select_sound.play()
                        selected = [buttons[(buttons.index(selected[0])-1)]]
                if event.key == pygame.K_DOWN:
                    if selected != [menu_button]:
                        select_sound.play()
                        selected = [buttons[(buttons.index(selected[0])+1)]]
                #Confirming button press on z.
                if event.key == pygame.K_z:
                    keep_going = False
                    ok.play()
                    if selected == [restart_button]:
                        #Return resume value
                        return 1
                    elif selected == [menu_button]:
                        #Return menu value
                        return 0
        #Select button highlight
        for select in selected:
            select.set_select()
        # R - Refresh display
        all_sprites.clear(screen, background)
        all_sprites.update()
        all_sprites.draw(screen)
        pygame.display.flip()
    pygame.display.flip()
def game_intro(screen):
    '''This function defines the main menu logic for the game PROJECT:
    Witchcraft. This function accepts a display parameter to know which surface
    to blit all events.

    Returns 1 to start the game, 0 to quit (also on window exit).  The
    "Erase Data" button resets the saved highscore and stays in the menu.
    '''
    # E - Entities - background, buttons and sprite group set up
    background = pygame.image.load("images/title.png").convert()
    screen.blit(background, (0, 0))
    start_button = game_sprites.Button(
        (screen.get_width()/2, screen.get_height()-130), "Start", (0,0,0))
    erase_button = game_sprites.Button(
        (screen.get_width()/2, screen.get_height()-90), "Erase Data", (0,0,0))
    quit_button = game_sprites.Button(
        (screen.get_width()/2, screen.get_height()-50), "Quit", (0,0,0))
    #Buttons in order
    buttons = [start_button, erase_button, quit_button]
    #Set up sprite group.
    all_sprites = pygame.sprite.Group(buttons)
    #Sounds
    #Background music
    pygame.mixer.music.load("sounds/main_menu.ogg")
    pygame.mixer.music.set_volume(0.3)
    pygame.mixer.music.play(-1)
    #Sound effects
    select_sound = pygame.mixer.Sound("sounds/select.ogg")
    ok = pygame.mixer.Sound("sounds/ok.ogg")
    reset = pygame.mixer.Sound("sounds/reset.ogg")
    select_sound.set_volume(0.3)
    ok.set_volume(0.3)
    reset.set_volume(0.3)
    # A - Action (broken into ALTER steps)
    # A - Assign values to key variables
    clock = pygame.time.Clock()
    keep_going = True
    FPS = 30
    #Starting select.
    selected = [buttons[0]]
    # L - Loop
    while keep_going:
        # T - Timer to set frame rate
        clock.tick(FPS)
        # E - Event handling
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keep_going = False
                #Return exit game value.
                return 0
            #Navigate through buttons
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    if selected != [start_button]:
                        select_sound.play()
                        selected = [buttons[(buttons.index(selected[0])-1)]]
                if event.key == pygame.K_DOWN:
                    if selected != [quit_button]:
                        select_sound.play()
                        selected = [buttons[(buttons.index(selected[0])+1)]]
                #Confirming button press on z.
                if event.key == pygame.K_z:
                    if selected != [erase_button]:
                        keep_going = False
                        ok.play()
                        if selected == [start_button]:
                            pygame.mixer.music.stop()
                            #Return start game loop value.
                            return 1
                        elif selected == [quit_button]:
                            #Return exit game value.
                            return 0
                    else:
                        reset.play()
                        #reset highscore by overwriting the save file with 0
                        save_data = open("data/highscore.txt", 'w')
                        save_data.write(str(0))
                        save_data.close()
        #Select button highlight
        for select in selected:
            select.set_select()
        # R - Refresh display
        all_sprites.clear(screen, background)
        all_sprites.update()
        all_sprites.draw(screen)
        pygame.display.flip()
    pygame.display.flip()
def game_loop(screen):
    '''This function defines the main game logic for the game PROJECT:
    Witchcraft. This function accepts a display parameter to know which surface
    to blit all events sprites.

    Returns 1 to go back to the main menu, 0 when the window was closed
    (the pause and game_over menus signal window exit with 2).
    '''
    # ENTITIES - create background and gameover label.
    background = game_sprites.Background()
    # Create a list of Joystick objects.
    joysticks = []
    for joystick_no in range(pygame.joystick.get_count()):
        stick = pygame.joystick.Joystick(joystick_no)
        stick.init()
        joysticks.append(stick)
    #Sound - loading and setting volume
    #Music
    pygame.mixer.music.load("sounds/background.ogg")
    pygame.mixer.music.set_volume(0.2)
    pygame.mixer.music.play(-1)
    #Sound effects.
    paused = pygame.mixer.Sound("sounds/pause.ogg")
    player_death = pygame.mixer.Sound("sounds/player_death.ogg")
    player_shoot = pygame.mixer.Sound("sounds/player_shoot.ogg")
    graze = pygame.mixer.Sound("sounds/graze.ogg")
    point = pygame.mixer.Sound("sounds/point.ogg")
    enemy_death = pygame.mixer.Sound("sounds/enemy_death.ogg")
    life_drop = pygame.mixer.Sound("sounds/get_life.ogg")
    bomb_drop = pygame.mixer.Sound("sounds/get_bomb.ogg")
    bombing = pygame.mixer.Sound("sounds/bomb.ogg")
    # One shot sound per enemy bullet type (bullet1.ogg .. bullet5.ogg).
    bullet_sounds = []
    for sound in range(1,6):
        bullet_sounds.append(pygame.mixer.Sound("sounds/bullet"+
                                                str(sound)+".ogg"))
    paused.set_volume(0.3)
    player_death.set_volume(0.3)
    player_shoot.set_volume(0.1)
    graze.set_volume(0.3)
    point.set_volume(0.3)
    enemy_death.set_volume(0.4)
    life_drop.set_volume(0.4)
    bomb_drop.set_volume(0.4)
    bombing.set_volume(0.4)
    for bullet_sound in bullet_sounds:
        bullet_sound.set_volume(0.1)
    #Player sprite creation, append them in a list.
    player = game_sprites.Player(screen)
    hitbox = game_sprites.Hitbox(screen, player)
    # Sprites for: ScoreKeeper label
    score_tab = game_sprites.Score_tab(screen)
    #Cloud sprite
    clouds = []
    for cloud in range(4):
        clouds.append(game_sprites.Cloud(screen))
    #Enemy spawner sprites
    spawners = []
    for spawner_type in range(2):
        spawners.append(game_sprites.Spawner(screen, spawner_type))
    #Initialize sprite groups for better layering
    low_sprites = pygame.sprite.OrderedUpdates(spawners, background, clouds,
                                               player, hitbox)
    enemy_sprites = pygame.sprite.OrderedUpdates()
    player_bullet_sprites = pygame.sprite.OrderedUpdates()
    enemy_bullet_sprites = pygame.sprite.OrderedUpdates()
    bomb_sprites = pygame.sprite.OrderedUpdates()
    animation_sprites = pygame.sprite.OrderedUpdates()
    drop_sprites = pygame.sprite.OrderedUpdates()
    top_sprites = pygame.sprite.OrderedUpdates(score_tab)
    #All sprites groups up, layering with order
    all_sprites = pygame.sprite.OrderedUpdates(low_sprites, enemy_sprites, \
        player_bullet_sprites, enemy_bullet_sprites, animation_sprites, \
        drop_sprites, top_sprites)
    # ASSIGN - assign important variables to start game.
    clock = pygame.time.Clock()
    keep_going = True
    # NOTE(review): half_mode is assigned but never used in this function.
    half_mode = False
    difficulty = 0
    # (boss_limit, common_limit) pairs indexed by difficulty 0..4.
    limits = [(1, 2), (1, 3), (2, 4), (2, 5), (3, 6)]
    boss_limit, common_limit = limits[difficulty]
    common_enemies = 0
    boss_enemies = 0
    FPS = 30
    frames_passed = 0
    window_exit = 0
    restart = 0
    # Countdown of frames between losing the last life and the menu.
    game_over_frames = 30
    # Hide the mouse pointer
    pygame.mouse.set_visible(False)
    # LOOP
    while keep_going:
        # TIME
        clock.tick(FPS)
        # EVENT HANDLING: player use arrow keys
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keep_going = False
                window_exit = 1
        #Get a list containing boolean values of pressed keys to their
        #position.
        keys_pressed = pygame.key.get_pressed()
        #Exit program on escape
        if keys_pressed[pygame.K_ESCAPE]:
            paused.play()
            pygame.mixer.music.pause()
            option = pause(screen)
            if option == 0 or option == 2:
                keep_going = False
                if option == 2:
                    window_exit = 1
            pygame.mixer.music.unpause()
        #Movement.
        player.change_direction((0,0))
        if keys_pressed[pygame.K_LEFT]:
            player.change_direction((-1,0))
        if keys_pressed[pygame.K_RIGHT]:
            player.change_direction((1,0))
        if keys_pressed[pygame.K_UP]:
            player.change_direction((0,-1))
        if keys_pressed[pygame.K_DOWN]:
            player.change_direction((0,1))
        #Toggle shoot mode
        if keys_pressed[pygame.K_z] and not player.get_lock():
            player.shoot_mode(1)
        elif not keys_pressed[pygame.K_z]:
            player.shoot_mode(0)
        #Add bomb to sprite if there is no bomb on screen, not locked.
        if keys_pressed[pygame.K_x] and not bomb_sprites and not \
           player.get_lock() and score_tab.get_bombs():
            bombing.play()
            player.set_invincible(2)
            bomb_sprites.add(game_sprites.Bomb(player.get_center()))
            score_tab.bomb_used()
        #Toggle focus mode.
        if keys_pressed[pygame.K_LSHIFT] and not player.get_lock():
            player.focus_mode(1)
            hitbox.set_visible(1)
        elif not keys_pressed[pygame.K_LSHIFT]:
            player.focus_mode(0)
            hitbox.set_visible(0)
        #Record frames_passed
        frames_passed += 1
        #Difficulty based on frames passed (30s, 1min, 2min, 5min marks).
        if frames_passed == FPS*30:
            difficulty = 1
        elif frames_passed == FPS*60:
            difficulty = 2
        elif frames_passed == FPS*60*2:
            difficulty = 3
        elif frames_passed == FPS*60*5:
            difficulty = 4
        #Set spawn limits based on difficulty.
        boss_limit, common_limit = limits[difficulty]
        #Set spawn rates of spawner classes based on difficulty.
        for spawner in spawners:
            spawner.set_rate(difficulty)
        #Player bullet event. Let player shoot.
        if player.get_shoot() and not player.get_cool_rate() and not \
           player.get_lock():
            player_shoot.play()
            player_bullet_sprites.add(player.spawn_bullet())
        #Enemy bullet/sprites. Hit detection, only if player not invincible
        if not player.get_invincible():
            #Enemy bullets - player hitbox collision.
            for hit in pygame.sprite.spritecollide(
                hitbox, enemy_bullet_sprites.sprites(), False):
                #Shrink the hitbox rect to detect actual size of hitbox
                if hitbox.rect.inflate(-14,-14).colliderect(hit) and \
                   not player.get_invincible():
                    #Player death events
                    animation_sprites.add(game_sprites.Explosion(
                        player.get_center(), 0))
                    player_death.play()
                    player.reset()
                    score_tab.life_loss()
            #Enemy sprites - hitbox collision
            for enemy in pygame.sprite.spritecollide(
                hitbox, enemy_sprites.sprites(), False):
                #Shrink the hitbox rect to detect actual size of hitbox
                if hitbox.rect.inflate(-14,-14).colliderect(enemy) and \
                   not player.get_invincible():
                    #Player death events
                    animation_sprites.add(game_sprites.Explosion(
                        player.get_center(), 0))
                    player_death.play()
                    player.reset()
                    score_tab.life_loss()
            #Grazing bullets, bullets/player sprite collision - add points.
            for bullet in pygame.sprite.spritecollide(
                player, enemy_bullet_sprites.sprites(), False):
                if player.rect.inflate(-6,-12).colliderect(bullet) and \
                   not player.get_invincible():
                    #Graze events if bullet can be grazed
                    if not bullet.get_grazed():
                        graze.play()
                        score_tab.add_points(0)
                        bullet.set_grazed(1)
        #Player sprite, drop sprite collision events.
        for drop in pygame.sprite.spritecollide(
            player, drop_sprites.sprites(), False):
            drop_type = drop.get_type()
            #Play correct sound (0/1 points, 2 life, 3 bomb)
            if drop_type <= 1:
                point.play()
            elif drop_type == 2:
                life_drop.play()
            elif drop_type == 3:
                bomb_drop.play()
            #Add point, life or bomb count to score tab depedning on type.
            score_tab.add_points((drop_type)+6) #+6 is used for drop points
            drop.kill()
        #Enemy rect and shoot events.
        for enemy in enemy_sprites.sprites():
            #See if enemy is hit by bullet. Return list of bullet that hit.
            for bullet in pygame.sprite.spritecollide(
                enemy, player_bullet_sprites.sprites(), False):
                #Bullet hits enemy. Animate, damage and kill bullet.
                animation_sprites.add(
                    game_sprites.Explosion(bullet.get_center(), 1))
                enemy.damaged(1)
                bullet.kill()
            #Kill enemy if appropriate.
            if enemy.get_hp() <= 0 and not enemy.get_killed():
                #Play enemy death sound.
                enemy_death.play()
                #Set enemy instance killed to true.
                enemy.set_killed()
                animation_sprites.add(game_sprites.Explosion(
                    enemy.get_center(), 0))
                #Drop sprites when enemy killed. Determine #drops.
                #Types <= 3 are boss enemies (4 drops), others common (2).
                if enemy.get_type() <= 3:
                    drops = 4
                elif enemy.get_type() > 3:
                    drops = 2
                #Determine drop type.
                for drop in range(drops):
                    random_num = random.randrange(15)
                    #3 in 15 chance of droping big points
                    if random_num == 3 or random_num == 7 or \
                       random_num == 12:
                        drop_type = 1
                    #Special drops for only boss types, 1 in 15 chance.
                    elif random_num == 5 and drops == 4:
                        #2 in 3 chance bomb drop, 1 in 3 chance life drop.
                        random_special = random.randrange(3)
                        if random_special == 1:
                            drop_type = 2
                        else:
                            drop_type = 3
                    #Drop type normal if no special drops is called.
                    else:
                        drop_type = 0
                    #Create drop sprite
                    drop_sprites.add(game_sprites.Pick_up(
                        screen, enemy, drop_type))
                #Add the score of the corresponding enemy killed.
                score_tab.add_points(enemy.get_type())
            #Let enemy shoot if appropriate.
            if not enemy.get_cool_rate() and not enemy.get_down_frames() \
               and not enemy.get_lock():
                #Play bullet sound correpsonding to their bullet type
                bullet_sounds[enemy.get_type()-1].play()
                #Create bullets.
                enemy_bullet_sprites.add(enemy.spawn_bullet(player))
        #Bomb detection event. See if it hits bullets. Return list of bullets
        for bomb in bomb_sprites.sprites():
            for bullet in pygame.sprite.spritecollide(
                bomb, enemy_bullet_sprites.sprites(), False):
                #See if bomb is too small to detect collision with rim,
                #use entire area to detect area of bomb.
                #If not to small, use approximate bomb rim area to detect hit
                #by seeing if it doesn't collide with outside.
                if bomb.get_side() <= 140 or not bomb.rect.inflate(
                    -bomb.get_side()/4,-bomb.get_side()/4).colliderect(bullet):
                    #Animate and kill bullet.
                    animation_sprites.add(
                        game_sprites.Explosion(bullet.get_center(), 0))
                    bullet.kill()
        #Detect enemies, record types on screen.
        common_enemies = 0
        boss_enemies = 0
        for enemy in enemy_sprites.sprites():
            enemy_type = enemy.get_type()
            if enemy_type <= 3:
                boss_enemies += 1
            else:
                common_enemies += 1
        #Enemy spawning event, spawn enemy if appropriate, not pass spawn limit.
        for spawner in spawners:
            if (spawner.get_type() == 1 and boss_enemies < boss_limit) or\
               (spawner.get_type() == 0 and common_enemies < common_limit):
                spawner.set_lock(0)
                if not spawner.get_spawn_frames():
                    enemy_sprites.add(spawner.spawn_enemy())
            if spawner.get_type() == 1 and boss_enemies == boss_limit:
                spawner.set_lock(1)
            elif spawner.get_type() == 0 and common_enemies == common_limit:
                spawner.set_lock(1)
        #Check to end game when player has no more lives.
        if not score_tab.get_lives():
            #Keep reducing game_over frames for smooth game over transition.
            if game_over_frames > 1:
                game_over_frames -= 1
            #When game over frames are down, call game over menu.
            else:
                pygame.mixer.music.stop()
                restart = game_over(screen)
                keep_going = False
        #Update what is in the all_sprites group.
        all_sprites = pygame.sprite.OrderedUpdates(low_sprites, enemy_sprites,
            player_bullet_sprites, enemy_bullet_sprites,
            animation_sprites, bomb_sprites, drop_sprites, top_sprites)
        # REFRESH SCREEN - clear previous sprites, update positions and display
        all_sprites.clear(screen, background.get_surface())
        all_sprites.update()
        all_sprites.draw(screen)
        pygame.display.flip()
    #Save highscore after game.
    save_data = open("data/highscore.txt", 'w')
    save_data.write(str(score_tab.get_highscore()))
    save_data.close()
    #Deciding what to return depending on choice.
    if restart == 1:
        #Start game again, get value returned from next game
        game_value = game_loop(screen)
        #Return whatever is returned in next game loop if it is not 2
        if game_value != 2:
            return game_value
        else:
            #Treat as window exit if next value is 2
            window_exit = 1
    #Window exit from game over screen, treat as window exit.
    elif restart == 2:
        window_exit = 1
    #Return to main menu if returning and not window exit.
    if not window_exit:
        return 1
    #Return quit pygame value if window exit is called.
    else:
        return 0
# Call the main function
main()
| true |
99b104332576fc65ed3f36fb832034fc6eb923ab | Python | thisiszw/fyb | /submissions/tosubmit/predictor/predictor.py | UTF-8 | 645 | 3.140625 | 3 | [] | no_license | class Predictor:
    def __init__(self):
        """
        - `self.batch_size`: to tell the caller the batch_size
          (how many facts to pass per call to `predict`)
        """
        self.batch_size = 128
def predict(self, content):
"""
Args:
- contents: list of facts (as in string)
Returns:
- list of dict, with each dict to be a sentence result:
{
'accusation': list of integers (accusation index as in accu.txt)
'imprisonment': float
'articles': list fo integers (article index as in article.txt)
}
"""
return [
{
"accusation": [1],
"imprisonment": 5,
"articles": [5]
} for desc in content] | true |
e6dd63bbce38aaeeb262823f5ba12959d25aa39c | Python | BlueGranite/tpc-ds-dataset-generator | /notebooks/TPC-DS-GenerateData.py | UTF-8 | 4,097 | 2.78125 | 3 | [] | no_license | # Databricks notebook source
# DBTITLE 1,Generate TPC-DS data
# MAGIC %md
# MAGIC Generating data at larger scales can take hours to run, and you may want to run the notebook as a job.
# MAGIC
# MAGIC The cell below generates the data. Read the code carefully, as it contains many parameters to control the process. See the <a href="https://github.com/databricks/spark-sql-perf" target="_blank">Databricks spark-sql-perf repository README</a> for more information.
# COMMAND ----------
# MAGIC %scala
# MAGIC import com.databricks.spark.sql.perf.tpcds.TPCDSTables
# MAGIC
# MAGIC // Set:
# MAGIC val scaleFactor = "1" // scaleFactor defines the size of the dataset to generate (in GB).
# MAGIC val scaleFactoryInt = scaleFactor.toInt
# MAGIC
# MAGIC val scaleName = if(scaleFactoryInt < 1000){
# MAGIC f"${scaleFactoryInt}%03d" + "GB"
# MAGIC } else {
# MAGIC f"${scaleFactoryInt / 1000}%03d" + "TB"
# MAGIC }
# MAGIC
# MAGIC val fileFormat = "parquet" // valid spark file format like parquet, csv, json.
# MAGIC val rootDir = s"/mnt/datalake/raw/tpc-ds/source_files_${scaleName}_${fileFormat}"
# MAGIC val databaseName = "tpcds" + scaleName // name of database to create.
# MAGIC
# MAGIC // Run:
# MAGIC val tables = new TPCDSTables(sqlContext,
# MAGIC dsdgenDir = "/usr/local/bin/tpcds-kit/tools", // location of dsdgen
# MAGIC scaleFactor = scaleFactor,
# MAGIC useDoubleForDecimal = false, // true to replace DecimalType with DoubleType
# MAGIC useStringForDate = false) // true to replace DateType with StringType
# MAGIC
# MAGIC tables.genData(
# MAGIC location = rootDir,
# MAGIC format = fileFormat,
# MAGIC overwrite = true, // overwrite the data that is already there
# MAGIC partitionTables = false, // create the partitioned fact tables
# MAGIC clusterByPartitionColumns = false, // shuffle to get partitions coalesced into single files.
# MAGIC filterOutNullPartitionValues = false, // true to filter out the partition with NULL key value
# MAGIC tableFilter = "", // "" means generate all tables
# MAGIC numPartitions = 4) // how many dsdgen partitions to run - number of input tasks.
# MAGIC
# MAGIC // Create the specified database
# MAGIC sql(s"create database $databaseName")
# MAGIC
# MAGIC // Create the specified database
# MAGIC sql(s"create database $databaseName")
# MAGIC
# MAGIC // Create metastore tables in a specified database for your data.
# MAGIC // Once tables are created, the current database will be switched to the specified database.
# MAGIC tables.createExternalTables(rootDir, fileFormat, databaseName, overwrite = true, discoverPartitions = true)
# MAGIC
# MAGIC // Or, if you want to create temporary tables
# MAGIC // tables.createTemporaryTables(location, fileFormat)
# MAGIC
# MAGIC // For Cost-based optimizer (CBO) only, gather statistics on all columns:
# MAGIC tables.analyzeTables(databaseName, analyzeColumns = true)
# COMMAND ----------
# DBTITLE 1,View TPC-DS data
# examine the generated customer table (spark/display are Databricks globals)
df = spark.read.parquet("/mnt/datalake/raw/tpc-ds/source_files_001TB_parquet/customer")
display(df)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ###Sample Results
# MAGIC Below are a few sample results from generating data at the 1 and 1000 scale.
# MAGIC
# MAGIC | File Format | Generate Column Stats | Number of dsdgen Tasks | Partition Tables | TPC-DS Scale | Databricks Cluster Config | Duration | Storage Size |
# MAGIC | ----------- | --------------------- | ---------------------- | ---------------- | ------------ | --------------------------------------- | -------- | ------------ |
# MAGIC | csv | no | 4 | no | 1 | 1 Standard_DS3_v2 worker, 4 total cores | 4.79 min | 1.2 GB |
# MAGIC | parquet | yes | 4 | no | 1 | 1 Standard_DS3_v2 worker, 4 total cores | 5.88 min | 347 MB |
# MAGIC | json | no | 4 | no | 1 | 1 Standard_DS3_v2 worker, 4 total cores | 7.35 min | 5.15 GB |
# MAGIC | parquet | yes | 1000 | yes | 1000 | 4 Standard_DS3_v2 worker, 16 total cores | 4 hours | 333 GB |
| true |
856f183d214f4c86fc933bb1350b7c3c1d2bae96 | Python | akoshel/MADE | /Part_1/Алгоритмы и структуры данных/Homeworks/grafs_2/Task_B_new.py | UTF-8 | 946 | 2.9375 | 3 | [] | no_license | import sys
def dijkstra(n, s, graph):
    """Single-source shortest paths via Dijkstra's O(n^2) vertex scan.

    Args:
        n: number of vertices, labelled 0..n-1.
        s: source vertex.
        graph: adjacency lists; graph[v] is a list of (weight, neighbour).
    Returns:
        dict mapping each vertex to its distance from s
        (float('Inf') for unreachable vertices).
    """
    d = {i: float('Inf') for i in range(n)}
    used = {i: False for i in range(n)}
    d[s] = 0
    for _ in range(n):
        # Pick the UNUSED vertex with the smallest tentative distance.
        # (The original condition `next_e == -1 or (d[v] < d[next_e] and
        # not used[v])` could re-select an already-finalised vertex 0 on
        # every pass, stalling the scan and leaving vertices unprocessed.)
        next_e = -1
        for v in range(n):
            if not used[v] and (next_e == -1 or d[v] < d[next_e]):
                next_e = v
        # Stop once every remaining vertex is unreachable (or all used).
        if next_e == -1 or d[next_e] == float('Inf'):
            break
        used[next_e] = True
        # Relax all edges leaving the newly finalised vertex.
        for w, u in graph[next_e]:
            d[u] = min(d[u], d[next_e] + w)
    return d
def main() -> None:
    """Read an undirected weighted graph from stdin and print distances.

    Input: first line "n m", then m lines "a b w" with 1-based endpoints.
    NOTE(review): the source vertex is hard-coded to 4 and the whole
    distance dict is printed; the commented-out loop/stdout lines suggest
    this is leftover debug output -- confirm the intended output format.
    """
    n, m = list(map(int, sys.stdin.readline().split(' ')))
    smezh_list = [[] for _ in range(n)]
    for _ in range(m):
        a, b, w = list(map(int, sys.stdin.readline().split(' ')))
        a -= 1
        b -= 1
        smezh_list[a].append((w, b))
        smezh_list[b].append((w, a))
    #for i in range(n):
    d = dijkstra(n, 4, smezh_list)
    #    sys.stdout.write(str(d[0]) + ' ')
    print(d)
if __name__ == '__main__':
main()
| true |
e7a72f9ea046cdb7fc9f307a2e12bd5509ac9b03 | Python | bkhorrami/Algorithms | /Graph.py | UTF-8 | 2,562 | 3.3125 | 3 | [] | no_license | __author__ = 'babak_khorrami'
from collections import defaultdict
class Node(object):
    """Graph vertex with an identifier and an optional payload value."""

    def __init__(self, id, value=0):
        # BUG FIX: the constructor was named `_init_` (single underscores), so
        # it was never invoked — Node(id) raised TypeError and instances had
        # no attributes. Renamed to the `__init__` special method.
        self.id = id
        self.value = value

    def get_id(self):
        """Return the vertex identifier."""
        return self.id

    def get_value(self):
        """Return the vertex payload (0 if none was given)."""
        return self.value
class Edge(object):
    """Weighted edge from `tail` to `head` (weight defaults to 1)."""

    def __init__(self, tail, head, weight=1):
        # BUG FIX: the constructor was named `_init_` (single underscores), so
        # Edge(t, h, w) raised TypeError and no attributes were ever set.
        self.tail = tail
        self.head = head
        self.weight = weight

    def get_head(self):
        return self.head

    def get_tail(self):
        return self.tail

    def get_weight(self):
        return self.weight

    def get_opposite(self, v):
        """Return the endpoint opposite *v*, or None if *v* is not an endpoint."""
        if self.tail == v:
            return self.head
        elif self.head == v:
            return self.tail

    def get_ends(self):
        """
        :return: a tuple containing (tail, head)
        """
        return (self.tail, self.head)
#**** Graph Class :
class Graph(object):
    """Directed (default) or undirected weighted graph stored as nested
    adjacency maps: edges[tail][head] = weight.

    For undirected graphs `edges_incoming` is the same mapping object as
    `edges_outgoing`, mirroring the original design.
    """

    def __init__(self, nodes=None, edges_incoming=None, edges_outgoing=None, directed=True):
        # BUG FIX: the constructor was named `_init_` (single underscores), so
        # it never ran and instances had none of these attributes.
        self.directed = directed
        self.nodes = set() if nodes is None else nodes
        # BUG FIX: the adjacency maps were flat `defaultdict(int)`, so the
        # nested assignment `self.edges[t][h] = w` in add_edge() raised
        # TypeError unless add_node() had been called first. Nested
        # defaultdicts make add_edge() self-contained.
        self.edges = defaultdict(lambda: defaultdict(int))
        self.edges_outgoing = defaultdict(lambda: defaultdict(int)) if edges_outgoing is None else edges_outgoing
        if self.directed:
            self.edges_incoming = defaultdict(lambda: defaultdict(int)) if edges_incoming is None else edges_incoming
        else:
            # undirected graphs share one map for both directions
            self.edges_incoming = self.edges_outgoing
        self.node_count = len(self.nodes)
        # BUG FIX: replaces the "#ADD CODE HERE" placeholder; kept current by add_edge().
        self.edge_count = 0

    def get_node_count(self):
        # Derived from the node set so it stays correct after add_node/add_edge.
        return len(self.nodes)

    def get_edge_count(self):
        return self.edge_count

    def get_nodes(self):
        return self.nodes

    def get_edges(self):
        pass  # left unimplemented, as in the original

    def add_node(self, v):
        """Register vertex *v* with fresh (empty) adjacency entries."""
        self.nodes.add(v)
        self.edges[v] = defaultdict(int)
        self.edges_outgoing[v] = defaultdict(int)
        self.edges_incoming[v] = defaultdict(int)
        self.node_count = len(self.nodes)

    def add_edge(self, t, h, w):
        """Add edge t -> h with weight *w*; endpoints are auto-registered."""
        self.nodes.add(t)
        self.nodes.add(h)
        self.edges[t][h] = w
        self.edges_outgoing[t][h] = w
        if self.directed:
            self.edges_incoming[h][t] = w
        # BUG FIX: the edge counter was never incremented in the original.
        self.edge_count += 1
        self.node_count = len(self.nodes)

    def adjacent_nodes(self, v):
        """Return the vertices reachable from *v* by one outgoing edge."""
        return list(self.edges[v].keys())

    def incident_edges(self, v):
        pass  # left unimplemented, as in the original
| true |
6b7e8ab82191a9f4f2c5bb25a9bf4cc3bdfdc541 | Python | LabmemNo004/AmazonMoviesDataWarehouse | /数据准备/数据处理/MovieOrganize.py | UTF-8 | 1,410 | 2.890625 | 3 | [
"MIT"
] | permissive | import pandas as pd
import numpy as np
import Levenshtein
import re
import math
import codecs
import csv
def calculate(i,j):
    """Similarity score between product pages webs[i] and webs[j].

    NOTE(review): unfinished stub — per-feature scores and field values are
    assigned below but never combined, so the function always returns 1.
    Reads the module-level `webs` list defined further down this file.
    """
    # overall similarity (never updated below)
    score=1
    # per-feature similarities (never updated below)
    score_name=1
    score_director=1
    score_actor=1
    score_release=1
    score_time=1
    # feature values of the left-hand record
    # (indices 2/15/6/4/5 presumably map to name/director/actor/release/time
    #  columns of MovieProducts.csv — TODO confirm against the CSV header)
    movie_l=webs[i]
    name_l=movie_l[2]
    director_l=movie_l[15]
    actor_l=movie_l[6]
    release_l=movie_l[4]
    time_l=movie_l[5]
    # feature values of the right-hand record
    movie_r=webs[j]
    name_r=movie_r[2]
    director_r=movie_r[15]
    actor_r=movie_r[6]
    release_r=movie_r[4]
    time_r=movie_r[5]
    return score
# Load the scraped product pages, blank out NaN cells, then compare every pair
# of pages and record pairs judged to be the same movie in an adjacency list.
CSV="MovieProducts.csv"
data=pd.read_csv(CSV)
data=np.array(data)
webs=[] # list of scraped product-page records (~204654 rows expected)
for frame in data:
    j = 0
    for i in frame:
        if pd.isna(i):
            frame[j] = ""  # replace NaN cells with empty strings
        j += 1
    webs.append(frame.tolist())
map=[[0]for i in range(len(webs))] # page-similarity adjacency list (NOTE: shadows the builtin `map`)
movies=[] # movie grouping, filled in order; full data can be looked up in `webs`
left=0
right=1
HumanSense=0.5 # products with similarity above 0.5 are judged to be the same movie
# build the adjacency list over all unordered page pairs (O(n^2) comparisons)
for i in range(len(webs)-1):
    for j in range(len(webs)-i-1):
        left=i
        right=j+i+1
        similar=calculate(left,right)
        print("{}->{}:{}\n".format(left,right,similar))
        if similar>=HumanSense:
            map[left].append(right)
            map[right].append(left)
i=0
| true |
03d9cfcc802b224f4c8ebb62fa0a2f327327cdb3 | Python | nikhilbommu/DS-PS-Algorithms | /Leetcode/LeetCode Problems/ArrayPartitionI.py | UTF-8 | 240 | 3.25 | 3 | [] | no_license | class Solution:
def arrayPairSum(self, nums) -> int:
nums = sorted(nums)
sum1= 0
for i in range(0,len(nums),2):
sum1 += nums[i]
return sum1
# Manual smoke test: expected output is 9 (min of each sorted pair: 1 + 3 + 5).
s = Solution()
print(s.arrayPairSum([1,4,3,2,5,6]))
e78d35db1c4c67a831b447011898c1eddb855440 | Python | vasumv/photoproj | /test.py | UTF-8 | 1,243 | 2.625 | 3 | [] | no_license | from skimage import data, io, filter
from skimage.transform import resize
from scipy.misc import imshow
from path import Path
import numpy as np
# Photo-mosaic script (Python 2: uses `raw_input` and print statements).
# Rebuilds MigrantMother.jpg out of square tiles taken from ./great_depression/,
# matching each cell of the target image to the tile with the closest mean
# brightness.
dir = Path("./great_depression/")  # NOTE(review): shadows the builtin `dir`
side = int(raw_input("Enter size: "))  # tile edge length in pixels
values = {}
images = dir.files()
i = 0
or_pic = io.imread("MigrantMother.jpg", as_grey=True)
or_pic = np.divide(or_pic, 255.0)  # normalise grey levels to [0, 1]
# crop the target picture so both dimensions are a multiple of the tile size
length = or_pic.shape[0] - or_pic.shape[0] % side
width = or_pic.shape[1] - or_pic.shape[1] % side
matrix = np.zeros((length, width))
# Pre-compute, for every candidate tile, its mean brightness and resized pixels.
for image in images:
    total = 0.0
    pic = io.imread(image, as_grey=True)
    if pic.max() > 1.0:
        pic = np.divide(pic, 255.0)  # normalise 0-255 images to [0, 1]
    size = min(pic.shape[0], pic.shape[1])
    pic = pic[:size, :size]  # square-crop to the shorter edge
    pic = resize(pic, (side, side))
    average = np.mean(pic)
    values[image] = average, pic
# For each tile-sized cell of the target, paste the candidate tile whose mean
# brightness is closest to the cell's mean brightness.
for i in range(length / side):
    for j in range(width / side):
        least = 9999  # sentinel "worst distance so far"
        im = or_pic[side * i: side * (i + 1), side * j: side * (j + 1)]
        mean = np.mean(im)
        for image in values:
            if abs(mean - values[image][0]) < least:
                best_pic, least = values[image][1], abs(mean - values[image][0])
        print i, j
        matrix[side * i: side * (i + 1), side * j: side * (j + 1)] = best_pic
imshow(matrix)
dc4776e3af801f8a599fb7c6b7e750c4f5f246a4 | Python | liuzh139/database_render | /read_db_into_tensor.py | UTF-8 | 4,778 | 3.0625 | 3 | [] | no_license | import MySQLdb
import re
import json
import pandas as pd
import tensorflow as tf
db = MySQLdb.connect(
host = 'localhost',
user = 'root',
passwd = '19931124',
db = 'test'
)
cursor = db.cursor()
def get_column_name(connected_cursor):
    """Print every row of INFORMATION_SCHEMA.COLUMNS via *connected_cursor*.

    Purely a debugging helper: results go to stdout, nothing is returned.
    """
    connected_cursor.execute('SELECT * FROM INFORMATION_SCHEMA.COLUMNS')
    current = connected_cursor.fetchone()
    print("Column name query results:")
    while current:
        print(current)
        current = connected_cursor.fetchone()
    print("End of results!")
def get_column_from_table(cursor, table):
    """Return the column names of *table*, one sanitised string per row.

    Each fetched row tuple is converted with str() and every character
    outside [a-zA-Z0-9 \\n.'()] is stripped (commas, underscores, etc.).
    """
    cursor.execute(
        "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='%s'" % table
    )
    names = []
    current = cursor.fetchone()
    while current:
        # Strip special characters from the stringified row tuple.
        names.append(re.sub('[^a-zA-Z0-9 \n\.\'\(\)]', '', str(current)))
        current = cursor.fetchone()
    return names
def read_db_columns(cursor, table_name):
    """Fetch and print the first row of *table_name* (debugging helper).

    Returns None; output goes to stdout only.
    """
    cursor.execute("SELECT * FROM %s" % table_name)
    first_row = cursor.fetchone()
    print(first_row)
    # NOTE: the original also created an unused empty `columns` list here.
def return_json_columns(columns):
    """Build the ad-hoc '{ "column_name" : "<name>", ... }' text and return it
    JSON-encoded.

    NOTE(review): as in the original, the result is a JSON *string* (the whole
    braced text is quoted and escaped by json.dumps), not a JSON object, and
    the embedded text carries a trailing comma — kept for compatibility.
    """
    body = "".join(' "column_name" : "' + name + '", ' for name in columns)
    return json.dumps("{" + body + "}")
def read_json_to_columns(json_input):
    """Split the column names described by *json_input* into categorical and
    continuous name lists.

    json_input: JSON text of the form
        {"column": [{"name": ..., "data_type": "continous"|"categorical"}, ...]}
        (note: the expected spelling is "continous", as used throughout this file).

    Returns (CATEGORICAL_COLUMNS, CONTINUOUS_COLUMNS), or None after printing
    an error message when the input cannot be parsed.
    """
    # BUG FIX: the original overwrote `json_input` with a hard-coded example
    # string, so the caller's argument was silently ignored.
    CATEGORICAL_COLUMNS = []
    CONTINUOUS_COLUMNS = []
    try:
        decoded = json.loads(json_input)
        # Access data
        for x in decoded['column']:
            # BUG FIX: the original used `LIST += x['name']`, which extends the
            # list with the individual *characters* of the name ("age" became
            # ['a','g','e']) instead of appending the whole name.
            if x['data_type'] == 'continous':
                CONTINUOUS_COLUMNS.append(x['name'])
            if x['data_type'] == 'categorical':
                CATEGORICAL_COLUMNS.append(x['name'])
        return CATEGORICAL_COLUMNS, CONTINUOUS_COLUMNS
    except (ValueError, KeyError, TypeError):
        print("JSON format error")
def read_json_to_tensors(json_input):
    """Build one TF contrib feature column per entry of the JSON column spec.

    Returns a dict {"tensor_<name>": feature_column}, or None (after printing)
    on malformed input.

    NOTE(review): like read_json_to_columns above, the `json_input` argument is
    immediately overwritten by the hard-coded example string below, so callers'
    input is ignored — presumably leftover debug scaffolding; confirm before use.
    """
    # This is example of how to read the columns into list
    json_input = '{"column": [{"name": "age", "data_type": "continous"}, {"name": "gender", "data_type": "categorical"} ] }'
    try:
        decoded = json.loads(json_input)
        # A list of tensor holder for columns
        column_tensors={}
        # Access data
        for x in decoded['column']:
            print( x['name'])
            if x['data_type'] == 'continous':
                column_tensors["tensor_{0}".format(x['name'])] = tf.contrib.layers.real_valued_column(x['name'])
            if x['data_type'] == 'categorical':
                # hash_bucket_size here can be improved/adjusted based on database size
                # can do preprocessing on column categorical value count
                column_tensors["tensor_{0}".format(x['name'])] = tf.contrib.layers.sparse_column_with_hash_bucket(x['name'], hash_bucket_size=1000)
        return column_tensors
    except (ValueError, KeyError, TypeError):
        print( "JSON format error")
def read_column_from_db(df, CONTINUOUS_COLUMNS, CATEGORICAL_COLUMNS):
    """Convert a pandas DataFrame into TF feature tensors plus a label tensor.

    df                  : pandas DataFrame holding all feature/label columns.
    CONTINUOUS_COLUMNS  : column names turned into dense constant tensors.
    CATEGORICAL_COLUMNS : column names turned into one-value-per-row SparseTensors.

    NOTE(review): `LABEL_COLUMN` below is a free (module-level) name that is
    not defined anywhere in this file — calling this function will raise
    NameError unless it is supplied elsewhere; confirm where it comes from.
    """
    # Creates a dictionary mapping from each continuous feature column name (k) to
    # the values of that column stored in a constant Tensor.
    continuous_cols = {k: tf.constant(df[k].values)
                       for k in CONTINUOUS_COLUMNS}
    # Creates a dictionary mapping from each categorical feature column name (k)
    # to the values of that column stored in a tf.SparseTensor.
    categorical_cols = {k: tf.SparseTensor(
        indices=[[i, 0] for i in range(df[k].size)],
        values=df[k].values,
        dense_shape=[df[k].size, 1])
        for k in CATEGORICAL_COLUMNS}
    # Merges the two dictionaries into one.
    feature_cols = continuous_cols.copy()
    feature_cols.update(categorical_cols)
    # Specify a column
    label = tf.constant(df[LABEL_COLUMN].values)
    # Returns the feature columns and the label.
    return feature_cols, label
def train_linear_classifier(feature_columns, target_column_type):
# Specify a directory to store the model
model_dir = './tmp'
if target_column_type == "categorical":
model = tf.contrib.learn.LinearClassifier(feature_columns = feature_columns, model_dir = model_dir)
if target_column_type == "continous":
model = tf.contrib.learn.DNNRegressor(feature_columns=feature_columns, hidden_units=[1024, 512, 256])
return model
if __name__ == "__main__":
| true |
2bd23a4bcb9b59a2e91bbf8968e8446a336a587e | Python | ivoryRabbit/S3-Rec | /models.py | UTF-8 | 11,120 | 2.578125 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Embedding, Dense
from layers import Encoder, PositionEncoder
class S3Rec(tf.keras.Model):
    """S3-Rec sequential-recommendation backbone.

    A transformer `Encoder` (from the project-local `layers` module) over
    position-encoded item-id sequences. Padded positions use id 0 (masked).
    Training in `train_step` uses a pairwise (positive vs. negative target)
    loss supplied via `self.loss`; evaluation in `test_step` reports HR@10 and
    NDCG@10 assuming the relevant candidate sits at index 0.
    """

    def __init__(
        self,
        n_user,
        n_item,
        max_item_len,
        latent_dim,
        n_layer,
        n_head,
        n_ffn_unit, # the number of units for feed forward network
        dropout_rate=0.0,
        epsilon=1e-3,
    ):
        super(S3Rec, self).__init__()
        self.n_user = n_user
        self.n_item = n_item
        self.max_item_len = max_item_len
        self.latent_dim = latent_dim
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ffn_unit = n_ffn_unit
        self.dropout_rate = dropout_rate
        self.epsilon = epsilon
        # item ids are 1..n_item; 0 is the padding id (mask_zero=True)
        self.item_embedding_layer = Embedding(
            n_item + 1,
            latent_dim,
            mask_zero=True, # mask value = 0
            input_length=max_item_len,
            name="item_embedding",
        )
        self.position_encoding_layer = PositionEncoder()
        self.item_encoding_layer = Encoder(n_layer, n_head, n_ffn_unit, dropout_rate, epsilon)
        # following two lines are just for activating "summary" attribute
        _ = self.call(Input(shape=(self.max_item_len,)), training=True)
        self.build(input_shape=(None, self.max_item_len))

    def call(self, inputs, mask=None, training=False):
        """Encode item-id sequences; defaults to the padding-only attention mask."""
        item_embed = self.item_embedding_layer(inputs)
        embed = self.position_encoding_layer(item_embed)
        if mask is None:
            mask = self.get_attn_mask(inputs)
        return self.item_encoding_layer(embed, mask, training=training)

    def get_score(self, outputs, targ):
        """
        get similarities between the representatives of sequences and the embeddings of target items
        """
        embed = self.item_embedding_layer(targ) # (batch_size, max_item_len, latent_dim)
        return tf.reduce_sum(
            tf.multiply(outputs, embed), axis=-1
        ) # inner product w.r.t max_item_len (batch_size, max_item_len)

    def train_step(self, data):
        """One step of pairwise training on (sequence, positive, negative) triples."""
        item, pos_targ, neg_targ = data
        look_mask = self.get_look_mask(item) # mask all next items
        loss_mask = self.get_mask(item) # ignore zero paddings
        with tf.GradientTape() as tape:
            outputs = self(item, look_mask, training=True)
            pos_score = self.get_score(outputs, pos_targ)
            neg_score = self.get_score(outputs, neg_targ)
            # `self.loss` is expected to accept (pos, neg, mask) — set via compile()
            loss = tf.reduce_sum(self.loss(pos_score, neg_score, loss_mask), axis=0)
        grad = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grad, self.trainable_variables))
        return {"loss": loss}

    def test_step(self, data):
        """Rank candidates against the last-position representation; report HR@10 / NDCG@10."""
        query, cand = data
        batch_size = tf.shape(query)[0]
        output = self(query)[:, -1, :]
        cand_embed = self.item_embedding_layer(cand)
        score = tf.einsum("nh,nlh->nl", output, cand_embed)
        k = 10
        top_k = tf.argsort(score, axis=1, direction="DESCENDING")[:, :k]
        rel = tf.cast(
            top_k == 0, tf.float32
        ) # relevant item is located at "0". remark valid_generator function
        batch_size = tf.cast(batch_size, tf.float32)
        # DCG position discount 1/log2(rank+1) for ranks 1..k
        weight = np.reciprocal(np.log2(np.arange(2, k + 2)))
        weight = tf.constant(weight, dtype=tf.float32)
        HR = tf.reduce_sum(rel) / batch_size
        NDCG = tf.reduce_sum(rel * weight) / batch_size
        return {f"HR@{k}": HR, f"NDCG@{k}": NDCG}

    def get_mask(self, inputs): # calculating loss, ignore zero paddings
        return tf.cast(tf.math.greater(inputs, 0), dtype=tf.float32)

    def get_attn_mask(self, inputs): # mask zero paddings for MultiHeadAttention
        attn_mask = 1.0 - self.get_mask(inputs)
        return attn_mask[:, None, None, :] # (batch_size, 1, 1, max_item_len)

    def get_look_mask(self, inputs): # mask all next items for MultiHeadAttention
        attn_mask = 1.0 - self.get_attn_mask(inputs)
        ltri_mask = tf.ones((1, 1, self.max_item_len, self.max_item_len), dtype=tf.float32)
        ltri_mask = tf.linalg.band_part(ltri_mask, -1, 0) # lower triangular matrix
        return 1.0 - (attn_mask * ltri_mask) # (batch_size, 1, max_item_len, seq_len)

    def get_item_embedding(self):
        """Return embeddings for all real item ids 1..n_item (padding id 0 excluded)."""
        total_idx = tf.range(1, self.n_item + 1)
        return self.item_embedding_layer(total_idx)
class AAP_model(tf.keras.Model):
    """Associated Attribute Prediction (AAP) pre-training head.

    Projects raw item embeddings (shared with the base S3Rec model) through a
    dense layer and scores them against all attribute embeddings with a
    multi-label binary cross-entropy loss, weighted by `loss_weight`.
    """

    def __init__(self, n_attr, base_model, attr_embedding_layer, loss_weight):
        super(AAP_model, self).__init__()
        self.n_attr = n_attr
        # shared with the base model so pre-training updates its embeddings
        self.item_embedding_layer = base_model.item_embedding_layer
        self.attr_embedding_layer = attr_embedding_layer
        self.loss_weight = loss_weight
        self.W_aap = Dense(base_model.latent_dim, name="AAP_dense")

    def call(self, inputs):
        """Project item embeddings of `inputs` (item ids) into attribute space."""
        e_i = self.item_embedding_layer(inputs)
        return self.W_aap(e_i)

    def get_attr_embedding(self):
        """Embeddings for all real attribute ids 1..n_attr (padding id 0 excluded)."""
        total_idx = tf.range(1, self.n_attr + 1)
        return self.attr_embedding_layer(total_idx)

    def get_score(self, outputs):
        """Logits of every attribute for each projected item representation."""
        attr_embed = (
            self.get_attr_embedding()
        )  # the complement set of A_i == the negative samples of item i
        return tf.matmul(
            outputs, attr_embed, transpose_b=True
        )  # (batch_size, max_item_len, n_attr)

    def get_loss(self, pos, neg):
        """Element-wise BCE-from-logits between attribute labels and logits."""
        bce = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return bce(pos, neg)

    def train_step(self, data):
        """One AAP step on (item ids, multi-hot attribute labels)."""
        item, attr = data
        with tf.GradientTape() as tape:
            outputs = self(item)
            score = self.get_score(outputs)
            loss = self.loss_weight * tf.reduce_sum(self.get_loss(attr, score), axis=0)
        grad = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grad, self.trainable_variables))
        return {"loss": loss}
class MIP_model(tf.keras.Model):
    """Masked Item Prediction (MIP) pre-training head.

    Encodes a masked sequence with the shared base model, projects it with a
    dense layer, and applies a BPR loss between positive and negative target
    items at each (non-padding) position, weighted by `loss_weight`.
    """

    def __init__(self, base_model, loss_weight):
        super(MIP_model, self).__init__()
        self.base_model = base_model
        self.loss_weight = loss_weight
        self.W_mip = Dense(base_model.latent_dim, name="MIP_dense")

    def call(self, inputs, training=False):
        """Encode `inputs` with the base model and project per-position outputs."""
        f_t = self.base_model(inputs, training=training)
        return self.W_mip(f_t)

    def get_score(self, outputs, targ):
        """Per-position inner product between projections and target embeddings."""
        e_i = self.base_model.item_embedding_layer(targ) # (batch_size, max_item_len, latent_dim)
        return tf.reduce_sum(
            tf.multiply(outputs, e_i), axis=-1
        ) # inner product (batch_size, max_item_len)

    def get_loss(self, pos, neg, mask):
        """Masked mean of the negative BPR log-likelihood per sequence."""
        bpr = K.log(tf.nn.sigmoid(pos - neg)) # (batch_size, max_item_len)
        # bce = K.log(tf.nn.sigmoid(pos)) + K.log(tf.nn.sigmoid(1-neg))
        return -tf.reduce_sum(bpr * mask, axis=1) / tf.reduce_sum(mask, axis=1) # (batch_size, )

    def train_step(self, data):
        """One MIP step on (masked sequence, positive targets, negative targets)."""
        masked, pos_targ, neg_targ = data
        loss_mask = self.base_model.get_mask(pos_targ)
        with tf.GradientTape() as tape:
            outputs = self(masked, training=True)
            pos_score = self.get_score(outputs, pos_targ)
            neg_score = self.get_score(outputs, neg_targ)
            loss = self.loss_weight * tf.reduce_sum(
                self.get_loss(pos_score, neg_score, loss_mask), axis=0
            )
        grad = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grad, self.trainable_variables))
        return {"loss": loss}
class MAP_model(tf.keras.Model):
    """Masked Attribute Prediction (MAP) pre-training head.

    Encodes a sequence with the shared base model, projects each position, and
    scores it against all attribute embeddings with a masked multi-label BCE
    loss, weighted by `loss_weight`.
    """

    def __init__(self, n_attr, base_model, attr_embedding_layer, loss_weight):
        super(MAP_model, self).__init__()
        self.n_attr = n_attr
        self.base_model = base_model
        self.attr_embedding_layer = attr_embedding_layer
        self.loss_weight = loss_weight
        self.W_map = Dense(base_model.latent_dim, name="MAP_dense")

    def call(self, inputs, training=False):
        """Encode `inputs` with the base model and project per-position outputs."""
        f_t = self.base_model(inputs, training=training)
        return self.W_map(f_t) # (batch_size, max_item_len, n_attr)

    def get_attr_embedding(self):
        """Embeddings for all real attribute ids 1..n_attr (padding id 0 excluded)."""
        total_idx = tf.range(1, self.n_attr + 1)
        return self.attr_embedding_layer(total_idx) # (n_attr, latent_dim)

    def get_score(self, outputs):
        """Attribute logits for every sequence position."""
        attr_embed = self.get_attr_embedding()
        return tf.matmul(
            outputs, attr_embed, transpose_b=True
        ) # (batch_size, max_item_len, n_attr)

    def get_loss(self, true, pred, mask):
        """Masked mean BCE-from-logits per sequence."""
        bce = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return tf.reduce_sum(bce(true, pred) * mask, axis=1) / tf.reduce_sum(mask, axis=1)

    def train_step(self, data):
        """One MAP step on (item ids, per-position multi-hot attribute labels)."""
        item, attr = data
        loss_mask = self.base_model.get_mask(item)
        with tf.GradientTape() as tape:
            outputs = self(item, training=True)
            score = self.get_score(outputs)
            loss = self.loss_weight * tf.reduce_sum(self.get_loss(attr, score, loss_mask), axis=0)
        grad = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grad, self.trainable_variables))
        return {"loss": loss}
class SP_model(tf.keras.Model):
    """Segment Prediction (SP) pre-training head.

    Compares the last-position representation of a masked sequence against the
    last-position representations of a positive and a negative segment using a
    BPR loss, weighted by `loss_weight`.
    """

    def __init__(self, base_model, loss_weight):
        super(SP_model, self).__init__()
        self.base_model = base_model
        self.loss_weight = loss_weight
        self.W_sp = Dense(base_model.latent_dim, name="SP_dense")

    def call(self, inputs, training=False):
        """Project the base model's last-position encoding of `inputs`."""
        s = self.base_model(inputs, training=training)[:, -1, :] # the last position in a sequence
        return self.W_sp(s) # (batch_size, latent_dim)

    def get_score(self, outputs, seg, training=False):
        """Inner product between the query projection and a segment's encoding."""
        s_til = self.base_model(seg, training=training)[:, -1, :] # the last position in a sequence
        return tf.reduce_sum(tf.multiply(outputs, s_til), axis=1) # inner product (batch_size, )

    def get_loss(self, pos, neg):
        """Negative BPR log-likelihood of scoring pos above neg."""
        bpr = K.log(tf.nn.sigmoid(pos - neg))
        # bce = K.log(tf.nn.sigmoid(pos)) + K.log(tf.nn.sigmoid(1-neg))
        return -bpr # (batch_size, )

    def train_step(self, data):
        """One SP step on (masked sequence, positive segment, negative segment)."""
        masked, pos_seg, neg_seg = data
        with tf.GradientTape() as tape:
            outputs = self(masked, training=True)
            pos_score = self.get_score(outputs, pos_seg, training=True)
            neg_score = self.get_score(outputs, neg_seg, training=True)
            loss = self.loss_weight * tf.reduce_sum(self.get_loss(pos_score, neg_score), axis=0)
        grad = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grad, self.trainable_variables))
        return {"loss": loss}
| true |
f09da72e16632a94e15b69c1fb4c9ba1f194c7ba | Python | lb123456789/mygit | /connect1/TCPclient.py | UTF-8 | 485 | 2.65625 | 3 | [] | no_license | from socket import *
# Simple TCP client: connects to a local server and repeatedly sends a fixed
# message, printing each reply and an iteration counter, until the server
# closes the connection.
serverName = '127.0.0.1'
serverPort = 11000
BUFSIZ = 1024  # receive buffer size in bytes
ADDR = (serverName,serverPort)
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect(ADDR)
i=1
while True:
    data = "client message"
    # NOTE(review): `data` is a non-empty constant, so this guard can never fire.
    if not data:
        break
    clientSocket.send(data.encode('utf-8'))
    returnData = clientSocket.recv(BUFSIZ)
    # recv() returning b'' means the server closed the connection
    if not returnData:
        break
    print('Return time is:%s' %returnData.decode('utf-8'))
    print(i)
    i=i+1
clientSocket.close()
ff0f74f8d35fe37883665e352427cc0cb1bdace7 | Python | MakeSchool-17/sorting-algorithms-python-ignat980 | /csv_parser.py | UTF-8 | 595 | 3.84375 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | import csv
import sys
def parse(file):
    """Parse *file* as CSV: the first row is treated as the header row.

    Returns {'headers': <list of column names>, 'rows': <list of data rows>}.
    Raises StopIteration if the file is completely empty, as the original did.
    """
    with open(file) as handle:
        reader = csv.reader(handle)
        headers = next(reader)   # first line holds the column names
        rows = list(reader)      # everything after it is data
    return {'headers': headers, 'rows': rows}
if __name__ == "__main__":
    # CLI usage: python csv_parser.py <path-to-csv>
    file = sys.argv[1]  # NOTE(review): shadows the builtin name `file` on Python 2
    data = parse(file)
    print("Parsing {}...".format(file))
    print()
    print("Headers: {}".format(', '.join(data['headers'])))
    print("Row count: {}".format(len(data['rows'])))
5527633378856ffd568da48d6d3be9c6e0b7593c | Python | M-Vause/SEED | /Algorithms/pySINDy/env/lib/python3.6/site-packages/pylint/test/functional/too_many_arguments.py | UTF-8 | 392 | 3.15625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | # pylint: disable=missing-docstring
def stupid_function(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9): # [too-many-arguments]
    return arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
class MyClass:
    text = "MyText"
    def mymethod1(self):
        return self.text
    def mymethod2(self):
        return self.mymethod1.__get__(self, MyClass)
MyClass().mymethod2()()
# NOTE(review): pylint functional-test fixture — the excessive parameter list is
# intentional and the "# [too-many-arguments]" marker is the expected-message
# anchor checked by the test harness; do not "fix" this file or shift its lines.
bb78114d2a904e5ebe6f74fd80ffbb6014b30bca | Python | liuwenye2010/prj_python | /csv2wave/csv2wave.py | UTF-8 | 6,052 | 3.0625 | 3 | [] | no_license | """Load & convert data (should be decimal) from CSV file and convert into Mono Wave file"""
import csv
import wave
import struct
import math
from collections import namedtuple
from datetime import datetime
from pprint import pprint
import os
import glob
import argparse
import os.path
import sys
default_file_to_convert = "audio.csv"
column_name_to_parse = "MOSI"
def cmd_parser():
    """Parse command-line options, apply defaults, and run the CSV->WAV conversion.

    Options: -i input CSV, -c column name (sets the module-level
    `column_name_to_parse`), -s sample rate (default 16000 Hz),
    -w sample width in bytes (default 2 = 16-bit). Exits with -1 on an
    invalid width, a missing input file, or a keyboard interrupt.
    """
    global column_name_to_parse
    global default_file_to_convert
    parser = argparse.ArgumentParser(description="This program load one column data (should be decimal) from CSV file and convert into one mono wave file")
    parser.add_argument("-i", dest="input" ,required=False, help="input file name (e.g audio.csv)")
    parser.add_argument("-c", dest="column_name" ,required=False, help="column name to parse (e.g MOSI)")
    parser.add_argument("-s", dest="sample_rate" ,required=False, help="sample rate for wave (e.g 16000)")
    parser.add_argument("-w", dest="sample_width" ,required=False, help="sample width for wave (1 = 8 bits, 2 = 16, 3 = invalid, 4 = 32)")
    #parser.add_argument("-o", dest="output" ,required=False, help="output file name")
    #parser.add_argument("-t", dest="file_type" ,required=True, help="Option that for the file's suffix , e.g py")
    args = parser.parse_args()
    input_file_name = args.input
    column_name = args.column_name
    sample_rate = args.sample_rate
    sample_width = args.sample_width
    #output_file_name = args.output
    #file_type = args.file_type
    #print("file_type:{0}".format(file_type))
    #print("input_file_name:{0}".format(input_file_name))
    #print("output_file_name:{0}".format(output_file_name))
    if input_file_name is not None:
        pass
    else:
        input_file_name = default_file_to_convert
        print("[Warning] not input argument for input_file_name found, set input as default ==> {0}".format(default_file_to_convert))
    if column_name is not None:
        column_name_to_parse = str(column_name)
        print("column_name_to_parse is {0}".format(column_name_to_parse))
    else:
        print("[Warning] not input argument for column_name found, set column_name as default ==> {0}".format(column_name_to_parse))
    if sample_rate is not None:
        sample_rate = int(sample_rate)
        pass
    else:
        sample_rate = 16000 # 16KHz
    if sample_width is not None:
        # NOTE(review): '0' is accepted by this whitelist even though it is not
        # a usable sample width — confirm whether it should be rejected.
        if sample_width not in ['0', '1','2', '4'] :
            print("ERROR: Invalid sample_width parameter value")
            parser.print_help()
            sys.exit(-1)
        else:
            sample_width = int(sample_width)
            print("sample_width:{0}".format(sample_width))
    else:
        sample_width = 2 # 16bit
    print("[INFO] column_name_to_parse is {0}".format(column_name_to_parse))
    print("[INFO] sample_width is {0}".format(sample_width))
    print("[INFO] sample_rate is {0}".format(sample_rate))
    # output path is always "<input>.wav" next to the input file
    output_file_name = input_file_name + '.wav'
    print("[INFO] input is {0}, output is {1}".format(input_file_name,output_file_name))
    if os.path.isfile(input_file_name):
        try:
            csv2wave_mono(sample_rate,sample_width,input_file_name, output_file_name) # 16KHz/16bits
        except KeyboardInterrupt:
            print("[INFO] User Key Interrupt exit")
            sys.exit(-1)
        else:
            pass
    else:
        print("\n[ERROR]:input file {0} is not exist !\n".format(input_file_name))
        parser.print_help()
        sys.exit(-1)
    print("Done")
def parse_timestamp(text):
    """Parse a 'YYYY-MM-DD HH:MM:SS' timestamp string into a datetime."""
    timestamp_format = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(text, timestamp_format)
def iter_records(file_name, column_name):
    """Yield one {column_name: int_value} dict per data row of the CSV file.

    Only the named column is extracted; its value is converted with int(),
    so the column is expected to hold decimal integers.
    """
    Column = namedtuple('Column', 'src dest convert')
    # Single-column spec; kept as a list so more columns could be added later.
    column_specs = [Column(column_name, column_name, int)]
    with open(file_name, 'rt') as fp:
        for csv_record in csv.DictReader(fp):
            yield {spec.dest: spec.convert(csv_record[spec.src]) for spec in column_specs}
def csv2wave_mono(sampleRate,sampleWidth,input_file, output_file):
    """Convert one CSV column (byte values, one per row) into a mono WAV file.

    For widths 2 and 4, `sampleWidth` consecutive CSV rows are assembled into
    one little-endian sample (row order = least-significant byte first) via
    the `pad_j` byte counter; width 1 packs each row directly as a signed byte.

    NOTE(review): the 32-bit sign conversion subtracts 0x7FFFFFFF, but two's
    complement requires subtracting 0x100000000 (compare the correct 16-bit
    branch, which subtracts 65536) — verify before trusting 32-bit output.
    NOTE(review): 8-bit WAV is conventionally unsigned, yet '<b' packs a
    signed byte here — confirm intended encoding.
    """
    wavef = wave.open(output_file,'w')
    wavef.setnchannels(1) # 1: mono
    wavef.setsampwidth(sampleWidth) #sampleWidth -- size of data: 1 = 8 bits, 2 = 16, 3 = invalid, 4 = 32, etc...
    wavef.setframerate(sampleRate)
    pad_j = 0        # index of the byte currently being accumulated (0 = LSB)
    record_data = 0  # sample value assembled so far
    for i, record in enumerate(iter_records(input_file,column_name_to_parse)):
        #if i >= 10:
        # break
        #pprint(record)
        if (sampleWidth ==4 ):
            if(pad_j == 0):
                record_data = int(record[column_name_to_parse])
                pad_j = pad_j + 1
                continue
            elif (pad_j == 3):
                # final (most significant) byte: finish the 32-bit sample
                record_data = int(record[column_name_to_parse]) *(256**pad_j) + record_data
                if (record_data> 0x7FFFFFFF):
                    record_data = record_data - 0x7FFFFFFF
                data = struct.pack('<i', record_data)
                pad_j = 0
            else:
                record_data = int(record[column_name_to_parse])*(256**pad_j) + record_data
                pad_j = pad_j + 1
                continue
        elif (sampleWidth == 2 ):
            if(pad_j == 0):
                record_data = int(record[column_name_to_parse])
                pad_j = pad_j + 1
                continue
            else:
                # second byte: finish the 16-bit sample, convert to signed
                record_data = int(record[column_name_to_parse]) *(256**pad_j) + record_data
                if (record_data> 32767):
                    record_data = record_data - 65536
                data = struct.pack('<h', record_data)
                pad_j = 0
        elif (sampleWidth == 1 ):
            data = struct.pack('<b', record[column_name_to_parse])
        else:
            print("\n[ERROR]:Unsupported sampleWidth: {0} !\n".format(sampleWidth))
            sys.exit(-1)
        wavef.writeframesraw( data )
    wavef.close()
# Script entry point: parse CLI options and run the conversion.
if __name__ == "__main__":
    cmd_parser()
| true |
fb9dbf1635b9def0e51410fb3cac75e6e6aa7cce | Python | mtn/advent17 | /day03/part2.py | UTF-8 | 611 | 2.734375 | 3 | [] | no_license | from collections import defaultdict
# Advent of Code 2017, day 3, part 2: walk the square spiral outward from the
# origin, writing into each cell the sum of its already-filled neighbours, and
# print the first value written that reaches the puzzle input.
inp = 312051
grid = defaultdict(lambda: defaultdict(int))
grid[0][0] = 1  # spiral seed value
N = 10  # half-extent of the simulated square; N**2 steps suffice for this input
x = y = 0
dx = 0
dy = -1
for i in range(N ** 2):
    if -N/2 < x <= N/2 and -N/2 < y <= N/2:
        # sum all 8 neighbours (unfilled cells default to 0)
        for ddy in [-1, 0, 1]:
            for ddx in [-1, 0, 1]:
                if ddy == ddx == 0:
                    continue  # skip the cell itself
                grid[y][x] += grid[y+ddy][x+ddx]
        if grid[y][x] >= inp:
            print(grid[y][x])
            exit()
    # turn left at the spiral's corner cells (classic square-spiral walk)
    if x == y or (x < 0 and x == -y) or (x > 0 and x == 1 - y):
        dx, dy = -dy, dx
    x, y = x + dx, y + dy
57ba022f84fbcf7b6bf17b49d0cc89d7b4b86e2f | Python | Hirochon/django-drf | /book/models.py | UTF-8 | 1,056 | 2.515625 | 3 | [] | no_license | from django.db import models
import uuid
from django.utils import timezone
class Book(models.Model):
    """Book model (UUID primary key; verbose_name values are Japanese UI labels)."""
    class Meta:
        db_table = 'book'
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # verbose_name 'タイトル' = "title"; titles must be unique
    title = models.CharField(verbose_name='タイトル', unique=True, max_length=20)
    # verbose_name '価格' = "price"; nullable (price may be unknown)
    price = models.IntegerField(verbose_name='価格', null=True)
    created_at = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.title
class BookStock(models.Model):
    """Book stock model: one-to-one with Book, tracking copies on hand."""
    class Meta:
        db_table = 'bookstock'
    # verbose_name '本' = "book"; deleting the Book cascades to its stock row
    book = models.OneToOneField(Book, verbose_name='本', on_delete=models.CASCADE)
    # verbose_name '在庫数' = "stock quantity"
    quantity = models.IntegerField(verbose_name='在庫数', default=0)
    def __str__(self):
        # e.g. "<title> (N冊) <uuid>"; '冊' is the Japanese counter word for books
        return self.book.title + ' (' + str(self.quantity) + '冊)' + ' <' + str(self.book.id) + '>'
# class Publisher(models.Model):
# """出版社モデル"""
# class Meta:
# db_table = 'publisher'
# id = models.UUIDField() | true |
bf85764b558d5534c458c38e513ec4e884d8e0a0 | Python | Anirud2002/flappybird | /try.py | UTF-8 | 42 | 3.09375 | 3 | [] | no_license | list = []
list.extend((2,3))
print(list) | true |
6d5b5a20632ab36f25cb7684fc93fa2db1579d65 | Python | shubham-dixit-au7/test | /assignments/Week06/Day01_(24-02-2020)/Ques2.py | UTF-8 | 2,343 | 3.96875 | 4 | [] | no_license | #Question- Implement Queues using Stacks
#Answer-
class Node:
    """Singly-linked-list node holding `data` and a `next` pointer."""

    def __init__(self, data):
        # BUG FIX: the constructor was named `_init_` (single underscores), so
        # it never ran — Node(x) raised TypeError and left no attributes.
        self.data = data
        self.next = None
class LinkedList:
    """Minimal singly linked list used as stack storage (push/pop at the head)."""

    def __init__(self):
        # BUG FIX: the constructor was named `_init_` (single underscores), so
        # `self.head` was never created and push()/pop() raised AttributeError.
        self.head = None
        # self.end = None

    def push(self, data):
        """Prepend *data* at the head in O(1)."""
        # Both branches of the original method were identical; collapsed to one.
        temp_node = Node(data)
        temp_node.next = self.head
        self.head = temp_node

    def pop(self):
        """Detach and return the head *node* (not its data); None when empty."""
        if self.head is None:
            return None
        temp_node = self.head
        self.head = self.head.next
        return temp_node
class Stack:
    """Bounded LIFO stack backed by a LinkedList."""

    def __init__(self, max_size=100):
        # BUG FIX: the constructor was named `_init_` (single underscores), so
        # Stack() had none of these attributes and every later method call
        # failed with AttributeError.
        self.stack = LinkedList()
        self.max_size = max_size
        self.curr_size = 0

    def push(self, data):
        """Push *data* unless the stack is at max_size (then drop silently)."""
        if self.max_size > self.curr_size:
            self.stack.push(data)
            self.curr_size += 1

    def pop(self):
        """Pop and return the head *node* (not its data); None when empty."""
        if self.curr_size <= 0:
            return None
        x = self.stack.pop()
        self.curr_size -= 1
        return x

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return self.curr_size <= 0
class Queue:
    """FIFO queue built from two stacks (amortised O(1) per operation).

    `deque()` returns a Node whose `.data` is the enqueued value (or None when
    the queue is empty), matching the original call sites.
    """

    def __init__(self):
        # BUG FIX: the stacks were *class* attributes, so every Queue instance
        # (and the class itself) shared one pair of stacks — two queues would
        # interleave their elements. Made per-instance.
        self.push_stack = Stack()
        self.pop_stack = Stack()

    def enqueue(self, data):
        """Append *data* at the back of the queue."""
        self.push_stack.push(data)

    def deque(self):
        """Remove and return the front element's Node, or None when empty."""
        if self.pop_stack.isEmpty():
            if self.push_stack.isEmpty():
                return None
            # Refill: move everything from the inbox stack so order reverses
            # into FIFO order on the outbox stack.
            for _ in range(self.push_stack.curr_size):
                x = self.push_stack.pop()
                self.pop_stack.push(x)
        result = self.pop_stack.pop()
        return result.data
# Demo driver: enqueue two batches of integers and dequeue after each batch,
# exercising the two-stack queue above (values must come out in FIFO order).
que = Queue()
list1 = "5 11 5 6 2 3"  # first batch, space-separated integers
for i in list1.split():
    print("Enqueueing : %d" % int(i))
    que.enqueue(int(i))
print()
for _ in range(4):
    x = que.deque()  # returns a Node (or None when the queue is empty)
    if x:
        print("De-queueing : %d" % x.data)
    else:
        print("De-queueing : Queue is Empty")
        break
print()
list1 = "8 4 9 5 7"  # second batch enqueued behind the leftovers
for i in list1.split():
    print("Enqueueing : %d" % int(i))
    que.enqueue(int(i))
print()
for _ in range(20):
    x = que.deque()  # 20 > remaining elements, so the Empty branch is reached
    if x:
        print("De-queueing : %d" % x.data)
    else:
        print("De-queueing : Queue is Empty")
        break
5089ec100b6c536ec984eed06399b5cd951d6e2f | Python | JannaKim/PS | /dp/14501_퇴사On이해후다시.py | UTF-8 | 682 | 2.890625 | 3 | [] | no_license | N = int(input())
cn = [0]
for _ in range(N):
cn.append([int(i) for i in input().split()])
cn += [[0,0]]
#dp[i]: i일 '전'에 끝나는 최상의 스케쥴
dp = [0]+ [0]*(N+2)
for i in range(1,N+2):
if i+cn[i][0]<=N+1: # N+1일 '전'까지 끝날 수 있는 스케쥴만.
dp[i+cn[i][0]]= max(dp[i+cn[i][0]], dp[i]+cn[i][1])
# i+1 날은 더이상 비교할, i+1일 '직전'에 맞춰 끝나는 스케쥴이 없다.
# i+1일 직전에 맞추지 않아도 더 많은 금액을 받을 수 있는 스케쥴이 있을 수 있으므로
#dp[i+1]=max(dp[1:i+2])
dp[i+1]=max(dp[i+1],dp[i])
print(max(dp[:N+2]))
'''
7
3 10
5 20
1 10
1 20
2 15
4 40
2 200
''' | true |
783a17b76579dc624a8734c40698e808ca44ac31 | Python | MarioPezzan/ExerciciosGuanabaraECurseraPyCharm | /Exercícios curso em video/Exercicios/ex031.py | UTF-8 | 220 | 3.75 | 4 | [] | no_license | km = float(input('Quantos quilometros você viajará? '))
print(f'Você iniciará uma viagem de {km}Km')
if km >= 200:
print(f'E terá que pagar R${km*0.45:.2f}')
else:
print(f'E terá que pagar R${km*0.50:.2f}') | true |