blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e8b19cb400c858bc28d1ac1b33e8a1b3f7192903
|
Python
|
j-rossi-nl/coliee-2019
|
/Task_01/dataset_with_high_scores.py
|
UTF-8
| 1,259
| 2.765625
| 3
|
[] |
no_license
|
"""
"""
import pandas as pd
INPUT_FILE = 'ranking/v3_train_scores.txt'
WITH_QRELS = 'text_summarized_200.csv'
def main():
    """Join model scores onto the qrels data, keep high-confidence pairs
    (score > 0.9), and split them into train/test CSVs by case id."""
    scores = pd.read_csv(INPUT_FILE).set_index(['case_id', 'candidate_id'])
    original = pd.read_csv(WITH_QRELS).set_index(['case_id', 'candidate_id'])
    keep_cols = ['case_id', 'candidate_id', 'case_text', 'candidate_text', 'score', 'candidate_is_noticed']
    full_mix = scores.join(original).reset_index()[keep_cols]
    high_scores = full_mix[full_mix['score'] > 0.9].drop(columns=['score'])
    # Restrict to only the test cases
    test_cases = pd.read_csv('test_cases_id.csv', names=['case_id'], header=None).set_index('case_id')
    high_test = high_scores.set_index('case_id').join(test_cases, how='inner').reset_index()
    in_test = high_scores['case_id'].isin(test_cases.reset_index()['case_id'])
    high_train = high_scores[~in_test].reset_index()
    high_test.to_csv('HIGH_test.csv', index=False)
    high_train.to_csv('HIGH_train.csv', index=False)
    high_scores.to_csv('HIGH_all.csv', index=False)
    print('Train: {} Test: {}'.format(high_train.shape, high_test.shape))
    print('Train:\n{}\nTest:\n{}'.format(high_train['candidate_is_noticed'].value_counts(), high_test['candidate_is_noticed'].value_counts()))

if __name__ == '__main__':
    main()
| true
|
105f437782b818e66122b84088377d775b1b04a8
|
Python
|
1997priyam/Data-Structures
|
/arrays&mix/countandsay.py
|
UTF-8
| 633
| 4.40625
| 4
|
[] |
no_license
|
"""
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
"""
def countAndSay(A):
    """Return the A-th term of the count-and-say sequence (1-indexed).

    Each term describes the previous one as runs of identical digits:
    1 -> 11 ("one 1") -> 21 -> 1211 -> 111221 -> ...
    """
    term = "1"
    for _ in range(A - 1):
        pieces = []
        run_char = term[0]
        run_len = 0
        for ch in term:
            if ch == run_char:
                run_len += 1
            else:
                # Run ended: emit "<count><digit>" and start a new run.
                pieces.append(str(run_len) + run_char)
                run_char = ch
                run_len = 1
        pieces.append(str(run_len) + run_char)  # flush the final run
        term = "".join(pieces)
    return term
# Demo driver: prompts on stdin at import/run time and prints the A-th term.
a = int(input("Enter a number: "))
print(countAndSay(a))
| true
|
0986876ba4b10cfe40a4acb6acb4c2b08ee379fd
|
Python
|
ZhengPeng7/MaoJuXiWu
|
/OS_final_project/FCFS_and_SJF.py
|
UTF-8
| 3,913
| 3.171875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/python3.5
## Modify the SJF and FCFS algorithm in the topic of dealing with jobs.
job_num = 5
class Job():
    # Job record for the scheduling simulation (translated from: 定义作业).
    # arr_time -- arrival time; sev_time -- service (CPU burst) time;
    # cpt_time -- completion time; wghted_run_time -- turnaround / service.
    def __init__(self, arr_time=-1, sev_time=-1, cpt_time=-1, wghted_run_time=-1):
        self.arr_time = arr_time
        self.sev_time = sev_time
        self.cpt_time = cpt_time
        self.wghted_run_time = wghted_run_time
        # NOTE(review): computed from the placeholder cpt_time (-1) here; the
        # schedulers recompute run_time after assigning the real cpt_time.
        self.run_time = self.cpt_time - self.arr_time
# Initialize the sample job set (arrival time, service time).
job0 = Job(arr_time=0, sev_time=4)
job1 = Job(arr_time=1, sev_time=1)
job2 = Job(arr_time=4, sev_time=2)
job3 = Job(arr_time=5, sev_time=1)
job4 = Job(arr_time=6, sev_time=3)
# Idiom fix: collect the jobs directly instead of the fragile
# locals()['job' + str(i)] lookup, which breaks under renaming/refactoring.
job_list = [job0, job1, job2, job3, job4]
def FCFS(job_list):
    """First-Come-First-Served scheduling.

    Mutates each job in *job_list* in place (cpt_time, run_time,
    wghted_run_time), prints a per-job table plus the averages, and returns 0.

    Assumes job_list is ordered by arrival time and the CPU is never idle
    (each job has arrived before the previous one completes) -- true for the
    sample job set above.
    """
    print('NO.\t\tarr_time\tsev_time\tweighted_run_time')
    curr_cpt_time = 0
    for job in job_list:
        curr_cpt_time += job.sev_time        # completion = cumulative service
        job.cpt_time = curr_cpt_time
        job.run_time = job.cpt_time - job.arr_time
        job.wghted_run_time = job.run_time / job.sev_time
        # Bug fix: print sev_time in the third column as the header promises
        # (the original printed cpt_time there).
        print("job{}\t{}\t\t\t{}\t\t\t{}".format(str(job_list.index(job)), job.arr_time, job.sev_time, job.wghted_run_time))
    avg_run_time = sum(job.run_time for job in job_list) / len(job_list)
    # Bug fix: average *weighted* turnaround is the mean of the per-job
    # weighted turnaround times, not total turnaround / total service time.
    avg_wght_run_time = sum(job.wghted_run_time for job in job_list) / len(job_list)
    print('avg_run_time: ', avg_run_time)
    print('avg_wght_run_time: ', avg_wght_run_time)
    return 0
def SJF(job_list):
    # Shortest-Job-First scheduling, non-preemptive.  Destructively pops jobs
    # from job_list; the averages are computed over the backed-up copy.
    job_list_back_up = job_list.copy()
    print('NO.\t\tarr_time\tsev_time\tweighted_run_time')
    curr_cpt_time = 0
    curr_time = 0
    # Keep the initial arrival times so a finished job can be mapped back to
    # its original index for printing.
    # NOTE(review): index() on the arrival time misidentifies jobs when two
    # jobs share an arrival time -- confirm arrival times are unique.
    arr_time_list_initial = [i.arr_time for i in job_list.copy()]
    for n in range(len(job_list)):
        # Among the remaining jobs, find those already waiting (or arriving
        # exactly now) and pick the one with the shortest service time.
        arr_time_list = [i.arr_time for i in job_list]
        for i in range(len(arr_time_list)):
            arr_time_list[i] -= curr_time
            arr_time_list[i] = max(0, arr_time_list[i]) # already-waiting and just-arrived are treated alike (clamped to 0)
        min_list = [i for i in range(len(arr_time_list)) if arr_time_list[i] == min(arr_time_list)] # indices of the soonest-available jobs
        t_list = [job_list[i] for i in min_list] # the candidate jobs themselves
        ely_cpt_time = [i.sev_time for i in t_list] # their service times
        idx = ely_cpt_time.index(min(ely_cpt_time)) # pick the shortest service time among candidates
        curr_job = job_list.pop(min_list[idx]) # remove the selected job and update its accounting
        curr_job_no = arr_time_list_initial.index(curr_job.arr_time)
        curr_cpt_time += curr_job.sev_time
        curr_job.cpt_time = curr_cpt_time
        curr_job.wghted_run_time = (curr_job.cpt_time - curr_job.arr_time) / curr_job.sev_time
        curr_job.run_time = curr_job.cpt_time - curr_job.arr_time
        print("job{}\t{}\t\t\t{}\t\t\t{}".format(curr_job_no, curr_job.arr_time, curr_job.sev_time,
                                                 curr_job.wghted_run_time))
        curr_time += curr_job.sev_time
    avg_run_time = sum([i.run_time for i in job_list_back_up]) / len(job_list_back_up)
    # NOTE(review): this is total turnaround / total service time, not the
    # mean of per-job weighted turnaround times -- confirm which is intended.
    avg_wght_run_time = sum([i.run_time for i in job_list_back_up]) / sum([i.sev_time for i in job_list_back_up])
    print('avg_run_time: ', avg_run_time)
    print('avg_wght_run_time: ', avg_wght_run_time)
    return 0
# Interactive driver: keep prompting until a valid scheduler is chosen
# (or 'q' to quit), then run it once over the sample job list.
while True:
    algorithm_choice = input("FCFS or SJF?\n")
    if algorithm_choice == 'FCFS':
        FCFS(job_list)
        break
    elif algorithm_choice == 'SJF':
        SJF(job_list)
        break
    elif algorithm_choice == 'q':
        break
    else:
        print("Invalid choice, enter 'q' to quit.\n")
| true
|
093c265a12b28dcaa27b9876fc5a3998a29a3c0b
|
Python
|
gf234/python_problem_solving
|
/프로그래머스/JadenCase 문자열 만들기.py
|
UTF-8
| 476
| 3.25
| 3
|
[] |
no_license
|
def solution(s):
    """JadenCase: upper-case the leading letter of every word while
    preserving the exact spacing of *s* (words starting with a digit are
    left as-is, apart from lower-casing the rest)."""
    cased = []
    for word in s.split():
        lowered = word.lower()
        if lowered[0].isalpha():
            cased.append(lowered[0].upper() + lowered[1:])
        else:
            cased.append(lowered)
    # Rebuild the string: copy spaces verbatim, substitute each word run with
    # its cased version exactly once.
    out = []
    word_idx = 0
    at_word_start = True
    for ch in s:
        if ch == ' ':
            out.append(ch)
            at_word_start = True
        elif at_word_start:
            out.append(cased[word_idx])
            word_idx += 1
            at_word_start = False
    return ''.join(out)
| true
|
0e9dec060f2ff8d4a22d145c352abc43374fd031
|
Python
|
senthilknatesan/home-sales-weather
|
/sap1.py
|
UTF-8
| 2,552
| 3.0625
| 3
|
[] |
no_license
|
#############################################################################
# File Name: sap1.py
# Creats the median listings and sold data files to be loaded into the mysql
#############################################################################
input_sold_file = '/Users/senthilnatesan/Desktop/job-search/sap/Zip_MedianSoldPrice_AllHomes.csv'
output_sold_file = '/Users/senthilnatesan/Desktop/job-search/sap/zip_median_sold_2015.csv'
input_list_file = '/Users/senthilnatesan/Desktop/job-search/sap/Zip_MedianListingPrice_AllHomes.csv'
output_list_file = '/Users/senthilnatesan/Desktop/job-search/sap/zip_median_list_2015.csv'
def create_zip_median_sold_file(inf, outf, nz_cnt):
    # Filter a Zillow CSV into 'zip,month,price' rows (Python 2 script).
    #   inf    -- input CSV path; the first line is the header row
    #   outf   -- output CSV path
    #   nz_cnt -- minimum number of non-zero 2015 monthly prices a zip needs
    line_no = 0
    header_word_count = 0
    out_file = open (outf, 'w' )
    invalid_zip = 0
    invalid_zip_data = 0
    good_data = 0
    with open(inf) as f:
        for line in f:
            line = line.strip()
            line_no += 1
            words = line.split(',')
            if (line_no == 1):
                # Remember the header: it supplies dict keys for every data row.
                header_word_count = len(words)
                header = words
                continue
            if (len (words) != header_word_count ):
                # Malformed row; a quoted field containing a comma would also trip this.
                print "Unknown word count. skip the line. " ,line
                continue
            d = dict (zip(header, words))
            s = get_2015_median_sold_or_list_price(d, nz_cnt)
            if s == "":
                invalid_zip_data += 1
                continue
            else:
                good_data += 1
                out_file.write(s)
    out_file.close()
    print ("total lines ", line_no - 1, "good data ", good_data, "bad data ", invalid_zip_data)
def get_2015_median_sold_or_list_price(d, nz_cnt):
    """Turn one Zillow CSV row-dict into 'zip,YYYY-MM,price' lines for 2015.

    Returns "" when the row has no RegionName, is in HI/AK, or has fewer
    than *nz_cnt* non-zero monthly prices.  Keys/values in *d* still carry
    their CSV double-quotes.
    """
    region = d.get('"RegionName"')
    if region is None:
        return ""
    if d.get('"State"') in ('"HI"', '"AK"'):
        return ""
    zip_code = region[1:6]          # strip the leading quote, keep 5 digits
    rows = []
    non_zero = 0
    for month in range(1, 13):
        yy_mm = '"2015-%02d"' % (month,)
        raw = d.get(yy_mm, 0)
        try:
            price = float(raw)
        except ValueError:
            price = 0.0             # blank/garbled cells count as zero
        if price != 0.0:
            non_zero += 1
        rows.append('{0},{1},{2}\n'.format(zip_code, yy_mm[1:-1], price))
    if non_zero < nz_cnt:
        return ""
    return "".join(rows)
def main ():
    # Sold prices need at least 10 non-zero months; listings keep every zip.
    create_zip_median_sold_file(input_sold_file, output_sold_file, 10)
    create_zip_median_sold_file(input_list_file, output_list_file, 0)
if __name__ == '__main__':
    main()
| true
|
d074c80ff22d1909eb2eb9a301eefc1cf02c1f5c
|
Python
|
NilanjanaLodh/lab_sem5
|
/DBMSlab/comparingDB_FS/addRecord
|
UTF-8
| 305
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/python
from sys import argv
import csv
filename= argv[1]
fileobj= open(filename,'r')
print fileobj.readline()
inputline = raw_input();
inputrow= inputline.split(',')
with open(filename,'a') as fileobj:
csvwriter = csv.writer(fileobj, delimiter=",")
csvwriter.writerow(inputrow)
| true
|
1d5a709ca47eaf8eca132f2b6b2bd13638c72033
|
Python
|
Anvesh8263/python-lab-30
|
/perfectnumber.py
|
UTF-8
| 275
| 3.78125
| 4
|
[] |
no_license
|
num=int(input("Enter the number"))
sum=0
for i in range(1,num):
if(num%i==0):
sum=sum+i
if (sum==num):
print("The number is a perfect number")
else:
print("The number is not a perfect")
| true
|
aec9c04bc63d7fb0d5b65f2e8f2f6fdb159abbfc
|
Python
|
hoh1/MIS3640
|
/In-Class-Activities/In-Class-5/shape3.py
|
UTF-8
| 883
| 3.953125
| 4
|
[] |
no_license
|
import turtle
import math
draw = turtle.Turtle()
turtle.speed(10)
draw.hideturtle()
def position (x,y):
    # Move the pen to (x, y) without drawing.
    # NOTE(review): operates on the module-level turtle, not the `draw`
    # Turtle created above -- the parameterless module API is used throughout.
    turtle.penup()
    turtle.setx(x)
    turtle.sety(y)
    turtle.pendown()
def polyline(t, n, length, angle):
    # Draw n segments of `length`, turning left `angle` degrees after each.
    # NOTE(review): parameter `t` is unused; the module-level turtle draws.
    for i in range(n):
        turtle.fd(length)
        turtle.lt(angle)
def arc(t, r, angle, x, y):
    # Approximate an arc of radius r spanning `angle` degrees, starting at
    # (x, y), as a polyline of roughly 3-unit steps.
    position(x, y)
    arc_length = 2 * math.pi * r * angle / 360
    n = int(arc_length / 3) + 1        # number of straight segments
    step_length = arc_length / n
    step_angle = float(angle) / n
    polyline(t, n, step_length, step_angle)
def circle(t, r, x, y):
    # A full circle is simply a 360-degree arc starting at (x, y).
    arc(t, r, 360, x, y)
#shape 3
def shape3(t, r, x, y):
    # Composite figure: an outer circle of radius r, two half-arcs of radius
    # r/2 through the middle, and two small r/6 circles -- presumably a
    # yin-yang-like figure centred near (x, y); confirm visually.
    turtle.pensize(3)
    circle(draw, r, x, y-r)
    arc(draw, r/2, 180, x, y)
    arc(draw, r/2, 180, x, y)
    circle(draw, r/6, x, y+(r/3))
    circle(draw, r/6, x, y-(2*r/3))
# Draw the figure at the origin, then keep the window open until closed.
shape3(draw, 100, 0, 0)
turtle.mainloop()
| true
|
3a70f8d1710dab910d547a7e94ec57dba1c5b829
|
Python
|
abespitalny/CodingPuzzles
|
/Leetcode/valid_sudoku.py
|
UTF-8
| 1,876
| 3.65625
| 4
|
[] |
no_license
|
from leetcode import *
class Solution:
    # Time: O(n^2) where n is 9 in this case.
    # Space: O(n)
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True when no row, column, or 3x3 box repeats a digit.

        '.' marks an empty cell and is ignored; only filled cells count.
        """
        # Pass i checks row i and column i simultaneously.
        for i in range(9):
            seen_in_row = set()
            seen_in_col = set()
            for j in range(9):
                row_cell = board[i][j]
                if row_cell != '.':
                    if row_cell in seen_in_row:
                        return False
                    seen_in_row.add(row_cell)
                col_cell = board[j][i]
                if col_cell != '.':
                    if col_cell in seen_in_col:
                        return False
                    seen_in_col.add(col_cell)
        # Check each of the nine 3x3 subgrids.
        for gi in range(3):
            for gj in range(3):
                seen = set()
                for di in range(3):
                    for dj in range(3):
                        value = board[di + 3*gi][dj + 3*gj]
                        if value == '.':
                            continue
                        if value in seen:
                            return False
                        seen.add(value)
        return True
# Ad-hoc smoke tests run at import time.
solution = Solution()
# Expected: True
print(solution.isValidSudoku(board =
[["5","3",".",".","7",".",".",".","."]
,["6",".",".","1","9","5",".",".","."]
,[".","9","8",".",".",".",".","6","."]
,["8",".",".",".","6",".",".",".","3"]
,["4",".",".","8",".","3",".",".","1"]
,["7",".",".",".","2",".",".",".","6"]
,[".","6",".",".",".",".","2","8","."]
,[".",".",".","4","1","9",".",".","5"]
,[".",".",".",".","8",".",".","7","9"]]
))
# Expected: False (the '8' in the top-left duplicates the '8' in column 0)
print(solution.isValidSudoku(board =
[["8","3",".",".","7",".",".",".","."]
,["6",".",".","1","9","5",".",".","."]
,[".","9","8",".",".",".",".","6","."]
,["8",".",".",".","6",".",".",".","3"]
,["4",".",".","8",".","3",".",".","1"]
,["7",".",".",".","2",".",".",".","6"]
,[".","6",".",".",".",".","2","8","."]
,[".",".",".","4","1","9",".",".","5"]
,[".",".",".",".","8",".",".","7","9"]]
))
| true
|
2adcc9536e7293cbcc571cc04b3224cf5a56b1d5
|
Python
|
jacksonmoreira/Curso-em-video-mundo1-
|
/Exercicios/script023.py
|
UTF-8
| 157
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
from math import trunc

# Read a real number and report its truncated integer part.
value = float(input('Digite um número:'))
whole_part = trunc(value)
print('{} foi o número digitado, sua porção inteira é {}.'.format(value, whole_part))
| true
|
1d22cab16424e2a52fed07478be8694c06339190
|
Python
|
Prasantacharya/Stonk-bot
|
/bot/stonk.py
|
UTF-8
| 2,785
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
import sqlite3
import io
import json
import urllib3
http = urllib3.PoolManager()
'''
# Purpose: Helper function for getting stocks data,
# Args: stock ticker
# Returns: json data for stock from yahoo finance api
# Ex: getStonk('AMD') => {price: 15, currency: USD, ... }
'''
def getStonk(stonk):
    """Fetch the quote JSON for ticker *stonk* from the Yahoo Finance API."""
    url = "https://query1.finance.yahoo.com/v7/finance/quote?symbols=" + stonk
    response = http.request('GET', url)
    return json.loads(response.data)
'''
# Purpose: Helper functoin to determine if historic
# Args: json data
# Returns: True if historic, false if not
# Ex: checkHistoric(getStonk("AMD")) => false
'''
def checkHistoric(data):
    """Classify quote JSON as historic or live.

    Returns a ``(historic, prunedData)`` tuple.

    Bug fixes: the original dict literal used ``curr_price = ""`` (a syntax
    error; it must be a key string), and returned a *set* containing a dict,
    which raises TypeError because dicts are unhashable -- a tuple keeps both
    values and preserves order.

    TODO: the historic detection itself is still unimplemented (always False),
    and *data* is not yet inspected.
    """
    historic = False
    prunedData = {"name": "", "tradable": True, "curr_price": ""}
    return (historic, prunedData)
class MarketPlace(object):
    """SQLite-backed toy marketplace: per-user stock positions and cash.

    Fixes over the original:
      * ``add_account`` used ``executescript()`` with parameters and a
        SQL-Server-style ``IF NOT EXISTS`` statement -- neither is supported
        by sqlite3.
      * ``buy_stock`` compared money against the Cursor object returned by an
        incomplete ``execute("SELECT")``.
      * ``getAccount`` had a stray ``:`` (syntax error) and passed a bare id
        where a parameter tuple is required.
    """

    def __init__(self):
        # check_same_thread=False: the bot accesses the DB from several threads.
        self.conn = sqlite3.connect('market.sqlite', check_same_thread=False)
        self.cur = self.conn.cursor()
        self.requestQueue = {}  # (ticker, last update-time, )
        # stockAccount: one row per (user, asset) position.
        self.cur.execute("""CREATE TABLE IF NOT EXISTS stockAccount(
            id INT
            , asset varchar(10)
            , amount FLOAT
        );""")
        # bankAccount: cash balance rows per user.
        self.cur.execute("""CREATE TABLE IF NOT EXISTS bankAccount(
            id INT
            , amount DOUBLE
        );""")

    def add_account(self, id):
        """Create starter rows for a user (stock row only when not present)."""
        self.cur.execute("SELECT 1 FROM stockAccount WHERE id = ?;", (id,))
        if self.cur.fetchone() is None:
            self.cur.execute("INSERT INTO stockaccount VALUES(?, NULL, 0);", (id,))
        self.cur.execute("INSERT INTO bankaccount VALUES(?, ?);", (id, 5000))
        self.conn.commit()
        return True

    def remove_account(self, id):
        # TODO: not implemented yet.
        return True

    def buy_stock(self, userID, ticker, shares, pricePerShare):
        """Refuse the order when its total cost exceeds the user's cash."""
        self.cur.execute("SELECT amount FROM bankAccount WHERE id = ?;", (userID,))
        row = self.cur.fetchone()
        balance = row[0] if row is not None else 0
        if (pricePerShare * shares) > balance:
            return False
        # TODO: record the position and debit the bank account.
        return True

    def sell_stock(self, userID, ticker, shares, money):
        # TODO: not implemented yet.
        return True

    def add_money(self, userID, money):
        return True

    def tax_money(self, userID, money):
        return True

    def tax_percent(self, percent):
        # Affects all users
        return True

    def getAccount(self, id):
        """Return the user's stockAccount row, or None when absent."""
        self.cur.execute("SELECT * FROM stockAccount WHERE id = ?;", (id,))
        return self.cur.fetchone()

    # Money gets
    def tax_return(self, money):
        # Affects all users
        return True

    def get_stock():
        # NOTE(review): missing `self`; calling this on an instance raises
        # TypeError.  Signature kept unchanged to preserve the interface.
        return True
| true
|
1f294bf9031721065df392d5bc65b89ca54b790b
|
Python
|
StevenColeHart/CodingDojo
|
/Python/Django/BE_Wishlist/apps/my_wishlist/models.py
|
UTF-8
| 2,704
| 2.65625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import re
import bcrypt
email_regex = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
# Create your models here.
class UserManager(models.Manager):
    """Validation helpers for registration and login.

    Each validator returns a list of error strings; an empty list means the
    submitted form data passed validation.
    """

    def register_validator(self, data):
        errors = []
        if len(data['name']) < 3 or any(char.isdigit() for char in data['name']):
            errors.append("Invalid First Name")
        if len(data['user_name']) < 3 or any(char.isdigit() for char in data['user_name']):
            errors.append("Invalid Username")
        # Bug fix: the original tested "< 0", which can never be true.
        if len(data['email']) < 1:
            errors.append("Invalid Email")
        if not email_regex.match(data['email']):
            errors.append("Invalid Email")
        if len(data['password']) < 8:
            errors.append("Password is too short")
        elif data['password'] != data['confirmation']:
            errors.append("password and confirmation aren't the same")
        if self.filter(email=data['email']).count() > 0:
            errors.append("Someone with that email is already registered")
        return errors

    def login_validator(self, data):
        errors = []
        # Bug fix: "< 0" can never be true.
        if len(data['user_name']) < 1:
            errors.append("Invalid Username")
        if len(data['password']) < 8:
            errors.append("Password is too short")
        if self.filter(user_name=data['user_name']).count() < 1:
            errors.append("You haven't registered yet")
        # Bug fix: the original appended an error when checkpw *succeeded*,
        # so a correct password always failed login.  Reject only on mismatch.
        elif not bcrypt.checkpw(data['password'].encode(), self.filter(user_name=data['user_name'])[0].password.encode()):
            errors.append("Incorrect password")
        return errors
class User(models.Model):
    # Registered account.  The password column stores a bcrypt hash --
    # presumably hashed in the registration view, since login_validator above
    # compares with bcrypt.checkpw; confirm against the views.
    name = models.CharField(max_length=255)
    user_name = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    password = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()  # custom manager exposing the validators
class ItemManager(models.Manager):
    """Validation helpers for wishlist items."""

    def item_validator(self, data):
        """Return a list of error strings; empty when *data* is valid."""
        errors = []
        name = data['item_name']
        if len(name) < 2:
            errors.append("Invalid Item Name")
        return errors
class Items(models.Model):
    # A wishlist item: `item_creator` is the user who added it; any number of
    # users can keep it on their wishlist via the `wish_listers` M2M.
    item_name = models.CharField(max_length=255)
    item_creator = models.ForeignKey(User,related_name='created_item')
    wish_listers = models.ManyToManyField(User,related_name='wishlist')
    created_at= models.DateField(auto_now_add=True)
    updated_at=models.DateField(auto_now=True)
    objects=ItemManager()
| true
|
4166c003f52083b6cf0fb6e29f01466181f8bcea
|
Python
|
taborns/vulneralapi
|
/vulneral/analyze/printer.py
|
UTF-8
| 854
| 2.546875
| 3
|
[] |
no_license
|
class Printer:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
@staticmethod
def createColorBlock( block, data):
return block + str(data) + Printer.ENDC
@staticmethod
def bold( data):
return Printer.createColorBlock(Printer.BOLD, data)
@staticmethod
def warning( data):
return Printer.createColorBlock(Printer.WARNING, data)
@staticmethod
def green( data):
return Printer.createColorBlock(Printer.OKGREEN, data)
@staticmethod
def blue( data):
return Printer.createColorBlock(Printer.OKBLUE, data)
@staticmethod
def fail( data):
return Printer.createColorBlock(Printer.FAIL, data)
| true
|
7358fade85538fa2169ab74520332964ce85cca5
|
Python
|
simonstead/imageserver
|
/app.py
|
UTF-8
| 379
| 2.640625
| 3
|
[] |
no_license
|
from flask import Flask, send_file, jsonify
from os import listdir
app = Flask(__name__)
@app.route('/')
def index():
    # Landing page: documents the only useful endpoint.
    return "GET @ /images/<filename>"
@app.route('/images/<image>')
def send_image(image):
    """Serve a PNG from static/images, or a JSON 404 when it doesn't exist.

    Membership in listdir() also guards against path traversal in `image`.
    """
    available = listdir('static/images')
    if image not in available:
        return jsonify("Sorry, not found"), 404
    return send_file('static/images/{}'.format(image), mimetype='image/png')
| true
|
171bd35231e7afc908c4ffc7272217eb1896f588
|
Python
|
MertNuhuz/dabl
|
/dabl/pipelines.py
|
UTF-8
| 1,796
| 3.0625
| 3
|
[] |
permissive
|
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression, Ridge, Lasso
def get_fast_classifiers(n_classes):
    """Get a list of very fast classifiers.

    Parameters
    ----------
    n_classes : int
        Number of classes in the dataset. Used to decide on the complexity
        of some of the classifiers.

    Returns
    -------
    fast_classifiers : list of sklearn estimators
        List of classification models that can be fitted and evaluated very
        quickly.
    """
    # Ordered roughly from fastest to slowest.
    fast_classifiers = [
        DummyClassifier(strategy="prior"),
        GaussianNB(),
        make_pipeline(MinMaxScaler(), MultinomialNB()),
        DecisionTreeClassifier(max_depth=1, class_weight="balanced"),
        DecisionTreeClassifier(max_depth=max(5, n_classes),
                               class_weight="balanced"),
        DecisionTreeClassifier(class_weight="balanced",
                               min_impurity_decrease=.01),
        LogisticRegression(C=.1, solver='lbfgs', multi_class='auto',
                           class_weight='balanced'),
    ]
    return fast_classifiers
def get_fast_regressors():
    """Get a list of very fast regressors.

    Returns
    -------
    fast_regressors : list of sklearn estimators
        List of regression models that can be fitted and evaluated very
        quickly.
    """
    fast_regressors = [
        DummyRegressor(),
        DecisionTreeRegressor(max_depth=1),
        DecisionTreeRegressor(max_depth=5),
        Ridge(alpha=10),
        Lasso(alpha=10),
    ]
    return fast_regressors
| true
|
8025fbb3e96ee62a80eb0bb441968b6bdc83de10
|
Python
|
libing7569/stp
|
/stp/core/task.py
|
UTF-8
| 897
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#coding: utf-8
class Task:
    """A schedulable unit of work, optionally holding nested subtasks."""

    def __init__(self, task_id, priority, tasktype, data, *subtasks):
        self.task_id = task_id
        self.priority = priority
        self.state = None              # lifecycle state; unset until scheduled
        self.type = tasktype
        self.data = data
        # Index the initial subtasks by id for O(1) add/remove.
        self.subtasks = {}
        for sub in subtasks:
            self.subtasks[sub.task_id] = sub
        self.is_remote = False
        self.remote_server = None      # (ip, port) once setRemote() is called

    def __cmp__(self, other):
        # NOTE(review): __cmp__/cmp are Python 2 only; never invoked on Python 3.
        return cmp(self.priority, other.priority)

    def call(self, func):
        """Invoke *func* with this task's id and payload."""
        return func(self.task_id, self.data)

    def add_subtasks(self, subtask):
        self.subtasks[subtask.task_id] = subtask

    def del_subtasks(self, subtask):
        self.subtasks.pop(subtask.task_id, None)

    def renice(self, priority):
        """Change the scheduling priority."""
        self.priority = priority

    def setRemote(self, ip, port):
        """Mark the task for execution on a remote server."""
        self.is_remote = True
        self.remote_server = (ip, port)
| true
|
d66aa50e79ab37c718d92a6b48a27d9040025c75
|
Python
|
MrHamdulay/csc3-capstone
|
/examples/data/Assignment_5/hrrbha001/question1.py
|
UTF-8
| 978
| 4.25
| 4
|
[] |
no_license
|
# bbs simulator
# hs
# 23 march 2011
choice = ""
message = "no message yet"
while choice != "X":
print ("Welcome to UCT BBS")
print ("MENU")
print ("(E)nter a message")
print ("(V)iew message")
print ("(L)ist files")
print ("(D)isplay file")
print ("e(X)it")
print ("Enter your selection:")
choice = input ("").upper ()
if choice == "E":
message = input ("Enter the message:\n")
elif choice == "V":
print ("The message is:", message)
elif choice == "L":
print ("List of files: 42.txt, 1015.txt")
elif choice == "D":
filename = input ("Enter the filename:\n")
if filename == "42.txt":
print ("The meaning of life is blah blah blah ...")
elif filename == "1015.txt":
print ("Computer Science class notes ... simplified")
print ("Do all work")
print ("Pass course")
print ("Be happy")
else:
print ("File not found")
print ("Goodbye!")
| true
|
57a4a2488561ad1efa4bf681438f43dde6e892dd
|
Python
|
r8d8/lastlock
|
/QCA4020_SDK/target/sectools/qdn/sectools/common/utils/datautils/hex16_handler.py
|
UTF-8
| 2,257
| 2.703125
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"HPND",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# ===============================================================================
#
# Copyright (c) 2013-2017 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# ===============================================================================
from .data_detail import DataType
from .data_detail import DataDetail
from .hex_handler import TypeHex
from .hex_handler import HexHandler
from .hex_handler import HexDetailer
from .hex_handler import HexNormalizer
class TypeHex16(TypeHex):
    """Custom type to specify 16-bit Hex.

    Marker subclass: adds no behavior of its own; the handler machinery
    dispatches on this type (see Hex16Handler.get_data_handler_data_type).
    """
    pass
# Global instance of the 16-bit TypeHex16 class.
type_hex_16 = TypeHex16()
class Hex16Normalizer(HexNormalizer):
    def normalize(self, i_value, length=6):
        """Normalize *i_value* to a fixed-width hex string ('0x' + 4 digits).

        The base normalizer produces a hex string; this zero-pads it to
        *length* characters (default 6 => 16 bits) and rejects longer values.
        """
        base = super(Hex16Normalizer, self).normalize(i_value)
        text = hex(int(base, 16))
        if text.endswith('L'):           # strip Python 2 long suffix
            text = text[:-1]
        if len(text) < length:
            # Insert zero padding between the '0x' prefix and the digits.
            text = text[:2] + "0" * (length - len(text)) + text[2:]
        if len(text) != length:
            raise ValueError("Hex string is too long: " + text)
        return text
class Hex16Detailer(HexDetailer):
    def detail(self, i_format):
        """See :meth:`.Data_Handler.BaseDetailer.detail`"""
        # Only TypeHex16 formats are valid here; wrap in a DataDetail tagged
        # with the 16-bit hex data type.
        assert isinstance(i_format, TypeHex16)
        return DataDetail(DataType.Hex16, self.data_handler, i_format)
class Hex16Handler(HexHandler):
    def __init__(self, data_handler):
        """Initialized the various features."""
        super(Hex16Handler, self).__init__(data_handler)
        # Swap in the 16-bit-specific detailer and normalizer over the base ones.
        self._i_detailer = Hex16Detailer(self)
        self._i_normalizer = Hex16Normalizer(self)

    #--------------------------------------------------------------------------
    # Methods that must be implemented by the data handler
    #--------------------------------------------------------------------------
    @classmethod
    def get_data_handler_data_type(cls):
        """Returns the data type that is handled by this data handler"""
        return TypeHex16

    @classmethod
    def get_data_handler(cls):
        """Returns the class reference of this data handler"""
        return Hex16Handler
| true
|
500f6906bb51bca7600a103a4403f29900052093
|
Python
|
cjoewong/General_Edge
|
/lambda_functions/linear_regression_lambda.py
|
UTF-8
| 4,641
| 2.6875
| 3
|
[] |
no_license
|
'''
A linear regression model that runs on AWS Lambda
AWS needs a zip file because it doesn't have numpy (so I can't use the console editor)
Make sure the zip file name, .py name and the handler name on Lambda coincide.
@ Original Author : Liang Zheng
@ Modified by : Chege Gitau
'''
#_______________________________________________________________________________
import numpy as np
import decimal
import time
from DynamoDBUtility import Table
print('Loading function')
def lambda_handler(event, context):
    """Combine per-gateway linear-regression features into an ensemble
    prediction and write the weights and error metrics back to DynamoDB.

    NOTE(review): `event` and `context` are unused -- all input comes from
    the DynamoDB tables via the project's Table helper.
    """
    # Fetch the DynamoDB resource
    tStart = time.time()
    # Change: Getting the number of samples from the 'SampleSize' table is tricky
    # When we'll have multiple Pi's, keeping track of this number will be buggy
    # For this reason, I'm setting the value of 'datanum' to the number of items
    # that we're going to get from the table containing the aggregated sensor data
    # Initialize helper variables
    featurenum = 3
    collectornum = 2
    betam = np.zeros((featurenum,collectornum))  # one column of regression coefficients per gateway
    dataBytesFeatures = 0
    numSensors = 0
    # Fetch the features calculated by Gateway A
    table_A = Table('sensingdata_A')
    itemKey = {'forum' : 'roomA', 'subject' : 'sensorA'}
    item_A = table_A.getItem(itemKey)
    betam[0][0] = item_A['feature_A']
    betam[1][0] = item_A['feature_B']
    betam[2][0] = item_A['feature_C']
    # dataBytesFeatures += item_A['data_bytes']
    # numSensors += item_A['number_of_sensors']
    # Fetch the features calculated by Gateway B
    table_B = Table('sensingdata_B')
    itemKey = {'forum' : 'roomB', 'subject' : 'sensorB'}
    item_B = table_B.getItem(itemKey)
    betam[0][1] = item_B['feature_A']
    betam[1][1] = item_B['feature_B']
    betam[2][1] = item_B['feature_C']
    # dataBytesFeatures += item_B['data_bytes']
    # numSensors += item_B['number_of_sensors']
    # Fetch the aggregated data from Gateway C
    table_C = Table('sensingdata_C')
    itemKey = {'forum' : 'roomC', 'subject' : 'sensorC'}
    item_C = table_C.getItem(itemKey)
    aggregatedData = item_C['aggregated_data']
    #numSensors += item_C['number_of_sensors']
    datanum = len(aggregatedData)
    # Build the design matrix X (3 features) and target vector y.
    X = np.zeros((datanum,featurenum))
    y = np.zeros((datanum,1))
    for i in range(datanum):
        X[i][0] = aggregatedData[i]['X_1']
        X[i][1] = aggregatedData[i]['X_2']
        X[i][2] = aggregatedData[i]['X_3']
        y[i][0] = aggregatedData[i]['Y']
    #data_bytes = item_C['data_bytes']
    def prox_simplex(y):
        # projection onto simplex
        n = len(y)
        val = -np.sort(-y)
        suppt_v = np.cumsum(val) - np.arange(1, n+1, 1) * val
        k_act = np.sum(suppt_v < 1)
        lam = (np.sum(val[0:k_act]) - 1.0) / k_act
        x = np.maximum(y-lam, 0.0)
        return x
    def combine(y, X, betam):
        # Projected-gradient search for simplex weights w that combine the
        # per-gateway predictions X @ betam to best match y.
        K = betam.shape[1]
        w = np.ones((K,)) / K
        maxit = 1000
        tol = 1e-3
        Xb = np.dot(X, betam)
        step = 1.0 / np.max(np.linalg.svd(Xb, full_matrices=0, compute_uv=0)) ** 2
        for it in range(maxit):
            prev_w = np.copy(w)
            res = y - np.dot(np.matrix(Xb), np.matrix(w).T)
            grad = -np.dot(np.matrix(Xb).T, np.matrix(res))
            w -= step * np.squeeze(np.asarray(grad.T))
            w = prox_simplex(w)
            if np.linalg.norm(w - prev_w) / (1e-20 + np.linalg.norm(prev_w)) < tol:
                break
        return w
    w = combine(y, X, betam)
    # DynamoDB stores numbers as Decimal, hence the str->Decimal conversions below.
    w_temp = [decimal.Decimal(str(w[i])) for i in range(collectornum)]
    wb = np.dot(np.matrix(betam), np.matrix(w).T)
    Predict_y = np.dot(np.matrix(X), wb)
    Predict_y_array = np.squeeze(np.asarray(Predict_y))
    # NOTE(review): despite the name, this is sqrt(SSE)/n, not mean squared error.
    MSE = np.sqrt(np.sum((y-np.squeeze(np.asarray(Predict_y))) ** 2)) / datanum
    MSE_temp = decimal.Decimal(str(MSE))
    tEnd = time.time()
    Lambda_ExecTime = tEnd - tStart
    tEnd_temp = decimal.Decimal(str(tEnd))
    Lambda_ExecTime_temp = decimal.Decimal(str(Lambda_ExecTime))
    Predict_y_array = Predict_y_array.tolist()
    y = y.tolist()
    for i in range(len(Predict_y_array)):
        y[i] = decimal.Decimal(str(y[i][0]))
        Predict_y_array[i] = decimal.Decimal(str(Predict_y_array[i]))
    table = Table('weightresult')
    resultData = {
        'environment' : 'roomA',
        'sensor': 'sensorA&B&C',
        'w_1' : w_temp[0],
        'w_2' : w_temp[1],
        'Prediction' : Predict_y_array,
        'Real_Data' : y,
        'Error' : MSE_temp,
        'Lambda_ExecTime' : Lambda_ExecTime_temp,
        'Time': tEnd_temp
    }
    item = table.addItem(resultData)
    # Record this run
    resultData.pop('environment', None)
    resultData.pop('sensor', None)
    resultData.pop('Prediction', None)
    resultData.pop('Real_Data', None)
    record = table.getItem({'environment' : 'roomA', 'sensor' : 'expResults'})
    results = record['results']
    results.append(resultData)
    item = table.addItem(record)
# NOTE(review): runs the handler at import time with dummy arguments --
# presumably a local smoke test; confirm before deploying to AWS Lambda.
lambda_handler(35, 46)
| true
|
86b40bb6f222b56c53fe5ed229d1eb5f5672e54e
|
Python
|
sds1vrk/Algo_Study
|
/Programers_algo/Greedy/pro_3_re_re.py
|
UTF-8
| 718
| 2.875
| 3
|
[] |
no_license
|
def solution(number, k):
    """Remove k digits from *number* (a digit string) so the remaining
    digits, kept in order, form the largest possible value.

    Greedy window-max: for each output position, pick the largest digit in
    the range of indices that still leaves enough digits for the rest.
    """
    keep = len(number) - k
    search_from = 0
    picked = ""
    for pos in range(keep):
        best = number[search_from]
        best_idx = search_from
        for j in range(search_from, k + pos + 1):
            if best < number[j]:
                best = number[j]
                best_idx = j
            # A '9' can never be beaten; stop scanning early (timeout guard).
            if best == "9":
                break
        search_from = best_idx + 1
        picked += best
    return picked
# solution("1924",2)
# solution("1231234", 3)
solution("4177252841",4)
| true
|
0207b26718e9f675382aef505275fd39c2436b06
|
Python
|
smadala/IIIT-PG11
|
/sce/slides/python/pra/Python_example_scripts/Exceptions/try_finally.py
|
UTF-8
| 215
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/python
# demo of the try...finally construct
# Python 2 teaching snippet: the finally block runs whether or not the
# float() conversion raises.
try:
    n=float(raw_input('Enter your number:'))
    double = 2 * n
finally:
    print 'Who can stop me from executing?'
# NOTE(review): if the input is not a number, `double` was never assigned and
# this line raises NameError right after the finally message prints.
print 'Double=', double
| true
|
468ea90b75a3af854757b51bb899f4e1c39c5d7d
|
Python
|
checheanya/HSE_bioinformatics
|
/HW3/needle_lin.py
|
UTF-8
| 639
| 2.84375
| 3
|
[] |
no_license
|
a = [i for i in (input()).upper()]
b = [i for i in (input()).upper()]
match, mut, gap = 5, -4, -10
leng = len(a) + 1
high = len(b) + 1
matrix = []
for i in range(high):
zero_row = [0] * leng
matrix.append(zero_row)
matrix[0] = [i * (-10) for i in range(leng)]
for i in range(1, high):
matrix[i][0] = i * (-10)
for i in range(1, high):
for j in range(1, leng):
if a[j - 1] == b[i - 1]:
step = match
else:
step = mut
matrix[i][j] = max(matrix[i - 1][j - 1] + step, matrix[i][j - 1] + gap, matrix[i - 1][j] + gap)
for j in matrix:
print(*j)
| true
|
9ed3f7bc024c4bbf3215190349941a598c3706e4
|
Python
|
Ahmad-Shafique/Python-Problem-Solving
|
/Problem solutions/11.py
|
UTF-8
| 244
| 3.25
| 3
|
[] |
no_license
|
def QuestionEleven():
Input=input()
List = Input.split(",")
resultList = []
for item in List:
ni=item
if(number_conversion_helper.convertToBase(ni,2,10)%5==0):
resultList.append(item)
print(",".join(resultList))
QuestionEleven()
| true
|
a002983634b80e6af5297e9d3d431b54c595e8ef
|
Python
|
deprofundis/deprofundis
|
/datasets.py
|
UTF-8
| 662
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import pandas as pd
from ipdb import set_trace as pause
class Dataset(dict):
    """A dict whose entries are also readable/writable as attributes.

    Pointing __dict__ at the mapping itself keeps the two views in sync:
    d['x'] and d.x always refer to the same value.
    """

    def __init__(self, **kwargs):
        super(Dataset, self).__init__(kwargs)
        self.__dict__ = self
def load_mnist(filen='../data/mnist_train.csv.gz', nrows=None):
    """
    Reads in the MNIST dataset and returns a Dataset object which holds the data
    along with metadata from the files specified
    """
    # Infer gzip compression from the file extension.
    compression = 'gzip' if filen.endswith('.gz') else None
    frame = pd.read_csv(filen, delimiter=',', dtype=int, header=None,
                        nrows=nrows, compression=compression)
    return Dataset(X=frame.values, name='mnist', filen=filen)
| true
|
690bc3a4236dc41f21e40655bcd123bc7eaefbd9
|
Python
|
wolfdale/Imgur-Image-Ripper
|
/imager.py
|
UTF-8
| 663
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
import urllib
from bs4 import BeautifulSoup
print 'Welcome to Imager'
a=int(raw_input("Enter Number of Images to be scratched: --> "))
for i in range(0,a):
web=urllib.urlopen('http://imgur.com/random')
soup=BeautifulSoup(web)
##soup.prettify()
for link in soup.find_all('img'):
if(0==0):##loop breaker as url we are searching is the first to occur in web page
img=link.get('src')
break
s='http:'
img=s+img
if "loader" in img: continue ## Filtering url
print img
urllib.urlretrieve(img, 'file'' '+ str(i+1) +'.jpg')##Can not concatenate 'string' object with 'int' thus use str(i)
| true
|
243f4942284e3e5adabf3a22c2e3bf5b472cdf47
|
Python
|
Py-Za/basics02
|
/homework02_part2.py
|
UTF-8
| 1,250
| 4.375
| 4
|
[] |
no_license
|
# Zadanie 2:
# Utwórz w nim klasę o dowolnej, sensownej nazwie. Klasa powinna zawierać pole counter, możliwe do ustawienia dla każdej instancji przy tworzeniu obiektu (jako argument w funkcji __init__()).
# Klasa powinna implementować bezargumentową metodę raise_counter(), która zwiększa pole counter w obiekcie o jeden.
# W script02.py napisz funkcję (nie metodę) przyjmującą dwa argumenty. Wewnątrz funkcji stwórz obiekt klasy utworzonej w punkcie drugim, o początkowej wartości pola counter równej pierwszemu argumentowi funkcji. W funkcji podnieś counter obiektu do wysokości wartości drugiego argumentu funkcji, przez wielokrotne wywołanie metody raise_counter. Po każdym wywołaniu metody raise_counter wyprintuj wartość pola counter dla obiektu. Zastosuj do tego odpowiednią pętlę. Dla uproszczenia możemy założyć, że argumenty funkcji są zawsze liczbowe i pierwszy jest zawsze co najmniej o jeden mniejszy od drugiego.
class Herd:
def __init__(self, counter):
self.counter = counter
def raise_counter(self):
self.counter += 1
def count_sheep(x, y):
sheep = Herd(x)
while sheep.counter < y:
sheep.raise_counter()
print(sheep.counter)
count_sheep(1, 5)
| true
|
cbf8a66d925b21569cc1e1d9e63db9934466253d
|
Python
|
ldhjj77/ldhjj77.github.io
|
/python/A/A025_Stock.py
|
UTF-8
| 2,694
| 2.859375
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import urllib.request as req
import urllib.request
###################### 환율
def Exchange_Rate():
url = 'https://finance.naver.com/marketindex/'
res = req.urlopen(url)
soup = BeautifulSoup(res,'html.parser', from_encoding='euc-kr')
name_nation = soup.select('h3.h_lst > span.blind')
name_price = soup.select('span.value')
i = 0
for c_list in soup:
try:
print(i+1, name_nation[i].text, name_price[i].text)
i = i + 1
except IndexError:
pass
##################### 코스피 지수
def kospi():
url = "https://finance.naver.com/sise/sise_index.nhn?code=KOSPI"
fp = urllib.request.urlopen(url)
source = fp.read()
fp.close()
soup = BeautifulSoup(source, 'html.parser')
soup = soup.find_all('div', attrs={'id':'quotient'})
kos = soup[0].get_text().strip()
print()
print('코스피 지수 : ' + kos)
##################### 코스닥 지수
def kosdaq():
url = "https://finance.naver.com/sise/sise_index.nhn?code=KOSDAQ"
fp = urllib.request.urlopen(url)
source = fp.read()
fp.close()
soup = BeautifulSoup(source, 'html.parser')
soup = soup.find_all('div', attrs={'id':'quotient'})
kos = soup[0].get_text().strip()
print()
print('코스닥 지수 : ' + kos)
#################### 다우지수
def dau():
url = "https://finance.naver.com/world/sise.nhn?symbol=DJI@DJI"
fp = req.urlopen(url)
source = fp.read()
fp.close()
soup = BeautifulSoup(source, 'html.parser')
soup = soup.findAll("em")
# print(soup)
dau = soup[2].get_text().strip()
dau1 = soup[3].get_text().strip()
dau2 = soup[4].get_text().replace("\n", "").strip()
print()
print('다우지수 : '+ dau )
print('전일대비 : '+ dau1 + dau2)
#################### 나스닥 지수
def nasdaq():
url = "https://finance.naver.com/world/sise.nhn?symbol=NAS@IXIC"
fp = req.urlopen(url)
source = fp.read()
fp.close()
soup = BeautifulSoup(source, 'html.parser')
soup = soup.findAll("em")
# print(soup)
dau = soup[2].get_text().strip()
dau1 = soup[3].get_text().strip()
dau2 = soup[4].get_text().replace("\n", "").strip()
print()
print('나스닥지수 : '+ dau )
print('전일대비 : '+ dau1 + dau2)
if __name__ == '__main__': # scrape_weather()라는 함수가 같은파일(A020_project.py)안에 있다면 실행하게
Exchange_Rate()
kospi()
kosdaq()
dau()
nasdaq()
| true
|
00a74c6fdab1966e8905b5af3cb3547024e23714
|
Python
|
recycledbeans/PigeonPi
|
/check_follows.py
|
UTF-8
| 1,677
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# Import all of the necessary modules
import os
import sys
import tweepy
import pygame
from credentials import * # <---- Be sure to put your Twitter application's credentials here
# Tweepy OAuth (Authentication)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Initiate the Tweepy API class
api = tweepy.API(auth)
# Making sure we have entered a username argument
try:
username = sys.argv[1]
except Exception:
print "Don't forget to supply a username!"
exit(0)
# print username
# exit(0)
# Tweepy calls the get_user Twitter API call
me = api.get_user(username);
# This is the number of our followers (in string form)
followers = me.followers_count
newusername = False
file_name = os.path.dirname(os.path.abspath(__file__)) + '/followers/' + username + '.txt';
if os.path.isfile(file_name) is False:
newusername = True
# Open up a file for caching the username's followers
if newusername == True:
file_ = open(file_name, 'w')
else:
file_ = open(file_name, 'r+')
# How many followers did we have the last time we checked?
if newusername == True:
old_count = "0"
else:
old_count = file_.read(10)
# If we have more followers:
if int(followers) > int(old_count):
# Show it on the terminal screen
print "New Follower!"
# Play the including mp3 file
pygame.mixer.init()
pygame.mixer.music.load(os.path.dirname(os.path.abspath(__file__)) + "yay.mp3")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
# Overwrite the old amount of followers with the new amount
file_.seek(0)
file_.truncate()
file_.write(str(followers))
file_.close()
| true
|
bab766b2e8fa59431a3d183308b4f965b21dd5c5
|
Python
|
ArdaCemBilecan/PythonProjects
|
/MatPlobLib.py
|
UTF-8
| 2,437
| 3.53125
| 4
|
[] |
no_license
|
import matplotlib.pyplot as plt
import pandas as pd
# matplotlib kutuphanesi
# gorsellestime kotuphanesi
# line plot, scatter plot, bar plot, subplots, histogram
df = pd.read_csv("iris.csv")
print(df.columns)
print(df.Species.unique())
print(df.info())
print(df.describe())
setosa = df[df.Species == "Iris-setosa"]
versicolor = df[df.Species == "Iris-versicolor"]
print(setosa.describe())
print(versicolor.describe())
# %%
df1 = df.drop(["Id"],axis=1)
setosa = df[df.Species == "Iris-setosa"]
versicolor = df[df.Species == "Iris-versicolor"]
virginica = df[df.Species == "Iris-virginica"]
plt.plot(setosa.Id,setosa.PetalLengthCm,color="red",label= "setosa")
plt.plot(versicolor.Id,versicolor.PetalLengthCm,color="green",label= "versicolor")
plt.plot(virginica.Id,virginica.PetalLengthCm,color="blue",label= "virginica")
plt.legend()
plt.xlabel("Id")
plt.ylabel("PetalLengthCm")
plt.show()
df1.plot(grid=True,alpha= 0.9)
plt.show()
#%% scatter plot
setosa = df[df.Species == "Iris-setosa"]
versicolor = df[df.Species == "Iris-versicolor"]
virginica = df[df.Species == "Iris-virginica"]
plt.scatter(setosa.PetalLengthCm,setosa.PetalWidthCm,color="red",label="setosa")
plt.scatter(versicolor.PetalLengthCm,versicolor.PetalWidthCm,color="green",label="versicolor")
plt.scatter(virginica.PetalLengthCm,virginica.PetalWidthCm,color="blue",label="virginica")
plt.legend()
plt.xlabel("PetalLengthCm")
plt.ylabel("PetalWidthCm")
plt.title("scatter plot")
plt.show()
# %% histogram
plt.hist(setosa.PetalLengthCm,bins= 50)
plt.xlabel("PetalLengthCm values")
plt.ylabel("frekans")
plt.title("hist")
plt.show()
# %% bar plot
import numpy as np
#x = np.array([1,2,3,4,5,6,7])
#
#y = x*2+5
#
#plt.bar(x,y)
#plt.title("bar plot")
#plt.xlabel("x")
#plt.ylabel("y")
#plt.show()
x = np.array([1,2,3,4,5,6,7])
a = ["turkey","usa","a","b","v","d","s"]
y = x*2+5
plt.bar(a,y)
plt.title("bar plot")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# %% subplots
df1.plot(grid=True,alpha= 0.9,subplots = True)
plt.show()
setosa = df[df.Species == "Iris-setosa"]
versicolor = df[df.Species == "Iris-versicolor"]
virginica = df[df.Species == "Iris-virginica"]
plt.subplot(2,1,1)
plt.plot(setosa.Id,setosa.PetalLengthCm,color="red",label= "setosa")
plt.ylabel("setosa -PetalLengthCm")
plt.subplot(2,1,2)
plt.plot(versicolor.Id,versicolor.PetalLengthCm,color="green",label= "versicolor")
plt.ylabel("versicolor -PetalLengthCm")
plt.show()
| true
|
c9bef4a68e8bb9e29f66c29fb1770c765397afca
|
Python
|
yukikawana/PhotographicImageSynthesis
|
/vislog.py
|
UTF-8
| 369
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys
window = 1000
fl=sys.argv[1]
ind=int(sys.argv[2])
if(len(sys.argv)>3):
window=int(sys.argv[3])
frame = test_df = pd.read_csv(fl, header=None, skiprows=17, sep=" ")
print(frame.ix[0,0])
frame.ix[:,ind].rolling(window=window).mean().plot()
plt.show()
#plt.savefig("plot.png")
| true
|
8297af6d0cc8c85435642602c65c5cc171b4f6d4
|
Python
|
ItManHarry/Python
|
/PythonCSDN/code/chapter6/MultiExceptElse.py
|
UTF-8
| 196
| 3.578125
| 4
|
[] |
no_license
|
try:
a = int(input('Number A:'))
b = int(input('Number B:'))
print('a / b is : ', a / b)
except (ValueError, ArithmeticError) as e:
print(e)
print(type(e))
else:
print('Everything is OK!!!')
| true
|
bec5080cc20805dab29b6e27c9ec4a5d283da36b
|
Python
|
wesleyjr01/Harvard-CS50_2020
|
/cs50_IntroductionToCS/week07_SQL/submission/houses/import.py
|
UTF-8
| 1,385
| 2.921875
| 3
|
[] |
no_license
|
import sys
import csv
import cs50
# Create database
db = cs50.SQL("sqlite:///students.db")
if len(sys.argv) != 2:
print("we need argv=2")
sys.exit()
else:
with open(sys.argv[1], "r") as csvfile:
students_csv = csv.DictReader(csvfile)
for student in students_csv:
array_name = student["name"].split()
if len(array_name) == 2:
first_name = array_name[0]
last_name = array_name[1]
house = student["house"]
birth = student["birth"]
db.execute(
"INSERT INTO students (first, middle, last, house, birth) VALUES(?, ?, ?, ?, ?)",
first_name,
None,
last_name,
house,
birth,
)
elif len(array_name) == 3:
first_name = array_name[0]
middle_name = array_name[1]
last_name = array_name[2]
house = student["house"]
birth = student["birth"]
db.execute(
"INSERT INTO students (first, middle, last, house, birth) VALUES(?, ?, ?, ?, ?)",
first_name,
middle_name,
last_name,
house,
birth,
)
| true
|
ce202115737a9b7115e6371602dbde02dcdde5e9
|
Python
|
WellingtonTorres/PythonExercicios
|
/ex035.py
|
UTF-8
| 436
| 3.984375
| 4
|
[] |
no_license
|
from time import sleep
print('=-='*10)
print('ANALISANDO UM TRIÂNGULO')
print('=-='*10)
a = float(input('Primeiro segmento: '))
b = float(input('Segundo segmento: '))
c = float(input('Terceiro segmento: '))
print("Analisando se as condições foram verdadeiras...")
sleep(3)
if a < b + c and b < a + c and c < a + b:
print('Os segmentos podem formar um triângulo!')
else:
print('Os segmentos não pode formar um triângulo!')
| true
|
5846e5df9180db5d72a1172570b2cede2f94d469
|
Python
|
blockheads/ConquerorGame
|
/NPC/NordicHuman.py
|
UTF-8
| 894
| 2.796875
| 3
|
[] |
no_license
|
import codecs
import sys
import Sprites
from NPC.Npc import Npc, DATA_PATH
from util import Reader
class NordicHuman(Npc):
def __init__(self):
super().__init__(self.genName(),Sprites.NPC_H)
"""
Generates a nordic human name
"""
def genName(self):
# ensure proper encoding
# read file, needs to be encoded by utf_8
f = codecs.open(DATA_PATH + "NORDIC_H.txt", encoding='utf_8')
firstName = Reader.random_line(f)
firstName = firstName.rstrip()
f = codecs.open(DATA_PATH + "NORDIC_H.txt", encoding='utf_8')
lastName = Reader.random_line(f)
lastName = lastName.rstrip()
return firstName + " " + lastName
if __name__ == '__main__':
# if running from this directory we start at data rather than root
DATA_PATH = "data/"
nordichuman = NordicHuman()
print(nordichuman.name)
| true
|
4962ebfec97c2c0cfbaba6d518103ffcec6bd560
|
Python
|
dbarbella/analogy
|
/finding_analogies/fixes.py
|
UTF-8
| 720
| 2.65625
| 3
|
[] |
no_license
|
#array containing regular expressions for common scanning errors and the corrected strings
#TODO add RE for <end of sentence .Begin of next sentence> you'd and other would
fixes = [("\s*'\s*t\s*", "'t "),
("\s*'\s*ve\s*", "'ve "),
("\s*'\s*s\s*", "'s "),
("s\s*'\s*", "s' "),
("\s*I\s*'\s*m\s*", "I'm "),
("\s*'\s*nt\s*", "n't "),
("\s*\.{3}s*", "... "),
("\s*\?\s*", "? "),
("\s*!\s*", "! "),
("\s*,\s*", ", "),
("\s*\.\s*", ". "),
("\s*;\s*", "; "),
("\s*:\s*", ": "),
("s\s*’\s*", "s' "),
("\s*\(\s*", "("),
("\s*\)\s*", ")"),
("you\s*\'\s*d",'you\'d')]
| true
|
633a15c4c9701aeb95b4bd4e28531de25691a49e
|
Python
|
Voidoz/PyDecode
|
/Decode.py
|
UTF-8
| 1,300
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#############################
# Import dependencies #
#############################
import html
import re
#############################
# Print intro #
#############################
print(str(
"######################################################\n" +
"# Welcome to PyDecode! #\n" +
"######################################################\n" +
"# Please note that this program can only decode HTML #\n" +
"######################################################\n"))
#############################
# Get information from user #
#############################
fileName = str("./" + str(input("What is the name of the file you wish to decode?: ")))
outputFileName = str("./" + str(input("What would you like to name the file containing the result?: ")) + ".txt")
#############################
# Validate fileName #
#############################
while fileName.endswith(".html") == False:
print("Detected invalid information!\nYou must add .html to the end of the file you wish to decode!")
fileName = str("./" + str(input("Please try again: ")))
#############################
# Decode HTML file and #
# put result in .txt file #
#############################
print()
input("Press any key to close...")
| true
|
47880ce7cc3084c123c5b69b5bfb222f85e9e689
|
Python
|
FergusInLondon/Runner
|
/test/runner.py
|
UTF-8
| 3,377
| 2.90625
| 3
|
[] |
no_license
|
import unittest
import json
import pandas as pd
from unittest.mock import patch
from runner import Runner
def get_payload(fixture):
with open(f"test/fixtures/{fixture}.json") as json_file:
data = json.load(json_file)
return data
class MockStrategy(object):
def start(self, control):
self.control = control
example_kline = {
'e': '24hrTicker',
'E': 1580770073221,
's': 'BTCUSDT',
'p': '-117.45000000',
'P': '-1.250',
'w': '9362.96596369',
'x': '9394.08000000',
'c': '9276.63000000',
'Q': '0.01075500',
'b': '9275.46000000',
'B': '0.26951100',
'a': '9276.63000000',
'A': '0.00002400',
'o': '9394.08000000',
'h': '9618.79000000',
'l': '9234.00000000',
'v': '52160.24254000',
'q': '488374575.55996477',
'O': 1580683673213,
'C': 1580770073213,
'F': 238122539,
'L': 238624515,
'n': 501977
}
class TestRunner(unittest.TestCase):
@patch('runner.Client', autospec=True)
@patch('runner.BinanceSocketManager', autospec=True)
def test_runner_init(self, socket_mock, client_mock):
"""
Ensure that the runner initialises by (a) creating a Binance API client,
(b) retrieving account details, and (c) starting a User Websocket Conn.
"""
client_instance = client_mock.return_value
client_instance.get_account.return_value = get_payload("account")
r = Runner("apiKey", "apiSecret", "symbol", None)
client_mock.assert_called_once_with("apiKey", "apiSecret")
client_instance.get_account.assert_called_once()
socket_mock.return_value.start_user_socket.assert_called_once()
@patch('runner.Client', autospec=True)
@patch('runner.BinanceSocketManager', autospec=True)
def test_runner_run(self, socket_mock, client_mock):
"""
Ensure that `.start()` is called on the strategy, that the correct args
are provided to the new streaming ticker socket, and that the socket is
correctly started.
"""
mock_strategy = MockStrategy()
client_instance = client_mock.return_value
client_instance.get_account.return_value = get_payload("account")
r = Runner("apiKey", "apiSecret", "symbolToMonitor", mock_strategy)
r.run()
self.assertEqual(mock_strategy.control, r)
call = socket_mock.return_value.start_symbol_ticker_socket.call_args_list
self.assertEqual(call[0][0][0], "symbolToMonitor")
socket_mock.return_value.start.assert_called_once()
@patch('runner.Client', autospec=True)
@patch('runner.BinanceSocketManager', autospec=True)
def test_parse_dataframe(self, socket_mock, client_mock):
"""
Ensure that inbound kline messages are correctly parsed in to Pandas
dataframes.
"""
df = Runner("apiKey", "apiSecret", "symbolToMonitor", MockStrategy()).parse_dataframe(example_kline)
self.assertFalse(df.empty)
self.assertEqual(df.shape, (1, 22))
self.assertEqual(df.index.name, "EventTime")
# a few random columns
self.assertEqual(df["OpenPrice"][0], '9394.08000000')
self.assertEqual(df["PriceChangePercent"][0], '-1.250')
self.assertEqual(df["LastQuantity"][0], '0.01075500')
if __name__ == "__main__":
unittest.main()
| true
|
e24a03057372bfacacdbac1b3441203a2e19f108
|
Python
|
syurskyi/Algorithms_and_Data_Structure
|
/Data Structures & Algorithms - Python/Section 6 Data Structures Stacks & Queues/src/48.SOLUTION-Queue-Enqueue.py
|
UTF-8
| 985
| 4.21875
| 4
|
[] |
no_license
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
class Queue:
def __init__(self, value):
new_node = Node(value)
self.first = new_node
self.last = new_node
self.length = 1
def print_queue(self):
temp = self.first
while temp is not None:
print(temp.value)
temp = temp.next
def enqueue(self, value):
new_node = Node(value)
if self.first is None:
self.first = new_node
self.last = new_node
else:
self.last.next = new_node
self.last = new_node
self.length += 1
my_queue = Queue(1)
print('Queue before enqueue(2):')
my_queue.print_queue()
my_queue.enqueue(2)
print('\nQueue after enqueue(2):')
my_queue.print_queue()
"""
EXPECTED OUTPUT:
----------------
Queue before enqueue(2):
1
Queue after enqueue(2):
1
2
"""
| true
|
9e09f102876f0e2ba8526b3fb55cd6989beb3c5e
|
Python
|
openvinotoolkit/mmaction2
|
/tools/data/hvu/merge_annot.py
|
UTF-8
| 4,028
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
from os import makedirs
from os.path import exists
from argparse import ArgumentParser
from collections import defaultdict
from tqdm import tqdm
def ensure_dir_exists(dir_path):
if not exists(dir_path):
makedirs(dir_path)
def get_valid_sources(all_sources):
return [s for s in all_sources if exists(s)]
def parse_hvu_records(data_sources):
assert len(data_sources) > 0
out_records = defaultdict(list)
for data_source in data_sources:
with open(data_source) as input_stream:
for line_id, line in enumerate(input_stream):
if line_id == 0:
continue
line_elements = line.strip().split(',')
if len(line_elements) != 4:
continue
tags, video_name, start, end = line_elements
tags = tags.split('|')
url = f'https://www.youtube.com/watch?v={video_name}'
segment_start = float(start)
segment_end = float(end)
out_records[video_name].append({
'url': url,
'start': segment_start,
'end': segment_end,
'tags': tags
})
return out_records
def parse_kinetics_records(data_sources):
assert len(data_sources) > 0
out_records = defaultdict(list)
for data_source in data_sources:
with open(data_source) as input_stream:
for line_id, line in enumerate(input_stream):
if line_id == 0:
continue
line_elements = line.strip().split(',')
if len(line_elements) != 5:
continue
label, video_name, start, end, _ = line_elements
url = f'https://www.youtube.com/watch?v={video_name}'
segment_start = float(start)
segment_end = float(end)
out_records[video_name].append({
'url': url,
'start': segment_start,
'end': segment_end,
'tags': [label],
})
return out_records
def merge_records(src_records, candidate_records):
def _is_same_segment(a, b):
intersect_start = max(a['start'], b['start'])
intersect_end = min(a['end'], b['end'])
return intersect_end > intersect_start
out_records = src_records
for video_name, segments in tqdm(candidate_records.items(), leave=False):
if video_name not in out_records.keys():
out_records[video_name] = segments
else:
cur_segments = out_records[video_name]
for candidate_segment in segments:
matches = [
True for cur_segment in cur_segments
if _is_same_segment(cur_segment, candidate_segment)
]
if len(matches) == 0:
out_records[video_name].append(candidate_segment)
return out_records
def main():
parser = ArgumentParser()
parser.add_argument('--hvu_sources', '-hi', nargs='+', type=str, required=True)
parser.add_argument('--kinetics_sources', '-ci', nargs='+', type=str, required=True)
parser.add_argument('--output_dir', '-o', type=str, required=True)
args = parser.parse_args()
ensure_dir_exists(args.output_dir)
hvu_data_sources = get_valid_sources(args.hvu_sources)
hvu_records = parse_hvu_records(hvu_data_sources)
print('Found {} HVU records.'.format(sum(len(l) for l in hvu_records.values())))
kinetics_data_sources = get_valid_sources(args.kinetics_sources)
kinetics_records = parse_kinetics_records(kinetics_data_sources)
print('Found {} Kinetics records.'.format(sum(len(l) for l in kinetics_records.values())))
merged_records = merge_records(hvu_records, kinetics_records)
print('Merged {} records.'.format(sum(len(l) for l in merged_records.values())))
if __name__ == '__main__':
main()
| true
|
51319ab10bd0f36bd1dd82e91e322c8c580d44b2
|
Python
|
enterstudio/bokeh
|
/examples/plotting/file/color_data_map.py
|
UTF-8
| 1,411
| 2.671875
| 3
|
[] |
permissive
|
import numpy as np
from bokeh.io import show
from bokeh.layouts import gridplot
from bokeh.models import (
ColumnDataSource,
ColorBar,
LinearColorMapper,
LogColorMapper,
)
from bokeh.palettes import Viridis3, Viridis256
from bokeh.plotting import figure
x = np.random.random(2500) * 140 - 20
y = np.random.normal(size=2500) * 2 + 5
source = ColumnDataSource(dict(x=x, y=y))
opts = dict(x='x', line_color=None, source=source)
def make_plot(mapper, title):
mapper.low_color = 'blue'
mapper.high_color = 'red'
p = figure(toolbar_location=None, tools='', title=title)
color_bar = ColorBar(color_mapper=mapper, location=(0, 0))
p.circle(
x='x', y='y',
fill_color={'field': 'x', 'transform': mapper}, line_color=None,
source=source
)
p.add_layout(color_bar, 'right')
return p
p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear, low/high = blue/red')
p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log, low/high = blue/red')
p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear, low/high = blue/red')
p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log, low/high =, blue/red')
show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))
| true
|
14a2e0eedd37265cc952713f59fc7946bdeb5deb
|
Python
|
tectronics/geditcom-ii
|
/trunk/Scripts/Development/Miscellaneous/Decode Lat Lon.py
|
UTF-8
| 804
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/python
#
# Decode Lat Lon (Python Script for GEDitCOM II)
# Load GEDitCOM II Module
from GEDitCOMII import *
import math
################### Main Script
# Preamble
gedit = CheckVersionAndDocument("Decode Lat Lon",1.6,2)
if not(gedit) : quit()
gdoc = FrontDocument()
print str(GetScreenSize())
print str(GetMainScreenSize())
print str(kmPerLongitudeDegree(0.))
trials = ["-75.1697","45","-1"]
for i in range(len(trials)) :
print "Input: "+trials[i]
gc = GlobalCoordinate(trials[i])
if gc.error==None :
print " number: "+gc.signedNumber()
print " compass: "+gc.compassNumber()
print " dms: "+gc.dmsNumber(False,True).encode('utf-8')
print " dms: "+gc.dmsNumber(False,False).encode('utf-8')
else :
print " error: "+gc.error
| true
|
c0581477a822bff7002ab1b310d1063db236761a
|
Python
|
MaGabriela21/flujoenredes
|
/Tarea1/instancias.py
|
UTF-8
| 1,781
| 2.859375
| 3
|
[] |
no_license
|
basicUnit = []
neighbors = []
with open("2DU60-05-1.dat",'r') as archivo:
n = int(archivo.readline())
for i in range(n):
stringLine = archivo.readline()
splitLine = stringLine.split(" ")
valueList = [float(e) for e in splitLine]
index, x, y, a, b, c = valueList
basicUnit.append((x,y,a))
neigh = int(archivo.readline())
for i in range(neigh):
stringLine = archivo.readline()
splitLine = stringLine.split(" ")
valueList = [int(e) for e in splitLine]
neigh1, neigh2 = valueList
neighbors.append((neigh1,neigh2))
with open("basicUnits.dat", 'w') as archivo:
for i in range(n):
x,y,a = basicUnit[i]
print(x,y,a, file = archivo)
with open("instancias.plt",'w') as aristas:
print("set term png", file = aristas)
print("set output '2DU60-05-1.png'", file = aristas)
print("set pointsize 2", file = aristas)
print("unset arrow", file = aristas)
print("set style fill transparent solid 0.6",file = aristas)
print("unset colorbox", file = aristas)
print("set xrange [0:550]", file = aristas)
print("set yrange [0:550]", file = aristas)
# print("set size square", file = aristas)
# print("set palette model RGB defined ( 0 'red', 1 'green' )", file=aristas)
num = 1
for i in range(neigh):
neigh1, neigh2 = neighbors[i]
x1, y1,a = basicUnit[neigh1]
x2, y2,a = basicUnit[neigh2]
print("set arrow", num, "from", x1, "," ,y1," to ", x2, ",", y2, "nohead", file = aristas)
num +=1
print("show arrow", file = aristas)
print("plot 'basicUnits.dat' using 1:2:(2*sqrt($3-800)) with circles notitle", file = aristas)
| true
|
41d58bc6cb7d2370a6cea67fdc9b01b0a09fedb7
|
Python
|
b72uno/courses
|
/udacity/RND/perception/Exercise-1/RANSAC.py
|
UTF-8
| 4,420
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
# Import PCL module
import pcl
# Load Point Cloud file
cloud = pcl.load_XYZRGB('tabletop.pcd')
## Voxel Grid filter
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
# Note: this (1) is a poor choice of leaf size
# it implies that voxel is 1 cubic meter in volume
# so it may remove important features.
# Experiment and find the appropriate size!
# A good estimate size can be obtained by having some prior
# information about the scene, like the size of the smallest
# object or total volume of the scene in Field of View.
LEAF_SIZE = 0.01
# LEAF_SIZE = 0.01 # seems to work reasonably well for this dataset
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
filename = 'voxel_downsampled.pcd'
pcl.save(cloud_filtered, filename)
## PassThrough filter
# Create a PassThrough filter object
passthrough = cloud_filtered.make_passthrough_filter()
# Assign axis and range to the passthrough filter object
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
# Again, values depend on dataset, experiment.
# These work well for this dataset
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the point cloud
cloud_filtered = passthrough.filter()
filename = 'pass_through_filtered.pcd'
pcl.save(cloud_filtered, filename)
# While filtering gets rid of some noise, to make further progress
# we need to divide the point cloud in smaller objects
# based on some common property - shape, color, size or neighborhood.
# Enter segmentation
## RANSAC plane segmentation
# We will use a popular technique known as Random Sample Consensus
# or "RANSAC". RANSAC is an algorithm that can be used to identify
# points in dataset that belong to a particular model.
# It assumes that all of the data in a dataset is composed of both
# inliers and outliers - where inliers can be defined by a particular
# model with specific set of parameters, outliers do not fit that model
# and hence can be discarded.
# If you have a prior knowledge of a certain shape being present in a
# a given dataset, you can use RANSAC to estimate what pieces of the point
# cloud set belong to that shape by assuming a particular model.
# e.g. robot autonomous navigation - for collision avoidance with objects
# and to determine traversable terrain, ground plane segmentation is an
# important part of a mobile robot's perception toolkit.
# Create the segmentation object
seg = cloud_filtered.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Again, depends on dataset, experiment
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
## Extract inliers
# the filter above is frequently used along other techniques to
# obtain a subset of points from an input point cloud. Most object
# recognition algorithms return a set of indices associated with the
# points that form the identified target object.
# As a result, it is convenient to use the ExtractIndices filter to
# extract the pointcloud associated with the identified object.
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
filename = 'extracted_inliers.pcd'
pcl.save(extracted_inliers, filename)
# Save pcd for table
# pcl.save(cloud, filename)
# Extract outliers
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
filename = 'extracted_outliers.pcd'
pcl.save(extracted_outliers, filename)
# Save pcd for tabletop objects
## Filtering noise (not needed here, but will become useful)
# Create a filter object
outlier_filter = cloud_filtered.make_statistical_outlier_filter()
# Set the number of neighbouring points to analyze for any given points
outlier_filter.set_mean_k(50)
# Set threshold scale factor
x = 1.0
# Any point with a mean distance larger than global (mean distance + x * std_dev) will be considered outlier
outlier_filter.set_std_dev_mul_thresh(x)
# Finally call the filter function for magic
cloud_filtered = outlier_filter.filter()
| true
|
f4081a5394e8b58da9896046eb8ffaaa01e39dbb
|
Python
|
DaianeFeliciano/python-fatec
|
/atv40.py
|
UTF-8
| 547
| 4.21875
| 4
|
[] |
no_license
|
"""Escreva um programa que pergunte o salário de um funcionário e calcule o valor do seu aumento.
Para salários superiores a R$1250,00, calcule um aumento de 10%. Para os inferiores ou iguais, o aumento é de 15%."""
salario = float(input("Digite o salário: "))
if salario > 1250:
aumento = (salario*0.10)+salario #(salario*15/100)
print("Você obteve um aumento de 10%, ficou no total de {}".format(aumento))
else:
aumento = (salario*0.15)+salario
print("Você obteve um aumento de 15%, ficou no total de {}".format(aumento))
| true
|
f8125d14dce303f0c44e67c0a7999388ab1c73d1
|
Python
|
kafedra-bit/resela-plus
|
/resela/model/User.py
|
UTF-8
| 6,110
| 2.75
| 3
|
[] |
no_license
|
"""
User.py
*******
"""
import json
from flask import session as flask_session
from flask_login import UserMixin, AnonymousUserMixin
from keystoneauth1 import session
from keystoneclient.auth.identity import v3
from resela.app import APP, LOGIN_MANAGER
from resela.backend.managers.UserManager import UserManager
class User(UserMixin):
    """Store user information.
    Used by Flask-Login to represent the current user (`flask.current_user`).
    Constructing with a non-None `token` eagerly opens an OpenStack session.
    """
    def __init__(self, user_id=None, email=None, token=None, name=None,
                 surname=None, role=None, session=None):
        # TODO(vph): `new_token` is a makeshift argument. It should supplant
        # TODO(vph): the `token` once we've migrated to Flask-Login.
        # TODO(vph): Its type is a `dict`, containing 'X-Auth-Token'.
        """
        :param user_id: A user's id. Corresponds to OpenStack user ids.
        :type user_id: `str`
        :param email: A user's e-mail address.
        :type email: `str`
        :param token: A database token, to retrieve a user's hashed password.
        :type token: `str`
        :param name: A user's name.
        :type name: `str`
        :param surname: A user's surname.
        :type surname: `str`
        :param role: A user's role.
        :type role: `str`
        :param session: An already-authenticated OpenStack session; only
            used when `token` is None.
        """
        self.user_id = user_id
        self.email = email
        self.token = token
        self.name = name
        self.surname = surname
        self.role = role
        if token is not None:
            # A token takes precedence: build a fresh OpenStack session
            # from it via the module-level `authenticate`.
            sess = authenticate(token)
            self.session = sess
        else:
            self.session = session
    @property
    def full_name(self):
        """Retrieve a user's full name ("<name> <surname>")."""
        return self.name + ' ' + self.surname
    def get_id(self):
        """Retrieve the user's id.
        This function is required by Flask-Login. The default implementation
        of `get_id` retrieves the attribute `id`, which this user
        implementation does not have.
        """
        return self.user_id
class AnonymousUser(AnonymousUserMixin, User):
    """An un-authenticated user, as prescribed by Flask-Login.
    The inheritance from both `AnonymousUserMixin` and `User` is necessary to
    possess all attributes that characterize a user, and still have all have
    the special attribute prescribed by Flask-Login set to the correct values.
    """
    def __init__(self):
        # All User fields default to None; AnonymousUserMixin supplies the
        # `is_authenticated` / `is_anonymous` flags.
        super().__init__()
def authenticate(credentials, user_domain_name='Default',
                 project_domain_name='Default', project_name='Default'):
    """Authenticate a user with either a password or a token.
    The occasion on which one authenticates with a username-password pair is
    the initial login. All future calls should authenticate with the token
    received from OpenStack.
    :param credentials: Login credentials.
    :type credentials: `dict`, either {'username': x, 'password': y } or {'X-Auth-Token': z}
    :param user_domain_name: User's domain name for authentication.
    :type user_domain_name: `str`
    :param project_domain_name: Project's domain name for project.
    :type project_domain_name: `str`
    :param project_name: Project name for project scoping.
    :type project_name: `str`
    :return: An authenticated OpenStack session.
    :rtype: `keystone1.session.Session`
    :raise: TypeError: No authentication credentials were provided.
    :raise: keystoneauth1.exceptions.http.Unauthorized: Authentication \
    failed.
    """
    # Password flow: only used on the initial login.
    if 'username' in credentials and 'password' in credentials:
        username = credentials['username']
        password = credentials['password']
        auth = v3.Password(
            auth_url=APP.iniconfig.get('openstack', 'keystone'),
            username=username,
            password=password,
            project_name=project_name,
            project_domain_name=project_domain_name,
            user_domain_name=user_domain_name
        )
    # The token returned by OpenStack through `get_auth_headers()` is set
    # in a `X-Auth-Token` field.
    elif 'X-Auth-Token' in credentials:
        token = credentials['X-Auth-Token']
        auth = v3.Token(
            auth_url=APP.iniconfig.get('openstack', 'keystone'),
            token=token,
            project_name=project_name,
            project_domain_name=project_domain_name
        )
    else:
        # TODO: Make a custom exception.
        raise TypeError('No credentials provided.', credentials)
    cert_path = APP.iniconfig.get('openstack', 'cert_path')
    sess = session.Session(auth=auth, verify=cert_path)
    # Check if authentication succeeds. Raises an error upon failure.
    sess.get_token()
    return sess
@LOGIN_MANAGER.user_loader
def load_user(user_id):
    """Load a user to be set as the `current_user`.
    According to the specification, `None` should be returned when
    a user with the provided user id cannot be retrieved. A return value of
    `None` will invalidate the Flask session, and Flask-Login will discard it,
    forcing the user to re-authenticate.
    :param user_id: User id of the user to be retrieved. Corresponds to the
        IDs used in OpenStack.
    :type user_id: `str`
    :return: An user object corresponding to the user id.
    :rtype: `model.User` or `None`
    """
    try:
        token = json.loads(flask_session['session'])
        os_session = authenticate(credentials=token)
        user_m = UserManager(session=os_session)
        user = user_m.get(user=user_id)
        session_user_kwargs = {
            'user_id': user.id,
            'email': user.name,
            'name': user.first_name,
            'surname': user.last_name,
            'role': os_session.auth.auth_ref['roles'][0]['name'],
            # Reuse the token parsed above instead of re-reading and
            # re-parsing the Flask session (the original parsed it twice).
            'token': token
        }
        return User(**session_user_kwargs)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception lets control-flow
        # exceptions propagate while still invalidating bad sessions.
        # TODO(vph): Add debug log message.
        return None
# TODO(vph): Moved from `app.py` so that it does not cause import cycles;
# TODO(vph): Should be moved somewhere else...
LOGIN_MANAGER.anonymous_user = AnonymousUser
| true
|
26190ef4267c3f7880a815d16081ae770bf4fe8f
|
Python
|
kapitsa2811/STN-OCR-Tensorflow
|
/src_code/models/resnet_stn.py
|
UTF-8
| 3,317
| 2.734375
| 3
|
[] |
no_license
|
"""
The script is the implementation of Resnet detection(Localisation network) and Recognition Network
"""
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from src_code.models.resnet_tf import ResnetModel_18_34
kernel_regularizer = regularizers.l1_l2(l1=1e-4, l2=1e-4)
class StnOcr(ResnetModel_18_34):
    """Spatial-Transformer OCR network.

    Builds two ResNet-style branches: a localisation ("detection") branch
    predicting affine transform parameters theta, and a recognition branch
    classifying characters from the sampled (transformed) image.
    """
    def __init__(self, input, nb_classes, detection_filter, recognition_filter):
        """
        :param input: input image
        :param nb_classes: number of characters
        :param detection_filter: detection network filter sizes
        :param recognition_filter: recognition network filter size
        """
        super(StnOcr, self).__init__(input_shape=input, nb_classes=nb_classes)
        self.num_labels = 3  # characters predicted per image
        self.num_steps = 1   # time steps for the BLSTM / theta outputs
        self.detection_filter = detection_filter
        self.recognition_filter = recognition_filter
    def resnetDetRec(self,sampled_image=None,flag='detection'):
        # Build either branch. Detection returns 6 affine params ("theta")
        # per step; recognition returns per-character softmax over 11 classes.
        if flag == 'detection':
            filter = self.detection_filter
            inp = self.input
            name='det'
        else:
            filter = self.recognition_filter
            inp = sampled_image
            name='rec'
        print(filter[0], filter[1], filter[2])
        with tf.name_scope(name) as scope:
            # Stem: conv + BN + average pooling, then three residual stages.
            inp = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=kernel_regularizer,name=scope+'/conv2d0/')(inp)
            inp = layers.BatchNormalization(name=scope+'/conv2d0_bn/')(inp)
            inp = layers.AvgPool2D(strides=2,name=scope+'/conv2d0_avgPooling/')(inp)
            inp = self.residualNet(inp=inp, filter=filter[0], size=3, stride=1, projection=False, name=scope+f'Conv2d_block1/')
            inp = self.residualNet(inp=inp, filter=filter[1], size=3, stride=1, projection=True, name=scope+f'Conv2d_block2/')
            inp = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(inp)
            inp = self.residualNet(inp=inp, filter=filter[2], size=3, stride=1, projection=True, name=scope+f'Conv2d_block3/')
            inp = layers.AvgPool2D(pool_size=5)(inp)
            inp = layers.Flatten()(inp)
            if flag == 'detection':
                # BLSTM over num_steps; one sigmoid-activated 6-vector per step.
                inp = layers.Reshape((self.num_steps, -1))(inp)
                inp = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(inp)
                theta = layers.TimeDistributed(layers.Dense(6, activation='sigmoid'))(inp)
                return theta
            else:
                inp = layers.Dense(256, activation='relu')(inp)
                classifiers = []
                for i in range(self.num_labels):
                    # NOTE(review): `inp` is reassigned inside this loop, so
                    # each character head is chained onto the previous head's
                    # output rather than branching from the shared Dense(256)
                    # features — confirm this is intended.
                    inp = layers.Dense(11)(inp)
                    inp = layers.Reshape((self.num_steps, -1, 11))(inp)
                    inp = tf.expand_dims(inp, axis=1)
                    classifiers.append(inp)
                inp = layers.concatenate(classifiers, axis=1)
                inp = layers.Reshape((-1, 11))(inp)
                inp = tf.keras.activations.softmax(inp)
                print(inp.shape)
                return inp
if __name__ == "__main__":
detection_filter = [32, 48, 48]
recognition_filter = [32,64,128]
stn_obj = StnOcr((128, 128, 1), 10, detection_filter,recognition_filter)
stn_obj.resnetDetRec('detection')
| true
|
6344e550031f9b7787efb21553d7b0b8db5cf976
|
Python
|
shailenderacc/PythonProg
|
/ifProgramFlow/ifprogramflow.py
|
UTF-8
| 313
| 3.484375
| 3
|
[] |
no_license
|
# NOTE(review): `_author_` is likely a typo for the conventional `__author__`.
_author_ = 'shail'
# Simple voting-age check: prompt for name and age, report eligibility.
name = input("Please provide your name :")
age = int(input("Please provide your age, {0} :".format(name)))
if age >= 18:
    print("You are old enough to vote {0}".format(name))
    print("Please put X in the ballot box")
else:
    # Voting age assumed to be 18; tell the user how long to wait.
    print("Please come back after {0} years".format(18 - age))
| true
|
7cd01abfa2650beb236195e93e419ac7e9477f49
|
Python
|
dyeap-zz/CS_Practice
|
/Leetcode/79.py
|
UTF-8
| 1,560
| 3.546875
| 4
|
[] |
no_license
|
'''
var:
1. use a dictionary {let:adjacent letters}
1. go through all letters in grid.
if match first letter call search
2.
valid (row,col,grid)
search (word, index, row, col,grid )
1. base case:
return True
2. for row in (-1,1)
for col in (-1,1)
if valid(row,col,grid)
recurse
return False
'''
class Solution:
    """LeetCode 79 — Word Search via backtracking DFS."""

    def exist_helper(self, word, index, row, col, grid):
        """Depth-first search for word[index:] starting at grid[row][col].

        The current cell is marked '#' while its neighbours are explored so
        a single path cannot reuse a cell, then restored (backtracking).
        """
        if index >= len(word):
            return True
        if (row < 0 or row >= len(grid)) or (col < 0 or col >= len(grid[0])) or (grid[row][col] != word[index]):
            return False
        # Mark as visited for this path only.
        save_letter = grid[row][col]
        grid[row][col] = "#"
        res = (self.exist_helper(word, index + 1, row + 1, col, grid)
               or self.exist_helper(word, index + 1, row - 1, col, grid)
               or self.exist_helper(word, index + 1, row, col + 1, grid)
               or self.exist_helper(word, index + 1, row, col - 1, grid))
        # BUG FIX: the original re-read grid[row][col] into save_letter here,
        # leaving the cell permanently '#', which corrupted later searches.
        # Restore the letter so other paths/start cells can use it.
        grid[row][col] = save_letter
        return res

    def exist(self, board, word: str) -> bool:
        """Return True if `word` can be traced through orthogonally adjacent
        cells of `board`, using each cell at most once per path."""
        if len(board) == 0:
            return False
        for row in range(len(board)):
            for col in range(len(board[0])):
                # exist_helper backtracks, so the board is left unmodified.
                # (The original's board[:][:] was a shallow copy sharing the
                # row lists, so it offered no protection anyway.)
                if self.exist_helper(word, 0, row, col, board):
                    return True
        return False
board =[["C","A","A"],["A","A","A"],["B","C","D"]]
word = "AAB"
sol = Solution()
print(sol.exist(board,word))
| true
|
bf16b415385eeb08a4b6bbdd8f67bbf15db3b757
|
Python
|
shahed-shd/Online-Judge-Solutions
|
/Codeforces/1100C - NN and the Optical Illusion.py
|
UTF-8
| 602
| 3.34375
| 3
|
[] |
no_license
|
# ==================================================
# Problem : 1100C - NN and the Optical Illusion
# Run time : 0.109 sec.
# Language : Python 3.7.2
# ==================================================
import sys
import math
def main():
    """Read n (number of outer circles) and r (inner radius) from stdin and
    print, to 7 decimals, the outer-circle radius x such that n circles of
    radius x placed around the inner circle of radius r touch neighbours.

    Geometry (presumably, from the formulas below): the triangle formed by
    the centre and two adjacent outer-circle centres has apex angle A and
    base angles B; the law of sines then gives x = r / (2*sin(B)/sin(A) - 1).
    """
    # sys.stdin = open("in.txt", "r")
    # sys.stdout = open("out.txt", "w")
    it = iter(map(float, sys.stdin.read().split()))
    n = next(it)
    r = next(it)
    A = 2 * math.pi / n      # central angle per outer circle
    B = (math.pi - A) / 2    # base angle of the isosceles triangle
    K = math.sin(B) / math.sin(A)
    x = r / (2*K - 1)
    sys.stdout.write('%.7f' % x)
if __name__ == '__main__':
main()
| true
|
931ff1b83959d3d3e12c1c5936be0822ef045202
|
Python
|
VerifierIntegerAssignment/DailyBackUp
|
/2/test1.py
|
UTF-8
| 331
| 2.765625
| 3
|
[] |
no_license
|
# Parse a WebAssembly binary with the `wadze` library and print every
# decoded instruction of every function body to stdout.
import wadze
with open('test/test1.wasm', 'rb') as file:
    data = file.read()
module = wadze.parse_module(data)
# If you also want function code decoded into instructions, do this
module['code'] = [ wadze.parse_code(c) for c in module['code']]
for exp in module['code']:
    for inst in exp.instructions:
        print(inst)
| true
|
97ef5b51bd3c3f2d6a8b1e30de3bb7f0fcabdc9a
|
Python
|
mariotto1/CPI
|
/classifiers.py
|
UTF-8
| 3,609
| 2.5625
| 3
|
[] |
no_license
|
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import keras as kr
import numpy
import utils
import preprocessing
import configuration as conf
# Map configuration-file layer type names to Keras layer classes.
nn_layers = {
    'lstm': kr.layers.LSTM,
    'gru': kr.layers.GRU,
    'dense': kr.layers.Dense
}
# Map configuration-file optimizer names to Keras optimizer classes.
optimizers = {
    'sgd': kr.optimizers.SGD,
    'adam': kr.optimizers.Adam
}
def mlp(train, test):
    # Train a feed-forward classifier on `train` and evaluate on `test`.
    # Both arrays hold features in every column but the last; the last
    # column is the integer label. Layers and the optimizer come from the
    # `configuration` module. Returns (confidences, predictions, labels).
    # NOTE: this file is Python 2 (print statements below).
    model = kr.models.Sequential()
    # model.add(kr.layers.Conv1D(2,5,strides=1, padding='valid',input_shape=(train.shape[1]-1)))
    model.add(kr.layers.InputLayer(input_shape=(train.shape[1] - 1,)))
    for layer in conf.mlp_layers:
        model.add(nn_layers[layer['type']](**layer['params']))
    # Output: one softmax unit per damage type plus the "no damage" class.
    model.add(kr.layers.Dense(units=len(conf.damage_types) + 1, activation='softmax'))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer=optimizers[conf.optimizer['type']](**conf.optimizer['params']))
    model.fit(train[:, :-1], kr.utils.np_utils.to_categorical(train[:, -1]), epochs=conf.epochs,
              batch_size=conf.batch_size, shuffle=True)
    print "Evaluating testing set..."
    confidences = model.predict(test[:, :-1])
    predictions = [numpy.argmax(x) for x in confidences]
    return confidences, predictions, test[:, -1].astype(int).tolist()
def lstm(train, train_lengths, test, test_lengths):
    # Train a recurrent (LSTM/GRU) classifier on windowed sequences.
    # `*_lengths` give per-sequence lengths used by the batch generator.
    # Returns (confidences, predictions, true labels) over full test batches.
    model = kr.models.Sequential()
    model.add(kr.layers.InputLayer(input_shape=(conf.look_back, train.shape[1] - 1)))
    for layer in conf.rnn_layers:
        # Optionally wrap the recurrent layer in a Bidirectional wrapper.
        if 'bidir' in layer and layer['bidir'] == True:
            model.add(kr.layers.Bidirectional(nn_layers[layer['type']](**layer['params']), **layer['bidir_param']))
        else:
            model.add(nn_layers[layer['type']](**layer['params']))
    model.add(kr.layers.Dense(units=len(conf.damage_types) + 1, activation='softmax'))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer=optimizers[conf.optimizer['type']](**conf.optimizer['params']))
    model.fit_generator(preprocessing.batch_generator(train, train_lengths), epochs=conf.epochs,
                        steps_per_epoch=utils.samples_per_epoch(train_lengths) / conf.batch_size)
    generator = preprocessing.batch_generator(test, test_lengths)
    test_labels = []
    confidences = []
    print "Evaluating testing set..."
    # Evaluate batch by batch; labels are one-hot, so argmax recovers them.
    for x in range(utils.samples_per_epoch(test_lengths) / conf.batch_size):
        batch = next(generator)
        confidences.extend(model.predict_on_batch(batch[0]))
        test_labels.extend(numpy.argmax(x) for x in batch[1])
    predictions = [numpy.argmax(x) for x in confidences]
    return confidences, predictions, test_labels
def SVM(train, test, cost=1.0, ker='rbf', verb=False):
    # Train a one-vs-rest SVM on `train` (last column = label) and return
    # (decision-function confidences, predictions, true labels) for `test`.
    clf = svm.SVC(C=cost, kernel=ker, decision_function_shape='ovr', verbose=verb)
    clf.fit(train[:, :-1], train[:, -1])
    # Renamed from `conf`, which shadowed the imported configuration module.
    confidences = numpy.array(clf.decision_function(test[:, :-1]))
    pred = numpy.array(clf.predict(test[:, :-1]))
    # BUG FIX: return the label column test[:, -1] (as mlp/lstm do),
    # not the feature columns test[:, :-1].
    return confidences, pred, test[:, -1].astype(int).tolist()
def random_forest(train, test):
    # Fit a RandomForest on `train` (last column = label) and evaluate on
    # `test`. The forest exposes no decision function here, so predictions
    # are returned in the confidences slot as well.
    clf = RandomForestClassifier()
    clf.fit(train[:, :-1], train[:, -1])
    pred = clf.predict(test[:, :-1])
    # BUG FIX: return the label column test[:, -1] (as mlp/lstm do),
    # not the feature columns test[:, :-1].
    return pred, pred, test[:, -1].astype(int).tolist()
def classification(train, test, train_lengths, test_lengths):
    """Dispatch to the classifier named by ``conf.classifier``.

    Returns whatever the selected classifier returns; an unknown name
    falls through and yields None, matching the original behaviour.
    """
    chosen = conf.classifier
    if chosen == 'mlp':
        return mlp(train, test)
    if chosen == 'lstm':
        return lstm(train, train_lengths, test, test_lengths)
    if chosen == 'svm':
        return SVM(train, test)
    if chosen == 'rf':
        return random_forest(train, test)
| true
|
ed398b9242baea0828f041b66f988066aec24ec6
|
Python
|
dorlivne/Segmentation
|
/Augmentations.py
|
UTF-8
| 3,415
| 2.828125
| 3
|
[] |
no_license
|
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import random
import matplotlib.pyplot as plt
from configs import config
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 640
def flip_randomly(image, seg):
    """Apply the same random horizontal/vertical flips to image and mask.

    A threshold is drawn once; each flip fires when a fresh uniform draw
    exceeds it, so both flips share one per-call probability.
    """
    threshold = random.random()
    if random.random() > threshold:
        image, seg = np.fliplr(image), np.fliplr(seg)
    if random.random() > threshold:
        image, seg = np.flipud(image), np.flipud(seg)
    return image, seg
def jitter_image(train_batch, train_seg_batch):
    """Randomly augment a batch of images and segmentation masks.

    Each image/mask pair receives the same transform: elastic deformation
    (with probability config.elastic_threshold) followed by random flips.
    Returns float32 arrays of shape (batch, IMAGE_HEIGHT, IMAGE_WIDTH, 1).
    """
    height = IMAGE_HEIGHT
    width = IMAGE_WIDTH
    PLOT = False  # debug flag: display intermediate images when True
    jitter_images = np.zeros((train_batch.shape[0], height, width, 1))
    jitter_seg = np.zeros((train_batch.shape[0], height, width, 1))
    for i, image in enumerate(train_batch):
        seg = train_seg_batch[i]
        if PLOT:
            imshow_noax(image.squeeze())
            imshow_noax(seg.squeeze())
        # Elastic deformation is applied stochastically per sample.
        if random.random() < config.elastic_threshold:
            transformed_image, transformed_seg = elastic_transformation(image=image.squeeze(), seg=seg.squeeze())
        else:
            transformed_image = image.squeeze()
            transformed_seg = seg.squeeze()
        if PLOT:
            imshow_noax(transformed_image)
            imshow_noax(transformed_seg)
        distorted_image, distorted_seg = flip_randomly(transformed_image, transformed_seg)
        if PLOT:
            imshow_noax(distorted_image)
            imshow_noax(distorted_seg)
        # Restore the trailing channel dimension expected by the network.
        jitter_images[i] = np.expand_dims(distorted_image, -1)
        jitter_seg[i] = np.expand_dims(distorted_seg, -1)
    return jitter_images.astype(np.float32), jitter_seg.astype(np.float32)
def elastic_transformation(image, seg, alpha=None, sigma=8, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Applies the same smooth random displacement field to `image` and `seg`.

    :param alpha: displacement magnitude; when None a value is drawn
        uniformly from [0, 8) *per call*. (BUG FIX: the original default
        ``np.random.random(1)*8`` was evaluated once at import time,
        freezing alpha for every subsequent call.)
    :param sigma: Gaussian smoothing of the raw displacement field.
    :param random_state: optional np.random.RandomState for reproducibility.
    """
    assert len(image.shape) == 2
    if alpha is None:
        alpha = np.random.random(1) * 8
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    # Smooth per-pixel displacements in [-alpha, alpha].
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
    # Bilinear resampling (order=1) of both image and mask at the warped grid.
    transformed_image = map_coordinates(image, indices, order=1).reshape(shape)
    transformed_seg = map_coordinates(seg, indices, order=1).reshape(shape)
    return transformed_image, transformed_seg
def imshow_noax(img, normalize=False):
    """ Tiny helper to show images remove axis labels """
    if normalize:
        img_max, img_min = np.max(img), np.min(img)
        # NOTE(review): 55.0 looks like a typo for 255.0 (8-bit scaling) —
        # confirm the intended range before changing.
        img = 55.0 * (img - img_min) / (img_max - img_min)
    plt.imshow(img)
    plt.gca().axis('off')
    plt.show()
| true
|
25a3f4d1d009ed9ed88f9d2d20c5eb3a23b7a080
|
Python
|
Cristiandsh/Scaffold
|
/hello.py
|
UTF-8
| 127
| 3
| 3
|
[] |
no_license
|
def toyou(x):
    """Print a greeting for *x*; returns None (print's return value)."""
    greeting = "hi %s" % x
    return print(greeting)
def add(x):
    """Return *x* increased by two."""
    result = x + 2
    return result
def subtract(x):
    """Return *x* decreased by one."""
    result = x - 1
    return result
toyou(2)
| true
|
8d759f2fa9a5e38bda7d072809fe19310855023d
|
Python
|
TheWinch/flask-tuto
|
/tests/test_customer_api.py
|
UTF-8
| 1,802
| 2.640625
| 3
|
[] |
no_license
|
from flask import json
from app.apis.customer_api import customer_model
from app.models import Customer
from tests.base import FlaskTestCase, BasicAPITester
class TestCustomerApi(FlaskTestCase, BasicAPITester):
    """API tests for /api/customers/ (generic CRUD checks come from
    BasicAPITester; this class adds name-filter and serialization tests)."""
    def setup(self):
        # Configure the generic BasicAPITester fixtures for the Customer API.
        FlaskTestCase.setup(self)
        self.api_endpoint = '/api/customers/'
        self.item_cls = Customer
        self.reference_item = dict(firstname='Vincent', lastname='Girard-Reydet', email='vgr@test.com', phone='0102030405')
        self.alternate_item = dict(firstname='Toto', lastname='Bozo', email='toto@bozo.com', phone='0123456789')
        self.json_to_db = customer_model
    def test_can_filter_customers_by_name(self):
        # Only the customer whose last name contains the filter should match.
        reference = self.create_in_db(self.reference_item)
        self.create_in_db(self.alternate_item)
        response = self.app.get(self.api_endpoint + '?name=Girard')
        assert response.status_code == 200, 'Should be able to filter by valid part of last name'
        assert json.loads(response.data) == [self.enrich_with_id(self._to_api(self.reference_item), reference.id)], 'Should receive a JSON array with a unique element matching the DB content'
    def test_can_pass_non_matching_filter(self):
        # A filter matching nobody must still return 200 with an empty list.
        self.create_in_db(self.reference_item)
        self.create_in_db(self.alternate_item)
        response = self.app.get(self.api_endpoint + '?name=invalid_part')
        assert response.status_code == 200, 'Should be able to filter by invalid part of last name'
        assert json.loads(response.data) == []
    def test_conversion(self):
        # Smoke-test field serialization for every non-generated model field.
        for key, field in customer_model.items():
            if not key in ['id', 'uri']:
                thefield = field() if isinstance(field, type) else field
                print(key + ' => ' + str(thefield.format(thefield.output(key=key, obj=self.reference_item))))
| true
|
869a964f41eb432e5b292729ee9465791a09b830
|
Python
|
BYU-University/Robot_Soccer
|
/src/robot_soccer/scripts/mat.py
|
UTF-8
| 2,710
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/python
from numpy import matrix
from numpy import linalg
import math
#define s1,s2,s3
# Robot geometry (Python 2 script): three omni-wheels mounted at -60, +60
# and 180 degrees around the body centre.
realWorldOffset = 1 #1.698
s = .0282977488817 #radius of wheel
r = .092 #radius from center to center of wheel
# Wheel mount positions (r*x, r*y) at each mounting angle.
r1theta = -math.pi/3.0
r1x = math.cos(r1theta)*r
r1y = math.sin(r1theta)*r
r2theta = math.pi/3.0
r2x = math.cos(r2theta)*r
r2y = math.sin(r2theta)*r
r3theta = math.pi
r3x = math.cos(r3theta)*r
r3y = math.sin(r3theta)*r
print "this prints info from Mat values of r1,r2,r3 in x,y direction"
print r1x
print r1y
print r2x
print r2y
print r3x
print r3y
print "finish printing the directions"
# Drive directions: each wheel drives perpendicular to its mounting angle.
s1theta = r1theta - math.pi/2
s1x = math.cos(s1theta)
s1y = math.sin(s1theta)
s2theta = r2theta - math.pi/2
s2x = math.cos(s2theta)
s2y = math.sin(s2theta)
s3theta = r3theta - math.pi/2
s3x = math.cos(s3theta)
s3y = math.sin(s3theta)
print "here for s1,s2,s3 in x,y direction"
print s1x
print s1y
print s2x
print s2y
print s3x
print s3y
# Kinematic matrix: rows pair each wheel's drive direction with its moment
# arm; M (scaled by 1/wheel-radius) maps body (vx, vy, omega) to wheel speeds.
mSub = matrix( [[s1x,s1y,(s1y*r1x - s1x*r1y)],
                [s2x,s2y,(s2y*r2x - s2x*r2y)],
                [s3x,s3y,(s3y*r3x - s3x*r3y)]] )
print "this is the MSub MAtrix",mSub
M = realWorldOffset*(1.0/s)*mSub
#this is the rotation matrix where turns in the x and y directions
R = lambda theta: matrix( [[math.cos(theta),math.sin(theta),0.0],
                           [-math.sin(theta),math.cos(theta),0.0],
                           [0.0,0.0,1.0]] )
print "this is the M value:", M
#here starts the defs
#this return a tuple with x, y, omega(or theta)
def getRobotXYOmega(x,y,omega,theta):
    """Rotate the desired world-frame velocity (x, y, omega) into the robot
    frame using rotation matrix R(theta); returns a 3x1 numpy matrix."""
    desired = matrix( [[x],
                       [y],
                       [omega]] )
    desired = R(theta)*desired
    return desired
def getRobotXYOmegatest(x,y,omega):
    """Rotate (x, y, 0) by angle `omega`. Note: the angular velocity is
    forced to zero (see comment below) and `omega` is reused as the
    rotation angle passed to R."""
    #for now, I changed omega=0
    omegaZero = 0
    desired = matrix( [[x],
                       [y],
                       [omegaZero]] )
    desired = R(omega)*desired
    return desired
def getWheelVel(x,y,omega):
    """Map robot-frame velocity (x, y, omega) to the three wheel velocities
    via the kinematic matrix M; returns them as a 3-tuple of floats."""
    desired = matrix( [[x],
                       [y],
                       [omega]] )
    result = M*desired
    return result.getA()[0][0], result.getA()[1][0], result.getA()[2][0]
def getWheelVelTheta(x,y,omega,theta):
    """World-frame variant: rotate (x, y, omega) by theta first, then map
    through M; returns the three wheel velocities as a 3-tuple."""
    desired = getRobotXYOmega(x, y, omega, theta)
    result = M*desired
    return result.getA()[0][0], result.getA()[1][0], result.getA()[2][0]
def getRobotXYOmegaTheta(x,y,omega,theta):
    """Rotate (x, y, omega) by theta into the robot frame.
    NOTE(review): duplicates getRobotXYOmega above — consider removing one."""
    desired = matrix( [[x],
                       [y],
                       [omega]] )
    desired = R(theta)*desired
    #print "this is GetRobotXYOmega",desired
    return desired
def getWheelVelOmega(x,y,Omega):
    """Rotate (x, y, 0) by `Omega` (via getRobotXYOmegatest) and map through
    M; returns the three wheel velocities as a 3-tuple."""
    desired = getRobotXYOmegatest(x, y,Omega)
    result = M*desired
    #print "this is getRobotXYOMEGAASTuple",result
    return result.getA()[0][0], result.getA()[1][0], result.getA()[2][0]
| true
|
1afe668ba940b09e16c63a1df4ad361a0ec971d7
|
Python
|
sidorkinandrew/stepik
|
/course-4852-introToDSandML/lesson-1.5-step-6.py
|
UTF-8
| 336
| 2.53125
| 3
|
[] |
no_license
|
# Download the StudentsPerformance dataset from Stepik and print the
# fraction of students on a free/reduced-price lunch.
import requests, zipfile, io
import pandas as pd
import numpy as np
dataset_url = 'https://stepik.org/media/attachments/course/4852/StudentsPerformance.csv'
r = requests.get(dataset_url)
df = pd.read_csv(io.BytesIO(r.content))
# count share of 'free/reduced'-lunch students
# (summing the boolean mask divided by len(df) equals count/len)
print(np.sum(df['lunch'].isin(['free/reduced']) / len(df)))
| true
|
9240c0f92984232f8581236979e3db63b0731081
|
Python
|
kartikanand/wikilooper-cli
|
/main.py
|
UTF-8
| 1,123
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# Follow Wikipedia's "first link" chain: starting from a user-supplied
# topic, repeatedly open the article and jump to the first
# non-parenthesised link in its lead paragraph until "Philosophy".
import requests
from bs4 import BeautifulSoup
args = input("Enter Starting wiki topic : ")
while(args != "Philosophy"):
    print(args)
    wiki_url = "http://en.wikipedia.org/wiki/"
    r = requests.get(wiki_url+args)
    if r.status_code != 200:
        print(args + "Not a valid wiki link")
        break
    data = r.text
    soup = BeautifulSoup(data)
    div = soup.find_all(id="mw-content-text")[0]
    para = None
    # The first <p> child of the content div is the lead paragraph.
    for child in div.children:
        if child.name == 'p':
            para = child
            break
    if para is None:
        break
    #print(para)
    # Track parenthesis depth so links inside "(...)" are skipped.
    bracket = 0
    next_link = None
    for tag in para.children:
        if tag.name is None and tag.string is not None:
            if '(' in tag.string.strip():
                bracket = bracket + 1
            if ')' in tag.string.strip():
                bracket = bracket - 1
        if tag.name == 'a' and bracket == 0:
            next_link = tag['href']
            break
    # The href looks like /wiki/<Topic>; keep the last path segment.
    if next_link is not None:
        args = next_link.split("/")[-1]
    else:
        break
print(args)
| true
|
c9e0c14c6fb60472284003b42cf859c92e0bbe91
|
Python
|
majo-z/ChessProject
|
/src/main/python/chess-bot/tests.py
|
UTF-8
| 2,460
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
from game import *
from pieces import *
import unittest
def init_board():
    """Return a Board populated with the standard chess starting position.

    Squares map to '<w|b><Piece>' strings; ranks 1/2 hold white pieces,
    ranks 7/8 hold black pieces.
    """
    return Board({
        "G1": "wKnight",
        "G2": "wPawn",
        "E1": "wQueen",
        "E2": "wPawn",
        "C1": "wBishop",
        "C2": "wPawn",
        "A1": "wRook",
        "G7": "bPawn",
        "A2": "wPawn",
        "G8": "bKnight",
        "E7": "bPawn",
        "E8": "bQueen",
        "C7": "bPawn",
        "C8": "bBishop",
        "A7": "bPawn",
        "A8": "bRook",
        "H1": "wRook",
        "H2": "wPawn",
        "F1": "wBishop",
        "F2": "wPawn",
        "D1": "wKing",
        "D2": "wPawn",
        "B1": "wKnight",
        "H7": "bPawn",
        "B2": "wPawn",
        "H8": "bRook",
        "F7": "bPawn",
        "F8": "bBishop",
        "D7": "bPawn",
        "D8": "bKing",
        "B7": "bPawn",
        "B8": "bKnight"
    })
class BoardTest(unittest.TestCase):
    """Unit tests for Board piece lookup, move application and undo."""
    def test_get_piece(self):
        # Pieces are addressed by square name; empty squares yield None.
        board = init_board()
        self.assertEqual(board.get_piece("B8").name, "Knight")
        self.assertEqual(board.get_piece("D7").name, "Pawn")
        self.assertEqual(board.get_piece("C3"), None)
    def test_make_move(self):
        # Moving a piece vacates the origin and occupies the target square.
        board = init_board()
        pawn = board.get_piece("B7")
        board.make_move(Move("B7", "B6"))
        self.assertTrue(board.pos_is_empty("B7"))
        self.assertEqual(board.get_piece("B6"), pawn)
        knight = board.get_piece("B1")
        board.make_move(Move("B1", "C3"))
        self.assertTrue(board.pos_is_empty("B1"))
        self.assertEqual(board.get_piece("C3"), knight)
    def test_undo_move(self):
        # Undoing both moves (most recent first) restores the position.
        board = init_board()
        pawn = board.get_piece("B7")
        board.make_move(Move("B7", "B6"))
        knight = board.get_piece("B1")
        board.make_move(Move("B1", "C3"))
        board.undo_move()
        board.undo_move()
        self.assertTrue(board.pos_is_empty("B6"))
        self.assertEqual(board.get_piece("B7"), pawn)
        self.assertTrue(board.pos_is_empty("C3"))
        self.assertEqual(board.get_piece("B1"), knight)
class PieceTest(unittest.TestCase):
    """Move-generation tests for individual pieces."""
    def test_bishop_moves(self):
        board = init_board()
        bishop = board.get_piece("C8")
        # In the starting position the bishop is blocked by its own pawns.
        print(bishop.moves)  # NOTE(review): leftover debug print
        self.assertEqual(len(bishop.moves), 0)
        bishop = Bishop(board, Colour.WHITE)
        board.set_at("H4", bishop)
        # The test asserts 4 legal moves for a white bishop placed on H4.
        self.assertEqual(len(bishop.moves), 4)
if __name__ == "__main__":
unittest.main()
| true
|
5d7edd3a69dbb2f7a0da566e0d505bd71e4d6d82
|
Python
|
RickyHuo/leetcode
|
/python/python2/minimum-index-sum-of-two-lists.py
|
UTF-8
| 590
| 3.375
| 3
|
[] |
no_license
|
class Solution(object):
    def findRestaurant(self, list1, list2):
        """
        :type list1: List[str]
        :type list2: List[str]
        :rtype: List[str]

        Return the common strings with the least index sum (index in
        list1 + index in list2); ties are all returned, in list2 order.
        """
        # BUG FIX: the original returned *every* common string and ignored
        # index sums entirely (its `item` variable was dead). Track the
        # minimal i + j with a single pass over list2.
        index_in_1 = {name: i for i, name in enumerate(list1)}
        best_sum = len(list1) + len(list2)  # larger than any possible i + j
        best = []
        for j, name in enumerate(list2):
            i = index_in_1.get(name)
            if i is None:
                continue
            if i + j < best_sum:
                best_sum = i + j
                best = [name]
            elif i + j == best_sum:
                best.append(name)
        return best
if __name__ == '__main__':
print Solution().findRestaurant(["Shogun", "Tapioca Express", "Burger King", "KFC"], ["KFC", "Shogun", "Burger King"])
| true
|
c1df504737ff558bd8603a4baba322f8f78593c3
|
Python
|
franloza/hackerrank
|
/warmup/acm.py
|
UTF-8
| 944
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/python3
from itertools import combinations
import os
# Complete the acmTeam function below.
def acmTeam(topic):
    """Return (max topics known by any two-person team, number of such teams).

    `topic` is a list of equal-length binary strings; a team knows a topic
    when either member's bit is set, so a team's topic count is the
    popcount of the bitwise OR of the two members' masks.
    """
    # Convert each binary string to an int once: pairwise work then becomes
    # a single OR + popcount instead of a per-character Python loop.
    masks = [int(t, 2) for t in topic]
    n_combinations = 0
    max_n_topics = 0
    for a, b in combinations(masks, 2):
        n_topics = bin(a | b).count("1")
        if n_topics > max_n_topics:
            max_n_topics = n_topics
            n_combinations = 1
        elif n_topics == max_n_topics:
            n_combinations += 1
    return max_n_topics, n_combinations
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nm = input().split()
n = int(nm[0])
m = int(nm[1])
topic = []
for _ in range(n):
topic_item = input()
topic.append(topic_item)
result = acmTeam(topic)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| true
|
d1224573eeafde63dcfbfc70b35f44b32244279d
|
Python
|
snow-king/software-engineering
|
/Task_one/sum_and_multiplication.py
|
UTF-8
| 1,773
| 4.21875
| 4
|
[] |
no_license
|
import re
from functools import reduce
class Num(object):
    """Read a whitespace-separated sequence of integers and compute their
    sum or product, interactively or from a string."""

    # Class-level defaults; instances overwrite these when computing.
    sumNumbers = 0
    multiplyNumbers = 1

    def __init__(self):
        self.numbers = []

    # I know what is so stupid , but I needed to add this method :D
    def sum(self):
        """Sum the stored numbers; cache and return the result."""
        self.sumNumbers = sum(self.numbers)
        return self.sumNumbers

    def multiplication(self):
        """Multiply the stored numbers; cache and return the result.

        BUG FIX: reduce() raises TypeError (not ValueError) on an empty
        sequence, so the original ``except ValueError`` could never fire
        and an empty input crashed. Seeding reduce with the multiplicative
        identity makes the empty case return 1 instead.
        """
        self.multiplyNumbers = reduce(lambda x, y: x * y, self.numbers, 1)
        return self.multiplyNumbers

    def getNumbers(self, answer):
        """Parse a whitespace-separated string of ints into self.numbers;
        on invalid input print a message and leave the list unchanged."""
        try:
            self.numbers = list(map(int, answer.split()))
        except ValueError:
            print("input string the input string contains letters")

    def getNumbersInput(self):
        """Prompt (recursively) until a valid digits-and-spaces line is given."""
        print("Please, enter a sequence of numbers separated by a space \nExample: 1 2 3 4 5")
        answer = input() + ' '
        # print(re.search(r'[a-zA-Z]+', answer))
        if re.match(r'\d+\s+', answer) and not re.search(r'[a-zA-Z]+', answer):
            self.numbers = list(map(int, answer.split()))
            print(self.numbers)
        else:
            self.getNumbersInput()

    def run(self):
        """Interactive menu: 1 -> print the sum, 2 -> print the product."""
        print("Please select : \n1. Sum \n2. Multiplication")
        answer = str(input())
        if answer == "1":
            self.sum()
            print(self.sumNumbers)
        elif answer == "2":
            self.multiplication()
            print(self.multiplyNumbers)
        else:
            print("unknown request (。╯︵╰。) \nplease enter correct data ")
example = Num()
example.getNumbers("1 2 3 4 5")
| true
|
75de723c91b7111376a7226827f34798d6d38d6e
|
Python
|
TermanEmil/CartpoleV1_OpenAIGym
|
/ddqn_carpole/train.py
|
UTF-8
| 4,276
| 2.6875
| 3
|
[] |
no_license
|
import random
import gym
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from collections import deque
# Constants
c_env_name = "CartPole-v1"
c_max_nb_of_steps = 2000
c_discount_rate = 0.99
c_learning_rate = 0.001
c_memory_size = 20000
c_batch_size = 64
# Required memory to start training
c_mem_len_train_start = 1000
c_exploration_max = 1.0
c_exploration_min = 0.01
c_exploration_decay = 0.999
# Globals
g_state_size = None
g_action_size = None
g_env = None
g_memory = deque(maxlen=c_memory_size)
g_model = None
g_target_model = None
g_epsilon = c_exploration_max
def build_model():
    """Build the Q-network: two 24-unit ReLU hidden layers mapping a state
    vector to one linear Q-value per action; trained with Adam + MSE."""
    model = Sequential()
    model.add(Dense(24, activation='relu', input_dim=g_state_size, kernel_initializer='he_uniform'))
    model.add(Dense(24, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(g_action_size, kernel_initializer='he_uniform'))
    model.compile(Adam(lr=c_learning_rate), 'mse')
    return model
def update_target_model():
    """Copy the online network's weights into the target network."""
    g_target_model.set_weights(g_model.get_weights())
def get_action(state):
    """Epsilon-greedy action selection: random action with probability
    g_epsilon, otherwise the argmax of the online network's Q-values."""
    if np.random.rand() < g_epsilon:
        return g_env.action_space.sample()
    else:
        q_values = g_model.predict(state)[0]
        return np.argmax(q_values)
def append_memory(state, action, reward, next_state, done):
    """Store one transition in the replay memory and decay exploration."""
    global g_epsilon
    g_memory.append((state, action, reward, next_state, done))
    if g_epsilon > c_exploration_min:
        # Multiplicative decay, clamped at the exploration floor.
        g_epsilon = max(g_epsilon * c_exploration_decay, c_exploration_min)
def can_train():
    """True once the replay memory holds enough transitions to both warm up
    and sample a full minibatch."""
    return len(g_memory) >= max(c_mem_len_train_start, c_batch_size)
def train_model():
    """Sample a random minibatch from replay memory and perform one
    Double-DQN gradient step on the online network (no-op until the
    memory is warm enough — see can_train)."""
    if not can_train():
        return
    mini_batch = random.sample(g_memory, c_batch_size)
    update_input = np.zeros((c_batch_size, g_state_size))
    update_target = np.zeros((c_batch_size, g_state_size))
    action, reward, done = [], [], []
    # Unpack the sampled (state, action, reward, next_state, done) tuples.
    for i in range(c_batch_size):
        update_input[i] = mini_batch[i][0]
        action.append(mini_batch[i][1])
        reward.append(mini_batch[i][2])
        update_target[i] = mini_batch[i][3]
        done.append(mini_batch[i][4])
    target = g_model.predict(update_input)
    target_next = g_model.predict(update_target)
    target_val = g_target_model.predict(update_target)
    # Overwrite only the taken action's Q-value with the bootstrap target.
    for i in range(c_batch_size):
        if done[i]:
            target[i][action[i]] = reward[i]
        else:
            # The key point of Double DQN:
            # Selection of action is from model
            # Update is from target model
            a = np.argmax(target_next[i])
            target[i][action[i]] = reward[i] + c_discount_rate * target_val[i][a]
    g_model.fit(
        update_input,
        target,
        batch_size=c_batch_size,
        epochs=1,
        verbose=0)
def reshape_state(state):
    """Reshape a flat observation into a (1, state_size) batch of one."""
    return np.reshape(state, [1, g_state_size])
def run_episode():
    """Play one episode, storing transitions and training after each step.

    Returns the episode length (score). A terminal state reached before the
    step cap is punished with reward -100 (reward shaping).
    """
    done = False
    score = 0
    state = reshape_state(g_env.reset())
    while not done:
        g_env.render()
        action = get_action(state)
        next_state, reward, done, info = g_env.step(action)
        next_state = reshape_state(next_state)
        if done and score < c_max_nb_of_steps - 1:
            reward = -100
        append_memory(state, action, reward, next_state, done)
        train_model()
        score += 1
        state = next_state
    return score
if __name__ == '__main__':
g_env = gym.make(c_env_name)
g_env._max_episode_steps = c_max_nb_of_steps
g_state_size = g_env.observation_space.shape[0]
g_action_size = g_env.action_space.n
g_model = build_model()
g_target_model = build_model()
scores = []
for episode in range(10000):
update_target_model()
score = run_episode()
scores.append(score)
score_mean = float(np.mean(scores[-min(10, len(scores)):]))
print(
"episode: %3d, score: %4d, epsilon: %.4f, mean: %4.2f" %
(episode, score, g_epsilon, score_mean))
# if the mean of scores of last 10 episode is bigger than 80%
# stop training
if score_mean > 0.8 * c_max_nb_of_steps:
g_model.save('trained_models/trained_v0.h5')
exit(0)
| true
|
5c6aa71d008eff3699a90ee122ff1b6a188f047f
|
Python
|
0xDmtri/Gradient_Descent
|
/GradientDescent/usage_examples.py
|
UTF-8
| 2,842
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
from GradientDescent.NesterovDescent import NesterovAcceleratedGradient
from GradientDescent.CoordinateDescent import CoordinateGradientDescent
from GradientDescent.SteepestDescent import SteepestGradientDescent
""" Three-Hump Camel Function (thcf) is taken in order to find global minimum
and demostrate the capability of the algorithm.
This particular function depends on two variables and features a valley.
"""
def thcf(x):
    """Three-Hump Camel function evaluated at the point x = (x1, x2).

    This is the objective to minimise; its global minimum is at the origin.
    """
    x1, x2 = x[0], x[1]
    value = 2 * (x1 ** 2) - 1.05 * (x1 ** 4) + (x1 ** 6) / 6 + x1 * x2 + (x2 ** 2)
    return value
def thcf_partial1(x):
    """Partial derivative of the Three-Hump Camel function w.r.t. x1."""
    x1, x2 = x[0], x[1]
    return x2 + 4 * x1 - 4.2 * (x1 ** 3) + (x1 ** 5)
def thcf_partial2(x):
    """Partial derivative of the Three-Hump Camel function w.r.t. x2."""
    doubled_x2 = 2 * x[1]
    return x[0] + doubled_x2
# Nesterov Accelerated Gradient Usage Examples:
# All three optimisers share the same positional setup: start point,
# and two further [5, 5] vectors, then step size, tolerance, plot
# interval, and iteration cap (NAG also takes a momentum of 0.4).
nag = NesterovAcceleratedGradient([5, 5], [5, 5], [5, 5],
                                  1e-3, 1e-6, 1, 10000, 0.4)
graph_full_function_via_nag_class = nag.graph_full_function(thcf)
graph_interval_function_via_nag_class = nag.graph_interval_function(thcf)
# Minimise thcf using its two analytic partial derivatives.
calc_nag = nag.calculate(thcf, thcf_partial1, thcf_partial2)
graph_full_nag = nag.graph_full_nesterov(thcf, thcf_partial1, thcf_partial2)
graph_partial_nag = nag.graph_partial_nesterov(thcf, thcf_partial1,
                                               thcf_partial2)
# Coordinate Gradient Descent Usage Examples:
cgd = CoordinateGradientDescent([5, 5], [5, 5], [5, 5],
                                1e-3, 1e-6, 1, 10000)
graph_full_function_via_cgd_class = cgd.graph_full_function(thcf)
graph_interval_function_via_cgd_class = cgd.graph_interval_function(thcf)
calc_cgd = cgd.calculate(thcf, thcf_partial1, thcf_partial2)
graph_full_cgd = cgd.graph_full_coordinate(thcf, thcf_partial1, thcf_partial2)
graph_partial_cgd = cgd.graph_partial_coordinate(thcf, thcf_partial1,
                                                 thcf_partial2)
# Steepest Gradient Descent Usage Examples:
sgd = SteepestGradientDescent([5, 5], [5, 5], [5, 5],
                              1e-3, 1e-6, 1, 10000)
graph_full_function_via_sgd_class = sgd.graph_full_function(thcf)
graph_interval_function_via_sgd_class = sgd.graph_interval_function(thcf)
calc_sgd = sgd.calculate(thcf, thcf_partial1, thcf_partial2)
graph_full_sgd = sgd.graph_full_steepest(thcf, thcf_partial1, thcf_partial2)
graph_partial_sgd = sgd.graph_partial_steepest(thcf, thcf_partial1,
                                               thcf_partial2)
| true
|
11efdc9d17e48f717d9ee7eeed28084c2707c84f
|
Python
|
lewis-cooper/SUVAT-Calculator
|
/suvat_calculator.py
|
UTF-8
| 4,510
| 3.484375
| 3
|
[] |
no_license
|
import math
'''
__ _______ __________ _____ ______ __ ________
/ / / / ___// ____/ __ \ / _/ | / / __ \/ / / /_ __/
/ / / /\__ \/ __/ / /_/ / / // |/ / /_/ / / / / / /
/ /_/ /___/ / /___/ _, _/ _/ // /| / ____/ /_/ / / /
\____//____/_____/_/ |_| /___/_/ |_/_/ \____/ /_/
'''
def user_input() -> float:
    """Read one SUVAT value from stdin.

    Returns the value as a float. An empty line means "unknown" and
    returns None (via ``print``, which returns None); any other
    non-numeric input re-prompts until a valid number is entered.
    """
    while True:
        try:
            inpt = input("") #Stores user input in inpt variable
            return float(inpt) #Tries to convert input to a float
        except ValueError: #Checks if user input fails to convert to a float
            if inpt == (""): #If user input fails to convert to float and is empty the program carries on
                return print("")  # print returns None, marking the value unknown
            else: #If user input fails to convert to float and isn't empty then the program asks for a new value
                print("Input only accepts decimal numbers.")
#Storing values for each of the SUVAT variables
# Each variable is a float, or None when the user left it blank (unknown).
print('please enter a value for s:')
s = user_input()
print('please enter a value for u:')
u = user_input()
print('please enter a value for v:')
v = user_input()
print('please enter a value for a:')
a = user_input()
print('please enter a value for t:')
t = user_input()
#Result messages
# %%-style templates used when printing each solved quantity with its unit.
s_result = 'The value of s is %.3f m'
u_result = 'The value of u is %.3f m/s'
v_result = 'The value of v is %.3f m/s'
a_result = 'The value of a is %.3f m/s^2'
t_result = 'The value of t is %.3f s'
'''
_________ __ ________ ____ ___ __________ ____
/ ____/ | / / / ____/ / / / / / |/_ __/ __ \/ __ \
/ / / /| | / / / / / / / / / / /| | / / / / / / /_/ /
/ /___/ ___ |/ /___/ /___/ /_/ / /___/ ___ |/ / / /_/ / _, _/
\____/_/ |_/_____/\____/\____/_____/_/ |_/_/ \____/_/ |_|
'''
#SUVAT EQUATIONS
# Solve for every unknown: first recover a second unknown from the three
# known values, then compute the primary unknown. Order matters — later
# branches rely on values filled in by earlier ones.
#When s == None
if s == None and u == None:
    u = v - a*t
    print(u_result % u)
elif s == None and v == None:
    v = u + a*t
    print(v_result % v)
elif s == None and a == None:
    a = (v-u)/t
    print(a_result % a)
elif s == None and t == None:
    t = (v-u)/a
    print(t_result % t)
if s == None:
    s=(u+v)*t*0.5
    print(s_result % s)
#When v == None
if v == None and u == None:
    u=(s/t)-0.5*a*t
    print(u_result % u)
elif v == None and a == None:
    a = (2*(s-t*u))/t**2
    print(a_result % a)
elif v == None and t == None:
    try:
        t = (math.sqrt(2*a*s+u**2)-u)/a #Checks whether the calculation is possible
        print(t_result % t) #If calculation is possible, the value is printed
    except ValueError: #If calculation is impossible, error message is printed
        print("")
if v == None:
    try: #Checks whether the calculation is possible
        v = u+a*t
        print(v_result % v) #If calculation is possible, the value is printed
    except TypeError:
        print("No real roots for v") #If calculation is impossible, error message is printed
#When a == None
if a == None and u == None:
    u = ((2*s)/t)-v
    print(u_result % u)
elif a == None and t == None:
    t = (2*s)/(u+v)
    print(t_result % t)
if a == None:
    a = (v-u)/t
    print(a_result % a)
#When t == None
if t == None and u == None:
    try:
        u = math.sqrt((v**2)-(2*a*s)) #Checks whether the calculation is possible
        print(u_result % u) #If calculation is possible, the value is printed
    except ValueError:
        print("No real roots for u") #If calculation is impossible, error message is printed
if t == None:
    try: #Checks whether the calculation is possible
        t = (v-u)/a #If calculation is possible, the value is printed
        print(t_result % t)
    except TypeError:
        print("Can not solve for t") #If calculation is impossible, error message is printed
#In memory of David Rogers
| true
|
af05cb578366f4d9f35c7504f18aeb9c05387069
|
Python
|
Zhangbeibei1991/Stock-Embeddings
|
/code/run.py
|
UTF-8
| 3,093
| 2.828125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 11 12:27:31 2021
@author: 莱克斯
"""
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from model import GRU,Mydata
from tool import train_test_split,get_sj,get_mj,gru_data
import random
# Fix every RNG so training runs are reproducible.
seed=2021
random.seed(seed)
#np.random.seed(seed)
torch.manual_seed(seed) # seed the CPU RNG
torch.cuda.manual_seed(seed) # seed the current GPU's RNG
torch.cuda.manual_seed_all(seed) # seed the RNGs of all GPUs
# NOTE(review): this "class" is used as a script — the entire training/eval
# pipeline runs in the class body at import time. Confirm whether it should
# be a plain module-level script or a real main() function.
class __init__():
    # Build train/test splits and the (weighted) market matrix M_j.
    train,test=train_test_split('../data/stock price/AAPL历史数据.csv')
    S_j=get_sj(train,test,eval_=True)
    M_j=get_mj(S_j,weighted=True)
    print('训练数据处理完毕')
    # Repeat the train/eval cycle 10 times and report accuracy each round.
    for t in range(10):
        train_x,train_y=gru_data(train,M_j)
        train_dataset=Mydata(train_x,train_y)
        test_x,test_y=gru_data(test,M_j)
        test_dataset=Mydata(test_x,test_y)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        input_size = 256   # 256-dimensional market vector
        hidden_size = 256  # hidden layer size
        num_classes = 2    # binary up/down prediction
        num_epochs = 50    # raised num_epochs to 50
        batch_size = 100   # size of each batch
        learning_rate = 0.001  # learning rate
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)
        test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
        # Build the GRU model
        model = GRU(input_size, hidden_size, num_classes).to(device)
        model.train()
        # Loss and optimizer
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        # Train the model
        total_step = len(train_loader)
        for epoch in range(num_epochs):
            for i, (data, labels) in enumerate(train_loader):
                # Move tensors to the configured device
                data = data.reshape(-1, 5, 256).to(device).to(torch.float32)
                labels = labels.to(device)
                # Forward pass
                outputs = model(data)
                loss = criterion(outputs, labels)
                # Backward pass and optimisation
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        # Evaluate the model
        model.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for m_v, labels in test_loader:
                m_v = m_v.reshape(-1, 5 , 256).to(device)
                labels = labels.to(device)
                outputs = model(m_v.to(torch.float32))
                pred = outputs.argmax(dim = 1)
                total += labels.size(0)
                correct += (pred == labels).sum().item()
            print('The {}th Accuracy of bi-gru on test dataset: {} %'.format(t+1,100 * correct / total))
| true
|
ea2d4f9c4e843f4bdfd51fb163f6e6623ee4a8fe
|
Python
|
pianowow/projecteuler
|
/121/121.py
|
UTF-8
| 1,108
| 3.375
| 3
|
[] |
no_license
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: CHRISTOPHER_IRWIN
# Project Euler 121: 15 turns of the disc game. Per-turn counts of
# blue discs, red discs, and total discs in the bag.
tries = 15
bluedraws = [0]*tries
reddraws = [0]*tries
totaldraws = [0]*tries
def binarystrings(n):
    """Yield every binary string of length ``n``, in ascending order.

    Recursively prefixes '0' then '1' onto each shorter string, so the
    output order matches counting from 0 to 2**n - 1.
    """
    if n == 1:
        yield '0'
        yield '1'
    else:
        for head in ('0', '1'):
            for tail in binarystrings(n - 1):
                yield head + tail
# One blue disc always; one extra red disc is added each turn.
blues = 1
reds = 1
for draw in range(tries):
    totaldraws[draw] = (blues+reds)
    reddraws[draw] = reds
    bluedraws[draw] = blues
    reds+=1
print (totaldraws)
print (reddraws)
print (bluedraws)
# Sum the probability of every outcome with more blue ('1') than red ('0').
winprob = 0
outcomes = list(binarystrings(tries))
for i,outcome in enumerate(outcomes):
    prob = 1
    if outcome.count('1') > outcome.count('0'):
        for trie,draw in enumerate(outcome):
            if draw == '1':
                prob *= bluedraws[trie]/totaldraws[trie]
            else:
                prob *= reddraws[trie]/totaldraws[trie]
        winprob += prob
print(winprob)
# Maximum whole-number payout that keeps the game profitable for the bank.
print('payout:',int(1/winprob))
| true
|
df18301908c36838a2b4f67b8dbbfdb973c2533d
|
Python
|
celsopa/theHuxley
|
/HUX - 2066.py
|
UTF-8
| 105
| 3.296875
| 3
|
[] |
no_license
|
# Read a count, then that many integers (one per line), and print their sum.
count = int(input())
values = [int(input()) for _ in range(count)]
print(sum(values))
| true
|
2e59a2eebe7ce8fb87a8c66b32f7b9fd2a0e0267
|
Python
|
namakemono-sub/Test-Repository
|
/modules/functions.py
|
UTF-8
| 1,223
| 2.765625
| 3
|
[] |
no_license
|
import sys
import json
import crayons
import datetime
config = open("conf/config.json","r",encoding="UTF-8")
config = json.load(config)
def now():
    """Return the current local wall-clock time formatted as HH:MM:SS."""
    current = datetime.datetime.now()
    return '{:%H:%M:%S}'.format(current)
def lang(key: str, value: str):
    """Look up the translated string ``lang[key][value]`` for the configured language.

    Reads ``lang/lang_ja.json`` or ``lang/lang_en.json`` depending on the
    global ``config["lang"]``. Exits the process (with a bilingual error
    message) when the language is unsupported or the key is missing.
    """
    try:
        if config["lang"] == "ja":
            lang = open("lang/lang_ja.json","r",encoding="UTF-8")
            lang = json.load(lang)
        elif config["lang"] == "en":
            lang = open("lang/lang_en.json","r",encoding="UTF-8")
            lang = json.load(lang)
        else:
            # Unsupported language code: tell the user in both languages, then quit.
            print(crayons.red("存在しない言語を指定されました、'ja','en'の中から選んでください。"))
            print(crayons.red("You have specified a language that does not exist, please choose one of 'ja' or 'en'"))
            sys.exit()
    except KeyError as e:
        # config.json is missing the "lang" key (e.g. after an update).
        print(crayons.red('lang ファイルの読み込みに失敗しました。キーの名前が間違っていないか確認してください。アップデート後の場合は、最新のlangファイルを確認してください。'))
        print(crayons.red(f'{str(e)} がありません。'))
        sys.exit()
    return lang[str(key)][str(value)]
| true
|
634e30904b133ba3a550d918d4e9c3b84fd9b83b
|
Python
|
rohitishu/SpyChat-AV
|
/spy_details1.py
|
UTF-8
| 1,010
| 2.984375
| 3
|
[] |
no_license
|
# PROJECT : *****SPY-CHAT***** ! [ ACADVIEW ] ||||| spy_details1.py
from datetime import datetime
# CLASS SPY WHICH CONTAINS ALL THE DETAILS OF THE SPY
class Spy:
    """A chat participant: identity details plus per-session chat state."""

    def __init__(self, name, salutation, rating, age):
        # Identity supplied by the caller.
        self.name, self.salutation = name, salutation
        self.rating, self.age = rating, age
        # Fresh session state: no chats yet, online, no status message.
        self.chats = []
        self.is_online = True
        self.current_status_message = None
# CLASS TO STORE THE CHATS
class ChatMessage:
    """A single chat message with its send time and average-word statistic."""

    def __init__(self, message, sent_by_me, avg_words):
        self.message = message
        self.sent_by_me = sent_by_me   # True when the local spy sent it
        self.time = datetime.now()     # timestamp captured at creation
        # Bug fix: the original compared with identity ("is not 0"), which
        # only works by accident for CPython's cached small ints; compare
        # by value instead so floats like 0.0 are also treated as zero.
        if avg_words != 0:
            self.avg_words = avg_words
        else:
            self.avg_words = 0
# The local user and their contacts.
spy = Spy('JAYRAJ', 'Mr.' , 4.9,20)
friend_one = Spy('Gaurav', 'Mr.', 4.9, 20)
friend_two = Spy('Harsh', 'Mr.', 4.39, 19)
friend_three = Spy('Aditya', 'Mr.', 4.95, 18)
friend_four = Spy('Rohit' , 'Mr.' , 4.5 , 20)
# NOTE(review): friend_four is created but not added to friends — confirm
# whether that omission is intentional.
friends = [friend_one, friend_two, friend_three]
| true
|
905b2316bd3a042d989f66af656ae3d7fa32369a
|
Python
|
henryiii/hepvector
|
/hepvector/numpyvector.py
|
UTF-8
| 15,380
| 3.078125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Vector classes
==============
Three vector classes are available:
* ``Vector2D`` : a 2-dimensional vector.
* ``Vector3D`` : a 3-dimensional vector.
* ``LorentzVector``: a Lorentz vector, i.e. a 4-dimensional Minkowski space-time vector
or a 4-momentum vector.
The metric is (-1,-1,-1,+1).
These are based on the base class ``Vector``, this can be subclassed to make new vectors with new metrics.
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
import numpy as np
import doctest
from .numbautils import overload
# -----------------------------------------------------------------------------
# Vector class (base)
# -----------------------------------------------------------------------------
def _add_names(cls):
    """Attach per-component accessors and basis vectors to a Vector subclass.

    For each entry in ``cls.NAMES`` this adds a property (e.g. ``.x``)
    that reads/writes row ``n`` of the underlying array, and an
    upper-case class attribute (e.g. ``X``) holding the unit basis
    vector along that component.
    """
    for n,x in enumerate(cls.NAMES):
        # Add .x, .y, etc. property setters/getters
        # (make_get_set freezes n per iteration to avoid late binding)
        def make_get_set(n):
            def getter(self):
                return self[n].view(np.ndarray)
            def setter(self, item):
                self[n] = item
            return getter, setter
        setattr(cls, x, property(*make_get_set(n)))
        # Add X, Y, etc basis vectors
        ze = [0]*len(cls.NAMES)
        ze[n] = 1
        setattr(cls, x.upper(), cls(*ze))
class Vector(np.ndarray):
    """Base n-dimensional vector stored as a numpy array.

    Components live along axis 0, so ``self[i]`` is the i-th component
    (which may itself be an array of values). Subclasses must define
    ``NAMES`` (component names) and may define ``METRIC`` for a
    non-Euclidean inner product.
    """
    # All valid subclasses must have NAMES in class
    __slots__ = ()
    def __new__(cls, *args, **kargs):
        # Components are broadcast against each other, then stacked on axis 0.
        dtype=kargs.get('dtype', np.double)
        if len(args)==0:
            args = np.zeros(len(cls.NAMES))
        args = [np.asarray(a).astype(dtype) for a in args]
        args = np.broadcast_arrays(*args)
        if hasattr(np, 'stack'): # Support 1.08, but 1.10 is better
            return np.stack(args).view(cls)
        else:
            args = [np.expand_dims(a,0) for a in args]
            return np.concatenate(args,0).view(cls)
    # Special constructors
    @classmethod
    def origin(cls):
        """Shortcut constructor for the origin (x=0.,y=0., ...).
        Equivalent to the default constructor.
        """
        return cls()
    @classmethod
    def from_pandas(cls, pd_dataframe):
        """Build a vector from a DataFrame with one column per NAMES entry."""
        items = (pd_dataframe[n] for n in cls.NAMES)
        return cls(*items)
    @classmethod
    def from_vector(cls, other):
        """Copy constructor."""
        return cls(*other)
    # Not including fromiterable because it is available directly as Vector(*iterable)
    def dot(self, other):
        '''
        Inner product with *other*, applying METRIC when defined.
        This currently returns a 1D array always.
        >>> v1 = Vector3D(1, 2, 3)
        >>> v2 = Vector3D(2, 3, 5)
        >>> v1p = Vector3D([1,2,3], [2,3,5], [3,3,1])
        >>> v2p = Vector3D([1,2,3], [3,5,2], [5,3,2])
        >>> v1.dot(v2)
        array([ 23.])
        >>> v1p.dot(v2p)
        array([ 22.,  28.,  21.])
        '''
        if hasattr(self.__class__, 'METRIC'):
            # Expand METRIC with trailing axes so it broadcasts over
            # any extra value dimensions.
            metric = self.METRIC.copy()
            for axis in range(len(self.shape) - 1):
                metric = np.expand_dims(metric, -1)
            return np.sum((np.asarray(self)*metric) * np.asarray(other), 0)
        else:
            return np.sum(np.asarray(self) * np.asarray(other), 0)
    @property
    def mag(self):
        '''
        Signed magnitude: sqrt(|mag2|) carrying the sign of mag2
        (mag2 can be negative with a Minkowski METRIC).
        This currently returns a 1D array always.
        >>> v1 = Vector3D(1, 2, 3)
        >>> v1p = Vector3D([1,2,3], [2,3,5], [3,3,1])
        >>> np.all(v1.mag == np.sqrt(14))
        True
        >>> v1p.mag
        array([ 3.74165739,  4.69041576,  5.91607978])
        '''
        return np.sqrt(np.abs(self.mag2))*np.sign(self.mag2)
    @property
    def mag2(self):
        '''
        Squared magnitude, i.e. self.dot(self) (metric-aware).
        >>> v1 = Vector3D(1, 2, 3)
        >>> v1p = Vector3D([1,2,3], [2,3,5], [3,3,1])
        >>> np.all(v1.mag2 == 14)
        True
        >>> v1p.mag2
        array([ 14.,  22.,  35.])
        >>> v = LorentzVector(1,2,3,.5)
        >>> v.mag2
        array([-13.75])
        '''
        return self.dot(self)
    @property
    def unit(self, inplace=False):
        # NOTE(review): properties cannot receive arguments through
        # attribute access, so the inplace=True branch is unreachable
        # via normal use — confirm whether it can be removed.
        if inplace:
            self /= self.mag
        else:
            return self / self.mag
    @property
    def T(self):
        # Transpose as a plain ndarray (a transposed Vector would put
        # components on the wrong axis).
        return super(Vector, self).T.view(np.ndarray)
    @T.setter
    def T(self, val):
        super(Vector, self).T = val
    def to_pd(self):
        '''Support for easy conversion to pandas'''
        import pandas as pd
        return pd.DataFrame({name:getattr(self, name) for name in self.NAMES})
    def angle(self, other, normal=None):
        'Angle between vectors, might not be normalized.'
        a = self.unit
        b = other.unit
        # Protection vs. round off error
        ang = np.arccos(np.clip(a.dot(b),-1,1))
        # Only defined for Vector3
        if normal is not None:
            ang *= np.sign(normal.dot(a.cross(b)))
        return ang
    def __array_finalize__(self, obj):
        # Reject views/slices whose first axis no longer matches NAMES.
        if self.shape[0] != len(self.NAMES):
            raise RuntimeError("Vectors must have the correct number of elements in the first diminsion, expected {0}, got {1}".format(len(self.NAMES), self.shape))
    def __array_wrap__(self, out_arr, context=None):
        "Correctly handle ufuncts"
        # Demote results that are no longer component-shaped to ndarray.
        if len(out_arr.shape) == 0 or out_arr.shape[0] != len(self.NAMES):
            out_arr = out_arr.view(np.ndarray)
        return np.ndarray.__array_wrap__(self, out_arr, context)
    def __getitem__(self, item):
        'I have chosen for x and [0] to be the same, to simplify calcs (a lot)'
        # Only full-first-axis slices ([:, ...] or [...]) keep the Vector
        # type; anything that selects components returns a plain ndarray.
        if (isinstance(item, tuple)
                and len(item)>0
                and ((isinstance(item[0], slice)
                      and item[0] == slice(None,None,None)
                     ) or (
                      len(item) < len(self.shape)
                      and item[0] is Ellipsis))):
            # If [:,...] then keep vector
            return super(Vector,self).__getitem__(item)
        elif isinstance(item, slice) and item == slice(None,None,None):
            return super(Vector,self).__getitem__(item)
        else:
            return self.view(np.ndarray).__getitem__(item)
    def __setitem__(self, item, value):
        self.view(np.ndarray).__setitem__(item, value)
    def __eq__(self, other):
        # Elementwise comparison (numpy semantics), not a single bool.
        return np.asarray(self) == np.asarray(other)
    @property
    def dims(self):
        """Number of components (length of NAMES)."""
        return len(self.__class__.NAMES)
    def _repr_html_(self):
        # Jupyter display: an HTML table with one column per component,
        # truncated after five rows.
        shape = self.shape[1:]
        shape_txt = " x ".join(map(str,shape))
        vals = np.reshape(self, (self.dims,-1))
        len_v = max(sum(shape),1)
        header = r"<h3> {0} ({1}) </h3>".format(self.__class__.__name__, shape_txt)
        header += "<table>"
        header += "<tr>"
        for name in self.__class__.NAMES:
            header += r"<td><b>{}</b></td>".format(name)
        header += r"</tr>"
        content = ""
        for i in range(min(len_v,5)):
            content += "<tr>"
            for name in self.__class__.NAMES:
                content += r"<td>{:.4}</td>".format(getattr(vals,name)[i])
            content += r"</tr>"
        if len_v > 5:
            for name in self.__class__.NAMES:
                content += r"<td> &bull; &bull; &bull; </td>"
        footer = r"</table>"
        return header + content + footer
class Vector2D(Vector):
    """A 2-dimensional Euclidean vector with components (x, y)."""
    __slots__ = ()
    NAMES = ('x', 'y')
    def __new__(cls, x=0, y=0, dtype=np.double):
        return Vector.__new__(cls, x, y, dtype=dtype)
    @property
    def phi(self):
        """Azimuthal angle atan2(y, x), in radians."""
        return np.arctan2(self.y, self.x).view(np.ndarray)
    @property
    def rho(self):
        """Magnitude of the (x, y) projection."""
        return self[:2].view(Vector2D).mag.view(np.ndarray)
    def angle(self, other):
        'Angle between two vectors'
        return super(Vector2D, self).angle(other)
    @property
    def pt2(self):
        'Transverse component squared'
        return self[:2].view(Vector2D).mag2.view(np.ndarray)
    @property
    def pt(self):
        'Transverse component'
        return self.rho
class Vector3D(Vector2D):
    """A 3-dimensional Euclidean vector with components (x, y, z)."""
    __slots__ = ()
    NAMES = ('x', 'y', 'z')
    def __new__(cls, x=0, y=0, z=0, dtype=np.double):
        return Vector.__new__(cls, x, y, z, dtype=dtype)
    def cross(self, other):
        """Cross product self × other as a new Vector3D."""
        return Vector3D(self.y*other.z - self.z*other.y,
                        self.z*other.x - self.x*other.z,
                        self.x*other.y - self.y*other.x)
    @property
    def theta(self):
        """Polar angle from the +z axis, in radians."""
        prep = np.sqrt(self.x*self.x + self.y*self.y)
        return np.arctan2(prep,self.z).view(np.ndarray)
    @property
    def r(self):
        """Spherical radius: magnitude of the (x, y, z) components."""
        return self[:3].view(Vector3D).mag.view(np.ndarray)
    def in_basis(self, xhat, yhat, zhat):
        '''Project onto a new basis. Must be unit vectors, should be orthogonal'''
        return Vector3D(self.dot(xhat),
                        self.dot(yhat),
                        self.dot(zhat))
    # Restore the 3D angle() (with the optional `normal` argument)
    # that Vector2D.angle hides.
    angle = Vector.angle
    def rotate_axis(self, axis, angle):
        """Rotate vector by a given angle (in radians) around a given axis."""
        u = axis.unit
        c, s = np.cos(angle), np.sin(angle)
        c1 = 1. - c
        # Rodrigues rotation applied component-by-component.
        output = self.copy()
        output.x = (c + u.x ** 2 * c1) * self.x + (u.x * u.y * c1 - u.z * s) * self.y \
            + (u.x * u.z * c1 + u.y * s) * self.z
        output.y = (u.x * u.y * c1 + u.z * s) * self.x + (c + u.y ** 2 * c1) * self.y \
            + (u.y * u.z * c1 - u.x * s) * self.z
        output.z = (u.x * u.z * c1 - u.y * s) * self.x + (u.y * u.z * c1 + u.x * s) * self.y \
            + (c + u.z ** 2 * c1) * self.z
        return output
    def rotate_euler(self, phi=0, theta=0, psi=0):
        """Rotate by Euler angles phi, theta, psi (Z-Y-Z convention, radians)."""
        # Rotate Z (phi)
        c1 = np.cos(phi)
        s1 = np.sin(phi)
        c2 = np.cos(theta)
        s2 = np.sin(theta)
        c3 = np.cos(psi)
        s3 = np.sin(psi)
        # Rotate Y (theta)
        fzx2 =-s2*c1
        fzy2 = s2*s1
        fzz2 = c2
        # Rotate Z (psi)
        fxx3 = c3*c2*c1 - s3*s1
        fxy3 =-c3*c2*s1 - s3*c1
        fxz3 = c3*s2
        fyx3 = s3*c2*c1 + c3*s1
        fyy3 =-s3*c2*s1 + c3*c1
        fyz3 = s3*s2
        # Transform v
        output = self.copy()
        output.x = fxx3*self.x + fxy3*self.y + fxz3*self.z
        output.y = fyx3*self.x + fyy3*self.y + fyz3*self.z
        output.z = fzx2*self.x + fzy2*self.y + fzz2*self.z
        return output
    @classmethod
    def from_spherical_coords(cls, r, theta, phi):
        """Constructor from a space point specified in spherical coordinates.
        Parameters
        ----------
        r : radius, the radial distance from the origin (r > 0)
        theta : inclination in radians (theta in [0, pi] rad)
        phi : azimuthal angle in radians (phi in [0, 2pi) rad)
        """
        return cls(r * np.sin(theta) * np.cos(phi), r * np.sin(theta) * np.sin(phi), r * np.cos(theta))
    @classmethod
    def from_cylindrical_coords(cls, rho, phi, z):
        """Constructor from a space point specified in cylindrical coordinates.
        Parameters
        ----------
        rho : radial distance from the z-axis (rho > 0)
        phi : azimuthal angle in radians (phi in [-pi, pi) rad)
        z : height
        """
        return cls(np.cos(phi) * rho, np.sin(phi) * rho, z)
class LorentzVector(Vector3D):
    """A 4-vector (x, y, z, t) with Minkowski metric (-1, -1, -1, +1)."""
    NAMES = ('x', 'y', 'z', 't')
    METRIC = np.array([-1,-1,-1,1])
    __slots__ = ()
    def __new__(cls, x=0, y=0, z=0, t=0, dtype=np.double):
        return Vector.__new__(cls, x, y, z, t, dtype=dtype)
    @classmethod
    def from_pt_eta_phi(cls, pt, eta, phi, t):
        """Set the transverse momentum, eta, and phi value. The remaining parameter on LorentzVector is untouched
        :param pt: Transverse momentum
        :param eta: Pseudorapidity
        :param phi: Phi
        :param t: t or energy of LorentzVector
        :return: New instance
        """
        return cls(pt * np.cos(phi), pt * np.sin(phi), pt * np.sinh(eta), t)
    @classmethod
    def from_pt_eta_phi_m(cls, pt, eta, phi, m):
        """ Set the listed properties, plus the mass"""
        self = cls.from_pt_eta_phi(pt, eta, phi, 0)
        # Energy from the mass-shell relation; np.sign(m) lets a negative
        # mass argument subtract m**2 instead of adding it.
        self.t = np.sqrt(self.x**2 + self.y**2 + self.z**2 + np.sign(m)*m**2)
        return self
    @property
    def p3(self):
        """The spatial (x, y, z) part as a Vector3D view."""
        return self[:3].view(Vector3D)
    @p3.setter
    def p3(self, obj):
        self[:3] = obj
    @property
    def p(self):
        '''
        Magnitude of the 3-momentum.
        >>> v = LorentzVector(1,2,3,.5)
        >>> v.p
        array([ 3.74165739])
        '''
        return np.sqrt(self.p3.mag2)
    @property
    def e(self):
        '''
        Energy (alias of the t component).
        >>> v = LorentzVector(1,2,3,.5)
        >>> v.e
        array([ 5.26782688])
        '''
        return self.t
    @property
    def eta(self):
        "The Psuedorapitiy"
        return -0.5 * np.log((1. - np.cos(self.theta)) / (1. + np.cos(self.theta)))
    @property
    def gamma(self):
        '''
        Lorentz factor 1/sqrt(1 - beta**2).
        >>> v = LorentzVector(1,2,3,.5)
        >>> v.gamma
        array([ 2.01818182])
        '''
        return 1/np.sqrt(1 - self.beta**2)
    @property
    def beta(self):
        '''
        Velocity as a fraction of c: |p| / E.
        >>> v = LorentzVector(1,2,3,.5)
        >>> v.beta
        array([ 0.71028481])
        '''
        return self.p / self.e
    @property
    def boostp3(self):
        '''
        Boost 3-vector p3 / t.
        >>> v = LorentzVector(-0.212, 0.0327, 0.0327, -0.099)
        >>> v.boostp3()
        Vector3D([[ 2.14141414],
                  [-0.33030303],
                  [-0.33030303]])
        >>> v = LorentzVector(1,2,3,4)
        >>> v.boostp3()
        Vector3D([[ 0.25],
                  [ 0.5 ],
                  [ 0.75]])
        '''
        return (self[:3] / self[3]).view(Vector3D)
    def boost(self, vector3, inplace=False):
        '''
        Lorentz-boost this 4-vector by the 3-velocity *vector3*.
        >>> v = LorentzVector(1,2,3,.5)
        >>> bv = Vector3D(.1,.2,.3)
        >>> v.Boost(bv)
        LorentzVector([[ 1.13224412],
                       [ 2.26448824],
                       [ 3.39673236],
                       [ 2.04882269]])
        '''
        b2 = vector3.mag2
        gamma = 1.0 / np.sqrt(1-b2)
        # gamma2 = (gamma - 1)/b2, defined as 0 where b2 == 0 to avoid 0/0.
        gamma2 = np.zeros_like(b2)
        mask = b2 != 0
        gamma2[mask] = (gamma[mask] - 1) / b2[mask]
        del mask
        bp = self.p3.dot(vector3)
        if inplace:
            self.p3 += gamma2*bp*vector3 + gamma*vector3*self.t
            self.t += bp
            self.t *= gamma
        else:
            v = self.p3 + gamma2*bp*vector3 + gamma*vector3*self.t
            return self.__class__(v[0], v[1], v[2], gamma*(self.t+bp))
    def delta_r(self, other):
        """Return :math:`\\Delta R` the distance in (eta,phi) space with another Lorentz vector, defined as:
        :math:`\\Delta R = \\sqrt{(\\Delta \\eta)^2 + (\\Delta \\phi)^2}`
        """
        # Wrap the phi difference into [-pi, pi).
        delta_phi = np.mod(self.phi - other.phi + np.pi, np.pi*2) - np.pi
        return np.sqrt((self.eta - other.eta)**2 + delta_phi**2)
    def pseudorapidity(self):
        """"Return the pseudorapidity. Alternative to eta method."""
        return self.eta
    @property
    def rapidity(self):
        """Return the rapidity."""
        return 0.5 * np.log( (self.e + self.z)/(self.e - self.z) )
# Install the component properties (.x/.y/...) and basis-vector class
# attributes (X/Y/...) on each concrete vector class.
_add_names(Vector2D)
_add_names(Vector3D)
_add_names(LorentzVector)
# Numba type stubs: tell the overload machinery to treat each Vector
# class as a plain numpy array.
@overload(Vector2D)
def Vector2D_t(*args, **kargs):
    return np.array
@overload(Vector3D)
def Vector3D_t(*args, **kargs):
    return np.array
@overload(LorentzVector)
def LorentzVector_t(*args, **kargs):
    return np.array
| true
|
6d463d5bbf79fdbf281aa74801d438c69292e2f3
|
Python
|
lucas-ipsum/rendite-pv-neu
|
/backend/functions/ephemeris.py
|
UTF-8
| 3,017
| 2.765625
| 3
|
[] |
no_license
|
def ephemeris(time, latitude, longitude, pressure=101325, temperature=12):
    """Compute solar azimuth, elevation and zenith for the given times/site.

    :param time: pandas DatetimeIndex (tz-aware is converted to UTC,
        naive is assumed UTC)
    :param latitude: site latitude in degrees
    :param longitude: site longitude in degrees (positive east; it is
        negated internally because the algorithm works with west-positive)
    :param pressure: unused here — TODO confirm (would feed a refraction
        correction)
    :param temperature: unused here — TODO confirm (same)
    :return: DataFrame indexed by the input times with columns
        'azimuth', 'elevation', 'zenith' (degrees)
    """
    import pandas as pd
    import numpy as np
    Latitude = latitude
    Longitude = -1 * longitude
    Abber = 20 / 3600.  # aberration correction, arcseconds -> degrees
    LatR = np.radians(Latitude)
    # the SPA algorithm needs time to be expressed in terms of
    # decimal UTC hours of the day of the year.
    # if localized, convert to UTC. otherwise, assume UTC.
    try:
        time_utc = time.tz_convert('UTC')
    except TypeError:
        time_utc = time
    # strip out the day of the year and calculate the decimal hour
    DayOfYear = time_utc.dayofyear
    DecHours = (time_utc.hour + time_utc.minute/60. + time_utc.second/3600. +
                time_utc.microsecond/3600.e6)
    # np.array needed for pandas > 0.20
    UnivDate = np.array(DayOfYear)
    UnivHr = np.array(DecHours)
    # Days since the 1900 epoch, then Julian centuries.
    Yr = np.array(time_utc.year) - 1900
    YrBegin = 365 * Yr + np.floor((Yr - 1) / 4.) - 0.5
    Ezero = YrBegin + UnivDate
    T = Ezero / 36525.
    # Calculate Greenwich Mean Sidereal Time (GMST)
    GMST0 = 6 / 24. + 38 / 1440. + (
        45.836 + 8640184.542 * T + 0.0929 * T ** 2) / 86400.
    GMST0 = 360 * (GMST0 - np.floor(GMST0))
    GMSTi = np.mod(GMST0 + 360 * (1.0027379093 * UnivHr / 24.), 360)
    # Local apparent sidereal time
    LocAST = np.mod((360 + GMSTi - Longitude), 360)
    EpochDate = Ezero + UnivHr / 24.
    T1 = EpochDate / 36525.
    ObliquityR = np.radians(
        23.452294 - 0.0130125 * T1 - 1.64e-06 * T1 ** 2 + 5.03e-07 * T1 ** 3)
    MlPerigee = 281.22083 + 4.70684e-05 * EpochDate + 0.000453 * T1 ** 2 + (
        3e-06 * T1 ** 3)
    MeanAnom = np.mod((358.47583 + 0.985600267 * EpochDate - 0.00015 *
                       T1 ** 2 - 3e-06 * T1 ** 3), 360)
    Eccen = 0.01675104 - 4.18e-05 * T1 - 1.26e-07 * T1 ** 2
    # Solve Kepler's equation for the eccentric anomaly by iteration.
    EccenAnom = MeanAnom
    E = 0
    while np.max(abs(EccenAnom - E)) > 0.0001:
        E = EccenAnom
        EccenAnom = MeanAnom + np.degrees(Eccen)*np.sin(np.radians(E))
    TrueAnom = (
        2 * np.mod(np.degrees(np.arctan2(((1 + Eccen) / (1 - Eccen)) ** 0.5 *
                   np.tan(np.radians(EccenAnom) / 2.), 1)), 360))
    # Ecliptic longitude -> equatorial coordinates (declination, RA).
    EcLon = np.mod(MlPerigee + TrueAnom, 360) - Abber
    EcLonR = np.radians(EcLon)
    DecR = np.arcsin(np.sin(ObliquityR)*np.sin(EcLonR))
    RtAscen = np.degrees(np.arctan2(np.cos(ObliquityR)*np.sin(EcLonR),
                                    np.cos(EcLonR)))
    # Hour angle and finally the horizontal coordinates.
    HrAngle = LocAST - RtAscen
    HrAngleR = np.radians(HrAngle)
    SunAz = np.degrees(np.arctan2(-np.sin(HrAngleR),
                                  np.cos(LatR)*np.tan(DecR) -
                                  np.sin(LatR)*np.cos(HrAngleR)))
    SunAz[SunAz < 0] += 360
    SunEl = np.degrees(np.arcsin(
        np.cos(LatR) * np.cos(DecR) * np.cos(HrAngleR) +
        np.sin(LatR) * np.sin(DecR)))
    # make output DataFrame
    DFOut = pd.DataFrame(index=time_utc)
    DFOut['azimuth'] = SunAz
    DFOut['elevation'] = SunEl
    DFOut['zenith'] = 90 - SunEl
    DFOut.index = time
    return DFOut
| true
|
6da5c768bfd212984edebe3adb786d26b84fbe68
|
Python
|
DanielGeorgeMathew/EGEN_CAPSTONES
|
/cloud_function_capstone1.py
|
UTF-8
| 1,361
| 2.59375
| 3
|
[] |
no_license
|
import logging
from base64 import b64decode
from pandas import DataFrame
from json import loads
from google.cloud.storage import Client
class LoadToStorage:
    """Decode a Pub/Sub push payload and upload it to a GCS bucket as CSV."""

    def __init__(self, event, context):
        self.event = event      # Pub/Sub event dict (base64 payload under 'data')
        self.context = context  # Cloud Functions event context (unused here)
        self.bucket_name = "capstone1-crypto-storage"

    def get_message_data(self):
        """Return the decoded message body, or "" when no data is present.

        Bug fixes vs. the original: the lookup key must be the string
        'data' (the bare name raised NameError), and base64-decoding
        happens *before* the UTF-8 decode — the Pub/Sub payload arrives
        as a base64 string, which has no .decode() method.
        """
        if 'data' in self.event:
            return b64decode(self.event['data']).decode("utf-8")
        return ""

    def transform_payload_to_dataframe(self, message):
        """Parse the JSON message into a DataFrame; re-raise on bad input."""
        try:
            df = DataFrame(loads(message))
            return df
        except Exception:
            raise

    def upload_to_bucket(self, df, file_name: str = 'payload'):
        """Serialize *df* to CSV and upload it as <file_name>.csv."""
        storage_client = Client()
        # Bug fix: the original referenced the undefined name
        # ``storage.client``; use the client created just above.
        bucket = storage_client.bucket(self.bucket_name)
        blob = bucket.blob(f"{file_name}.csv")
        blob.upload_from_string(data=df.to_csv(index=False), content_type="text/csv")
def process(event, context):
    """Cloud Function entry point: decode the Pub/Sub event and store it as CSV.

    :param event: Pub/Sub event dict (base64 payload under 'data')
    :param context: Cloud Functions event context
    """
    svc = LoadToStorage(event,context)
    message = svc.get_message_data()
    upload_df = svc.transform_payload_to_dataframe(message)
    # All rows of one payload share a single price_timestamp; use it to
    # build a unique object name for this upload.
    payload_timestamp = upload_df['price_timestamp'].unique().tolist()[0]
    svc.upload_to_bucket(upload_df,'capstone1-crypto-storage'+str(payload_timestamp))
| true
|
c72f2cb4a42f7c6132fac3ee5a7383ec51503729
|
Python
|
slawektestowy/czyst_selenium
|
/Amazontest.py
|
UTF-8
| 1,067
| 2.75
| 3
|
[] |
no_license
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time
# Launch Chrome (driver auto-installed), open Amazon, maximise the window.
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://www.amazon.com/")
driver.maximize_window()
# various element-location methods
#driver.find_element_by_xpath('//*[@id="a-autoid-0-announce"]').click()
# driver.find_element_by_id('a-autoid-0-announce')
# driver.find_element_by_link_text('Shop now')
#driver.find_element_by_xpath('//*[@id="asin-shoveler-ns_4C58EYS6PYTZ7ZYJ6W5Q_2024_"]/div[2]//li[3]/span[1]//img')
#a = driver.find_element_by_xpath('//*[@alt="Nintendo Switch - Neon Red and Neon Blue Joy-Con"]').get_attribute("src")
##print(a)
# for i in driver.find_elements_by_id("img"):
#     print(i.src)
cv = (driver.find_elements_by_xpath('//link'))
print(type(cv))
b = len(driver.find_elements_by_xpath('//link')) # number of <link> elements on the page
print(b)
########
# Check whether the image is present on the page:
#print(driver.find_element_by_xpath("//*[@alt='Cellphones']").get_attribute('naturalHeight'))
driver.close()
| true
|
71d826a7af222040708c816f939b4de81635e099
|
Python
|
fw1121/galaxy_tools
|
/transFIC_web/transFIC_web.py
|
UTF-8
| 4,402
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import requests
import pycurl
import os
from os.path import getsize
import argparse
import sys
import cStringIO
from functools import wraps
import tempfile
import shutil
import time
__url__ = "http://bg.upf.edu/transfic/taskService"
def stop_err(msg, err=1):
    """Write *msg* to stderr and terminate the process with exit status *err*."""
    sys.stderr.write('{0}\n'.format(msg))
    sys.exit(err)
# NOTE: Python 2 syntax ("except X, e") — this file targets Python 2.
def retry(ExceptionToCheck, tries=12000000, delay=3, backoff=2, logger=None):
    """Retry calling the decorated function using an exponential backoff.
    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
    The default tries=12000000 effectively retries indefinitely.
    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    :param logger: logger to use. If None, print
    :type logger: logging.Logger instance
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            # Keep retrying while attempts remain; delay doubles each time.
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck, e:
                    #msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    msg = "Retrying in %d seconds..." % (mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        # print msg
                        pass
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            # Final attempt: any exception now propagates to the caller.
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
class TransficUploader:
    """Upload a file to the transFIC task service and fetch the result CSV.

    Uses pycurl for the PUT upload (response body collected into a
    buffer) and requests for polling/downloading; the polling methods
    retry indefinitely on HTTP errors via the @retry decorator.
    """
    def __init__(self):
        # Configure a curl handle for a PUT upload to the service URL.
        self.c = pycurl.Curl()
        self.c.setopt(pycurl.URL, __url__)
        self.c.setopt(pycurl.UPLOAD, 1)
        try:
            # Honour an http_proxy environment variable when present.
            proxy = os.environ['http_proxy']
            self.c.setopt(pycurl.PROXY, proxy)
        except KeyError:
            pass
        self.c.setopt(pycurl.HTTPHEADER, ['Expect:'])
        self.c.setopt(pycurl.UPLOAD, 1)
        self.c.setopt(pycurl.NOPROGRESS, 1)
        self.c.setopt(pycurl.USERAGENT, "curl/7.27.0")
        self.c.setopt(pycurl.SSL_VERIFYPEER, 1)
        self.c.setopt(pycurl.CUSTOMREQUEST, "PUT")
        self.c.setopt(pycurl.TCP_NODELAY, 1)
        # The server's response (the task URL) is accumulated here.
        self.buf = cStringIO.StringIO()
        self.c.setopt(self.c.WRITEFUNCTION, self.buf.write)
    def upload_file(self, filepath):
        """Point the curl handle at *filepath* as the PUT request body."""
        f = open(filepath)
        self.c.setopt(pycurl.INFILE, f)
        self.c.setopt(pycurl.INFILESIZE, getsize(filepath))
    def run(self):
        """Perform the upload."""
        self.c.perform()
    def get_url(self):
        """Return the task URL the server wrote back during the upload."""
        return self.buf.getvalue().strip()
    @retry(requests.exceptions.HTTPError)
    def result_exists(self, url):
        """Poll *url* until the task is ready; exit on a task error."""
        download_request = requests.request("GET", url)
        print download_request.text
        if download_request.status_code == 404 or download_request.status_code == 500:
            raise requests.HTTPError()  # triggers a retry
        elif "Task status is : error" in download_request.text:
            stop_err("No SNVs found!")
        else:
            return url
    @retry(requests.exceptions.HTTPError)
    def download_result(self, url, outpath):
        """Stream the result CSV from *url* into *outpath* (via a temp dir)."""
        tmp_dir = tempfile.mkdtemp()
        r = requests.get(url, stream=True)
        if r.status_code == 500:
            raise requests.HTTPError()  # triggers a retry
        else:
            path = os.path.join(tmp_dir, "results.csv")
            with open(path, 'wb') as f:
                for chunk in r.iter_content(128):
                    f.write(chunk)
            shutil.move(path, outpath)
            shutil.rmtree(tmp_dir)
def main(params):
    """Parse CLI arguments, upload the input file, wait for the remote
    result to exist, and download it to the requested output path."""
    arg_parser = argparse.ArgumentParser()
    for flag in ("--input", "--output"):
        arg_parser.add_argument(flag, type=str, required=True)
    opts = arg_parser.parse_args(params)
    up = TransficUploader()
    up.upload_file(opts.input)
    up.run()
    result_url = up.result_exists(up.get_url())
    up.download_result(result_url, opts.output)


if __name__ == "__main__":
    main(sys.argv[1:])
| true
|
e0ff484ee170b218fa394ba9dd80d8c8ea12743c
|
Python
|
cordis/pycloudia
|
/pycloudia/services/channels.py
|
UTF-8
| 890
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
from pycloudia.services.beans import Channel
from pycloudia.services.interfaces import IServiceChannelFactory, IChannelsFactory
class ChannelsFactory(IChannelsFactory):
    """Create Channel beans bound to a service plus an address or runtime."""
    channel_cls = Channel

    def create_by_address(self, service, address):
        """Return a channel linking *service* to a concrete *address*."""
        make_channel = self.channel_cls
        return make_channel(service=service, address=address)

    def create_by_runtime(self, service, runtime):
        """Return a channel linking *service* to a *runtime*."""
        make_channel = self.channel_cls
        return make_channel(service=service, runtime=runtime)
class ServiceChannelFactory(IServiceChannelFactory):
    """Channel factory pre-bound to a single service name."""
    channels_factory = ChannelsFactory()

    def __init__(self, service):
        """
        :type service: C{str}
        """
        self.service = service

    def create_by_address(self, address):
        """Delegate to the shared factory using the bound service."""
        return self.channels_factory.create_by_address(self.service, address)

    def create_by_runtime(self, runtime):
        """Delegate to the shared factory using the bound service."""
        return self.channels_factory.create_by_runtime(self.service, runtime)
| true
|
1163f980af22007ffa16cc6e3fc51d6a9930de7a
|
Python
|
luckydimdim/grokking
|
/in_place_reversal_of_a_linked_list/reverse_a_sub_list/main.py
|
UTF-8
| 2,722
| 4.1875
| 4
|
[] |
no_license
|
from __future__ import print_function
class Node:
    """Singly linked list node."""

    def __init__(self, value, next=None):
        self.value = value
        self.next = next

    def print_list(self):
        """Print every value from this node onward, space-separated, then a newline."""
        node = self
        while node is not None:
            print(node.value, end=" ")
            node = node.next
        print()
def reverse_sub_list2(head, p, q):
    """Reverse positions p..q (1-based, inclusive) of a linked list and
    return the head, which may change when p == 1."""
    if p == q:
        return head
    # Advance to the node at position p, remembering its predecessor.
    before, node, idx = None, head, 0
    while node is not None and idx < p - 1:
        idx += 1
        before = node
        node = node.next
    prefix_tail = before    # last node before the reversed section
    section_tail = node     # node at position p ends up last in the section
    # Standard in-place pointer reversal of positions p..q.
    while node is not None and idx < q:
        following = node.next
        node.next = before
        before = node
        node = following
        idx += 1
    # Splice the reversed section back in.
    if prefix_tail is not None:
        prefix_tail.next = before
    else:
        head = before
    section_tail.next = node
    return head
def reverse_sub_list3(head, p, q):
    """Reverse positions p..q (1-based, inclusive) of a linked list and
    return the head, which may change when p == 1."""
    if p == q:
        return head
    position = 0
    trailer, runner = None, head
    # Stop with runner on the p-th node and trailer just before it.
    while runner is not None and position < p - 1:
        trailer = runner
        runner = runner.next
        position += 1
    first_part_end = trailer
    reversed_part_end = runner
    # Pointer-flip the nodes at positions p..q.
    while runner is not None and position < q:
        upcoming = runner.next
        runner.next = trailer
        trailer = runner
        runner = upcoming
        position += 1
    # Reconnect prefix and suffix around the reversed middle.
    if first_part_end is not None:
        first_part_end.next = trailer
    else:
        head = trailer
    reversed_part_end.next = runner
    return head
def reverse_sub_list(head, p, q):
    """Reverse positions p..q (1-based, inclusive) of a linked list and
    return the head.  No-op when p == q or the list is empty."""
    if p == q or head is None:
        return head
    step, walk_prev, walk = 0, None, head
    while step < p - 1 and walk is not None:
        walk_prev = walk
        walk = walk.next
        step += 1
    first_half_tail = walk_prev
    second_half_tail = walk
    # Reverse the section; the first reversed node temporarily points to
    # None until the final splice below fixes it up.
    link = None
    while walk is not None and step < q:
        ahead = walk.next
        walk.next = link
        link = walk
        walk = ahead
        step += 1
    if first_half_tail is not None:
        first_half_tail.next = link
    else:
        head = link
    second_half_tail.next = walk
    return head
def main():
    """Build the sample list 1->2->3->4->5, reverse positions 2..4, and
    print the list before and after."""
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)
    head.next.next.next.next = Node(5)
    print("Nodes of original LinkedList are: ", end='')
    head.print_list()
    result = reverse_sub_list(head, 2, 4)
    print("Nodes of reversed LinkedList are: ", end='')
    result.print_list()


# Runs at import time (no __main__ guard in the original).
main()
| true
|
ce8f4084254894d6a66a87e56c5a36196f5d86d0
|
Python
|
brian-green/User-Unmerge
|
/user_unmerge.py
|
UTF-8
| 1,996
| 2.609375
| 3
|
[] |
no_license
|
# Import modules
import requests
import json

# Authentication data and the API route listing the source user's
# requested tickets.  Placeholders must be filled in before running.
url = 'https://SUBDOMAIN.zendesk.com/api/v2/users/SOURCE-USER-ID/tickets/requested.json'
user = 'user@email.com/token'
token = 'TOKEN'

print('Creating the Session')
s = requests.Session()
s.auth = (user, token)
s.headers = {'Content-Type': 'application/json'}

# Fixed typo in the progress message ("Craeting").
print('Creating the list of tickets to process')
ticket_list = []
# Follow the paginated listing until next_page is None.
while url:
    response = s.get(url)
    data = response.json()
    for ticket in data['tickets']:
        # Dropped the unused ticket_id_string local that shadowed this value.
        ticket_list.append(ticket['id'])
    url = data['next_page']
print('Done making ticket list.')
print('Making function')
def repost(ticket_ids):
    """Re-create each ticket in *ticket_ids* under the target user.

    For every ticket id: fetch the ticket, strip the read-only
    satisfaction fields the import endpoint rejects, reassign the
    requester, attach the full comment history, and POST it to the
    ticket import API.  Stops at the first failed import.

    Bug fix: the original took a parameter named ``list`` (shadowing the
    builtin) and then ignored it, iterating the global ``ticket_list``
    instead.  The function now iterates its argument.
    """
    for ticket_id in ticket_ids:
        print('Getting ticket data')
        ticket_data = s.get('https://SUBDOMAIN.zendesk.com/api/v2/tickets/' + str(ticket_id) + '.json')
        ticket_data = ticket_data.json()
        print('Modifying ticket data')
        # These read-only satisfaction fields are rejected by the import API.
        del ticket_data['ticket']['satisfaction_probability']
        del ticket_data['ticket']['satisfaction_rating']
        ticket_data['ticket']['requester_id'] = TARGET_USER_ID_INT
        print('Getting comments')
        comment_data = s.get('https://SUBDOMAIN.zendesk.com/api/v2/tickets/' + str(ticket_id) + '/comments.json')
        comment_data = comment_data.json()
        comments = comment_data['comments']
        print('Making final payload')
        ticket_data['comments'] = comments
        ticket_data = json.dumps(ticket_data)
        post = s.post('https://SUBDOMAIN.zendesk.com/api/v2/imports/tickets.json', data=ticket_data)
        if post.status_code == 201:
            print("Posted ticket #" + str(ticket_id))
        else:
            print("bad post")
            print(post.status_code)
            print(post.headers)
            print(post.text)
            print("-===========-")
            # NOTE(review): ticket_data is already a JSON string here, so
            # this prints it double-encoded (kept as in the original).
            print(json.dumps(ticket_data))
            break


repost(ticket_list)
| true
|
b20ed704e5cb2f58c39aa7d6f85d9a08327f91f9
|
Python
|
walterwsmf/astroscripts
|
/astroscripts/mlstats.py
|
UTF-8
| 1,576
| 3.328125
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
"""
MLSTATS: MACHINE LEARNING AND STATISTICS ROUTINES
This package has an optimized set of functions for my daily work.
"""
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import scipy #pearson correlation
import matplotlib.pyplot as plt
def rotate_axis(x):
    """Mirror *x* about its mean: each value v becomes 2*mean - v."""
    center = np.mean(x)
    shifted = x - center
    flipped = -1 * shifted  # invert the sign
    return flipped + center
def pearson_ica_test(ica_signal,original_signal,save_dir):
    '''
    Identify which ICA output column correlates best with the first
    principal component of the original signals (assumed to carry the
    light-curve transit), using Pearson correlation.

    Input:
        ica_signal: pandas DataFrame of ICA components (one per column)
        original_signal: pandas DataFrame of the raw signals
        save_dir: directory where the PCA diagnostic plots are written
    Returns:
        component_id: column index of the best-matching ICA component
    '''
    # PCA on the originals; H[0] is the first principal component.
    pca = PCA(n_components=len(original_signal.columns))
    H = pca.fit_transform(original_signal)
    H = pd.DataFrame(H)
    H.plot(grid=True)
    print('Scatter 1st component = ',np.std(H[0]))
    plt.title('PCA Components')
    plt.savefig(save_dir+'PCA_components_.png')
    plt.savefig(save_dir+'PCA_components_.pdf')
    plt.close()
    # Pearson r and p-value of each ICA column against H[0].
    pearson,pvalue = np.zeros(ica_signal.shape[1]), np.zeros(ica_signal.shape[1])
    component_id = 0
    for i in range(ica_signal.shape[1]):
        pearson[i], pvalue[i] = scipy.stats.pearsonr(H[0],ica_signal[i])
        print(pearson[i], pvalue[i])
        # NOTE(review): this compares against the RUNNING maximum (later
        # columns may still overwrite component_id), so the final value is
        # the last column matching the running max -- confirm intended.
        if abs(pearson[i]) == abs(pearson).max():
            print('** Light curve on column = ',i,'\n')
            component_id = i
        else:
            print('** Probabily, this is not the light curve \n')
    return component_id
| true
|
8c99e8c53ee936db9ef0aaf7f0586711eb793f2d
|
Python
|
madhuri-majety/IK
|
/Leetcode/repeating_elements.py
|
UTF-8
| 3,185
| 4.53125
| 5
|
[] |
no_license
|
"""
You are given an array of n+2 elements. All elements of the array are in range 1 to n.
And all elements occur once except two numbers which occur twice. Find the two repeating numbers.
For example, array = {4, 2, 4, 5, 2, 3, 1} and n = 5
The above array has n + 2 = 7 elements with all elements occurring once except 2 and 4 which occur twice.
So the output should be 4 2.
"""
from collections import Counter
class Solution(object):
def print_repeating_using_hash_map(self, nums):
"""
In this method we use hash map to store the value and its count.
While updating the count check if count exceeds 1 and add to the resulting array
TC = O(N)
SC = O(N+k) K being the number of elements repeating
:param nums:
:return:
"""
count_to_map = {}
result = []
for i in range(len(nums)):
count_to_map[nums[i]] = count_to_map.get(nums[i], 0) + 1
if count_to_map[nums[i]] > 1:
result.append(nums[i])
print("print_repeating_using_hash_map(): {}".format(result))
def print_repeating_elems_using_counter_mod(self, nums):
"""
In this method we use counter module to map items to its count
Iterate through the list and see it's value in dictionary is greater than 1 and
append that to th result.
TC = O(N)
SC = O(N + K) K being the number of elements repeating
:param nums:
:return:
"""
c = Counter(nums)
print("Debug: Printing Counter object - {}".format(c))
result = []
for item in nums:
if c[item] > 1:
if item not in result:
result.append(item)
print("print_repeating_elems_using_counter_mod(): {}".format(result))
def print_repeating_elements_optimal(self, arr):
"""
As the assumption is that the elements in the array are within the index range, we can use the following logic
"If the elements are repeating, that means multiple indices will have same value. As the arr have elements that
matches the range of elements, we can traverse the array get the value of index which in turn can be used as
index to the same array. Marking the element as visited my turning that into negative and compare the abs value
to comparisions will give us the element being repeated
As we are not using extra space, this is the optimal solution
TC - O(N)
SC = O(1)
:param nums:
:return:
"""
result = []
for i in range(len(arr)):
# **** Notice the double indexing below *******
if arr[abs(arr[i])] > 0:
arr[abs(arr[i])] = -1 * arr[abs(arr[i])]
else:
result.append(abs(arr[i]))
print("print_repeating_elements_optimal() : {}".format(result))
def main():
    # Exercise all three approaches on the sample array from the prompt.
    # The first two calls do not mutate arr; the optimal one negates
    # entries in place, so it runs last.
    arr = [4, 2, 4, 5, 2, 3, 1]
    sol = Solution()
    sol.print_repeating_using_hash_map(arr)
    sol.print_repeating_elems_using_counter_mod(arr)
    sol.print_repeating_elements_optimal(arr)


if __name__ == '__main__':
    main()
| true
|
732566f8f2653a1eea3756bb15e9174d639bf28f
|
Python
|
Rupesh-1901/Python-Task-1
|
/prgm 2.py
|
UTF-8
| 219
| 4.0625
| 4
|
[] |
no_license
|
def findTrailingZeros(n):
    """Return the number of trailing zeros in n! (Legendre's count of
    the factor 5), or -1 for negative n."""
    if n < 0:
        return -1
    zeros = 0
    power = n
    # Sum n//5 + n//25 + n//125 + ...
    while power >= 5:
        power //= 5
        zeros += power
    return zeros
# Demo: trailing zeros of 100! (expected 24).
n = 100
print("Count of trailing 0s " +
      "in 100! is", findTrailingZeros(n))
| true
|
cfe2c5d25071473240f55b81274f7ee850856035
|
Python
|
Jason-Yuan/Interview-Code
|
/CTCI/Python/Chapter1-7.py
|
UTF-8
| 3,067
| 3.859375
| 4
|
[] |
no_license
|
# define a print matrix method
def ShowMatrix(matrix):
    # Print the matrix one row (list) per line.  Python 2 print statement.
    for row in matrix:
        print row
# end define
##############################################################################################################################
# Method 1
# Ideas: Loop each elements in the M*N matrix, and keep record the row number and column number the 0 element
# Loop another time and change the corresponding rows and columns to be 0
# Use two array to keep record
# Time Complexity: O(m*n)
# Space Complexity: O(m+n)
##############################################################################################################################
def SetMatrixZero1(matrix):
    """Zero out every row and column of *matrix* that contains a 0.

    First pass records the row/column indices holding zeros; second pass
    rewrites the matrix in place.  Extra space O(m+n).  Returns the
    mutated matrix.
    """
    rows_with_zero = []
    cols_with_zero = []
    for r in range(len(matrix)):
        for c in range(len(matrix[0])):
            if matrix[r][c] == 0:
                rows_with_zero.append(r)
                cols_with_zero.append(c)
    for r in range(len(matrix)):
        for c in range(len(matrix[0])):
            if r in rows_with_zero or c in cols_with_zero:
                matrix[r][c] = 0
    return matrix
##############################################################################################################################
# Method 2
# Ideas: Loop each elements in the M*N matrix, and keep record the row number and column number the 0 element
# Loop another time and change the corresponding rows and columns to be 0
# Use the first row and first column to keep record, and we only need to flag to indicate if first row
# and first column should all be zero
# e.g. if matrix[m][n] == 0 we set matrix[0][n] == 0 and matrix[m][0] == 0 since they will be 0 finally, the flag is
# used to indicate the other elemtnets in first row and first column should be 0 or not
# Time Complexity: O(m*n)
# Space Complexity: O(1)
##############################################################################################################################
def SetMatrixZero2(matrix):
    """Zero rows/columns containing a 0 using the first row and column
    as scratch markers, O(1) extra space.

    Two flags remember whether column 0 / row 0 themselves must be
    cleared, since they double as marker storage.  Returns the mutated
    matrix.
    """
    clear_col0 = False
    clear_row0 = False
    for r in range(len(matrix)):
        if matrix[r][0] == 0:
            clear_col0 = True
    for c in range(len(matrix[0])):
        if matrix[0][c] == 0:
            clear_row0 = True
    # Record each zero in the marker row/column.
    for r in range(len(matrix)):
        for c in range(len(matrix[0])):
            if matrix[r][c] == 0:
                matrix[0][c] = 0
                matrix[r][0] = 0
    # Clear interior cells whose marker row/column is set.
    for r in range(1, len(matrix)):
        for c in range(1, len(matrix[0])):
            if matrix[r][0] == 0 or matrix[0][c] == 0:
                matrix[r][c] = 0
    if clear_col0:
        for r in range(len(matrix)):
            matrix[r][0] = 0
    if clear_row0:
        for c in range(len(matrix[0])):
            matrix[0][c] = 0
    return matrix
##############################################################################################################################
def main():
    # Two identical fixtures: each algorithm mutates its input in place.
    matrix1 = [ [1, 0, 3, 8, 9],
                [3, 7, 8, 6, 1],
                [4, 1, 3, 5, 0] ]
    matrix2 = [ [1, 0, 3, 8, 9],
                [3, 7, 8, 6, 1],
                [4, 1, 3, 5, 0] ]
    # Python 2 print statements below.
    print "Set Matrix zero method 1:"
    ShowMatrix(SetMatrixZero1(matrix1))
    print "Set Matrix zero method 2:"
    ShowMatrix(SetMatrixZero2(matrix2))


if __name__ == '__main__':
    main()
| true
|
efe61a3ed1f9e52781ecf4ee202ab9ff971bf6b2
|
Python
|
TythonLee/lop
|
/Code/main_lop.py
|
UTF-8
| 7,288
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Main script for LOP
import unicodecsv as csv
import os
import numpy as np
# Hyperopt
import pickle
import time
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
# Select a model (path to the .py file)
# Two things define a model : it's architecture and the time granularity
from Models.Temporal_RBM.temporal_binary_rbm import train, save
# Model selection: the architecture name plus the time granularity
# together define the model (paths below are derived from both).
model_name = u'Temporal_RBM'
temporal_granularity = u'frame_level'
# Log file
MAIN_DIR = os.getcwd().decode('utf8') + u'/'
log_file_path = MAIN_DIR + u'log'
# Build the matrix database (stored in a data.p file in Data) from a music XML database
database = MAIN_DIR + u'../Data/data.p'
# Set hyperparameters (can be a grid)
result_folder = MAIN_DIR + u'../Results/' + temporal_granularity + u'/' + model_name + u'/'
result_file = result_folder + u'results.csv'
# Config is set now, no need to modify source below for standard use
############################################################################
############################################################################
############################################################################
def average_dict(dico):
    """Return the mean over all entries of the per-entry means.

    Each value of *dico* is a sequence of numbers; the result is the
    average of np.mean(v) over every value v.

    Fixes: uses .values() (works on Python 2 and 3, unlike the original
    .itervalues()) and returns 0 for an empty dict instead of raising
    ZeroDivisionError.
    """
    total = 0
    count = 0
    for v in dico.values():
        count += 1
        total += np.mean(v)
    if count == 0:
        return 0
    return total / count
# Init log_file
log_file = open(log_file_path, 'wb')
log_file.write((u'## LOG FILE : \n').encode('utf8'))
log_file.write((u'## Model : ' + model_name + '\n').encode('utf8'))
log_file.write((u'## Temporal granularity : ' + temporal_granularity + '\n').encode('utf8'))
# Check if the result folder exists
if not os.path.exists(result_folder):
os.makedirs(result_folder)
########################################################################
# Hyper parameters
########################################################################
# Import hyperparams from a csv file (config.csv) and run each row in this csv
hyper_parameters = {}
config_file_path = MAIN_DIR + u'Models/' + model_name + u'/config.csv'
with open(config_file_path, 'rb') as csvfile:
config_csv = csv.DictReader(csvfile, delimiter=',')
headers_config = config_csv.fieldnames
config_number = 0
for row in config_csv:
hyper_parameters[config_number] = row
config_number += 1
config_number_to_train = config_number
# Import from result.csv the alreday tested configurations in a dictionnary
checked_config = {}
headers_result = [u'index'] + headers_config + [u'precision', u'recall', u'accuracy']
config_number_trained = 0
RESULT_FILE_ALREADY_EXISTS = False
if os.path.isfile(result_file) and (os.stat(result_file).st_size > 0):
# File exists and is not empty
RESULT_FILE_ALREADY_EXISTS = True
with open(result_file, 'rb') as csvfile2:
result_csv = csv.DictReader(csvfile2, delimiter=',')
headers_result = result_csv.fieldnames
result_number = 0
for row in result_csv:
# Extract sub-dictionary from the result_dictionary
checked_config[result_number] = dict([(i, row[i]) for i in headers_config if i in row])
result_number += 1
config_number_trained = result_number
log_file.write((u'## Number of config to train : %d \n' % config_number_to_train).encode('utf8'))
log_file.write((u'## Number of config already trained : %d \n' % config_number_trained).encode('utf8'))
log_file.write((u'\n###############################################\n\n').encode('utf8'))
########################################################################
# Train & evaluate
########################################################################
# Train the model, looping over the hyperparameters configurations
config_train = 0
for config_hp in hyper_parameters.itervalues():
log_file.write((u'\n###############################################\n').encode('utf8'))
log_file.write((u'## Config ' + str(config_train) + '\n').encode('utf8'))
print((u'\n###############################################\n').encode('utf8'))
print((u'## Config ' + str(config_train) + '\n').encode('utf8'))
# Check the temporal granularity
if not temporal_granularity == config_hp[u'temporal_granularity']:
log_file.write(u"The temporal granularity in the folder name is not the same as the one announced in the config file\n".encode('utf8'))
print(u"The temporal granularity in the folder name is not the same as the one announced in the config file\n".encode('utf8'))
continue
# Before training for an hyperparam point, check if it has already been tested.
# If it's the case, values would be stored in an other CSV files (result.csv), with its performance
NO_RUN = False
for result_hp in checked_config.itervalues():
if result_hp == config_hp:
NO_RUN = True
break
if NO_RUN:
log_file.write((u"This config has already been tested\n").encode('utf8'))
print((u"This config has already been tested\n").encode('utf8'))
continue
log_file.close()
# Train the model
trained_model, record = train(config_hp, database, log_file_path)
##########
# This is extremly important to keep in mind that when using
# k-fold cross-validation, only the last 10th network is returned, hence
# it can't be used as the "best" network.
# The best way to get a generative network at the end of the training process
# is to get the set of hyper-parameters that obtained the best mean performance
# over the k fold and train it on the whole database this time (no valid, no test)
##########
##########
precision = record['precision']
recall = record['recall']
accuracy = record['accuracy']
# Write logs
log_file = open(log_file_path, 'ab')
log_file.write((u'\n## Performance : \n').encode('utf8'))
log_file.write((u' Precision = {}\n'.format(average_dict(precision))).encode('utf8'))
log_file.write((u' Recall = {}\n'.format(average_dict(recall))).encode('utf8'))
log_file.write((u' Accuracy = {}\n\n'.format(average_dict(accuracy))).encode('utf8'))
# Store results in the configuration dictionary
config_hp[u'precision'] = 100 * average_dict(precision)
config_hp[u'recall'] = 100 * average_dict(recall)
config_hp[u'accuracy'] = 100 * average_dict(accuracy)
# Keep count of the number of config trained
config_index = config_number_trained + config_train # Index of the config
config_train += 1
# Index config
config_hp[u'index'] = config_index
# Store the net in a csv file
save_net_path = result_folder + unicode(str(config_index)) + u'/'
# Save the structure in a folder (csv files)
save(trained_model, save_net_path)
if not RESULT_FILE_ALREADY_EXISTS:
with open(result_file, 'ab') as csvfile:
# Write headers if they don't already exist
writerHead = csv.writer(csvfile, delimiter=',')
writerHead.writerow(headers_result)
RESULT_FILE_ALREADY_EXISTS = True
# Write the result in result.csv
with open(result_file, 'ab') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=headers_result)
count = 0
writer.writerow(config_hp)
log_file.close()
| true
|
def689badce47a93def3a4c75e36b117f0d2cdb0
|
Python
|
tushar176/Notepad-plus
|
/linebar.py
|
UTF-8
| 562
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
from tkinter import Text
'''Only the GUI of the line bar is implemented here; its functionality lives
in the statusbar class (defining it there keeps alignment and call wiring simple).'''
class Linebar:
    """GUI-only line-number bar; its behaviour is implemented in the
    statusbar class (kept there to simplify alignment and call wiring)."""

    def __init__(self, main_application):
        self.root = main_application
        self.line_number_bar = Text(
            self.root, width=2, padx=3, takefocus=1,
            font=('Arial', 18, 'normal'),
            border=0, background='DarkOliveGreen1',
            state='disabled', wrap='none')
        self.line_number_bar.pack(side='left', fill='y')
| true
|
624e661a0d641cadb43f36fa141dd35bcb1120ab
|
Python
|
jesuswr/cp-codes-and-problems
|
/RPC_1_2022/aux.py
|
UTF-8
| 132
| 3.046875
| 3
|
[] |
no_license
|
import random
import string
# Emit a random 200000-character lowercase string followed by a 0
# (test-case generator output).
X = 200000
s = ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(X))
print(s)
print(0)
| true
|
84005ca9c07783dc0aaf16a558470fb834f947c4
|
Python
|
Bzyli/PythonTradingShit
|
/TradingBotV3.py
|
UTF-8
| 1,478
| 3
| 3
|
[] |
no_license
|
from Wallet import *
from ApiGetter import *
def get_variation():
    """Classify the shape of the last three price samples.

    Returns 0 (steadily falling), 1 (steadily rising), 2 (dip, "V"),
    3 (peak), 4 (flat/mixed), or None when trading is not possible.

    Bug fix: get_values() is now called once per decision instead of up
    to six times, so all comparisons see one consistent snapshot.
    """
    if not is_it_possible():
        return
    oldest, middle, newest = get_values()[:3]
    if oldest > middle > newest:      # Not stonks
        return 0
    elif oldest < middle < newest:    # Stonks
        return 1
    elif oldest > middle < newest:    # Curve draws a V
        return 2
    elif oldest < middle > newest:    # Curve draws a "mountain"
        return 3
    else:
        return 4
def is_it_possible():
    """Return True when the wallet holds EUR to trade with, else False.

    Bug fix: the original only ever returned False or (implicitly) None,
    so callers doing ``if not is_it_possible(): return`` always bailed
    out and the bot never traded.
    """
    return wallet.get_eur() > 0
def action_order(action, amount):
    """Execute the trade for *action*: 0/2 buy, 1/3 sell, 4 hold."""
    if action in (0, 2):
        wallet.buy(amount)
    elif action in (1, 3):
        wallet.sell(amount)
    elif action == 4:
        print("Holding...")
def main():
    # Trade forever: decide, act with a fixed amount of 10, report the
    # wallet total, then wait one polling period.
    while True:
        action_order(get_variation(), 10)
        wallet.get_sum()
        sleep(delay)
def test():
    # NOTE(review): duplicates the __main__ block below, but assigns to
    # LOCAL names -- the module-level `wallet`/`delay` that main() reads
    # are not actually set here, so calling test() alone would fail.
    wallet = Wallet()
    delay = int(input("At what rate do you wanna trade in seconds"))
    wallet.buy(float(input("How much do you wanna invest")))
    wallet.set_eur(0)
    get_initial_values(delay)
    main()
if __name__ == "__main__":
    # Interactive bootstrap: fund the wallet, seed the price history,
    # then start the endless trading loop.
    wallet = Wallet()
    delay = int(input("At what rate do you wanna trade in seconds"))
    wallet.buy(float(input("How much do you wanna invest")))
    wallet.set_eur(0)
    get_initial_values(delay)
    main()
| true
|
98d504f1aa5a2b0be4576b543986a18bd545d895
|
Python
|
esrabozkurt/programlama
|
/fonksiyonlar2-4.soru.py
|
UTF-8
| 1,270
| 3.640625
| 4
|
[] |
no_license
|
def donemBasi(koltuk, yatak, dolap):
    """Return the opening-period stock: total of sofas, beds, wardrobes.

    Bug fix: removed ``global donemBasi``, a no-op declaration that only
    shadowed the function's own name.
    """
    stok = koltuk + yatak + dolap
    return stok
def donemSonu(satilanKoltuk=25, satilanYatak=20, satilanDolap=10,
              alinanKoltuk=10, alinanYatak=15, alinanDolap=5):
    """Return the period stock movement: units sold minus units bought.

    Bug fix: removed the pointless ``global donemSonu`` declaration.
    """
    stokSon = (satilanKoltuk + satilanYatak + satilanDolap) - (alinanKoltuk + alinanYatak + alinanDolap)
    return stokSon
def ortalama(donemBasiStok, donemSonuStok, donem=2):
    """Return the average stock over *donem* periods.

    Bug fix: removed the pointless ``global ortalama`` declaration.
    """
    ortalamaStok = (donemBasiStok + donemSonuStok) / donem
    return ortalamaStok
# Interactive demo: read opening stock, subtract the default period
# movement, then report opening / closing / average stock.
# (Labels are Turkish runtime strings -- left untouched.)
a=int(input("Dönem Başı Koltuk Sayısını Giriniz:"))
b=int(input("Dönem Başı Yatak Sayısını Giriniz:"))
c=int(input("Dönem Başı Dolap Sayısını Giriniz:"))
x=donemBasi(a,b,c)
y=x-(donemSonu (satilanKoltuk=25,satilanYatak=20,satilanDolap=10,alinanKoltuk=10,alinanYatak=15,alinanDolap=5))
z=ortalama(x,y)
print("Dönem Başı Stok Durumunuz",x)
print("Dönem İçi Satılan Koltuk Sayısı=25")
print("Dönem İçi Satılan Yatak Sayısı=20")
print("Dönem İçi Satılan Dolap Sayısı=10")
print("Dönem İçi Alınan Koltuk Sayısı=10")
print("Dönem İçi Alınan Yatak Sayısı=15")
print("Dönem İçi Alınan Dolap Sayısı=5")
print("Dönem Sonu Stok Durumunuz",y)
print("Yıllık Ortalama Stok Durumunuz=",z)
| true
|
66a1c503f5f82ef528fb69a9a7e186933fabfe7d
|
Python
|
spenceslx/code_dump
|
/EECE5698/Assignment1/TextAnalyzer.py
|
UTF-8
| 5,364
| 3.421875
| 3
|
[] |
no_license
|
import sys
import argparse
import numpy as np
from pyspark import SparkContext
def toLowerCase(s):
    """Convert a string to lowercase, e.g. 'BaNaNa' becomes 'banana'."""
    return s.lower()
def stripNonAlpha(s):
    """Remove non-alphabetic characters, e.g. 'B:a,n+a1n$a' becomes 'Banana'."""
    kept = [ch for ch in s if ch.isalpha()]
    return ''.join(kept)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Text Analysis through TFIDF computation',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('mode', help='Mode of operation',choices=['TF','IDF','TFIDF','SIM','TOP'])
parser.add_argument('input', help='Input file or list of files.')
parser.add_argument('output', help='File in which output is stored')
parser.add_argument('--master',default="local[20]",help="Spark Master")
parser.add_argument('--idfvalues',type=str,default="idf", help='File/directory containing IDF values. Used in TFIDF mode to compute TFIDF')
parser.add_argument('--other',type=str,help = 'Score to which input score is to be compared. Used in SIM mode')
args = parser.parse_args()
sc = SparkContext(args.master, 'Text Analysis')
if args.mode=='TF':
# Read text file at args.input, compute TF of each term,
# and store result in file args.output. All terms are first converted to
# lowercase, and have non alphabetic characters removed
# (i.e., 'Ba,Na:Na.123' and 'banana' count as the same term). Empty strings, i.e., ""
# are also removed
textrdd = sc.textFile(args.input)
textrdd.flatMap(lambda line: line.split())\
.map(toLowerCase)\
.map(stripNonAlpha)\
.map(lambda word: (word, 1))\
.reduceByKey(lambda val1, val2: val1 + val2)\
.filter(lambda pair: pair[0] != '')\
.saveAsTextFile(args.output)
if args.mode=='TOP':
# Read file at args.input, comprizing strings representing pairs of the form (TERM,VAL),
# where TERM is a string and VAL is a numeric value. Find the pairs with the top 20 values,
# and store result in args.output
outputfile = open(args.output, 'w')
top20 = sc.textFile(args.input)\
.map(eval)
.takeOrdered(20,lambda pair: -pair[1])
#print each top20 in the given output file
for element in top20:
print>>outputfile, element
if args.mode=='IDF':
# Read list of files from args.input, compute IDF of each term,
# and store result in file args.output. All terms are first converted to
# lowercase, and have non alphabetic characters removed
# (i.e., 'Ba,Na:Na.123' and 'banana' count as the same term). Empty strings ""
# are removed
#so far makes partitions for word frequency for each word in a document
dirrdd = sc.wholeTextFiles(args.input)
num_tf = len(set(dirrdd.keys().collect()))
dirrdd.flatMapValues(lambda line: line.split())\
.mapValues(toLowerCase)\
.mapValues(stripNonAlpha)\
.filter(lambda word: word[1] != '')\
.map(lambda (tf,word): (word,tf))\
.combineByKey(lambda tf: [tf],\
lambda l, tf: l + [tf],\
lambda l1, l2: l1 + l2)\
.map(lambda (word,l): (word,np.log(num_tf/(1.*len(set(l))))))\
.saveAsTextFile(args.output)
if args.mode=='TFIDF':
# Read TF scores from file args.input the IDF scores from file args.idfvalues,
# compute TFIDF score, and store it in file args.output. Both input files contain
# strings representing pairs of the form (TERM,VAL),
# where TERM is a lowercase letter-only string and VAL is a numeric value.
wf = sc.textFile(args.input)\
.map(eval)
idf = sc.textFile(args.idfvalues)\
.map(eval)
wf.join(idf)\
.mapValues(lambda (freq, idf): int(freq)*float(idf))\
.sortBy(lambda (word, tfidf): -tfidf)\
.saveAsTextFile(args.output)
if args.mode=='SIM':
# Read scores from file args.input the scores from file args.other,
# compute the cosine similarity between them, and store it in file args.output. Both input files contain
# strings representing pairs of the form (TERM,VAL),
# where TERM is a lowercase, letter-only string and VAL is a numeric value.
outputfile = open(args.output, 'w')
tfidf1 = sc.textFile(args.input)\
.map(eval)
tfidf2 = sc.textFile(args.other)\
.map(eval)
numerator = tfidf1.join(tfidf2)\
.values()\
.map(lambda (tfidf1_val, tfidf2_val): tfidf1_val*tfidf2_val)\
.reduce(lambda val1, val2: val1+val2)
tfidf1_sumsq = tfidf1.values()\
.map(lambda val: val**2)\
.reduce(lambda val1, val2: val1+val2)
tfidf2_sumsq = tfidf2.values()\
.map(lambda val: val**2)\
.reduce(lambda val1, val2: val1+val2)
print>>outputfile, numerator/np.sqrt(tfidf1_sumsq*tfidf2_sumsq)
| true
|
d18922a099127b32e0c487bc6d746832d6aa9ce7
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_201/1885.py
|
UTF-8
| 1,926
| 3.5625
| 4
|
[] |
no_license
|
"""
Created on 08/04/2017
@author: Dos
Problem C.
https://code.google.com/codejam/contest/3264486/dashboard#s=p2
***Sample***
Input
5
4 2
5 2
6 2
1000 1000
1000 1
Output
Case #1: 1 0
Case #2: 1 0
Case #3: 1 1
Case #4: 0 0
Case #5: 500 499
"""
def read_word(f):
    """Return the next line of *f* stripped of surrounding whitespace."""
    return next(f).strip()


def read_int(f, b=10):
    """Read one line and parse it as an integer in base *b*."""
    return int(read_word(f), b)


def read_words(f, d=' '):
    """Read one line and split it on delimiter *d*."""
    return read_word(f).split(d)


def read_ints(f, b=10, d=' '):
    """Read one line of *d*-separated integers in base *b*."""
    return [int(token, b) for token in read_words(f, d)]


def read_decimals(f, d=' '):
    """Read one line of *d*-separated floats."""
    return [float(token) for token in read_words(f, d)]
def solve(case, **kwargs):
# get problem data
N = kwargs['N']
K = kwargs['K']
l = r = 0
acc = [N]
# print "acc", acc
for _ in range(K):
m = acc.pop()
m -= 1
l = m / 2
r = m / 2 + (m % 2)
if l:
acc.append(l)
if r:
acc.append(r)
acc = sorted(acc)
# print "acc", acc, l, r
return "Case #{}: {} {}\n".format(case, max(l, r), min(l, r))
# Toggle these constants to switch between sample / small / large inputs.
# INPUT_FILE_NAME = "C-sample.in"
INPUT_FILE_NAME = "C-small-1-attempt2.in"
# INPUT_FILE_NAME = "C-large.in"
# OUTPUT_FILE_NAME = "C-sample.out"
OUTPUT_FILE_NAME = "C-small-1-attempt2.out"
# OUTPUT_FILE_NAME = "C-large.out"
if __name__ == '__main__':
    # create I/O files
    input_file = open(INPUT_FILE_NAME, 'r')
    output_file = open(OUTPUT_FILE_NAME, "w")
    # read number of test cases
    T = read_int(input_file)
    print("\nThere are %d cases to solve! :)\n" % T)
    # iterate on each case (xrange: Python-2-only)
    for case in xrange(1, T+1):
        # read input args: one line with N (stalls) and K (people)
        line_1 = read_ints(input_file)
        w1 = int(line_1[0])
        w2 = int(line_1[1])
        args = {'N': w1, 'K': w2}
        print("Input #{}:\n{}".format(case, args))
        out = solve(case, **args)
        print(out)
        output_file.write(out)
    # close I/O files
    input_file.close()
    output_file.close()
| true
|
6529a7c8148cddb70d7d7c90db05eee5d13862a5
|
Python
|
ebenp/adventcode
|
/code/adventday2.py
|
UTF-8
| 1,051
| 3.078125
| 3
|
[] |
no_license
|
#http://adventofcode.com/day/2
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object)
from future import standard_library
standard_library.install_aliases()

if __name__ == '__main__':
    # Advent of Code 2015 day 2: wrapping paper per present is
    # 2lw + 2wh + 2hl plus the area of the smallest side (slack).
    import numpy as np
    l=[]
    w=[]
    h=[]
    data=[]
    # Each input line is "LxWxH".
    with open('/Users/eben/Desktop/input2.txt', "r") as myfile:
        data = [line.rstrip('\n') for line in myfile]
    for item in data:
        itemint=[int(x) for x in item.split('x')]
        l.append(itemint[0])
        w.append(itemint[1])
        h.append(itemint[2])
    #convert to numpy
    l=np.array(l)
    w=np.asarray(w)
    h=np.asarray(h)
    #calculate sides
    s1=l*w
    s2=w*h
    s3=h*l
    #zip (dstack) together
    s=np.dstack((s1,s2,s3))
    sides=np.array(s[0,:])
    slack=[min(x) for x in sides]
    #determine final square footage
    sq_ft=s1*2+s2*2+s3*2+slack
    #print to screen
    print('Sum of square ft: '+str(np.sum([sq_ft])))
| true
|
6bf301c7e624ab205d87eca126314a0b7b36f922
|
Python
|
kttaroha/AtCoder
|
/src/ABC0xx/ABC02x/ABC023/ABC023D.py
|
UTF-8
| 772
| 3.171875
| 3
|
[] |
no_license
|
def main():
    """Read N balloons (height, rise-per-second) from stdin and print the
    minimal achievable maximum height via binary search."""
    N = int(input())
    A = [list(map(int, input().split())) for _ in range(N)]
    # Lower bound: tallest initial height.  Upper bound: tallest initial
    # height plus the fastest rise applied for all N seconds.
    max_h = sorted(A, reverse=True, key=lambda x: x[0])[0][0]
    max_s = sorted(A, reverse=True, key=lambda x: x[1])[0][1]
    left = max_h - 1
    right = max_h + max_s*N + 1
    # Invariant: left is infeasible, right is feasible.
    while abs(left-right) > 1:
        mid = (left + right) // 2
        if is_satisfied(A, mid):
            right = mid
        else:
            left = mid
    print(right)
def is_satisfied(A, x):
    """Return True if every balloon can be popped before exceeding height x.

    Balloon a = [height, speed] hits height x at time (x - a[0]) / a[1];
    popping one balloon per unit time starting at t=0, greedily handle the
    earliest deadline first: feasible iff the i-th smallest deadline >= i.
    """
    # Bug fix: exact integer floor division instead of float `/`. For large
    # heights/speeds float rounding could flip a deadline across an integer
    # boundary. Equivalence: floor((x-h)/s) >= t  <=>  (x-h)/s >= t for
    # integer slot t, so the greedy comparison is unchanged.
    deadlines = sorted((x - a[0]) // a[1] for a in A)
    for slot, deadline in enumerate(deadlines):
        if deadline < slot:
            return False
    return True
# Script entry point.
if __name__ == '__main__':
    main()
| true
|
58e872467b1bf406c6231b9972bd693dbfefdd54
|
Python
|
calispotato/python-1
|
/triva
|
UTF-8
| 1,139
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import colors as c
from utils import ask
# Startup banner, printed once at import time.
print(c.red + 'welcome 2 trivia!*hint no capital letters.' + c.reset)
print(c.orange + 'control c to quit' + c.reset)
def ask(question):
    """Show *question*, read one answer line (prompt in green), reset colors.

    NOTE: intentionally shadows the `ask` imported from utils, as the
    original did.
    """
    print(question)
    reply = input(c.green + '> ')
    print(c.reset)
    return reply
def q1():
    """Currency question; return True on a correct answer."""
    if ask('who is on the two dollar bill?') == 'thomas jefferson':
        print('correct')
        return True
    print('incorrect')
    return False
def q2():
    """Chess question; return True on a correct answer."""
    if ask('how many pawns are there on a chessboard?') == '16':
        print('correct')
        return True
    print('incorrect')
    return False
def q3():
    """Geography question; return True on a correct answer."""
    if ask('what is the capital of fiji?') == 'suva':
        print('correct')
        return True
    print('incorrect')
    return False
def q4():
    """Final question; prints the completion banner only on success.

    Bug fix: the congratulations line used to sit *after* `return True`,
    so it was printed only when the answer was WRONG. It now prints when
    the quiz is actually finished (correct final answer).
    """
    answer = ask('what is the smallest kind of star destroyer from the original star wars?')
    if answer == 'victory class':
        print('correct')
        print(c.yellow + 'congradulations you have finished trivia! yay!')
        return True
    print('incorrect')
    return False
# Run every question in order, ignoring pass/fail results (as before).
for _question in (q1, q2, q3, q4):
    _question()
| true
|
40aebb2c3b1214e76a9c21a7d5c547bac647325f
|
Python
|
sungjun-ever/algorithm
|
/baekjoon/bj_1406.py
|
UTF-8
| 483
| 3.078125
| 3
|
[] |
no_license
|
import sys
# BOJ 1406 "editor": two stacks model the cursor. `left` holds characters
# before the cursor, `right` holds the characters after it (reversed).
left = list(sys.stdin.readline().strip())
right = []
for _ in range(int(input())):
    command = sys.stdin.readline().strip().split()
    op = command[0]
    if op == 'L' and left:        # move cursor left
        right.append(left.pop())
    elif op == 'D' and right:     # move cursor right
        left.append(right.pop())
    elif op == 'B' and left:      # backspace
        left.pop()
    elif op == 'P':               # insert character at the cursor
        left.append(command[1])
print(''.join(left + right[::-1]))
| true
|
65c6fae3a0aa4e60c7ae794916050bced04ce0ac
|
Python
|
eurodev/conferences
|
/liveinstaller/gee3
|
UTF-8
| 1,788
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/python
# G u a d a l i n e x E a s t e r E g g
# by Alfonso E.M.
# Free (GPL) but a bit obfuscated code :-)
def stuff():
    """Return the credits text as a base64-encoded blob.

    The decoded payload is newline-separated: lines starting with '#'
    define fonts as "name,size,r,g,b", lines of the form '@N' switch to
    font N, and everything else is a credit line to render.
    """
    return '''
I05vbmUsNDAsMTAwLDIwMCwxMDAKI05vbmUsMzIsMTAwLDE1MCwxMDAKI05vbmUsMjgsOTAsMTIw
LDkwCiNOb25lLDI4LDEyMCwxMDAsMTAwCiNOb25lLDYwLDIwMCwyMDAsMjAwCgpAMApHdWFkYWxp
bmV4IFYzCgoKCkAxCkNhc3QKCkAxCkVtZXJneWEgVW5pdApAMgoKQW50b25pbyBPbG1vCkd1bWVy
c2luZG8gQ29yb25lbApDYXJsb3MgUGFycmEKCkAxCkludGVyYWN0b3JzIFVuaXQKQDIKCkphdmll
ciBMaW5hcmVzCkphdmllciBDYXJyYW56YQpKdWFuIEplc/pzIE9qZWRhClF1aW0gR2lsCgpAMQpZ
YWNvIFVuaXQKQDIKCkphdmllciBWafF1YWxlcyAiVmlndSIKVGVvZmlsbyBSdWl6CkRhbmllbCBD
YXJyafNuCkdhYnJpZWwgUm9kcu1ndWV6ICJDaGV3aWUiCgpAMQpDYW5vbmljYWwgVW5pdApAMgoK
Q29sbGluIFdhdHNvbgpNYXR0IFppbW1lcm1hbgoKQDEKVGVsZWbzbmljYSBVbml0CkAyCgpBbHZh
cm8gU2FpbnotUGFyZG8KVmljZW50ZSBKLiBSdWl6CgpAMQpTQURFU0kgVW5pdApAMgoKTWFyaW8g
R2Fs4W4KTHVpcyBNYXlvcmFsCk1hcmlvIFBpbm8KCkAxCkRpcmVjdGVkIGJ5CkAyCgpBbGZvbnNv
IGRlIENhbGEKCgpAMQpDb0RpcmVjdG9yCkAyCgpSYWZhZWwgTWFydO1uIGRlIEFnYXIKCgpAMQpC
ZXN0IEJveQpAMgoKRnJhbmNpc2NvIFl1c3RlCgoKQDEKQmVzdCBHaXJsCkAzCgpMZXRpY2lhIEdl
c3Rvc28KCgpAMQpDb25zdWx0YW50CkAyCgpSb2JlcnRvIE1hamFkYXMKCgpAMQpTdHVudHMKQDIK
CmFsZmFiZXQKc2VtYW51ZQp0cnVua3MKYWR2b2NhdHV4Ck1hcmR1awpJbmRpb0NhYnJlYW8KZ3Vh
ZGFmYW4KYmFzb2FsdG8KZXJmbGFrbwptYXJpbGxpb24KbWFycmFqbwpjaGVpdG96egpKb2VMdWkK
YW1iaWVudGFsClRyaWFOCgoKQDEKQ29zdHVtZSBEZXNpZ25lcgpAMgoKTG9zdEJpdC5uZXQKCgpA
MQpBcnQgRGVwYXJ0bWVudApAMgoKSm9zZSBBLiBHaWwgR2FsaWFubwoKCkAxClNldCBEZWNvcmF0
aW9uIGJ5CkAzCgpDaW50YSBDYXN0aWxsYQoKCkAxCkNhcnBlbnRlcnMKQDIKCkRlYmlhbiBEZXZl
bG9wZXJzCgoKQDEKU3BlY2lhbCBFZmZlY3RzCkAyCgpUaGUgVWJ1bnR1IENvbW11bml0eQoKCkAx
ClNvbmdzCgpAMgoiR3VhZGFsaW5leCBLaWxsZWQgVGhlIEhhc2Vmcm9jaCBTdGFyIgpQZXJmb3Jt
ZWQgYnkgWGFwaSBhbmQgdGhlIFdhdGVybWFpbnMKCiJEb24ndCBQaHVuY2sgV2l0aCBNeSBIYXQi
ClBlcmZvcm1lZCBieSBQaGVkb3JhIEphenogQmFuZAoKIkdyZWVuLUdyZWVuLiBZZWxsb3cgPyBQ
aW5rICEiClBlcmZvcm1lZCBieSBUaGUgUGhyZWFrZXIgQm95cwoKIkVudGVyIFRoZSBGbGFtaW5n
byAoQmV0dGVyIHRvIHJ1biBhd2F5KSIKUGVyZm9ybWVkIGJ5IFR4dVR4ZQoKCgpAMQpFeGVjdXRp
dmUgUHJvZHVjZXJzCkAyCgpNYW51ZWwgTWFydO1uIE1hdGEKSnVhbiBDb25kZQpKb3NlIFNhbG1l
cvNuCgoKQDEKQXNzb2NpYXRlIFByb2R1Y2VyCkAyCgpWaWN0b3IgRmVybuFuZGV6CgoKQDEKUHJv
ZHVjZWQgYnkKQDIKCkxhIEp1bnRhIGRlIEFuZGFsdWPtYQoKCgpAMgpSZWNvcmRlZCBpbiAiRWwg
U2VuZGVybyIgU3R1ZGlvcwoKCgoiQW5kYXR1eiIgY3JlYXR1cmUgZGVzaWduZWQgYnkKSm9zZSBB
LiBHaWwgR2FsaWFubwoKCl9fXwoKCkNvcHlsZWZ0IDIwMDUgSnVudGEgZGUgQW5kYWx1Y+1hIFBy
b2R1Y3Rpb25zCkZpbG1lZCBpbiBNZXRhZGlzdHJvdmlzaW9uCgpfX18KCgoKCgpObyBhbmltYWxz
IGhhdmUgYmVlbiBoYXJtZWQgZHVyaW5nIAp0aGUgbWFraW5nIG9mIHRoaXMgd29uZGVyZnVsIGRp
c3RyaWJ1dGlvbgoKCgoKCkAxClRoYW5rcyB0bwpAMgoKSWFuIE11cmRvY2sgZm9yIGludmVudGlu
ZyBEZWJpYW4KCk1hcmsgU2h1dHRsZXdvcnRoIGZvciBjb21pbmcgZG93biB0byBFYXJ0aCAKYW5k
IHNwb25zb3JpbmcgVWJ1bnR1LgoKTGluRXgsIExsaXVyZXgsIENhdGl4IGFuZCBNb2xpbnV4IChh
bmQgbWFueSBvdGhlcnMpCmZvciBzdXBwb3J0aW5nIGZyZWUgc29mdHdhcmUKCk91ciBnaXJsZnJp
ZW5kcyBhbmQgd2l2ZXMgZm9yIHN0YW5kaW5nIGJ5IHVzCgoKCgoKU3BlY2lhbCBUaGFua3MgdG8g
RXZlcnkgR3VhZGFsaW5leCB1c2VyIApmb3IgYmV0YS10ZXN0aW5nLCAKc2hhcmluZyBrbm93bGVk
Z2UgaW4gdGhlIGZvcnVtcywKYW5kIHRydXN0aW5nIHVzCgpWZXJ5IFNwZWNpYWwgVGhhbmtzIHRv
IE1hcmsgVHlsZXIKZm9yIGhpcyB3b25kZXJmdWwgY3V0ZSBkcmF3aW5nIHNvZnR3YXJlCk1UUEFJ
TlQgUk9DS1MgIQoKQW5kIHRvIE1hcmlvIE1hcnRpbgpmb3IgR3VhZGFsaW5leCBNaW5pIENvbnRy
b2wgQ2VudGVyIApldmVuIHdoZW4gaXQncyBwcm9ncmFtbWVkIGluIFBhc2NhbAoKCgoKCkVuam95
IEd1YWRhbGluZXggIQoKCgoKCgoKCgoKCgoKCgoKCgoKCgpANApUIEggRSAgIEUgTiBEIApAMgoK
CgoKCgoKCgoKCgoKCgoKCgoKCgpOby4gVGhlcmUgaXMgbm8gbW9yZSB0aXRsZXMuCgoKCgoKCgoK
CgoKCgoKCgoKCgoKCgpTdXJlICEKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCldoYXQgcGFydCBvZiAi
Tm8iIGRpZCB5b3UgbWlzdW5kZXJzdG9vZCA/CgoKCgoKCgoKCgoKCgoKCgoKCgoKCgpPaywgaGVy
ZSB5b3UgaGF2ZSB0aGUgbGFzdCB0aXRsZToKCgoKCgoKCgoKCgpTZWUgeW91IGluIEd1YWRhbGlu
ZXggVjQ6IAoiVGhlIFJldmVuZ2Ugb2YgUmVkIFJlZGhvIiAKCgoKCgoKQDQKOy0pCkAyCg==
'''
import pygame,sys,base64
from pygame.locals import *
# Scrolling-credits renderer.
# NOTE(review): this script targets Python 2 — `base64.decodestring` was
# removed in Python 3.9, and `linepos > 0` below compares a pygame Rect with
# an int, which only works under Python 2's arbitrary comparisons.
bgcolor=(0,0,0)
size=width,height=640,480
delay=30  # ms per frame; toggled between 30 and 1 with the 'g' key
credits=base64.decodestring(stuff()).split("\n")
pygame.init()
pygame.display.set_caption('Guadalinex Credits')
pygame.mouse.set_visible(0)
screen=pygame.display.set_mode(size)
screen.fill(bgcolor)
vpos=height
renderedcredits=[]
fontcolor=(100,200,100)
fontsize=36
fontfamily=None
linefont=pygame.font.Font(fontfamily,fontsize)
linefonts=[]
fontcolors=[]
# Pre-render every credit line to a surface, switching fonts on '@N' markers
# and registering fonts on '#name,size,r,g,b' lines.
for line in credits:
    line=line.rstrip("\n")
    if line.startswith("#"):
        # NOTE(review): `size` here clobbers the module-level window `size`
        # tuple defined above; harmless only because set_mode already ran.
        name,size,red,green,blue=line[1:].split(",")
        c=(int(red),int(green),int(blue))
        linefonts.append(pygame.font.Font(None,int(size)))
        fontcolors.append(c)
    elif line.startswith("@") and line[1:2].isdigit():
        n=int(line[1:2])
        linefont=linefonts[n]
        fontcolor=fontcolors[n]
    else:
        text = linefont.render(line, 1, fontcolor)
        textpos=text.get_rect().move(width/2 - text.get_width()/2, vpos)
        vpos=vpos+text.get_height()
        renderedcredits.append(text)
vpos=height
# Scroll loop: shift everything up one pixel per frame; 's' restarts,
# 'g' toggles fast-forward, any other key quits.
while 1:
    vpos=vpos-1
    linevpos=vpos
    screen.fill(bgcolor)
    for line in renderedcredits:
        linepos = line.get_rect().move(width/2-line.get_width()/2,linevpos)
        if linepos > 0 and linevpos < height:
            screen.blit(line, linepos)
        # NOTE(review): uses the *last* surface rendered in the build loop
        # (`text`) for spacing — presumably `line.get_height()` was intended.
        linevpos=linevpos+text.get_height()
    pygame.display.flip()
    pygame.time.delay(delay)
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit()
        if event.type == KEYDOWN:
            keyboard=pygame.key.get_pressed()
            if keyboard[K_s] == 1:
                vpos=height
            elif keyboard[K_g] == 1:
                if delay != 30:
                    delay = 30
                else:
                    delay = 1
            else:
                sys.exit()
| true
|
1363ece8d9caa8d840ab1cb79f23b02ebefc3913
|
Python
|
umyuu/Sample
|
/src/Python3/Q109871/exsample_1.py
|
UTF-8
| 2,092
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: UTF-8 -*
import functools
import sys

from tkinter import ttk
from tkinter import *
class VolumeWindow(Toplevel):
    """Secondary window holding one volume slider; starts hidden and is
    hidden (not destroyed) when its close button is pressed."""

    def __init__(self, root):
        super().__init__()
        # Closing this window only hides it (param=2 -> hide).
        self.protocol('WM_DELETE_WINDOW', functools.partial(self.on_window_exit, param=2))
        self.volumes = Scale(self, {'label': 'volume', 'from_': 0, 'to': 100,
                                    'length': 200, 'command': self.__on_changed_scale_value})
        self.volumes.pack()
        # Hidden until the main window's button reveals it.
        self.set_visible(False)

    def __on_changed_scale_value(self, event):
        """Fired while the slider moves; intentionally a no-op."""
        pass

    def on_window_exit(self, param):
        """WM_DELETE_WINDOW handler: hide the window instead of destroying it."""
        if param == 2:
            self.set_visible(False)

    def set_visible(self, visible):
        """Show (deiconify) or hide (withdraw) this window."""
        if visible:
            self.deiconify()
            return
        self.withdraw()
class App(object):
    """Main window with a button that reveals the (shared) volume window."""

    def __init__(self):
        self.root = Tk()
        self.root.title('Q109871')
        # Closing the main window exits the process (param=1 -> exit).
        self.root.protocol('WM_DELETE_WINDOW', functools.partial(self.on_window_exit, param=1))
        # Create the (initially hidden) volume window up front and keep a
        # reference so the button can toggle it.
        self.volume_window = VolumeWindow(self.root)
        # Button click shows the volume window via VolumeWindow.set_visible(True).
        self.volume_button = Button(self.root, text='Volume',
                                    command=functools.partial(self.volume_window.set_visible, visible=True))
        self.volume_button.pack()

    def on_window_exit(self, param):
        """WM_DELETE_WINDOW handler for the main window: terminate the app.

        Bug fix: `sys` was referenced without ever being imported, so closing
        the main window raised NameError instead of exiting. `import sys` is
        added to the module imports.
        """
        if param == 1:
            sys.exit(0)

    def run(self):
        """Enter the Tk main loop."""
        self.root.mainloop()
self.root.mainloop()
def main():
    """Construct the application and start its event loop."""
    App().run()


if __name__ == '__main__':
    main()
| true
|
7feae52230578ca784ced1f4a0e8c1e78469d68d
|
Python
|
twtmiss/Spider
|
/廖雪峰爬虫/d2/request.py
|
UTF-8
| 2,087
| 3.203125
| 3
|
[] |
no_license
|
import requests
import urllib.request
import json
class RequestSpider(object):
    """Small demo of the `requests` API: headers, cookies, body decoding, JSON.

    NOTE: constructing an instance performs a network GET immediately.
    """

    def __init__(self):
        url = "https://www.baidu.com"
        header = {
            "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
        }
        # requests url-encodes parameters automatically.
        self.response = requests.get(url, headers=header)

    def run(self):
        """Print request/response headers, status code and both cookie jars."""
        # 1. request headers
        # Bug fix: this used to read self.response.headers (the *response*
        # headers) while labelling them as the request headers.
        request_header = self.response.request.headers
        print("请求头:" + str(request_header))
        # 2. response headers
        response_header = self.response.headers
        print("响应头" + str(response_header))
        # 3. response status code
        code = self.response.status_code
        print("状态码" + str(code))
        # 4. cookies sent with the request
        requests_cookie = self.response.request._cookies
        print("请求cookie:" + str(requests_cookie))
        # 5. cookies set by the response
        response_cookie = self.response.cookies
        print("响应cookie:" + str(response_cookie))

    def res_data(self, url="https://www.baidu.com"):
        """Demonstrate the three ways to read a response body.

        Bug fix: `url` was undefined here (NameError on every call); it is
        now a parameter defaulting to the same URL the constructor uses,
        which keeps the zero-argument call signature working.
        """
        response = requests.get(url)
        # content attribute: raw bytes
        data_content = response.content
        print(data_content)
        # .decode('utf-8') turns the bytes into str
        data_utf = response.content.decode('utf-8')
        print(data_utf)
        # text attribute: already-decoded str
        data_text = response.text
        print(type(data_text))

    def js(self):
        """Fetch a JSON endpoint and parse it both manually and via .json()."""
        url = "http://api.github.com/user"
        header = {
            "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
        }
        response = requests.get(url, headers=header)
        data = response.content.decode('utf-8')
        # str -> dict
        data_dict = json.loads(data)
        print(data_dict)
        # .json() parses the JSON body directly
        data = response.json()
        print(data)
RequestSpider().js()
| true
|
0a4a334076d7f12b91f016c82d4b9a50817c4793
|
Python
|
armando555/Calculate-Sum-Of-SubMatrix
|
/CalculateSubMatrix.py
|
UTF-8
| 3,014
| 3.21875
| 3
|
[] |
no_license
|
import java.util.Scanner;
public class CalculateSubMatrix{

    // Fixed sample data. `sum` holds its 2-D prefix sums:
    // sum[i][j] = sum of matrix[0..i][0..j] (inclusive).
    private int matrix [][]=
            {{ 1,  2,  3,  4,  5 },
            { 6,  7,  8,  9, 10 },
            { 6,  3,  4,  6,  2 },
            { 7,  3,  1,  8,  3 },
            { 1,  5,  7,  9,  4 }
            };
    private int sum [][];

    public CalculateSubMatrix (){
        Scanner sc = new Scanner (System.in);
        System.out.print("Enter de Rows Size ");
        int rows = sc.nextInt();
        System.out.print("Enter de Colums Size ");
        int colums = sc.nextInt();
        // Bug fix: `sum` used to be sized from the user input while `matrix`
        // is a fixed 5x5 literal, so any size other than 5x5 crashed
        // calculateSum() with ArrayIndexOutOfBoundsException. Size it from
        // the matrix itself. rows/colums are still read to preserve the CLI
        // flow (they only mattered for the commented-out matrix-from-input
        // code in the original).
        sum = new int[matrix.length][matrix[0].length];
        calculateSum();
        printMatrix();
        coordinates();
        System.out.println();
    }

    public static void main (String args[]){
        CalculateSubMatrix csm = new CalculateSubMatrix();
    }

    /** Fill `sum` with the 2-D prefix sums of `matrix`: first row and first
     *  column directly, the rest via the inclusion-exclusion recurrence. */
    public void calculateSum (){
        sum[0][0] = matrix[0][0];
        for (int i = 1; i < matrix.length; i++){
            sum[i][0] = matrix[i][0] + sum[i-1][0];
        }
        for (int j = 1; j < matrix[0].length; j++){
            sum[0][j] = matrix[0][j] + sum[0][j-1];
        }
        for (int i = 1; i < matrix.length; i++){
            for (int j = 1; j < matrix[0].length; j++){
                sum[i][j] = matrix[i][j] - sum[i-1][j-1] + sum[i-1][j] + sum[i][j-1];
            }
        }
    }

    /** Print the raw matrix followed by its prefix-sum table, tab separated. */
    public void printMatrix(){
        for (int i = 0; i < matrix.length; i++){
            for (int j = 0; j < matrix[0].length; j++){
                System.out.print(matrix[i][j]+String.valueOf('\t'));
            }
            System.out.println();
        }
        System.out.println();
        for (int i = 0; i < matrix.length; i++){
            for (int j = 0; j < matrix[0].length; j++){
                System.out.print(sum[i][j]+String.valueOf('\t'));
            }
            System.out.println();
        }
    }

    /** Read two "i,j" coordinates (top-left, bottom-right, inclusive) and
     *  print the sum of the sub-matrix they delimit, in O(1) via the
     *  prefix-sum table. */
    public void coordinates(){
        Scanner sc = new Scanner (System.in);
        System.out.println("Continously, enter the range coordinates as pairs separating them with a ',' Ex: 0,1 ; 2,3 ; etc.");
        System.out.println("enter the first Coordinate ");
        String cord1[] = sc.next().split(",");
        System.out.println("enter the second Coordinate ");
        String cord2[] = sc.next().split(",");
        int i1 = Integer.parseInt(cord1[0]);
        int j1 = Integer.parseInt(cord1[1]);
        int i2 = Integer.parseInt(cord2[0]);
        int j2 = Integer.parseInt(cord2[1]);
        if (i2 < i1 || j2 < j1){
            // Bug fix: execution used to fall through after this error and
            // index `sum` with the invalid coordinates.
            System.err.println("Error");
            return;
        }
        int sumReturn;
        if (i1 == 0 && j1 == 0){
            // Bug fix: the origin case used to return matrix[i2][j2]
            // (a single element) instead of the prefix sum of the region.
            sumReturn = sum[i2][j2];
        } else if (i1 == 0){
            sumReturn = sum[i2][j2] - sum[i2][j1 - 1];
        } else if (j1 == 0){
            sumReturn = sum[i2][j2] - sum[i1 - 1][j2];
        } else {
            // General case: inclusion-exclusion over four prefix rectangles.
            sumReturn = sum[i2][j2] - sum[i2][j1 - 1] - sum[i1 - 1][j2] + sum[i1 - 1][j1 - 1];
        }
        System.out.println(sumReturn);
    }
}
| true
|
2a551e9989a32e8a5f1dba96269cde240e155e20
|
Python
|
EugenenZhou/leetcode
|
/countdigitone.py
|
UTF-8
| 2,363
| 3.953125
| 4
|
[] |
no_license
|
####################################################################
# 我们可以观察到每 1010 个数,个位上的’1’ 就会出现一次。
# 同样的,每 100100 个数,十位上的’1’ 就会出现一次。
# 这个规律可以用 (n/(i*10))*i(n/(i∗10))∗i 公式来表示。
# 同时,如果十位上的数是 ’1’,那么最后’1’ 的数量要加上 x+1,其中 x 是个位上的数值。
# 如果十位上的数大于’1’,那么十位上为’1’ 的所有的数都是符合要求的,这时候最后’1’ 的数量要加 10。
# 这个规律可以用公式 min(max((n mod (i*10))-i+1,0),i)来表示。
# 我们来看一个例子吧,有一个数 n = 1234。
# 个位上’1’的数量 = 1234/10 (对应 1,11,21,...1221) + min(4,1) (对应 1231) = 124
# 十位上’1’的数量 = (1234/100)*10 (对应 10,11,12,...,110,111,...1919) + min(21,10) (对应 1210,1211,...1219) = 130130
# 百位上’1’的数量 = (1234/1000)*100(对应 100,101,102,...,199) + min(135,100) (对应1100,1101...1199) = 200200
# 千位上’1’的数量 = (1234/10000)*10000(1234/10000)∗10000 + min(235,1000) (对应1000,1001,...1234) = 235235
# 因此,总数 = 124+130+200+235 = 689124+130+200+235=689。
# 作者:LeetCode
# 链接:https://leetcode-cn.com/problems/two-sum/solution/shu-zi-1-de-ge-shu-by-leetcode/
# 来源:力扣(LeetCode)
# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
####################################################################
def _count_recursive(n):
    """Count digit '1' occurrences in 1..n by recursing on the leading digit."""
    if n <= 0:
        return 0
    if n < 10:
        return 1  # exactly the number 1 contributes in 1..9
    s = str(n)
    lead, rest = int(s[0]), int(s[1:])
    block = 10 ** (len(s) - 1)          # e.g. 1000 for a 4-digit n
    below = _count_recursive(block - 1)  # count inside one full block
    if lead == 1:
        # '1' leads numbers block..n: rest+1 of them, plus counts in the
        # remainder and in the full block below.
        return 1 + rest + _count_recursive(rest) + below
    # `lead` full blocks below, each contributing `below`, plus the `block`
    # numbers whose leading digit is 1, plus the remainder.
    return block + _count_recursive(rest) + lead * below


def _count_positional(n):
    """Count digit '1' occurrences in 1..n with the closed-form per-position sum."""
    if n <= 0:
        return 0
    total = 0
    power = 1  # 1, 10, 100, ... — the digit position being counted
    while power <= n:
        # Full cycles of this position contribute `power` ones each.
        total += (n // (power * 10)) * power
        # Partial cycle: clamp the leftover run of ones to [0, power].
        remainder = n % (power * 10) - power + 1
        total += min(power, max(remainder, 0))
        power *= 10
    return total


def countDigitOne(n, method):
    """Count occurrences of the digit '1' in all integers 1..n.

    method=1 selects the leading-digit recursion, method=2 the positional
    closed form; both return identical values (LeetCode 233).

    Raises ValueError for an unknown method (previously returned None
    silently).
    """
    if method == 1:
        return _count_recursive(n)
    if method == 2:
        return _count_positional(n)
    raise ValueError("method must be 1 or 2, got {!r}".format(method))
# Demo invocation. NOTE(review): `result` is computed but never printed or
# otherwise used.
num=134
result=countDigitOne(num,method=2)
| true
|
208b5bb7dab4c751231b2695974c80ae51a27ad9
|
Python
|
marcinpanfil/advent-of-code
|
/2020/day08.py
|
UTF-8
| 3,023
| 3.171875
| 3
|
[] |
no_license
|
import copy
from file_utils import file_reader
class Operation:
    """One instruction: a mnemonic name ('nop'/'acc'/'jmp') and its argument."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        return self.name + " " + str(self.value)

    def __repr__(self):
        return self.name + " " + str(self.value)

    def __eq__(self, other):
        # Bug fix: the value comparison used to read
        # `self.value == self.value` (always True), so operations with equal
        # names but different arguments compared equal.
        return self.name == other.name and self.value == other.value

    def __hash__(self):
        return hash(self.name) ^ hash(self.value)
def operate(operations):
    """Run the program until an instruction is about to execute a second time.

    Returns the accumulator at the moment the repeat is detected, or None if
    the program runs off the end (normal termination).
    """
    accumulator = 0
    cur_pos = 0
    # Set instead of list: membership tests were O(n) per step, making a long
    # looping program O(n^2) overall.
    seen = set()
    while cur_pos < len(operations):
        if cur_pos in seen:
            return accumulator
        seen.add(cur_pos)
        operation = operations[cur_pos]
        if operation.name == 'nop':
            cur_pos += 1
        elif operation.name == 'acc':
            accumulator += operation.value
            cur_pos += 1
        elif operation.name == 'jmp':
            cur_pos += operation.value
    return None
cur_pos += operation.value
def operate_after_change(operations):
    """Run the program and report whether it terminates normally.

    Returns (False, -1) when an infinite loop is detected, or
    (True, accumulator) when execution runs past the last instruction.
    """
    accumulator = 0
    cur_pos = 0
    # Same O(1)-membership fix as operate(): a set instead of a list.
    seen = set()
    while cur_pos < len(operations):
        if cur_pos in seen:
            return False, -1
        seen.add(cur_pos)
        operation = operations[cur_pos]
        if operation.name == 'nop':
            cur_pos += 1
        elif operation.name == 'acc':
            accumulator += operation.value
            cur_pos += 1
        elif operation.name == 'jmp':
            cur_pos += operation.value
    return True, accumulator
def find_wrong_operations(operations):
    """Find the single jmp<->nop flip that makes the program terminate.

    Returns the final accumulator of the fixed program, or -1 if no single
    flip terminates.

    Performance fix: the original deep-copied the whole program for every
    candidate (O(n^2) allocations). We now toggle the instruction in place,
    run, and restore it — operate_after_change() never mutates its input.
    """
    for idx in find_ops_to_change(operations):
        op = operations[idx]
        original_name = op.name
        op.name = 'nop' if original_name == 'jmp' else 'jmp'
        terminated, accumulator = operate_after_change(operations)
        op.name = original_name  # restore before the next candidate
        if terminated:
            return accumulator
    return -1
def find_ops_to_change(operations):
    """Indices of every 'jmp'/'nop' instruction — the flip candidates."""
    return [idx for idx, op in enumerate(operations)
            if op.name in ('jmp', 'nop')]
def parse_input(values):
    """Convert raw "name ±N" lines into Operation instances."""
    parsed = []
    for line in values:
        name, _, raw_value = line.partition(' ')
        parsed.append(Operation(name, int(raw_value.replace('+', ''))))
    return parsed
def solve_1():
    """Puzzle part 1: accumulator value just before any instruction repeats."""
    program = parse_input(file_reader.read_str_from_file('input/day08_input.txt'))
    return operate(program)
def solve_2():
    """Puzzle part 2: accumulator after fixing the one corrupted instruction."""
    program = parse_input(file_reader.read_str_from_file('input/day08_input.txt'))
    return find_wrong_operations(program)
# Sample program from the puzzle statement.
test_case_1 = '''nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6'''

# part 1
assert 5 == operate(parse_input(test_case_1.split('\n')))
assert 1337 == solve_1()  # NOTE(review): requires input/day08_input.txt on disk

# part 2
assert 8 == find_wrong_operations(parse_input(test_case_1.split('\n')))
assert 1358 == solve_2()  # NOTE(review): requires input/day08_input.txt on disk
| true
|
a06aabb6f5597bda39c8cb66087df9b0a96a9e5d
|
Python
|
rxia/Data_Incubator
|
/project/allrecipes_scraper.py
|
UTF-8
| 1,409
| 2.625
| 3
|
[] |
no_license
|
from recipe_scrapers import scrape_me
import pickle
import numpy as np
import time
# Scrape allrecipes.com recipe pages by sequential numeric ID, accumulating
# dicts of recipe fields and checkpointing to a pickle every 1000 IDs.
data_allrecipes = []
for ID in np.arange(129001,299999):
    try:
        scrape_result = scrape_me('http://allrecipes.com/Recipe/{}'.format(ID))
        recipe_i = {}
        recipe_i['id'] = ID
        recipe_i['title'] = scrape_result.title()
        recipe_i['total_time'] = scrape_result.total_time()
        recipe_i['ingredients'] = scrape_result.ingredients()
        recipe_i['instruction'] = scrape_result.instructions()
        recipe_i['links'] = scrape_result.links()
        data_allrecipes.append(recipe_i)
    # NOTE(review): bare except swallows everything, including
    # KeyboardInterrupt; narrow it to the scraper's failure exceptions.
    except:
        print('ID {} is not valid'.format(ID))
    if ID%100==0:
        print('at ID {} data length = {}'.format(ID,len(data_allrecipes)))
    if ID%1000==0:
        # Periodic checkpoint: rewrite the whole pickle with everything so far.
        print('file saved at {}'.format(ID))
        with open('project/data/scraped_data_allrecipes.pickle', 'wb') as f:
            pickle.dump(data_allrecipes, f)
##
def getRecipeLinks(id):
    # NOTE(review): this function cannot run as written — `requests`, `html`
    # (presumably lxml.html), `cStringIO`, `urllib.urlopen` and `Image`
    # (PIL) are never imported, and cStringIO/urllib.urlopen are Python 2
    # APIs while the module imports urllib.request (Python 3).
    page = requests.get('http://allrecipes.com/recipe/' + str(id))
    tree = html.fromstring(page.text)
    # I want to get the text in the src="text" in order to get the imagesource url.
    # NOTE(review): to extract the attribute value the xpath should
    # presumably end in '/@src', not '/src' — confirm against lxml docs.
    imageSrcURL = tree.xpath('//img[@class="rec-photo"]/src')
    # gets the file from the source url from allrecipes website
    file = cStringIO.StringIO(urllib.urlopen(imageSrcURL).read())
    # gets the image data
    img = Image.open(file)
|
0ee45f9e7879fc4f9144af61e7654db90f3f67cc
|
Python
|
alanbernstein/geometry
|
/font.py
|
UTF-8
| 7,523
| 2.96875
| 3
|
[] |
no_license
|
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from mpltools import unpack_plot_kwargs
# implement a vector font for use in laser designs
# lower case letters:
# y=0 baseline
# centered horizontally
# n-width = 1
# https://mzucker.github.io/2016/08/03/miniray.html
# https://github.com/cmiscm/leonsans/blob/master/src/font/lower.js
# https://github.com/cmiscm/leonsans
h = .75 # height of an "h" extender, above the base "o" circle
d = .75 # depth of a "q" descender, below the base "o" circle
class Path(object):
    """Base class for drawable glyph segments (Line, Arc). Subclasses must
    provide self.x / self.y coordinate arrays."""

    def plot(self, offset=None, radius=0, italic_angle=0, **plot_kwargs):
        # offset: translation vector
        # radius: extrude a circle of this radius along the path (requires combination of distance functions?)
        # TODO: thickness via radius (bold)
        # TODO: italics via shear
        # Bug fix: `offset = offset or np.array([0, 0])` treated any falsy
        # offset as missing and raised "truth value is ambiguous" for numpy
        # array offsets; use an explicit None test instead.
        if offset is None:
            offset = np.array([0, 0])
        plot_kw = unpack_plot_kwargs(plot_kwargs)
        plt.plot(offset[0] + self.x, offset[1] + self.y, **plot_kw)

    def plot_aux_lines(self):
        # used by plot_debug in subclasses: baseline, x-height, ascender
        # (1+h) and descender (-d) guide lines.
        plt.plot([-1, 1], [0, 0], '-.', color='gray')
        plt.plot([-1, 1], [1, 1], '-.', color='gray')
        plt.plot([-1, 1], [1+h, 1+h], '-.', color='gray')
        plt.plot([-1, 1], [-d, -d], '-.', color='gray')
class Line(Path):
    """Straight segment between two 2-D points."""

    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2
        (x1, y1), (x2, y2) = p1, p2
        self.x = np.array([x1, x2])
        self.y = np.array([y1, y2])

    def plot_debug(self, **plot_kwargs):
        """Draw the endpoints and guide lines, then the segment itself."""
        plt.plot(self.x, self.y, 'k.')
        self.plot_aux_lines()
        self.plot(**plot_kwargs)
class Arc(Path):
    """Circular arc: center c, radius r, start/end angles a1..a2 in units
    of pi. Defaults (a1=0, a2=2) give a full circle."""

    def __init__(self, c, r, a1=None, a2=None):
        # center, radius, angle start, angle end (both in fractions of pi)
        self.c = c
        self.r = r
        # Bug fix: `a1 or 0` / `a2 or 2` treated an explicit 0 as missing, so
        # a2=0 silently became a full circle — this is why the '9' glyph
        # needed the a2=0.01 workaround ("why doesnt a2=0 work?").
        self.a1 = 0 if a1 is None else a1
        self.a2 = 2 if a2 is None else a2
        circ = self.r*np.exp(np.pi*1j*np.linspace(self.a1, self.a2, 65))
        self.x = circ.real + self.c[0]
        self.y = circ.imag + self.c[1]

    def plot_debug(self, **plot_kwargs):
        """Draw the full guide circle (for partial arcs), the center point,
        the guide lines, and finally the arc itself."""
        if self.a1 != 0 or self.a2 != 2:
            circ = self.r*np.exp(np.pi*1j*np.linspace(0, 2, 65))
            plt.plot(circ.real + self.c[0], circ.imag + self.c[1], '--', color='gray')
        plt.plot(self.c[0], self.c[1], 'k.')
        self.plot_aux_lines()
        self.plot(**plot_kwargs)
a2 = -.25 # the beginning angle of the arc on the 2 TODO: ensure arc-line joint is continuous slope
# Glyph table: each character maps to the list of Line/Arc segments that draw
# it. Entries with an empty list are unimplemented placeholders.
chars = {
    # character: list of lines/arcs
    ' ': [],
    '0': [Arc([0, 0.5], 0.5)],
    '1': [Line([-.5, 0], [.5, 0]), Line([0, 0], [0, 1+h]), Line([0, 1+h], [-h/2, 1+h/2])],
    '2': [Arc([0, .5+h], .5, a2, 1), Line([.5*np.cos(a2*np.pi), .5+h+.5*np.sin(a2*np.pi)], [-.5, 0]), Line([-.5, 0], [.5, 0])],
    '3': [Line([-.5, 1+h], [.5, 1+h]), Line([.5, 1+h], [0, 1]), Arc([0, .5], .5, -1, .5)],
    '4': [Line([.5, 0], [.5, 1+h]), Line([.5, 1+h], [-.5, .5]), Line([-.5, .5], [.5, .5])],
    '5': [Line([.5, 1+h], [-.5, 1+h]), Line([-.5, 1+h], [-.5, 1]), Line([-.5, 1], [0, 1]), Arc([0, .5], .5, -1, .5)],
    '6': [Line([-.5, .5], [-.5, 1+h-.5]), Arc([0, .5], .5), Arc([0, 1+h-.5], .5, 0, 1)],
    '7': [Line([-.5, 0], [.5, 1+h]), Line([.5, 1+h], [-.5, 1+h])],
    '8': [Arc([0, .5], .5), Arc([0, 1+h/2], h/2)],
    '9': [Line([.5, .5], [.5, 1+h-.5]), Arc([0, 1+h-.5], .5), Arc([0, .5], .5, -1, 0.01)], # TODO: why doesnt a2=0 work?
    'a': [Line([0.5, 0], [0.5, 1]), Arc([0, 0.5], 0.5)],
    'b': [Line([-0.5, 0], [-0.5, 1+h]), Arc([0, 0.5], 0.5)],
    'c': [Arc([0, 0.5], 0.5, 1/4, 7/4)],
    'd': [Line([0.5, 0], [0.5, 1+h]), Arc([0, 0.5], 0.5)],
    'e': [Line([-0.5, 0.5], [0.5, 0.5]), Arc([0, 0.5], 0.5, 0, 7/4)],
    'f': [Arc([0, 1+h-.25], .25, .25, 1), Line([-.25, 1+h-.25], [-.25, 0]), Line([-.5, 1], [0, 1])],
    'g': [Line([0.5, 1], [0.5, 0]), Arc([0, 0.5], 0.5), Arc([0, 0], .5, 1, 2)],
    'h': [Line([-.5, 0], [-.5, 1+h]), Line([.5, 0], [.5, .5]), Arc([0, 0.5], .5, 0, 1)],
    'i': [Line([0, 0], [0, 1]), Arc([0, 1.25], 1/16)],
    'j': [Line([0, 0], [0, 1]), Arc([-.25, 0], .25, 1, 2), Arc([0, 1.25], 1/16)],
    'k': [Line([-.5, 0], [-.5, 1+h]), Line([-.5, .5], [.25, 0]), Line([-.5, .5], [.25, 1])],
    'l': [Line([0, .25], [0, 1+h]), Arc([.25, .25], .25, 1, 1.5)],
    'm': [Line([-.5, 0], [-.5, 1]), Arc([-.25, .75], .25, 0, 1), Arc([.25, .75], .25, 0, 1), Line([.5, 0], [.5, .75]), Line([0, 0], [0, .75])],
    'n': [Line([-.5, 0], [-.5, 1]), Arc([0, 0.5], .5, 0, 1), Line([.5, 0], [.5, .5])],
    'o': [Arc([0, 0.5], 0.5)],
    'p': [Line([-0.5, 1], [-0.5, -d]), Arc([0, 0.5], 0.5)],
    'q': [Line([0.5, 1], [0.5, -d]), Arc([0, 0.5], 0.5)],
    # 'r': [Line([-.5, 0], [-.5, 1]), Arc([np.sqrt(2)/4-.5, .5], .5, .25, .75)],
    'r': [Line([-.5, 0], [-.5, 1]), Arc([0, .5], .5, .25, 1)],
    # 's': [Arc([0, .25], .25, -.5, .5), Arc([0, .75], .25, .5, 1.5), Line([-.25, 0], [0, 0]), Line([0, 1], [.25, 1])],
    # 's': [Arc([0, .25], .25, -.75, .5), Arc([0, .75], .25, .25, 1.5)],
    's': [Arc([0, .25], .25, -1, .5), Arc([0, .75], .25, 0, 1.5)],
    't': [Line([0, .25], [0, 1.25]), Arc([.25, .25], .25, 1, 1.5), Line([-.25, 1], [.25, 1])],
    'u': [Line([.5, 0], [.5, 1]), Arc([0, 0.5], .5, 1, 2), Line([-.5, .5], [-.5, 1])],
    'v': [Line([-.5, 1], [0, 0]), Line([0, 0], [.5, 1])],
    'w': [Line([-.5, 1], [-.25, 0]), Line([-.25, 0], [0, .75]), Line([0, .75], [.25, 0]), Line([.25, 0], [.5, 1])],
    'x': [Line([-0.5, 1], [0.5, 0]), Line([-0.5, 0], [0.5, 1])],
    'y': [Line([-.5, 1], [0, 0]), Line([-.375, -.75], [.5, 1])],
    'z': [Line([-.5, 1], [.5, 1]), Line([.5, 1], [-.5, 0]), Line([-.5, 0], [.5, 0])],
    '°': [Arc([0, 1+h-.25], .25)],
    '.': [Arc([0, 1/16], 1/16)],
    '-': [Line([-.25, .5], [.25, .5])],
    '--': [Line([-.5, .5], [.5, .5])], # em dash
    '_': [Line([-.5, 0], [.5, 0])],
    '~': [],
    '|': [Line([0, 0], [0, 1+h])],
    '?': [],
    '!': [],
    '@': [],
    '#': [],
    '$': [],
    '%': [],
    '^': [],
    '&': [],
    '*': [],
    '(': [],
    ')': [],
    '[': [],
    ']': [],
    '+': [],
    '/': [],
    '\\': [], # single backslash
    '<': [],
    '>': [],
    ',': [],
}
# Default inter-character spacing, in units of the 'n' width.
kern = 0.25
# NOTE(review): `kerns` is never used in this file, and defaultdict() with no
# default_factory behaves like a plain dict (raises KeyError on misses) —
# presumably a placeholder for future pairwise kerning.
kerns = defaultdict()
def plot_string(s, offset=None):
    """Render string *s* by plotting every segment of each glyph, advancing
    by (1 + kern) per character."""
    offset = offset or [0, 0]
    for index, char in enumerate(s):
        # TODO: bounding-box kerning
        # TODO: pairwise custom kerning
        x_shift = offset[0] + index * (1 + kern)
        for segment in chars[char]:
            segment.plot(offset=[x_shift, offset[1]], color='k', linewidth=1)
def string_to_paths(s, scale=1.0, offset=None):
    """Convert string *s* into a list of (N, 2) coordinate arrays, one per
    glyph segment, scaled and translated for e.g. SVG export."""
    # TODO each letter an svg group
    # Bug fix: `offset = offset or [0, 0]` treated any falsy offset as
    # missing and raises for numpy-array offsets; test for None explicitly.
    if offset is None:
        offset = [0, 0]
    pths = []
    for n, c in enumerate(s):
        for part in chars[c]:
            x = offset[0] + scale * (n * (1 + kern) + part.x)
            y = offset[1] + scale * part.y
            pths.append(np.vstack((x, y)).T)
    return pths
def test_all():
    """Visual check: render every implemented glyph, one sample row per line."""
    plt.figure()
    row_gap = 3
    rows = ['abcdefghi', 'jklmnopqr', 'stuvwxyz', '0123456789', '3.4°5-6_7|8#']
    for row_index, row in enumerate(rows):
        plot_string(row, [0, -row_index * row_gap])
    plt.axis('equal')
    plt.grid(False)
def test_angle_labels():
    """Visual check: degree-style numeric labels render correctly."""
    plt.figure()
    plot_string('4.0° 3.9° 3.8°', [0, 0])
    plt.axis('equal')
    plt.grid(False)
def test_single_character(c):
    """Visual check: plot every segment of glyph *c* with debug annotations."""
    # examine a single character
    plt.figure()
    for segment in chars[c]:
        segment.plot_debug(color='k')
    plt.axis('equal')
# Manual visual checks: render the full glyph table and one debug glyph.
if __name__ == '__main__':
    test_all()
    test_single_character('s')
    plt.show()
| true
|